Skip to content

Commit

Permalink
update: source github.com/jsonnet-libs/k8s@38a92f14
Browse files — browse the repository at this point in the history
  • Loading branch information
jakubhajek authored and jsonnet-libs-bot committed Mar 19, 2024
1 parent e204904 commit b92ee21
Show file tree
Hide file tree
Showing 46 changed files with 564 additions and 23,757 deletions.
81 changes: 0 additions & 81 deletions 1.20.6/_gen/postgresql/v1/scheduledBackup.libsonnet

This file was deleted.

2,917 changes: 0 additions & 2,917 deletions 1.21.3/_gen/postgresql/v1/pooler.libsonnet

This file was deleted.

File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,16 @@
'#withWaitForArchive':: d.fn(help='"If false, the function will return immediately after the backup is completed,\\nwithout waiting for WAL to be archived.\\nThis behavior is only useful with backup software that independently monitors WAL archiving.\\nOtherwise, WAL required to make the backup consistent might be missing and make the backup useless.\\nBy default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is\\nenabled.\\nOn a standby, this means that it will wait only when archive_mode = always.\\nIf write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger\\nan immediate segment switch."', args=[d.arg(name='waitForArchive', type=d.T.boolean)]),
withWaitForArchive(waitForArchive): { spec+: { onlineConfiguration+: { waitForArchive: waitForArchive } } },
},
'#withMethod':: d.fn(help='"The backup method to be used, possible options are `barmanObjectStore`\\nand `volumeSnapshot`. Defaults to: `barmanObjectStore`."', args=[d.arg(name='method', type=d.T.string)]),
// --- Backup `spec.pluginConfiguration` helpers (docsonnet-generated) ---
// Each `with*` setter returns a partial object that merges into
// `spec.pluginConfiguration` via jsonnet's `+:` mixin operator, so fields
// set by other helpers on the same spec are preserved.
'#pluginConfiguration':: d.obj(help='"Configuration parameters passed to the plugin managing this backup"'),
pluginConfiguration: {
'#withName':: d.fn(help='"Name is the name of the plugin managing this backup"', args=[d.arg(name='name', type=d.T.string)]),
// Sets spec.pluginConfiguration.name (the managing plugin's identifier).
withName(name): { spec+: { pluginConfiguration+: { name: name } } },
'#withParameters':: d.fn(help='"Parameters are the configuration parameters passed to the backup\\nplugin for this backup"', args=[d.arg(name='parameters', type=d.T.object)]),
// Replaces spec.pluginConfiguration.parameters wholesale (plain `parameters:`).
withParameters(parameters): { spec+: { pluginConfiguration+: { parameters: parameters } } },
'#withParametersMixin':: d.fn(help='"Parameters are the configuration parameters passed to the backup\\nplugin for this backup"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]),
// Merges into any existing parameters object (`parameters+:`) instead of replacing it.
withParametersMixin(parameters): { spec+: { pluginConfiguration+: { parameters+: parameters } } },
},
'#withMethod':: d.fn(help='"The backup method to be used, possible options are `barmanObjectStore`,\\n`volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`."', args=[d.arg(name='method', type=d.T.string)]),
withMethod(method): { spec+: { method: method } },
'#withOnline':: d.fn(help="\"Whether the default type of backup with volume snapshots is\\nonline/hot (`true`, default) or offline/cold (`false`)\\nOverrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'\"", args=[d.arg(name='online', type=d.T.boolean)]),
withOnline(online): { spec+: { online: online } },
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -479,6 +479,10 @@
},
'#data':: d.obj(help='"The configuration to be used to backup the data files\\nWhen not defined, base backups files will be stored uncompressed and may\\nbe unencrypted in the object store, according to the bucket default\\npolicy."'),
data: {
// Setters for spec.backup.barmanObjectStore.data.additionalCommandArgs
// (extra CLI arguments for `barman-cloud-backup`). A non-array argument is
// wrapped into a single-element array via the std.isArray check; the Mixin
// variant appends (`+:`) to any existing list instead of replacing it.
'#withAdditionalCommandArgs':: d.fn(help="\"AdditionalCommandArgs represents additional arguments that can be appended\\nto the 'barman-cloud-backup' command-line invocation. These arguments\\nprovide flexibility to customize the backup process further according to\\nspecific requirements or configurations.\\n\\n\\nExample:\\nIn a scenario where specialized backup options are required, such as setting\\na specific timeout or defining custom behavior, users can use this field\\nto specify additional command arguments.\\n\\n\\nNote:\\nIt's essential to ensure that the provided arguments are valid and supported\\nby the 'barman-cloud-backup' command, to avoid potential errors or unintended\\nbehavior during execution.\"", args=[d.arg(name='additionalCommandArgs', type=d.T.array)]),
withAdditionalCommandArgs(additionalCommandArgs): { spec+: { backup+: { barmanObjectStore+: { data+: { additionalCommandArgs: if std.isArray(v=additionalCommandArgs) then additionalCommandArgs else [additionalCommandArgs] } } } } },
'#withAdditionalCommandArgsMixin':: d.fn(help="\"AdditionalCommandArgs represents additional arguments that can be appended\\nto the 'barman-cloud-backup' command-line invocation. These arguments\\nprovide flexibility to customize the backup process further according to\\nspecific requirements or configurations.\\n\\n\\nExample:\\nIn a scenario where specialized backup options are required, such as setting\\na specific timeout or defining custom behavior, users can use this field\\nto specify additional command arguments.\\n\\n\\nNote:\\nIt's essential to ensure that the provided arguments are valid and supported\\nby the 'barman-cloud-backup' command, to avoid potential errors or unintended\\nbehavior during execution.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='additionalCommandArgs', type=d.T.array)]),
withAdditionalCommandArgsMixin(additionalCommandArgs): { spec+: { backup+: { barmanObjectStore+: { data+: { additionalCommandArgs+: if std.isArray(v=additionalCommandArgs) then additionalCommandArgs else [additionalCommandArgs] } } } } },
'#withCompression':: d.fn(help='"Compress a backup file (a tar file per tablespace) while streaming it\\nto the object store. Available options are empty string (no\\ncompression, default), `gzip`, `bzip2` or `snappy`."', args=[d.arg(name='compression', type=d.T.string)]),
withCompression(compression): { spec+: { backup+: { barmanObjectStore+: { data+: { compression: compression } } } } },
'#withEncryption':: d.fn(help='"Whenever to force the encryption of files (if the bucket is\\nnot already configured for that).\\nAllowed options are empty string (use the bucket policy, default),\\n`AES256` and `aws:kms`"', args=[d.arg(name='encryption', type=d.T.string)]),
Expand Down Expand Up @@ -975,6 +979,10 @@
},
'#data':: d.obj(help='"The configuration to be used to backup the data files\\nWhen not defined, base backups files will be stored uncompressed and may\\nbe unencrypted in the object store, according to the bucket default\\npolicy."'),
data: {
// Same additionalCommandArgs setters as the spec.backup variant, but rooted
// directly at `barmanObjectStore+:` (no `spec+`/`backup+` wrapper) — these
// helpers belong to a nested object whose enclosing path is outside this
// hunk. Scalars are coerced to a one-element array; the Mixin form appends.
'#withAdditionalCommandArgs':: d.fn(help="\"AdditionalCommandArgs represents additional arguments that can be appended\\nto the 'barman-cloud-backup' command-line invocation. These arguments\\nprovide flexibility to customize the backup process further according to\\nspecific requirements or configurations.\\n\\n\\nExample:\\nIn a scenario where specialized backup options are required, such as setting\\na specific timeout or defining custom behavior, users can use this field\\nto specify additional command arguments.\\n\\n\\nNote:\\nIt's essential to ensure that the provided arguments are valid and supported\\nby the 'barman-cloud-backup' command, to avoid potential errors or unintended\\nbehavior during execution.\"", args=[d.arg(name='additionalCommandArgs', type=d.T.array)]),
withAdditionalCommandArgs(additionalCommandArgs): { barmanObjectStore+: { data+: { additionalCommandArgs: if std.isArray(v=additionalCommandArgs) then additionalCommandArgs else [additionalCommandArgs] } } },
'#withAdditionalCommandArgsMixin':: d.fn(help="\"AdditionalCommandArgs represents additional arguments that can be appended\\nto the 'barman-cloud-backup' command-line invocation. These arguments\\nprovide flexibility to customize the backup process further according to\\nspecific requirements or configurations.\\n\\n\\nExample:\\nIn a scenario where specialized backup options are required, such as setting\\na specific timeout or defining custom behavior, users can use this field\\nto specify additional command arguments.\\n\\n\\nNote:\\nIt's essential to ensure that the provided arguments are valid and supported\\nby the 'barman-cloud-backup' command, to avoid potential errors or unintended\\nbehavior during execution.\"\n\n**Note:** This function appends passed data to existing values", args=[d.arg(name='additionalCommandArgs', type=d.T.array)]),
withAdditionalCommandArgsMixin(additionalCommandArgs): { barmanObjectStore+: { data+: { additionalCommandArgs+: if std.isArray(v=additionalCommandArgs) then additionalCommandArgs else [additionalCommandArgs] } } },
'#withCompression':: d.fn(help='"Compress a backup file (a tar file per tablespace) while streaming it\\nto the object store. Available options are empty string (no\\ncompression, default), `gzip`, `bzip2` or `snappy`."', args=[d.arg(name='compression', type=d.T.string)]),
withCompression(compression): { barmanObjectStore+: { data+: { compression: compression } } },
'#withEncryption':: d.fn(help='"Whenever to force the encryption of files (if the bucket is\\nnot already configured for that).\\nAllowed options are empty string (use the bucket policy, default),\\n`AES256` and `aws:kms`"', args=[d.arg(name='encryption', type=d.T.string)]),
Expand Down Expand Up @@ -1103,6 +1111,17 @@
'#withName':: d.fn(help='"The server name, required"', args=[d.arg(name='name', type=d.T.string)]),
withName(name): { name: name },
},
// --- Cluster `spec.imageCatalogRef` helpers (docsonnet-generated) ---
// Typed-reference setters (apiGroup/kind/name plus the PostgreSQL `major`
// version) that each merge into `spec.imageCatalogRef` via `+:`, so they
// can be chained without clobbering one another.
'#imageCatalogRef':: d.obj(help='"Defines the major PostgreSQL version we want to use within an ImageCatalog"'),
imageCatalogRef: {
'#withApiGroup':: d.fn(help='"APIGroup is the group for the resource being referenced.\\nIf APIGroup is not specified, the specified Kind must be in the core API group.\\nFor any other third-party types, APIGroup is required."', args=[d.arg(name='apiGroup', type=d.T.string)]),
withApiGroup(apiGroup): { spec+: { imageCatalogRef+: { apiGroup: apiGroup } } },
'#withKind':: d.fn(help='"Kind is the type of resource being referenced"', args=[d.arg(name='kind', type=d.T.string)]),
withKind(kind): { spec+: { imageCatalogRef+: { kind: kind } } },
'#withMajor':: d.fn(help='"The major version of PostgreSQL we want to use from the ImageCatalog"', args=[d.arg(name='major', type=d.T.integer)]),
// `major` is an integer (d.T.integer), unlike the string-typed reference fields.
withMajor(major): { spec+: { imageCatalogRef+: { major: major } } },
'#withName':: d.fn(help='"Name is the name of resource being referenced"', args=[d.arg(name='name', type=d.T.string)]),
withName(name): { spec+: { imageCatalogRef+: { name: name } } },
},
'#imagePullSecrets':: d.obj(help='"The list of pull secrets to be used to pull the images"'),
imagePullSecrets: {
'#withName':: d.fn(help='"Name of the referent."', args=[d.arg(name='name', type=d.T.string)]),
Expand Down Expand Up @@ -1246,6 +1265,15 @@
'#withReusePVC':: d.fn(help='"Reuse the existing PVC (wait for the node to come\\nup again) or not (recreate it elsewhere - when `instances` >1)"', args=[d.arg(name='reusePVC', type=d.T.boolean)]),
withReusePVC(reusePVC): { spec+: { nodeMaintenanceWindow+: { reusePVC: reusePVC } } },
},
// --- Plugin array-element helpers (docsonnet-generated) ---
// Unlike the spec-rooted setters elsewhere in this file, these write fields
// at the object root (no `spec+:` wrapper): they construct individual
// entries for the `spec.plugins` array, to be passed to withPlugins /
// withPluginsMixin.
'#plugins':: d.obj(help='"The plugins configuration, containing\\nany plugin to be loaded with the corresponding configuration"'),
plugins: {
'#withName':: d.fn(help='"Name is the plugin name"', args=[d.arg(name='name', type=d.T.string)]),
withName(name): { name: name },
'#withParameters':: d.fn(help='"Parameters is the configuration of the plugin"', args=[d.arg(name='parameters', type=d.T.object)]),
// Replaces the element's parameters object outright.
withParameters(parameters): { parameters: parameters },
'#withParametersMixin':: d.fn(help='"Parameters is the configuration of the plugin"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]),
// Merges into existing parameters (`parameters+:`) instead of replacing them.
withParametersMixin(parameters): { parameters+: parameters },
},
'#postgresql':: d.obj(help='"Configuration of the PostgreSQL server"'),
postgresql: {
'#ldap':: d.obj(help='"Options to specify LDAP configuration"'),
Expand Down Expand Up @@ -1800,6 +1828,8 @@
},
'#withDescription':: d.fn(help='"Description of this PostgreSQL cluster"', args=[d.arg(name='description', type=d.T.string)]),
withDescription(description): { spec+: { description: description } },
// Sets spec.enablePDB. Per the help text: `true` (default) keeps
// PodDisruptionBudget resources guarding the primary; `false` creates none,
// allowing all cluster nodes to be shut down (dev/staging use).
'#withEnablePDB':: d.fn(help='"Manage the `PodDisruptionBudget` resources within the cluster. When\\nconfigured as `true` (default setting), the pod disruption budgets\\nwill safeguard the primary node from being terminated. Conversely,\\nsetting it to `false` will result in the absence of any\\n`PodDisruptionBudget` resource, permitting the shutdown of all nodes\\nhosting the PostgreSQL cluster. This latter configuration is\\nadvisable for any PostgreSQL cluster employed for\\ndevelopment/staging purposes."', args=[d.arg(name='enablePDB', type=d.T.boolean)]),
withEnablePDB(enablePDB): { spec+: { enablePDB: enablePDB } },
'#withEnableSuperuserAccess':: d.fn(help='"When this option is enabled, the operator will use the `SuperuserSecret`\\nto update the `postgres` user password (if the secret is\\nnot present, the operator will automatically create one). When this\\noption is disabled, the operator will ignore the `SuperuserSecret` content, delete\\nit when automatically created, and then blank the password of the `postgres`\\nuser by setting it to `NULL`. Disabled by default."', args=[d.arg(name='enableSuperuserAccess', type=d.T.boolean)]),
withEnableSuperuserAccess(enableSuperuserAccess): { spec+: { enableSuperuserAccess: enableSuperuserAccess } },
'#withEnv':: d.fn(help='"Env follows the Env format to pass environment variables\\nto the pods created in the cluster"', args=[d.arg(name='env', type=d.T.array)]),
Expand Down Expand Up @@ -1832,6 +1862,10 @@
withMaxSyncReplicas(maxSyncReplicas): { spec+: { maxSyncReplicas: maxSyncReplicas } },
'#withMinSyncReplicas':: d.fn(help='"Minimum number of instances required in synchronous replication with the\\nprimary. Undefined or 0 allow writes to complete when no standby is\\navailable."', args=[d.arg(name='minSyncReplicas', type=d.T.integer)]),
withMinSyncReplicas(minSyncReplicas): { spec+: { minSyncReplicas: minSyncReplicas } },
// Setters for the spec.plugins array. A non-array argument is wrapped into a
// single-element array via the std.isArray check; withPlugins replaces the
// list while withPluginsMixin appends (`plugins+:`) to any existing entries.
'#withPlugins':: d.fn(help='"The plugins configuration, containing\\nany plugin to be loaded with the corresponding configuration"', args=[d.arg(name='plugins', type=d.T.array)]),
withPlugins(plugins): { spec+: { plugins: if std.isArray(v=plugins) then plugins else [plugins] } },
'#withPluginsMixin':: d.fn(help='"The plugins configuration, containing\\nany plugin to be loaded with the corresponding configuration"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='plugins', type=d.T.array)]),
withPluginsMixin(plugins): { spec+: { plugins+: if std.isArray(v=plugins) then plugins else [plugins] } },
'#withPostgresGID':: d.fn(help='"The GID of the `postgres` user inside the image, defaults to `26`"', args=[d.arg(name='postgresGID', type=d.T.integer)]),
withPostgresGID(postgresGID): { spec+: { postgresGID: postgresGID } },
'#withPostgresUID':: d.fn(help='"The UID of the `postgres` user inside the image, defaults to `26`"', args=[d.arg(name='postgresUID', type=d.T.integer)]),
Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -119,12 +119,12 @@
},
'#pgbouncer':: d.obj(help='"The PgBouncer configuration"'),
pgbouncer: {
'#authQuerySecret':: d.obj(help='"The credentials of the user that need to be used for the authentication\\nquery. In case it is specified, also an AuthQuery\\n(e.g. \\"SELECT usename, passwd FROM pg_shadow WHERE usename=$1\\")\\nhas to be specified and no automatic CNPG Cluster integration will be triggered."'),
'#authQuerySecret':: d.obj(help='"The credentials of the user that need to be used for the authentication\\nquery. In case it is specified, also an AuthQuery\\n(e.g. \\"SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1\\")\\nhas to be specified and no automatic CNPG Cluster integration will be triggered."'),
authQuerySecret: {
'#withName':: d.fn(help='"Name of the referent."', args=[d.arg(name='name', type=d.T.string)]),
withName(name): { spec+: { pgbouncer+: { authQuerySecret+: { name: name } } } },
},
'#withAuthQuery':: d.fn(help='"The query that will be used to download the hash of the password\\nof a certain user. Default: \\"SELECT usename, passwd FROM user_search($1)\\".\\nIn case it is specified, also an AuthQuerySecret has to be specified and\\nno automatic CNPG Cluster integration will be triggered."', args=[d.arg(name='authQuery', type=d.T.string)]),
'#withAuthQuery':: d.fn(help='"The query that will be used to download the hash of the password\\nof a certain user. Default: \\"SELECT usename, passwd FROM public.user_search($1)\\".\\nIn case it is specified, also an AuthQuerySecret has to be specified and\\nno automatic CNPG Cluster integration will be triggered."', args=[d.arg(name='authQuery', type=d.T.string)]),
withAuthQuery(authQuery): { spec+: { pgbouncer+: { authQuery: authQuery } } },
'#withParameters':: d.fn(help='"Additional parameters to be passed to PgBouncer - please check\\nthe CNPG documentation for a list of options you can configure"', args=[d.arg(name='parameters', type=d.T.object)]),
withParameters(parameters): { spec+: { pgbouncer+: { parameters: parameters } } },
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,15 @@
'#withWaitForArchive':: d.fn(help='"If false, the function will return immediately after the backup is completed,\\nwithout waiting for WAL to be archived.\\nThis behavior is only useful with backup software that independently monitors WAL archiving.\\nOtherwise, WAL required to make the backup consistent might be missing and make the backup useless.\\nBy default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is\\nenabled.\\nOn a standby, this means that it will wait only when archive_mode = always.\\nIf write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger\\nan immediate segment switch."', args=[d.arg(name='waitForArchive', type=d.T.boolean)]),
withWaitForArchive(waitForArchive): { spec+: { onlineConfiguration+: { waitForArchive: waitForArchive } } },
},
// --- ScheduledBackup `spec.pluginConfiguration` helpers (docsonnet-generated)
// Mirrors the Backup resource's pluginConfiguration block: each setter merges
// into `spec.pluginConfiguration` via `+:` so chained calls compose.
'#pluginConfiguration':: d.obj(help='"Configuration parameters passed to the plugin managing this backup"'),
pluginConfiguration: {
'#withName':: d.fn(help='"Name is the name of the plugin managing this backup"', args=[d.arg(name='name', type=d.T.string)]),
// Sets spec.pluginConfiguration.name (the managing plugin's identifier).
withName(name): { spec+: { pluginConfiguration+: { name: name } } },
'#withParameters':: d.fn(help='"Parameters are the configuration parameters passed to the backup\\nplugin for this backup"', args=[d.arg(name='parameters', type=d.T.object)]),
// Replaces spec.pluginConfiguration.parameters wholesale (plain `parameters:`).
withParameters(parameters): { spec+: { pluginConfiguration+: { parameters: parameters } } },
'#withParametersMixin':: d.fn(help='"Parameters are the configuration parameters passed to the backup\\nplugin for this backup"\n\n**Note:** This function appends passed data to existing values', args=[d.arg(name='parameters', type=d.T.object)]),
// Merges into existing parameters (`parameters+:`) instead of replacing them.
withParametersMixin(parameters): { spec+: { pluginConfiguration+: { parameters+: parameters } } },
},
'#withBackupOwnerReference':: d.fn(help='"Indicates which ownerReference should be put inside the created backup resources.<br />\\n- none: no owner reference for created backup objects (same behavior as before the field was introduced)<br />\\n- self: sets the Scheduled backup object as owner of the backup<br />\\n- cluster: set the cluster as owner of the backup<br />"', args=[d.arg(name='backupOwnerReference', type=d.T.string)]),
withBackupOwnerReference(backupOwnerReference): { spec+: { backupOwnerReference: backupOwnerReference } },
'#withImmediate':: d.fn(help='"If the first backup has to be immediately start after creation or not"', args=[d.arg(name='immediate', type=d.T.boolean)]),
Expand Down
2 changes: 1 addition & 1 deletion 1.20.6/gen.libsonnet → 1.21.4/gen.libsonnet
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
{
local d = (import 'doc-util/main.libsonnet'),
'#':: d.pkg(name='cloudnative-pg', url='github.com/jsonnet-libs/cloudnative-pg-libsonnet/1.20.6/main.libsonnet', help=''),
'#':: d.pkg(name='cloudnative-pg', url='github.com/jsonnet-libs/cloudnative-pg-libsonnet/1.21.4/main.libsonnet', help=''),
postgresql:: (import '_gen/postgresql/main.libsonnet'),
}
File renamed without changes.
5 changes: 0 additions & 5 deletions 1.22.1/_gen/postgresql/main.libsonnet

This file was deleted.

Loading

0 comments on commit b92ee21

Please sign in to comment.