diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index cde9c4982..e4c7192b7 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -74,7 +74,7 @@ jobs: strategy: matrix: node-version: [14.x] - lib-name: [showcase, kms, translate, monitoring, dlp, texttospeech, showcase-legacy, compute, logging] + lib-name: [showcase, kms, translate, monitoring, dlp, texttospeech, showcase-legacy, compute, logging, bigquery-v2] steps: - name: Use Node.js ${{ matrix.node-version }} uses: actions/setup-node@v4 @@ -110,7 +110,7 @@ jobs: strategy: matrix: node-version: [14.x] - lib-name: [showcase, kms, translate, monitoring, dlp, texttospeech, showcase-legacy, compute, logging] + lib-name: [showcase, kms, translate, monitoring, dlp, texttospeech, showcase-legacy, compute, logging, bigquery-v2] steps: - name: Use Node.js ${{ matrix.node-version }} uses: actions/setup-node@v4 diff --git a/baselines/bigquery-v2-esm/.babelrc.json.baseline b/baselines/bigquery-v2-esm/.babelrc.json.baseline new file mode 100644 index 000000000..940d91b0a --- /dev/null +++ b/baselines/bigquery-v2-esm/.babelrc.json.baseline @@ -0,0 +1,19 @@ +{ + "presets": [ + "@babel/preset-typescript", + "@babel/env" + ], + "plugins": [ + [ + "replace-import-extension", + { + "extMapping": { + ".js": ".cjs" + } + } + ], + "./node_modules/gapic-tools/build/src/replaceESMMockingLib.js", + "./node_modules/gapic-tools/build/src/replaceImportMetaUrl.js", + "./node_modules/gapic-tools/build/src/toggleESMFlagVariable.js" + ] +} diff --git a/baselines/bigquery-v2-esm/.eslintignore.baseline b/baselines/bigquery-v2-esm/.eslintignore.baseline new file mode 100644 index 000000000..a73044f80 --- /dev/null +++ b/baselines/bigquery-v2-esm/.eslintignore.baseline @@ -0,0 +1,11 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/ +esm/src/**/*.d.ts +esm/test/**/*.d.ts +esm/system-test/**/*.d.ts +esm/system-test/fixtures/sample/src/*.ts \ No newline at end of file diff --git a/baselines/bigquery-v2-esm/.eslintrc.json.baseline b/baselines/bigquery-v2-esm/.eslintrc.json.baseline new file mode 100644 index 000000000..782153495 --- /dev/null +++ b/baselines/bigquery-v2-esm/.eslintrc.json.baseline @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/baselines/bigquery-v2-esm/.gitignore.baseline b/baselines/bigquery-v2-esm/.gitignore.baseline new file mode 100644 index 000000000..b391f5f9c --- /dev/null +++ b/baselines/bigquery-v2-esm/.gitignore.baseline @@ -0,0 +1,15 @@ +**/*.log +**/node_modules +/.coverage +/coverage +/.nyc_output +/docs/ +/out/ +/build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ +esm/**/*.d.ts \ No newline at end of file diff --git a/baselines/bigquery-v2-esm/.jsdoc.cjs.baseline b/baselines/bigquery-v2-esm/.jsdoc.cjs.baseline new file mode 100644 index 000000000..9be507a8a --- /dev/null +++ b/baselines/bigquery-v2-esm/.jsdoc.cjs.baseline @@ -0,0 +1,54 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2024 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: 'bigquery', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/baselines/bigquery-v2-esm/.mocharc.cjs.baseline b/baselines/bigquery-v2-esm/.mocharc.cjs.baseline new file mode 100644 index 000000000..13b67c34e --- /dev/null +++ b/baselines/bigquery-v2-esm/.mocharc.cjs.baseline @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/baselines/bigquery-v2-esm/.prettierrc.cjs.baseline b/baselines/bigquery-v2-esm/.prettierrc.cjs.baseline new file mode 100644 index 000000000..9a8fd6909 --- /dev/null +++ b/baselines/bigquery-v2-esm/.prettierrc.cjs.baseline @@ -0,0 +1,22 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/baselines/bigquery-v2-esm/README.md.baseline b/baselines/bigquery-v2-esm/README.md.baseline new file mode 100644 index 000000000..39a954110 --- /dev/null +++ b/baselines/bigquery-v2-esm/README.md.baseline @@ -0,0 +1 @@ +Bigquery: Nodejs Client diff --git a/baselines/bigquery-v2-esm/esm/src/index.ts.baseline b/baselines/bigquery-v2-esm/esm/src/index.ts.baseline new file mode 100644 index 000000000..88a91bffd --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/index.ts.baseline @@ -0,0 +1,38 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v2 from './v2/index.js'; +const DatasetServiceClient = v2.DatasetServiceClient; +type DatasetServiceClient = v2.DatasetServiceClient; +const JobServiceClient = v2.JobServiceClient; +type JobServiceClient = v2.JobServiceClient; +const ModelServiceClient = v2.ModelServiceClient; +type ModelServiceClient = v2.ModelServiceClient; +const ProjectServiceClient = v2.ProjectServiceClient; +type ProjectServiceClient = v2.ProjectServiceClient; +const RoutineServiceClient = v2.RoutineServiceClient; +type RoutineServiceClient = v2.RoutineServiceClient; +const RowAccessPolicyServiceClient = v2.RowAccessPolicyServiceClient; +type RowAccessPolicyServiceClient = v2.RowAccessPolicyServiceClient; +const TableServiceClient = v2.TableServiceClient; +type TableServiceClient = v2.TableServiceClient; +export {v2, DatasetServiceClient, JobServiceClient, ModelServiceClient, ProjectServiceClient, RoutineServiceClient, RowAccessPolicyServiceClient, TableServiceClient}; +export default {v2, DatasetServiceClient, JobServiceClient, ModelServiceClient, ProjectServiceClient, RoutineServiceClient, RowAccessPolicyServiceClient, TableServiceClient}; +// @ts-ignore +import * as protos from '../../protos/protos.js'; +export {protos} diff --git a/baselines/bigquery-v2-esm/esm/src/json-helper.cjs.baseline b/baselines/bigquery-v2-esm/esm/src/json-helper.cjs.baseline new file mode 100644 index 000000000..3c1fc7302 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/json-helper.cjs.baseline @@ -0,0 +1,20 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
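+
+// Usage sketch (illustrative comment, not generated output): the ESM sources
+// import this CommonJS helper so they can keep loading JSON at runtime, e.g.
+//   import {getJSON} from '../json-helper.cjs';
+//   const gapicConfig = getJSON(path.join(dirname, 'dataset_service_client_config.json'));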
+ +/* eslint-disable node/no-missing-require */ +function getJSON(path) { + return require(path); +} + +exports.getJSON = getJSON; diff --git a/baselines/bigquery-v2-esm/esm/src/v2/dataset_service_client.ts.baseline b/baselines/bigquery-v2-esm/esm/src/v2/dataset_service_client.ts.baseline new file mode 100644 index 000000000..f396f7be4 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/dataset_service_client.ts.baseline @@ -0,0 +1,1043 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +// @ts-ignore +import type * as protos from '../../../protos/protos.js'; +import * as dataset_service_client_config from './dataset_service_client_config.json'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; +import {getJSON} from '../json-helper.cjs'; +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); + +/** + * Client JSON configuration object, loaded from + * `src/v2/dataset_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +const gapicConfig = getJSON( + path.join(dirname, 'dataset_service_client_config.json') +); + +const jsonProtos = getJSON( + path.join(dirname, '..', '..', '..', 'protos/protos.json') +); + +const version = getJSON( + path.join(dirname, '..', '..', '..', '..', 'package.json') +).version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Dataset Service. + * + * It should not be relied on for production use cases at this time. + * @class + * @memberof v2 + */ +export class DatasetServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + datasetServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of DatasetServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). 
+   * The common options are:
+   * @param {object} [options.credentials] - Credentials object.
+   * @param {string} [options.credentials.client_email]
+   * @param {string} [options.credentials.private_key]
+   * @param {string} [options.email] - Account email address. Required when
+   *     using a .pem or .p12 keyFilename.
+   * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+   *     .p12 key downloaded from the Google Developers Console. If you provide
+   *     a path to a JSON file, the projectId option below is not necessary.
+   *     NOTE: .pem and .p12 require you to specify options.email as well.
+   * @param {number} [options.port] - The port on which to connect to
+   *     the remote host.
+   * @param {string} [options.projectId] - The project ID from the Google
+   *     Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+   *     the environment variable GCLOUD_PROJECT for your project ID. If your
+   *     app is running in an environment which supports
+   *     {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+   *     your project ID will be detected automatically.
+   * @param {string} [options.apiEndpoint] - The domain name of the
+   *     API remote host.
+   * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override.
+   *     Follows the structure of {@link gapicConfig}.
+   * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode.
+   *     Pass "rest" to use HTTP/1.1 REST API instead of gRPC.
+   *     For more information, please check the
+   *     {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}.
+   * @param {gax} [gaxInstance] - Loaded instance of `google-gax`. Useful if you
+   *     need to avoid loading the default gRPC version and want to use the fallback
+   *     HTTP implementation. Load only the fallback version and pass it to the constructor:
+   *     ```
+   *     const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC
+   *     const client = new DatasetServiceClient({fallback: 'rest'}, gax);
+   *     ```
+   */
+  constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) {
+    // Ensure that options include all the required fields.
+    const staticMembers = this.constructor as typeof DatasetServiceClient;
+    if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) {
+      throw new Error('Please set either universe_domain or universeDomain, but not both.');
+    }
+    const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined;
+    this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com';
+    this._servicePath = 'bigquery.' + this._universeDomain;
+    const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath;
+    this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint);
+    const port = opts?.port || staticMembers.port;
+    const clientConfig = opts?.clientConfig ?? {};
+    const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function');
+    opts = Object.assign({servicePath, port, clientConfig, fallback}, opts);
+
+    // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case.
+    if (servicePath !== this._servicePath && !('scopes' in opts)) {
+      opts['scopes'] = staticMembers.scopes;
+    }
+
+    // Load google-gax module synchronously if needed
+    if (!gaxInstance) {
+      gaxInstance = gax as typeof gax;
+    }
+
+    // Choose either gRPC or proto-over-HTTP implementation of google-gax.
+    this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance;
+
+    // Create a `gaxGrpc` object, with any grpc-specific options sent to the client.
+    this._gaxGrpc = new this._gaxModule.GrpcClient(opts);
+
+    // Save options to use in initialize() method.
+    this._opts = opts;
+
+    // Save the auth object to the client, for use by other methods.
+    this.auth = (this._gaxGrpc.auth as gax.GoogleAuth);
+
+    // Set useJWTAccessWithScope on the auth object.
+    this.auth.useJWTAccessWithScope = true;
+
+    // Set defaultServicePath on the auth object.
+    this.auth.defaultServicePath = this._servicePath;
+
+    // Set the default scopes in auth client if needed.
+    if (servicePath === this._servicePath) {
+      this.auth.defaultScopes = staticMembers.scopes;
+    }
+
+    // Add ESM headers
+    const isEsm = true;
+    const isEsmString = isEsm ? '-esm' : '-cjs';
+    // Determine the client header string.
+    const clientHeader = [
+      `gax/${this._gaxModule.version}`,
+      `gapic/${version}`,
+    ];
+    if (typeof process === 'object' && 'versions' in process) {
+      clientHeader.push(`gl-node/${process.versions.node}${isEsmString}`);
+    } else {
+      clientHeader.push(`gl-web/${this._gaxModule.version}`);
+    }
+    if (!opts.fallback) {
+      clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`);
+    } else if (opts.fallback === 'rest') {
+      clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`);
+    }
+    if (opts.libName && opts.libVersion) {
+      clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+    }
+
+    // Load the applicable protos.
+    this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos as gax.protobuf.INamespace);
+
+    // Some of the methods on this service return "paged" results,
+    // (e.g. 50 results at a time, with tokens to get subsequent
+    // pages). Denote the keys used for pagination and results.
+    this.descriptors.page = {
+      listDatasets:
+          new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'datasets')
+    };
+
+    // Put together the default options sent with requests.
+    this._defaults = this._gaxGrpc.constructSettings(
+        'google.cloud.bigquery.v2.DatasetService', gapicConfig as gax.ClientConfig,
+        opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')});
+
+    // Set up a dictionary of "inner API calls"; the core implementation
+    // of calling the API is handled in `google-gax`, with this code
+    // merely providing the destination and request information.
+    this.innerApiCalls = {};
+
+    // Add a warn function to the client constructor so it can be easily tested.
+    this.warn = this._gaxModule.warn;
+  }
+
+  /**
+   * Initialize the client.
+   * Performs asynchronous operations (such as authentication) and prepares the client.
+   * This function will be called automatically when any class method is called for the
+   * first time, but if you need to initialize it before calling an actual method,
+   * feel free to call initialize() directly.
+   *
+   * You can await on this method if you want to make sure the client is initialized.
+   *
+   * @returns {Promise} A promise that resolves to an authenticated service stub.
+   */
+  initialize() {
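+    // (Illustrative note, not generated output: per the doc comment above,
+    // callers may `await client.initialize()` to trigger authentication
+    // eagerly instead of on the first RPC.)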
+    // If the client stub promise is already initialized, return immediately.
+    if (this.datasetServiceStub) {
+      return this.datasetServiceStub;
+    }
+
+    // Put together the "service stub" for
+    // google.cloud.bigquery.v2.DatasetService.
+    this.datasetServiceStub = this._gaxGrpc.createStub(
+        this._opts.fallback ?
+          (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.DatasetService') :
+          // eslint-disable-next-line @typescript-eslint/no-explicit-any
+          (this._protos as any).google.cloud.bigquery.v2.DatasetService,
+        this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>;
+
+    // Iterate over each of the methods that the service provides
+    // and create an API call method for each.
+    const datasetServiceStubMethods =
+        ['getDataset', 'insertDataset', 'patchDataset', 'updateDataset', 'deleteDataset', 'listDatasets', 'undeleteDataset'];
+    for (const methodName of datasetServiceStubMethods) {
+      const callPromise = this.datasetServiceStub.then(
+        stub => (...args: Array<{}>) => {
+          if (this._terminated) {
+            return Promise.reject('The client has already been closed.');
+          }
+          const func = stub[methodName];
+          return func.apply(stub, args);
+        },
+        (err: Error|null|undefined) => () => {
+          throw err;
+        });
+
+      const descriptor =
+        this.descriptors.page[methodName] ||
+        undefined;
+      const apiCall = this._gaxModule.createApiCall(
+        callPromise,
+        this._defaults[methodName],
+        descriptor,
+        this._opts.fallback
+      );
+
+      this.innerApiCalls[methodName] = apiCall;
+    }
+
+    return this.datasetServiceStub;
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @deprecated Use the apiEndpoint method of the client instance.
+   * @returns {string} The DNS address for this service.
+   */
+  static get servicePath() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service - same as servicePath,
+   * exists for compatibility reasons.
+   * @deprecated Use the apiEndpoint method of the client instance.
+   * @returns {string} The DNS address for this service.
+   */
+  static get apiEndpoint() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  get apiEndpoint() {
+    return this._servicePath;
+  }
+
+  get universeDomain() {
+    return this._universeDomain;
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
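+   *
+   * @example
+   * // Illustrative usage sketch, not generated output:
+   * const client = new DatasetServiceClient();
+   * const projectId = await client.getProjectId();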
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * Returns the dataset specified by datasetID.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the requested dataset
+ * @param {string} request.datasetId
+ *   Required. Dataset ID of the requested dataset
+ * @param {google.cloud.bigquery.v2.GetDatasetRequest.DatasetView} [request.datasetView]
+ *   Optional. Specifies the view that determines which dataset information is
+ *   returned. By default, metadata and ACL information are returned.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ *   for more details and examples.
+ * @example include:samples/generated/v2/dataset_service.get_dataset.js
+ * region_tag:bigquery_v2_generated_DatasetService_GetDataset_async
+ */
+  getDataset(
+      request?: protos.google.cloud.bigquery.v2.IGetDatasetRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IDataset,
+        protos.google.cloud.bigquery.v2.IGetDatasetRequest|undefined, {}|undefined
+      ]>;
+  getDataset(
+      request: protos.google.cloud.bigquery.v2.IGetDatasetRequest,
+      options: CallOptions,
+      callback: Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IGetDatasetRequest|null|undefined,
+          {}|null|undefined>): void;
+  getDataset(
+      request: protos.google.cloud.bigquery.v2.IGetDatasetRequest,
+      callback: Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IGetDatasetRequest|null|undefined,
+          {}|null|undefined>): void;
+  getDataset(
+      request?: protos.google.cloud.bigquery.v2.IGetDatasetRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IGetDatasetRequest|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IGetDatasetRequest|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IDataset,
+        protos.google.cloud.bigquery.v2.IGetDatasetRequest|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    this.initialize();
+    return this.innerApiCalls.getDataset(request, options, callback);
+  }
+/**
+ * Creates a new empty dataset.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId + * Required. Project ID of the new dataset + * @param {google.cloud.bigquery.v2.Dataset} request.dataset + * Required. Datasets resource to use for the new dataset + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/dataset_service.insert_dataset.js + * region_tag:bigquery_v2_generated_DatasetService_InsertDataset_async + */ + insertDataset( + request?: protos.google.cloud.bigquery.v2.IInsertDatasetRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|undefined, {}|undefined + ]>; + insertDataset( + request: protos.google.cloud.bigquery.v2.IInsertDatasetRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|null|undefined, + {}|null|undefined>): void; + insertDataset( + request: protos.google.cloud.bigquery.v2.IInsertDatasetRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|null|undefined, + {}|null|undefined>): void; + insertDataset( + request?: protos.google.cloud.bigquery.v2.IInsertDatasetRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + this.initialize(); + return this.innerApiCalls.insertDataset(request, options, callback); + } +/** + * Updates information in an existing dataset. The update method replaces the + * entire dataset resource, whereas the patch method only replaces fields that + * are provided in the submitted dataset resource. + * This method supports RFC5789 patch semantics. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the dataset being updated + * @param {string} request.datasetId + * Required. Dataset ID of the dataset being updated + * @param {google.cloud.bigquery.v2.Dataset} request.dataset + * Required. Datasets resource which will replace or patch the specified + * dataset. 
+ * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/dataset_service.patch_dataset.js + * region_tag:bigquery_v2_generated_DatasetService_PatchDataset_async + */ + patchDataset( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|undefined, {}|undefined + ]>; + patchDataset( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): void; + patchDataset( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): void; + patchDataset( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.patchDataset(request, options, callback); + } +/** + * Updates information in an existing dataset. The update method replaces the + * entire dataset resource, whereas the patch method only replaces fields that + * are provided in the submitted dataset resource. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the dataset being updated + * @param {string} request.datasetId + * Required. Dataset ID of the dataset being updated + * @param {google.cloud.bigquery.v2.Dataset} request.dataset + * Required. Datasets resource which will replace or patch the specified + * dataset. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. 
+ * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/dataset_service.update_dataset.js + * region_tag:bigquery_v2_generated_DatasetService_UpdateDataset_async + */ + updateDataset( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|undefined, {}|undefined + ]>; + updateDataset( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): void; + updateDataset( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): void; + updateDataset( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.updateDataset(request, options, callback); + } +/** + * Deletes the dataset specified by the datasetId value. Before you can delete + * a dataset, you must delete all its tables, either manually or by specifying + * deleteContents. Immediately after deletion, you can create another dataset + * with the same name. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the dataset being deleted + * @param {string} request.datasetId + * Required. Dataset ID of dataset being deleted + * @param {boolean} request.deleteContents + * If True, delete all the tables in the dataset. + * If False and the dataset contains tables, the request will fail. + * Default is False + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. 
+ * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/dataset_service.delete_dataset.js + * region_tag:bigquery_v2_generated_DatasetService_DeleteDataset_async + */ + deleteDataset( + request?: protos.google.cloud.bigquery.v2.IDeleteDatasetRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|undefined, {}|undefined + ]>; + deleteDataset( + request: protos.google.cloud.bigquery.v2.IDeleteDatasetRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|null|undefined, + {}|null|undefined>): void; + deleteDataset( + request: protos.google.cloud.bigquery.v2.IDeleteDatasetRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|null|undefined, + {}|null|undefined>): void; + deleteDataset( + request?: protos.google.cloud.bigquery.v2.IDeleteDatasetRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteDataset(request, options, callback); + } +/** + * Undeletes a dataset which is within time travel window based on datasetId. + * If a time is specified, the dataset version deleted at that time is + * undeleted, else the last live version is undeleted. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the dataset to be undeleted + * @param {string} request.datasetId + * Required. Dataset ID of dataset being deleted + * @param {google.protobuf.Timestamp} [request.deletionTime] + * Optional. The exact time when the dataset was deleted. If not specified, + * the most recently deleted version is undeleted. Undeleting a dataset + * using deletion time is not supported. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}. 
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ *   for more details and examples.
+ * @example include:samples/generated/v2/dataset_service.undelete_dataset.js
+ * region_tag:bigquery_v2_generated_DatasetService_UndeleteDataset_async
+ */
+  undeleteDataset(
+      request?: protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IDataset,
+        protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|undefined, {}|undefined
+      ]>;
+  undeleteDataset(
+      request: protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest,
+      options: CallOptions,
+      callback: Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|null|undefined,
+          {}|null|undefined>): void;
+  undeleteDataset(
+      request: protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest,
+      callback: Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|null|undefined,
+          {}|null|undefined>): void;
+  undeleteDataset(
+      request?: protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IDataset,
+        protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    this.initialize();
+    return this.innerApiCalls.undeleteDataset(request, options, callback);
+  }
+
+ /**
+ * Lists all datasets in the specified project to which the user has been
+ * granted the READER dataset role.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the datasets to be listed
+ * @param {google.protobuf.UInt32Value | number} request.maxResults
+ *   The maximum number of results to return in a single response page.
+ *   Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ *   Page token, returned by a previous call, to request the next page of
+ *   results
+ * @param {boolean} request.all
+ *   Whether to list all datasets, including hidden ones
+ * @param {string} request.filter
+ *   An expression for filtering the results of the request by label.
+ *   The syntax is `labels.<name>[:<value>]`.
+ *   Multiple filters can be ANDed together by connecting with a space.
+ *   Example: `labels.department:receiving labels.active`.
+ *   See [Filtering datasets using
+ *   labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+ *   for details.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.ListFormatDataset|ListFormatDataset}.
+ *   The client library will perform auto-pagination by default: it will call the API as many
+ *   times as needed and will merge results from all the pages into this array.
+ *   Note that it can affect your quota.
+ *   We recommend using `listDatasetsAsync()`
+ *   method described below for async iteration which you can stop as needed.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ */
+  listDatasets(
+      request?: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IListFormatDataset[],
+        protos.google.cloud.bigquery.v2.IListDatasetsRequest|null,
+        protos.google.cloud.bigquery.v2.IDatasetList
+      ]>;
+  listDatasets(
+      request: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      options: CallOptions,
+      callback: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+          protos.google.cloud.bigquery.v2.IDatasetList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatDataset>): void;
+  listDatasets(
+      request: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      callback: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+          protos.google.cloud.bigquery.v2.IDatasetList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatDataset>): void;
+  listDatasets(
+      request?: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      optionsOrCallback?: CallOptions|PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+          protos.google.cloud.bigquery.v2.IDatasetList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatDataset>,
+      callback?: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+          protos.google.cloud.bigquery.v2.IDatasetList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatDataset>):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IListFormatDataset[],
+        protos.google.cloud.bigquery.v2.IListDatasetsRequest|null,
+        protos.google.cloud.bigquery.v2.IDatasetList
+      ]>|void {
+    request = request || {};
+    // Converts a plain number to a Uint32 or Int32 wrapper value for non-compliant APIs.
+    if (request.maxResults && typeof request.maxResults === 'number') {
+      const maxResultsObject = {value: request.maxResults};
+      request.maxResults = maxResultsObject;
+    }
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+    });
+    this.initialize();
+    return this.innerApiCalls.listDatasets(request, options, callback);
+  }
+
+/**
+ * Equivalent to `listDatasets`, but returns a NodeJS Stream object.
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the datasets to be listed
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ *   The maximum number of results to return in a single response page.
+ *   Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ *   Page token, returned by a previous call, to request the next page of
+ *   results
+ * @param {boolean} request.all
+ *   Whether to list all datasets, including hidden ones
+ * @param {string} request.filter
+ *   An expression for filtering the results of the request by label.
+ *   The syntax is `labels.<name>[:<value>]`.
+ *   Multiple filters can be ANDed together by connecting with a space.
+ *   Example: `labels.department:receiving labels.active`.
+ *   See [Filtering datasets using
+ *   labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+ *   for details.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ *   An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.ListFormatDataset|ListFormatDataset} on 'data' event.
+ *   The client library will perform auto-pagination by default: it will call the API as many
+ *   times as needed. Note that it can affect your quota.
+ *   We recommend using `listDatasetsAsync()`
+ *   method described below for async iteration which you can stop as needed.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ */
+  listDatasetsStream(
+      request?: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      options?: CallOptions):
+    Transform{
+    request = request || {};
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+    });
+    const defaultCallSettings = this._defaults['listDatasets'];
+    const callSettings = defaultCallSettings.merge(options);
+    this.initialize();
+    return this.descriptors.page.listDatasets.createStream(
+      this.innerApiCalls.listDatasets as GaxCall,
+      request,
+      callSettings
+    );
+  }
+
+/**
+ * Equivalent to `listDatasets`, but returns an iterable object.
+ *
+ * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand.
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the datasets to be listed
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ *   The maximum number of results to return in a single response page.
+ *   Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ *   Page token, returned by a previous call, to request the next page of
+ *   results
+ * @param {boolean} request.all
+ *   Whether to list all datasets, including hidden ones
+ * @param {string} request.filter
+ *   An expression for filtering the results of the request by label.
+ *   The syntax is `labels.<name>[:<value>]`.
+ *   Multiple filters can be ANDed together by connecting with a space.
+ *   Example: `labels.department:receiving labels.active`.
+ *   See [Filtering datasets using
+ *   labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+ *   for details.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Object}
+ *   An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }.
+ *   When you iterate the returned iterable, each element will be an object representing
+ *   {@link protos.google.cloud.bigquery.v2.ListFormatDataset|ListFormatDataset}. The API will be called under the hood as needed, once per the page,
+ *   so you can stop the iteration when you don't need more results.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ * @example include:samples/generated/v2/dataset_service.list_datasets.js
+ * region_tag:bigquery_v2_generated_DatasetService_ListDatasets_async
+ */
+  listDatasetsAsync(
+      request?: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      options?: CallOptions):
+    AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatDataset>{
+    request = request || {};
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+    });
+    const defaultCallSettings = this._defaults['listDatasets'];
+    const callSettings = defaultCallSettings.merge(options);
+    this.initialize();
+    return this.descriptors.page.listDatasets.asyncIterate(
+      this.innerApiCalls['listDatasets'] as GaxCall,
+      request as {},
+      callSettings
+    ) as AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatDataset>;
+  }
+
+  /**
+   * Terminate the gRPC channel and close the client.
+   *
+   * The client will no longer be usable and all future behavior is undefined.
+   * @returns {Promise} A promise that resolves when the client is closed.
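+   *
+   * @example
+   * // Illustrative only, not generated output: release the gRPC channel once
+   * // all pending calls have settled.
+   * await client.close();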
+   */
+  close(): Promise<void> {
+    if (this.datasetServiceStub && !this._terminated) {
+      return this.datasetServiceStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/dataset_service_client_config.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/dataset_service_client_config.json.baseline
new file mode 100644
index 000000000..74c64e606
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/dataset_service_client_config.json.baseline
@@ -0,0 +1,54 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.v2.DatasetService": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "GetDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "InsertDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "PatchDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "UpdateDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "DeleteDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "ListDatasets": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "UndeleteDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/dataset_service_proto_list.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/dataset_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/dataset_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+  "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+  "../../protos/google/cloud/bigquery/v2/clustering.proto",
+  "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+  "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+  "../../protos/google/cloud/bigquery/v2/error.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+  "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+  "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+  "../../protos/google/cloud/bigquery/v2/job.proto",
+  "../../protos/google/cloud/bigquery/v2/job_config.proto",
+  "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto",
+  "../../protos/google/cloud/bigquery/v2/job_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/job_stats.proto",
+  "../../protos/google/cloud/bigquery/v2/job_status.proto",
+  "../../protos/google/cloud/bigquery/v2/json_extension.proto",
+  "../../protos/google/cloud/bigquery/v2/location_metadata.proto",
"../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2-esm/esm/src/v2/index.ts.baseline b/baselines/bigquery-v2-esm/esm/src/v2/index.ts.baseline new file mode 100644 index 000000000..13df246b1 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/index.ts.baseline @@ -0,0 +1,25 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {DatasetServiceClient} from './dataset_service_client.js'; +export {JobServiceClient} from './job_service_client.js'; +export {ModelServiceClient} from './model_service_client.js'; +export {ProjectServiceClient} from './project_service_client.js'; +export {RoutineServiceClient} from './routine_service_client.js'; +export {RowAccessPolicyServiceClient} from './row_access_policy_service_client.js'; +export {TableServiceClient} from './table_service_client.js'; diff --git a/baselines/bigquery-v2-esm/esm/src/v2/job_service_client.ts.baseline b/baselines/bigquery-v2-esm/esm/src/v2/job_service_client.ts.baseline new file mode 100644 index 000000000..df484f3ee --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/job_service_client.ts.baseline @@ -0,0 +1,1116 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +// @ts-ignore +import type * as protos from '../../../protos/protos.js'; +import * as job_service_client_config from './job_service_client_config.json'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; +import {getJSON} from '../json-helper.cjs'; +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); + +/** + * Client JSON configuration object, loaded from + * `src/v2/job_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +const gapicConfig = getJSON( + path.join(dirname, 'job_service_client_config.json') +); + +const jsonProtos = getJSON( + path.join(dirname, '..', '..', '..', 'protos/protos.json') +); + +const version = getJSON( + path.join(dirname, '..', '..', '..', '..', 'package.json') +).version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Job Service. + * + * It should not be relied on for production use cases at this time. + * @class + * @memberof v2 + */ +export class JobServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + jobServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of JobServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. 
+ * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new JobServiceClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof JobServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = gax as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. 
+ this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = this._servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === this._servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Add ESM headers + const isEsm = true; + const isEsmString = isEsm ? '-esm' : '-cjs'; + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process === 'object' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}${isEsmString}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else if (opts.fallback === 'rest') { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos as gax.protobuf.INamespace); + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listJobs: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'jobs') + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.JobService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.jobServiceStub) { + return this.jobServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.JobService. + this.jobServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.JobService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.JobService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each.
+ const jobServiceStubMethods = + ['cancelJob', 'getJob', 'insertJob', 'deleteJob', 'listJobs', 'getQueryResults', 'query']; + for (const methodName of jobServiceStubMethods) { + const callPromise = this.jobServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.jobServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath, + * exists for compatibility reasons. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + get apiEndpoint() { + return this._servicePath; + } + + get universeDomain() { + return this._universeDomain; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/bigquery', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + 'https://www.googleapis.com/auth/devstorage.full_control', + 'https://www.googleapis.com/auth/devstorage.read_only', + 'https://www.googleapis.com/auth/devstorage.read_write' + ]; + } + + getProjectId(): Promise<string>; + getProjectId(callback: Callback<string, undefined, undefined>): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. + */ + getProjectId(callback?: Callback<string, undefined, undefined>): + Promise<string>|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + }
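Since `initialize()` is invoked lazily on the first RPC, a caller that wants authentication errors surfaced early can await it directly. A rough sketch under the same illustrative-import assumption as the earlier example:

```typescript
// Hypothetical import path; the published package name may differ.
import {JobServiceClient} from '@google-cloud/bigquery-v2';

async function main(): Promise<void> {
  // Pass {fallback: 'rest'} here to use the HTTP/1.1 transport instead of gRPC.
  const client = new JobServiceClient();
  // initialize() runs automatically on the first call, but awaiting it up
  // front surfaces auth/stub-creation problems before any RPC is made.
  await client.initialize();
  console.log(await client.getProjectId());
}
```

+ + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Requests that a job be cancelled. This call will return immediately, and + * the client will need to poll for the job status to see if the cancel + * completed successfully. Cancelled jobs may still incur costs. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the job to cancel + * @param {string} request.jobId + * Required.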
Job ID of the job to cancel + * @param {string} request.location + * The geographic location of the job. You must specify the location to run + * the job for the following scenarios: + * + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.JobCancelResponse|JobCancelResponse}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/job_service.cancel_job.js + * region_tag:bigquery_v2_generated_JobService_CancelJob_async + */ + cancelJob( + request?: protos.google.cloud.bigquery.v2.ICancelJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|undefined, {}|undefined + ]>; + cancelJob( + request: protos.google.cloud.bigquery.v2.ICancelJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|null|undefined, + {}|null|undefined>): void; + cancelJob( + request: protos.google.cloud.bigquery.v2.ICancelJobRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|null|undefined, + {}|null|undefined>): void; + cancelJob( + request?: protos.google.cloud.bigquery.v2.ICancelJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.cancelJob(request, options, callback); + } +/** + * Returns information about a specific job. Job information is available for + * a six month period after creation. Requires that you're the person who ran + * the job, or have the Is Owner project role. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the requested job. 
+ * @param {string} request.jobId + * Required. Job ID of the requested job. + * @param {string} request.location + * The geographic location of the job. You must specify the location to run + * the job for the following scenarios: + * + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Job|Job}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/job_service.get_job.js + * region_tag:bigquery_v2_generated_JobService_GetJob_async + */ + getJob( + request?: protos.google.cloud.bigquery.v2.IGetJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|undefined, {}|undefined + ]>; + getJob( + request: protos.google.cloud.bigquery.v2.IGetJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|null|undefined, + {}|null|undefined>): void; + getJob( + request: protos.google.cloud.bigquery.v2.IGetJobRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|null|undefined, + {}|null|undefined>): void; + getJob( + request?: protos.google.cloud.bigquery.v2.IGetJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getJob(request, options, callback); + } +/** + * Starts a new asynchronous job. + * + * This API has two different kinds of endpoint URIs, as this method supports + * a variety of use cases. + * + * * The *Metadata* URI is used for most interactions, as it accepts the job + * configuration directly. + * * The *Upload* URI is ONLY for the case when you're sending both a load job + * configuration and a data stream together. In this case, the Upload URI + * accepts the job configuration and the data as two distinct multipart MIME + * parts. 
+ * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Project ID of project that will be billed for the job. + * @param {google.cloud.bigquery.v2.Job} request.job + * Jobs resource to insert. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Job|Job}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/job_service.insert_job.js + * region_tag:bigquery_v2_generated_JobService_InsertJob_async + */ + insertJob( + request?: protos.google.cloud.bigquery.v2.IInsertJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|undefined, {}|undefined + ]>; + insertJob( + request: protos.google.cloud.bigquery.v2.IInsertJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|null|undefined, + {}|null|undefined>): void; + insertJob( + request: protos.google.cloud.bigquery.v2.IInsertJobRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|null|undefined, + {}|null|undefined>): void; + insertJob( + request?: protos.google.cloud.bigquery.v2.IInsertJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + this.initialize(); + return this.innerApiCalls.insertJob(request, options, callback); + } +/** + * Requests the deletion of the metadata of a job. This call returns when the + * job's metadata is deleted. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the job for which metadata is to be deleted. + * @param {string} request.jobId + * Required. Job ID of the job for which metadata is to be deleted. If this is + * a parent job which has child jobs, the metadata from all child jobs will be + * deleted as well. Direct deletion of the metadata of child jobs is not + * allowed. + * @param {string} request.location + * The geographic location of the job. Required. 
+ * See details at: + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/job_service.delete_job.js + * region_tag:bigquery_v2_generated_JobService_DeleteJob_async + */ + deleteJob( + request?: protos.google.cloud.bigquery.v2.IDeleteJobRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|undefined, {}|undefined + ]>; + deleteJob( + request: protos.google.cloud.bigquery.v2.IDeleteJobRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|null|undefined, + {}|null|undefined>): void; + deleteJob( + request: protos.google.cloud.bigquery.v2.IDeleteJobRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|null|undefined, + {}|null|undefined>): void; + deleteJob( + request?: protos.google.cloud.bigquery.v2.IDeleteJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteJob(request, options, callback); + } +/** + * RPC to get the results of a query job. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the query job. + * @param {string} request.jobId + * Required. Job ID of the query job. + * @param {google.protobuf.UInt64Value} request.startIndex + * Zero-based index of the starting row. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results. + * @param {google.protobuf.UInt32Value} request.maxResults + * Maximum number of results to read. + * @param {google.protobuf.UInt32Value} request.timeoutMs + * Optional: Specifies the maximum amount of time, in milliseconds, that the + * client is willing to wait for the query to complete. By default, this limit + * is 10 seconds (10,000 milliseconds). 
If the query is complete, the + * jobComplete field in the response is true. If the query has not yet + * completed, jobComplete is false. + * + * You can request a longer timeout period in the timeoutMs field. However, + * the call is not guaranteed to wait for the specified timeout; it typically + * returns after around 200 seconds (200,000 milliseconds), even if the query + * is not complete. + * + * If jobComplete is false, you can continue to wait for the query to complete + * by calling the getQueryResults method until the jobComplete field in the + * getQueryResults response is true. + * @param {string} request.location + * The geographic location of the job. You must specify the location to run + * the job for the following scenarios: + * + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + * @param {google.cloud.bigquery.v2.DataFormatOptions} [request.formatOptions] + * Optional. Output format adjustments. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.GetQueryResultsResponse|GetQueryResultsResponse}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/job_service.get_query_results.js + * region_tag:bigquery_v2_generated_JobService_GetQueryResults_async + */ + getQueryResults( + request?: protos.google.cloud.bigquery.v2.IGetQueryResultsRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|undefined, {}|undefined + ]>; + getQueryResults( + request: protos.google.cloud.bigquery.v2.IGetQueryResultsRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|null|undefined, + {}|null|undefined>): void; + getQueryResults( + request: protos.google.cloud.bigquery.v2.IGetQueryResultsRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|null|undefined, + {}|null|undefined>): void; + getQueryResults( + request?: protos.google.cloud.bigquery.v2.IGetQueryResultsRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = 
optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getQueryResults(request, options, callback); + } +/** + * Runs a BigQuery SQL query synchronously and returns query results if the + * query completes within a specified timeout. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the query request. + * @param {google.cloud.bigquery.v2.QueryRequest} request.queryRequest + * The query request body. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.QueryResponse|QueryResponse}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/job_service.query.js + * region_tag:bigquery_v2_generated_JobService_Query_async + */ + query( + request?: protos.google.cloud.bigquery.v2.IPostQueryRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|undefined, {}|undefined + ]>; + query( + request: protos.google.cloud.bigquery.v2.IPostQueryRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|null|undefined, + {}|null|undefined>): void; + query( + request: protos.google.cloud.bigquery.v2.IPostQueryRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|null|undefined, + {}|null|undefined>): void; + query( + request?: protos.google.cloud.bigquery.v2.IPostQueryRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + this.initialize(); + return this.innerApiCalls.query(request, options, callback); + } + + /** + * Lists all jobs that you started in the specified project. 
Job information + * is available for a six month period after creation. The job list is sorted + * in reverse chronological order, by job creation time. Requires the Can View + * project role, or the Is Owner project role if you set the allUsers + * property. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Project ID of the jobs to list. + * @param {boolean} request.allUsers + * Whether to display jobs owned by all users in the project. Default False. + * @param {google.protobuf.Int32Value | number} request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {number} request.minCreationTime + * Min value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created after or at this timestamp are returned. + * @param {google.protobuf.UInt64Value} request.maxCreationTime + * Max value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created before or at this timestamp are returned. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results. + * @param {google.cloud.bigquery.v2.ListJobsRequest.Projection} request.projection + * Restrict information returned to a set of selected fields + * @param {number[]} request.stateFilter + * Filter for job state + * @param {string} request.parentJobId + * If set, show only child jobs of the specified parent. Otherwise, show all + * top-level jobs. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.ListFormatJob|ListFormatJob}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listJobsAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples.
+ */ + listJobs( + request?: protos.google.cloud.bigquery.v2.IListJobsRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IListFormatJob[], + protos.google.cloud.bigquery.v2.IListJobsRequest|null, + protos.google.cloud.bigquery.v2.IJobList + ]>; + listJobs( + request: protos.google.cloud.bigquery.v2.IListJobsRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListJobsRequest, + protos.google.cloud.bigquery.v2.IJobList|null|undefined, + protos.google.cloud.bigquery.v2.IListFormatJob>): void; + listJobs( + request: protos.google.cloud.bigquery.v2.IListJobsRequest, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListJobsRequest, + protos.google.cloud.bigquery.v2.IJobList|null|undefined, + protos.google.cloud.bigquery.v2.IListFormatJob>): void; + listJobs( + request?: protos.google.cloud.bigquery.v2.IListJobsRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.bigquery.v2.IListJobsRequest, + protos.google.cloud.bigquery.v2.IJobList|null|undefined, + protos.google.cloud.bigquery.v2.IListFormatJob>, + callback?: PaginationCallback< + protos.google.cloud.bigquery.v2.IListJobsRequest, + protos.google.cloud.bigquery.v2.IJobList|null|undefined, + protos.google.cloud.bigquery.v2.IListFormatJob>): + Promise<[ + protos.google.cloud.bigquery.v2.IListFormatJob[], + protos.google.cloud.bigquery.v2.IListJobsRequest|null, + protos.google.cloud.bigquery.v2.IJobList + ]>|void { + request = request || {}; + // Converts a plain number to a UInt32/Int32 wrapper value for non-compliant APIs. + if (request.maxResults && typeof request.maxResults === 'number') { + const maxResultsObject = {value: request.maxResults}; + request.maxResults = maxResultsObject; + } + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + this.initialize(); + return this.innerApiCalls.listJobs(request, options, callback); + } + +/** + * Equivalent to `listJobs`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Project ID of the jobs to list. + * @param {boolean} request.allUsers + * Whether to display jobs owned by all users in the project. Default False. + * @param {google.protobuf.Int32Value} request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {number} request.minCreationTime + * Min value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created after or at this timestamp are returned. + * @param {google.protobuf.UInt64Value} request.maxCreationTime + * Max value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created before or at this timestamp are returned. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results.
+ * @param {google.cloud.bigquery.v2.ListJobsRequest.Projection} request.projection + * Restrict information returned to a set of selected fields + * @param {number[]} request.stateFilter + * Filter for job state + * @param {string} request.parentJobId + * If set, show only child jobs of the specified parent. Otherwise, show all + * top-level jobs. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.ListFormatJob|ListFormatJob} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. + * We recommend using `listJobsAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + */ + listJobsStream( + request?: protos.google.cloud.bigquery.v2.IListJobsRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + const defaultCallSettings = this._defaults['listJobs']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listJobs.createStream( + this.innerApiCalls.listJobs as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listJobs`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Project ID of the jobs to list. + * @param {boolean} request.allUsers + * Whether to display jobs owned by all users in the project. Default False. + * @param {google.protobuf.Int32Value} request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {number} request.minCreationTime + * Min value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created after or at this timestamp are returned. + * @param {google.protobuf.UInt64Value} request.maxCreationTime + * Max value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created before or at this timestamp are returned. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results. + * @param {google.cloud.bigquery.v2.ListJobsRequest.Projection} request.projection + * Restrict information returned to a set of selected fields + * @param {number[]} request.stateFilter + * Filter for job state + * @param {string} request.parentJobId + * If set, show only child jobs of the specified parent. Otherwise, show all + * top-level jobs. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. 
+ * @returns {Object} + * An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }. + * When you iterate the returned iterable, each element will be an object representing + * {@link protos.google.cloud.bigquery.v2.ListFormatJob|ListFormatJob}. The API will be called under the hood as needed, once per page, + * so you can stop the iteration when you don't need more results. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + * @example include:samples/generated/v2/job_service.list_jobs.js + * region_tag:bigquery_v2_generated_JobService_ListJobs_async + */ + listJobsAsync( + request?: protos.google.cloud.bigquery.v2.IListJobsRequest, + options?: CallOptions): + AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatJob>{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + const defaultCallSettings = this._defaults['listJobs']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listJobs.asyncIterate( + this.innerApiCalls['listJobs'] as GaxCall, + request as {}, + callSettings + ) as AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatJob>; + } + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. + */ + close(): Promise<void> { + if (this.jobServiceStub && !this._terminated) { + return this.jobServiceStub.then(stub => { + this._terminated = true; + stub.close(); + }); + } + return Promise.resolve(); + } +} diff --git a/baselines/bigquery-v2-esm/esm/src/v2/job_service_client_config.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/job_service_client_config.json.baseline new file mode 100644 index 000000000..7b1d7cc93 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/job_service_client_config.json.baseline @@ -0,0 +1,54 @@ +{ + "interfaces": { + "google.cloud.bigquery.v2.JobService": { + "retry_codes": { + "non_idempotent": [], + "idempotent": [ + "DEADLINE_EXCEEDED", + "UNAVAILABLE" + ] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "CancelJob": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GetJob": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "InsertJob": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "DeleteJob": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "ListJobs": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GetQueryResults": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "Query": { + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + } + } + } + } +}
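The job-service methods above compose into the query-then-poll flow that the `getQueryResults` JSDoc describes: run `query()` with a timeout, then poll until `jobComplete` is true. A sketch under the same illustrative assumptions as the earlier examples (package name, project ID), with deliberately loose unwrapping of proto wrapper fields:

```typescript
// Hypothetical import path; the published package name may differ.
import {JobServiceClient} from '@google-cloud/bigquery-v2';

// Proto wrapper fields (BoolValue) may surface as {value: boolean}; unwrap loosely.
const isDone = (flag: unknown): boolean =>
  typeof flag === 'boolean' ? flag : !!(flag as {value?: boolean} | null)?.value;

async function runQuery(projectId: string, sql: string): Promise<void> {
  const client = new JobServiceClient();
  // query() waits up to timeoutMs for the job; jobComplete may still come back false.
  const [response] = await client.query({projectId, queryRequest: {query: sql}});
  let rows = response.rows;
  let complete = isDone(response.jobComplete);
  const jobId = response.jobReference?.jobId;
  while (!complete && jobId) {
    // Keep polling getQueryResults until jobComplete is true, per its JSDoc.
    const [results] = await client.getQueryResults({projectId, jobId});
    complete = isDone(results.jobComplete);
    rows = results.rows;
  }
  console.log(rows);
  await client.close();
}
```

diff --git a/baselines/bigquery-v2-esm/esm/src/v2/job_service_proto_list.json.baseline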
b/baselines/bigquery-v2-esm/esm/src/v2/job_service_proto_list.json.baseline new file mode 100644 index 000000000..4878b9be1 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/job_service_proto_list.json.baseline @@ -0,0 +1,46 @@ +[ + "../../protos/google/cloud/bigquery/v2/biglake_config.proto", + "../../protos/google/cloud/bigquery/v2/clustering.proto", + "../../protos/google/cloud/bigquery/v2/data_format_options.proto", + "../../protos/google/cloud/bigquery/v2/dataset.proto", + "../../protos/google/cloud/bigquery/v2/dataset_reference.proto", + "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto", + "../../protos/google/cloud/bigquery/v2/encryption_config.proto", + "../../protos/google/cloud/bigquery/v2/error.proto", + "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto", + "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto", + "../../protos/google/cloud/bigquery/v2/external_data_config.proto", + "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto", + "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto", + "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/job.proto", + "../../protos/google/cloud/bigquery/v2/job_config.proto", + "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto", + "../../protos/google/cloud/bigquery/v2/job_reference.proto", + "../../protos/google/cloud/bigquery/v2/job_stats.proto", + "../../protos/google/cloud/bigquery/v2/job_status.proto", + "../../protos/google/cloud/bigquery/v2/json_extension.proto", + "../../protos/google/cloud/bigquery/v2/location_metadata.proto", + "../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2-esm/esm/src/v2/model_service_client.ts.baseline b/baselines/bigquery-v2-esm/esm/src/v2/model_service_client.ts.baseline new file mode 100644 index 000000000..976493849 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/model_service_client.ts.baseline @@ -0,0 +1,791 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +// @ts-ignore +import type * as protos from '../../../protos/protos.js'; +import * as model_service_client_config from './model_service_client_config.json'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; +import {getJSON} from '../json-helper.cjs'; +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); + +/** + * Client JSON configuration object, loaded from + * `src/v2/model_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +const gapicConfig = getJSON( + path.join(dirname, 'model_service_client_config.json') +); + +const jsonProtos = getJSON( + path.join(dirname, '..', '..', '..', 'protos/protos.json') +); + +const version = getJSON( + path.join(dirname, '..', '..', '..', '..', 'package.json') +).version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Model Service. + * + * It should not be relied on for production use cases at this time. + * @class + * @memberof v2 + */ +export class ModelServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + modelServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of ModelServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. 
+ * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new ModelServiceClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof ModelServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = gax as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. 
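+    // (Self-signed JWT access with scopes lets service-account credentials
+    // authenticate without a round trip to the OAuth token endpoint, where
+    // the API supports it.)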
+    this.auth.useJWTAccessWithScope = true;
+
+    // Set defaultServicePath on the auth object.
+    this.auth.defaultServicePath = this._servicePath;
+
+    // Set the default scopes in auth client if needed.
+    if (servicePath === this._servicePath) {
+      this.auth.defaultScopes = staticMembers.scopes;
+    }
+
+    // Add ESM headers
+    const isEsm = true;
+    const isEsmString = isEsm ? '-esm' : '-cjs';
+    // Determine the client header string.
+    const clientHeader = [
+      `gax/${this._gaxModule.version}`,
+      `gapic/${version}`,
+    ];
+    if (typeof process === 'object' && 'versions' in process) {
+      clientHeader.push(`gl-node/${process.versions.node}${isEsmString}`);
+    } else {
+      clientHeader.push(`gl-web/${this._gaxModule.version}`);
+    }
+    if (!opts.fallback) {
+      clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`);
+    } else if (opts.fallback === 'rest') {
+      clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`);
+    }
+    if (opts.libName && opts.libVersion) {
+      clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+    }
+
+    // Load the applicable protos.
+    this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos as gax.protobuf.INamespace);
+
+    // Some of the methods on this service return "paged" results,
+    // (e.g. 50 results at a time, with tokens to get subsequent
+    // pages). Denote the keys used for pagination and results.
+    this.descriptors.page = {
+      listModels:
+          new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'models')
+    };
+
+    // Put together the default options sent with requests.
+    this._defaults = this._gaxGrpc.constructSettings(
+        'google.cloud.bigquery.v2.ModelService', gapicConfig as gax.ClientConfig,
+        opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')});
+
+    // Set up a dictionary of "inner API calls"; the core implementation
+    // of calling the API is handled in `google-gax`, with this code
+    // merely providing the destination and request information.
+    this.innerApiCalls = {};
+
+    // Add a warn function to the client constructor so it can be easily tested.
+    this.warn = this._gaxModule.warn;
+  }
+
+  /**
+   * Initialize the client.
+   * Performs asynchronous operations (such as authentication) and prepares the client.
+   * This function will be called automatically when any class method is called for the
+   * first time, but if you need to initialize it before calling an actual method,
+   * feel free to call initialize() directly.
+   *
+   * You can await on this method if you want to make sure the client is initialized.
+   *
+   * @returns {Promise} A promise that resolves to an authenticated service stub.
+   */
+  initialize() {
+    // If the client stub promise is already initialized, return immediately.
+    if (this.modelServiceStub) {
+      return this.modelServiceStub;
+    }
+
+    // Put together the "service stub" for
+    // google.cloud.bigquery.v2.ModelService.
+    this.modelServiceStub = this._gaxGrpc.createStub(
+        this._opts.fallback ?
+          (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.ModelService') :
+          // eslint-disable-next-line @typescript-eslint/no-explicit-any
+          (this._protos as any).google.cloud.bigquery.v2.ModelService,
+        this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>;
+
+    // Iterate over each of the methods that the service provides
+    // and create an API call method for each.
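+    // Each wrapped method picks up the retry/timeout defaults resolved into
+    // this._defaults above and fails fast once the client has been closed.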
+    const modelServiceStubMethods =
+        ['getModel', 'listModels', 'patchModel', 'deleteModel'];
+    for (const methodName of modelServiceStubMethods) {
+      const callPromise = this.modelServiceStub.then(
+        stub => (...args: Array<{}>) => {
+          if (this._terminated) {
+            return Promise.reject('The client has already been closed.');
+          }
+          const func = stub[methodName];
+          return func.apply(stub, args);
+        },
+        (err: Error|null|undefined) => () => {
+          throw err;
+        });
+
+      const descriptor =
+        this.descriptors.page[methodName] ||
+        undefined;
+      const apiCall = this._gaxModule.createApiCall(
+        callPromise,
+        this._defaults[methodName],
+        descriptor,
+        this._opts.fallback
+      );
+
+      this.innerApiCalls[methodName] = apiCall;
+    }
+
+    return this.modelServiceStub;
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @deprecated Use the apiEndpoint method of the client instance.
+   * @returns {string} The DNS address for this service.
+   */
+  static get servicePath() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service - same as servicePath,
+   * exists for compatibility reasons.
+   * @deprecated Use the apiEndpoint method of the client instance.
+   * @returns {string} The DNS address for this service.
+   */
+  static get apiEndpoint() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  get apiEndpoint() {
+    return this._servicePath;
+  }
+
+  get universeDomain() {
+    return this._universeDomain;
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * Gets the specified model resource by model ID.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the requested model.
+ * @param {string} request.datasetId
+ *   Required. Dataset ID of the requested model.
+ * @param {string} request.modelId
+ *   Required. Model ID of the requested model.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Model|Model}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/model_service.get_model.js + * region_tag:bigquery_v2_generated_ModelService_GetModel_async + */ + getModel( + request?: protos.google.cloud.bigquery.v2.IGetModelRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|undefined, {}|undefined + ]>; + getModel( + request: protos.google.cloud.bigquery.v2.IGetModelRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|null|undefined, + {}|null|undefined>): void; + getModel( + request: protos.google.cloud.bigquery.v2.IGetModelRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|null|undefined, + {}|null|undefined>): void; + getModel( + request?: protos.google.cloud.bigquery.v2.IGetModelRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'model_id': request.modelId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getModel(request, options, callback); + } +/** + * Patch specific fields in the specified model. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the model to patch. + * @param {string} request.datasetId + * Required. Dataset ID of the model to patch. + * @param {string} request.modelId + * Required. Model ID of the model to patch. + * @param {google.cloud.bigquery.v2.Model} request.model + * Required. Patched model. + * Follows RFC5789 patch semantics. Missing fields are not updated. + * To clear a field, explicitly set to default value. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Model|Model}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
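+ *   A minimal inline sketch (identifiers illustrative):
+ *   ```
+ *   const [model] = await client.patchModel({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     modelId: 'my_model',
+ *     model: {description: 'updated description'},
+ *   });
+ *   ```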
+ * @example include:samples/generated/v2/model_service.patch_model.js + * region_tag:bigquery_v2_generated_ModelService_PatchModel_async + */ + patchModel( + request?: protos.google.cloud.bigquery.v2.IPatchModelRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|undefined, {}|undefined + ]>; + patchModel( + request: protos.google.cloud.bigquery.v2.IPatchModelRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|null|undefined, + {}|null|undefined>): void; + patchModel( + request: protos.google.cloud.bigquery.v2.IPatchModelRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|null|undefined, + {}|null|undefined>): void; + patchModel( + request?: protos.google.cloud.bigquery.v2.IPatchModelRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'model_id': request.modelId ?? '', + }); + this.initialize(); + return this.innerApiCalls.patchModel(request, options, callback); + } +/** + * Deletes the model specified by modelId from the dataset. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the model to delete. + * @param {string} request.datasetId + * Required. Dataset ID of the model to delete. + * @param {string} request.modelId + * Required. Model ID of the model to delete. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
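+ *   For example (identifiers illustrative):
+ *   ```
+ *   await client.deleteModel({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     modelId: 'my_model',
+ *   });
+ *   ```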
+ * @example include:samples/generated/v2/model_service.delete_model.js + * region_tag:bigquery_v2_generated_ModelService_DeleteModel_async + */ + deleteModel( + request?: protos.google.cloud.bigquery.v2.IDeleteModelRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|undefined, {}|undefined + ]>; + deleteModel( + request: protos.google.cloud.bigquery.v2.IDeleteModelRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|null|undefined, + {}|null|undefined>): void; + deleteModel( + request: protos.google.cloud.bigquery.v2.IDeleteModelRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|null|undefined, + {}|null|undefined>): void; + deleteModel( + request?: protos.google.cloud.bigquery.v2.IDeleteModelRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'model_id': request.modelId ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteModel(request, options, callback); + } + + /** + * Lists all models in the specified dataset. Requires the READER dataset + * role. After retrieving the list of models, you can get information about a + * particular model by calling the models.get method. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the models to list. + * @param {string} request.datasetId + * Required. Dataset ID of the models to list. + * @param {google.protobuf.UInt32Value| number } request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {string} request.pageToken + * Page token, returned by a previous call to request the next page of + * results + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.Model|Model}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. 
+ * We recommend using `listModelsAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + */ + listModels( + request?: protos.google.cloud.bigquery.v2.IListModelsRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IModel[], + protos.google.cloud.bigquery.v2.IListModelsRequest|null, + protos.google.cloud.bigquery.v2.IListModelsResponse + ]>; + listModels( + request: protos.google.cloud.bigquery.v2.IListModelsRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListModelsRequest, + protos.google.cloud.bigquery.v2.IListModelsResponse|null|undefined, + protos.google.cloud.bigquery.v2.IModel>): void; + listModels( + request: protos.google.cloud.bigquery.v2.IListModelsRequest, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListModelsRequest, + protos.google.cloud.bigquery.v2.IListModelsResponse|null|undefined, + protos.google.cloud.bigquery.v2.IModel>): void; + listModels( + request?: protos.google.cloud.bigquery.v2.IListModelsRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.bigquery.v2.IListModelsRequest, + protos.google.cloud.bigquery.v2.IListModelsResponse|null|undefined, + protos.google.cloud.bigquery.v2.IModel>, + callback?: PaginationCallback< + protos.google.cloud.bigquery.v2.IListModelsRequest, + protos.google.cloud.bigquery.v2.IListModelsResponse|null|undefined, + protos.google.cloud.bigquery.v2.IModel>): + Promise<[ + protos.google.cloud.bigquery.v2.IModel[], + protos.google.cloud.bigquery.v2.IListModelsRequest|null, + protos.google.cloud.bigquery.v2.IListModelsResponse + ]>|void { + request = request || {}; + // Converts number to Unit32 or Int32 value for non-compliant APIs. + if(request.maxResults && typeof request.maxResults === "number"){ + const maxResultsObject = {"value": request.maxResults} + request.maxResults = maxResultsObject + } + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.listModels(request, options, callback); + } + +/** + * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the models to list. + * @param {string} request.datasetId + * Required. Dataset ID of the models to list. + * @param {google.protobuf.UInt32Value} request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {string} request.pageToken + * Page token, returned by a previous call to request the next page of + * results + * @param {object} [options] + * Call options. 
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ *   An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.Model|Model} on 'data' event.
+ *   The client library will perform auto-pagination by default: it will call the API as many
+ *   times as needed. Note that it can affect your quota.
+ *   We recommend using `listModelsAsync()`
+ *   method described below for async iteration which you can stop as needed.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ */
+  listModelsStream(
+      request?: protos.google.cloud.bigquery.v2.IListModelsRequest,
+      options?: CallOptions):
+    Transform{
+    request = request || {};
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    const defaultCallSettings = this._defaults['listModels'];
+    const callSettings = defaultCallSettings.merge(options);
+    this.initialize();
+    return this.descriptors.page.listModels.createStream(
+      this.innerApiCalls.listModels as GaxCall,
+      request,
+      callSettings
+    );
+  }
+
+/**
+ * Equivalent to `listModels`, but returns an iterable object.
+ *
+ * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand.
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the models to list.
+ * @param {string} request.datasetId
+ *   Required. Dataset ID of the models to list.
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ *   The maximum number of results to return in a single response page.
+ *   Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ *   Page token, returned by a previous call to request the next page of
+ *   results
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Object}
+ *   An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }.
+ *   When you iterate the returned iterable, each element will be an object representing
+ *   {@link protos.google.cloud.bigquery.v2.Model|Model}. The API will be called under the hood as needed, once per the page,
+ *   so you can stop the iteration when you don't need more results.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ * @example include:samples/generated/v2/model_service.list_models.js
+ * region_tag:bigquery_v2_generated_ModelService_ListModels_async
+ */
+  listModelsAsync(
+      request?: protos.google.cloud.bigquery.v2.IListModelsRequest,
+      options?: CallOptions):
+    AsyncIterable<protos.google.cloud.bigquery.v2.IModel>{
+    request = request || {};
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    const defaultCallSettings = this._defaults['listModels'];
+    const callSettings = defaultCallSettings.merge(options);
+    this.initialize();
+    return this.descriptors.page.listModels.asyncIterate(
+      this.innerApiCalls['listModels'] as GaxCall,
+      request as {},
+      callSettings
+    ) as AsyncIterable<protos.google.cloud.bigquery.v2.IModel>;
+  }
+
+  /**
+   * Terminate the gRPC channel and close the client.
+   *
+   * The client will no longer be usable and all future behavior is undefined.
+   * @returns {Promise} A promise that resolves when the client is closed.
+   */
+  close(): Promise<void> {
+    if (this.modelServiceStub && !this._terminated) {
+      return this.modelServiceStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/model_service_client_config.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/model_service_client_config.json.baseline
new file mode 100644
index 000000000..76a66c9cb
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/model_service_client_config.json.baseline
@@ -0,0 +1,42 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.v2.ModelService": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "GetModel": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "ListModels": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "PatchModel": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "DeleteModel": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/model_service_proto_list.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/model_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/model_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+  "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+  "../../protos/google/cloud/bigquery/v2/clustering.proto",
+  "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+  "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+  "../../protos/google/cloud/bigquery/v2/error.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+  "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+  "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+  "../../protos/google/cloud/bigquery/v2/job.proto",
+  "../../protos/google/cloud/bigquery/v2/job_config.proto",
+  "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto",
+  "../../protos/google/cloud/bigquery/v2/job_reference.proto",
"../../protos/google/cloud/bigquery/v2/job_stats.proto", + "../../protos/google/cloud/bigquery/v2/job_status.proto", + "../../protos/google/cloud/bigquery/v2/json_extension.proto", + "../../protos/google/cloud/bigquery/v2/location_metadata.proto", + "../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2-esm/esm/src/v2/project_service_client.ts.baseline b/baselines/bigquery-v2-esm/esm/src/v2/project_service_client.ts.baseline new file mode 100644 index 000000000..d792cf6d9 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/project_service_client.ts.baseline @@ -0,0 +1,429 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax'; + +// @ts-ignore +import type * as protos from '../../../protos/protos.js'; +import * as project_service_client_config from './project_service_client_config.json'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; +import {getJSON} from '../json-helper.cjs'; +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); + +/** + * Client JSON configuration object, loaded from + * `src/v2/project_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. 
+ */ +const gapicConfig = getJSON( + path.join(dirname, 'project_service_client_config.json') +); + +const jsonProtos = getJSON( + path.join(dirname, '..', '..', '..', 'protos/protos.json') +); + +const version = getJSON( + path.join(dirname, '..', '..', '..', '..', 'package.json') +).version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Project Service. + * + * It should not be relied on for production use cases at this time. + * @class + * @memberof v2 + */ +export class ProjectServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + projectServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of ProjectServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. 
Load only fallback version and pass it to the constructor:
+ * ```
+ * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC
+ * const client = new ProjectServiceClient({fallback: 'rest'}, gax);
+ * ```
+ */
+  constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) {
+    // Ensure that options include all the required fields.
+    const staticMembers = this.constructor as typeof ProjectServiceClient;
+    if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) {
+      throw new Error('Please set either universe_domain or universeDomain, but not both.');
+    }
+    const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined;
+    this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com';
+    this._servicePath = 'bigquery.' + this._universeDomain;
+    const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath;
+    this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint);
+    const port = opts?.port || staticMembers.port;
+    const clientConfig = opts?.clientConfig ?? {};
+    const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function');
+    opts = Object.assign({servicePath, port, clientConfig, fallback}, opts);
+
+    // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case.
+    if (servicePath !== this._servicePath && !('scopes' in opts)) {
+      opts['scopes'] = staticMembers.scopes;
+    }
+
+    // Load google-gax module synchronously if needed
+    if (!gaxInstance) {
+      gaxInstance = gax as typeof gax;
+    }
+
+    // Choose either gRPC or proto-over-HTTP implementation of google-gax.
+    this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance;
+
+    // Create a `gaxGrpc` object, with any grpc-specific options sent to the client.
+    this._gaxGrpc = new this._gaxModule.GrpcClient(opts);
+
+    // Save options to use in initialize() method.
+    this._opts = opts;
+
+    // Save the auth object to the client, for use by other methods.
+    this.auth = (this._gaxGrpc.auth as gax.GoogleAuth);
+
+    // Set useJWTAccessWithScope on the auth object.
+    this.auth.useJWTAccessWithScope = true;
+
+    // Set defaultServicePath on the auth object.
+    this.auth.defaultServicePath = this._servicePath;
+
+    // Set the default scopes in auth client if needed.
+    if (servicePath === this._servicePath) {
+      this.auth.defaultScopes = staticMembers.scopes;
+    }
+
+    // Add ESM headers
+    const isEsm = true;
+    const isEsmString = isEsm ? '-esm' : '-cjs';
+    // Determine the client header string.
+    const clientHeader = [
+      `gax/${this._gaxModule.version}`,
+      `gapic/${version}`,
+    ];
+    if (typeof process === 'object' && 'versions' in process) {
+      clientHeader.push(`gl-node/${process.versions.node}${isEsmString}`);
+    } else {
+      clientHeader.push(`gl-web/${this._gaxModule.version}`);
+    }
+    if (!opts.fallback) {
+      clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`);
+    } else if (opts.fallback === 'rest') {
+      clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`);
+    }
+    if (opts.libName && opts.libVersion) {
+      clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+    }
+
+    // Load the applicable protos.
+    this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos as gax.protobuf.INamespace);
+
+    // Put together the default options sent with requests.
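+    // constructSettings() merges the bundled client config loaded above with
+    // any opts.clientConfig override, and attaches the x-goog-api-client
+    // metrics header assembled from clientHeader to every request.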
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.ProjectService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.projectServiceStub) { + return this.projectServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.ProjectService. + this.projectServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.ProjectService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.ProjectService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const projectServiceStubMethods = + ['getServiceAccount']; + for (const methodName of projectServiceStubMethods) { + const callPromise = this.projectServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.projectServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath, + * exists for compatibility reasons. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service. 
+   * @returns {string} The DNS address for this service.
+   */
+  get apiEndpoint() {
+    return this._servicePath;
+  }
+
+  get universeDomain() {
+    return this._universeDomain;
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * RPC to get the service account for a project used for interactions with
+ * Google Cloud KMS
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. ID of the project.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.GetServiceAccountResponse|GetServiceAccountResponse}.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ *   for more details and examples.
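+ *   For example (project ID illustrative):
+ *   ```
+ *   const [response] = await client.getServiceAccount({projectId: 'my-project'});
+ *   ```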
+ * @example include:samples/generated/v2/project_service.get_service_account.js + * region_tag:bigquery_v2_generated_ProjectService_GetServiceAccount_async + */ + getServiceAccount( + request?: protos.google.cloud.bigquery.v2.IGetServiceAccountRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IGetServiceAccountResponse, + protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|undefined, {}|undefined + ]>; + getServiceAccount( + request: protos.google.cloud.bigquery.v2.IGetServiceAccountRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IGetServiceAccountResponse, + protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|null|undefined, + {}|null|undefined>): void; + getServiceAccount( + request: protos.google.cloud.bigquery.v2.IGetServiceAccountRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IGetServiceAccountResponse, + protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|null|undefined, + {}|null|undefined>): void; + getServiceAccount( + request?: protos.google.cloud.bigquery.v2.IGetServiceAccountRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IGetServiceAccountResponse, + protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IGetServiceAccountResponse, + protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IGetServiceAccountResponse, + protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getServiceAccount(request, options, callback); + } + + + /** + * Terminate the gRPC channel and close the client. + * + * The client will no longer be usable and all future behavior is undefined. + * @returns {Promise} A promise that resolves when the client is closed. 
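+   *   For example: `await client.close();` once all pending calls have settled.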
+   */
+  close(): Promise<void> {
+    if (this.projectServiceStub && !this._terminated) {
+      return this.projectServiceStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/project_service_client_config.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/project_service_client_config.json.baseline
new file mode 100644
index 000000000..043d875e7
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/project_service_client_config.json.baseline
@@ -0,0 +1,30 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.v2.ProjectService": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "GetServiceAccount": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/project_service_proto_list.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/project_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/project_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+  "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+  "../../protos/google/cloud/bigquery/v2/clustering.proto",
+  "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+  "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+  "../../protos/google/cloud/bigquery/v2/error.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+  "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+  "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+  "../../protos/google/cloud/bigquery/v2/job.proto",
+  "../../protos/google/cloud/bigquery/v2/job_config.proto",
+  "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto",
+  "../../protos/google/cloud/bigquery/v2/job_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/job_stats.proto",
+  "../../protos/google/cloud/bigquery/v2/job_status.proto",
+  "../../protos/google/cloud/bigquery/v2/json_extension.proto",
+  "../../protos/google/cloud/bigquery/v2/location_metadata.proto",
+  "../../protos/google/cloud/bigquery/v2/map_target_type.proto",
+  "../../protos/google/cloud/bigquery/v2/model.proto",
+  "../../protos/google/cloud/bigquery/v2/model_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto",
+  "../../protos/google/cloud/bigquery/v2/privacy_policy.proto",
+  "../../protos/google/cloud/bigquery/v2/project.proto",
+  "../../protos/google/cloud/bigquery/v2/query_parameter.proto",
+  "../../protos/google/cloud/bigquery/v2/range_partitioning.proto",
+  "../../protos/google/cloud/bigquery/v2/restriction_config.proto",
"../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2-esm/esm/src/v2/routine_service_client.ts.baseline b/baselines/bigquery-v2-esm/esm/src/v2/routine_service_client.ts.baseline new file mode 100644 index 000000000..17528e356 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/routine_service_client.ts.baseline @@ -0,0 +1,953 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +// @ts-ignore +import type * as protos from '../../../protos/protos.js'; +import * as routine_service_client_config from './routine_service_client_config.json'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; +import {getJSON} from '../json-helper.cjs'; +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); + +/** + * Client JSON configuration object, loaded from + * `src/v2/routine_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +const gapicConfig = getJSON( + path.join(dirname, 'routine_service_client_config.json') +); + +const jsonProtos = getJSON( + path.join(dirname, '..', '..', '..', 'protos/protos.json') +); + +const version = getJSON( + path.join(dirname, '..', '..', '..', '..', 'package.json') +).version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Routine Service. + * + * It should not be relied on for production use cases at this time. 
+ * @class + * @memberof v2 + */ +export class RoutineServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + routineServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of RoutineServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new RoutineServiceClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. 
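+    // Universe-domain resolution order: explicit universeDomain option, then
+    // the GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable, then the default
+    // 'googleapis.com'.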
+    const staticMembers = this.constructor as typeof RoutineServiceClient;
+    if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) {
+      throw new Error('Please set either universe_domain or universeDomain, but not both.');
+    }
+    const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined;
+    this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com';
+    this._servicePath = 'bigquery.' + this._universeDomain;
+    const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath;
+    this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint);
+    const port = opts?.port || staticMembers.port;
+    const clientConfig = opts?.clientConfig ?? {};
+    const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function');
+    opts = Object.assign({servicePath, port, clientConfig, fallback}, opts);
+
+    // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case.
+    if (servicePath !== this._servicePath && !('scopes' in opts)) {
+      opts['scopes'] = staticMembers.scopes;
+    }
+
+    // Load google-gax module synchronously if needed
+    if (!gaxInstance) {
+      gaxInstance = gax as typeof gax;
+    }
+
+    // Choose either gRPC or proto-over-HTTP implementation of google-gax.
+    this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance;
+
+    // Create a `gaxGrpc` object, with any grpc-specific options sent to the client.
+    this._gaxGrpc = new this._gaxModule.GrpcClient(opts);
+
+    // Save options to use in initialize() method.
+    this._opts = opts;
+
+    // Save the auth object to the client, for use by other methods.
+    this.auth = (this._gaxGrpc.auth as gax.GoogleAuth);
+
+    // Set useJWTAccessWithScope on the auth object.
+    this.auth.useJWTAccessWithScope = true;
+
+    // Set defaultServicePath on the auth object.
+    this.auth.defaultServicePath = this._servicePath;
+
+    // Set the default scopes in auth client if needed.
+    if (servicePath === this._servicePath) {
+      this.auth.defaultScopes = staticMembers.scopes;
+    }
+
+    // Add ESM headers
+    const isEsm = true;
+    const isEsmString = isEsm ? '-esm' : '-cjs';
+    // Determine the client header string.
+    const clientHeader = [
+      `gax/${this._gaxModule.version}`,
+      `gapic/${version}`,
+    ];
+    if (typeof process === 'object' && 'versions' in process) {
+      clientHeader.push(`gl-node/${process.versions.node}${isEsmString}`);
+    } else {
+      clientHeader.push(`gl-web/${this._gaxModule.version}`);
+    }
+    if (!opts.fallback) {
+      clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`);
+    } else if (opts.fallback === 'rest' ) {
+      clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`);
+    }
+    if (opts.libName && opts.libVersion) {
+      clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+    }
+
+    // Load the applicable protos.
+    this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos as gax.protobuf.INamespace);
+
+    // Some of the methods on this service return "paged" results,
+    // (e.g. 50 results at a time, with tokens to get subsequent
+    // pages). Denote the keys used for pagination and results.
+    this.descriptors.page = {
+      listRoutines:
+          new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'routines')
+    };
+
+    // Put together the default options sent with requests.
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.RoutineService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.routineServiceStub) { + return this.routineServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.RoutineService. + this.routineServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.RoutineService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.RoutineService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const routineServiceStubMethods = + ['getRoutine', 'insertRoutine', 'updateRoutine', 'patchRoutine', 'deleteRoutine', 'listRoutines']; + for (const methodName of routineServiceStubMethods) { + const callPromise = this.routineServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.routineServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath, + * exists for compatibility reasons. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. 
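+   *
+   * A minimal sketch of the preferred instance-based access:
+   * ```
+   * const client = new RoutineServiceClient();
+   * console.log(client.apiEndpoint); // e.g. 'bigquery.googleapis.com'
+   * ```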
+   */
+  static get apiEndpoint() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  get apiEndpoint() {
+    return this._servicePath;
+  }
+
+  get universeDomain() {
+    return this._universeDomain;
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * Gets the specified routine resource by routine ID.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the requested routine
+ * @param {string} request.datasetId
+ *   Required. Dataset ID of the requested routine
+ * @param {string} request.routineId
+ *   Required. Routine ID of the requested routine
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine}.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ *   for more details and examples.
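+ *
+ *   A minimal call sketch (assuming `client` is an initialized
+ *   RoutineServiceClient; the IDs below are illustrative placeholders):
+ *   ```
+ *   const [routine] = await client.getRoutine({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     routineId: 'my_routine',
+ *   });
+ *   ```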
+ * @example include:samples/generated/v2/routine_service.get_routine.js + * region_tag:bigquery_v2_generated_RoutineService_GetRoutine_async + */ + getRoutine( + request?: protos.google.cloud.bigquery.v2.IGetRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|undefined, {}|undefined + ]>; + getRoutine( + request: protos.google.cloud.bigquery.v2.IGetRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|null|undefined, + {}|null|undefined>): void; + getRoutine( + request: protos.google.cloud.bigquery.v2.IGetRoutineRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|null|undefined, + {}|null|undefined>): void; + getRoutine( + request?: protos.google.cloud.bigquery.v2.IGetRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'routine_id': request.routineId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getRoutine(request, options, callback); + } +/** + * Creates a new routine in the dataset. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the new routine + * @param {string} request.datasetId + * Required. Dataset ID of the new routine + * @param {google.cloud.bigquery.v2.Routine} request.routine + * Required. A routine resource to insert + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
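+ *
+ *   A minimal call sketch (placeholder IDs and routine body; `routineReference`,
+ *   `routineType`, and `definitionBody` are standard Routine fields, but the
+ *   values shown are illustrative only):
+ *   ```
+ *   const [routine] = await client.insertRoutine({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     routine: {
+ *       routineReference: {projectId: 'my-project', datasetId: 'my_dataset', routineId: 'my_routine'},
+ *       routineType: 'SCALAR_FUNCTION',
+ *       definitionBody: '(x + 4)',
+ *     },
+ *   });
+ *   ```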
+ * @example include:samples/generated/v2/routine_service.insert_routine.js + * region_tag:bigquery_v2_generated_RoutineService_InsertRoutine_async + */ + insertRoutine( + request?: protos.google.cloud.bigquery.v2.IInsertRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|undefined, {}|undefined + ]>; + insertRoutine( + request: protos.google.cloud.bigquery.v2.IInsertRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|null|undefined, + {}|null|undefined>): void; + insertRoutine( + request: protos.google.cloud.bigquery.v2.IInsertRoutineRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|null|undefined, + {}|null|undefined>): void; + insertRoutine( + request?: protos.google.cloud.bigquery.v2.IInsertRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.insertRoutine(request, options, callback); + } +/** + * Updates information in an existing routine. The update method replaces the + * entire Routine resource. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routine to update + * @param {string} request.datasetId + * Required. Dataset ID of the routine to update + * @param {string} request.routineId + * Required. Routine ID of the routine to update + * @param {google.cloud.bigquery.v2.Routine} request.routine + * Required. A routine resource which will replace the specified routine + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
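+ *
+ *   A minimal call sketch (placeholder IDs; `updatedRoutine` is a hypothetical
+ *   variable standing in for a complete Routine resource, since update replaces
+ *   the entire resource):
+ *   ```
+ *   const [routine] = await client.updateRoutine({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     routineId: 'my_routine',
+ *     routine: updatedRoutine,
+ *   });
+ *   ```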
+ * @example include:samples/generated/v2/routine_service.update_routine.js + * region_tag:bigquery_v2_generated_RoutineService_UpdateRoutine_async + */ + updateRoutine( + request?: protos.google.cloud.bigquery.v2.IUpdateRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|undefined, {}|undefined + ]>; + updateRoutine( + request: protos.google.cloud.bigquery.v2.IUpdateRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|null|undefined, + {}|null|undefined>): void; + updateRoutine( + request: protos.google.cloud.bigquery.v2.IUpdateRoutineRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|null|undefined, + {}|null|undefined>): void; + updateRoutine( + request?: protos.google.cloud.bigquery.v2.IUpdateRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'routine_id': request.routineId ?? '', + }); + this.initialize(); + return this.innerApiCalls.updateRoutine(request, options, callback); + } +/** + * Patches information in an existing routine. The patch method does a partial + * update to an existing Routine resource. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routine to update + * @param {string} request.datasetId + * Required. Dataset ID of the routine to update + * @param {string} request.routineId + * Required. Routine ID of the routine to update + * @param {google.cloud.bigquery.v2.Routine} request.routine + * Required. A routine resource which will be used to partially + * update the specified routine + * @param {google.protobuf.FieldMask} request.fieldMask + * Only the Routine fields in the field mask are updated + * by the given routine. Repeated routine fields will be fully replaced + * if contained in the field mask. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine}. 
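+ *
+ *   A minimal partial-update sketch (placeholder IDs; only the fields named in
+ *   `fieldMask.paths` are modified):
+ *   ```
+ *   const [routine] = await client.patchRoutine({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     routineId: 'my_routine',
+ *     routine: {description: 'updated description'},
+ *     fieldMask: {paths: ['description']},
+ *   });
+ *   ```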
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/routine_service.patch_routine.js + * region_tag:bigquery_v2_generated_RoutineService_PatchRoutine_async + */ + patchRoutine( + request?: protos.google.cloud.bigquery.v2.IPatchRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|undefined, {}|undefined + ]>; + patchRoutine( + request: protos.google.cloud.bigquery.v2.IPatchRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|null|undefined, + {}|null|undefined>): void; + patchRoutine( + request: protos.google.cloud.bigquery.v2.IPatchRoutineRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|null|undefined, + {}|null|undefined>): void; + patchRoutine( + request?: protos.google.cloud.bigquery.v2.IPatchRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + this.initialize(); + return this.innerApiCalls.patchRoutine(request, options, callback); + } +/** + * Deletes the routine specified by routineId from the dataset. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routine to delete + * @param {string} request.datasetId + * Required. Dataset ID of the routine to delete + * @param {string} request.routineId + * Required. Routine ID of the routine to delete + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
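+ *
+ *   A minimal call sketch (placeholder IDs):
+ *   ```
+ *   await client.deleteRoutine({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     routineId: 'my_routine',
+ *   });
+ *   ```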
+ * @example include:samples/generated/v2/routine_service.delete_routine.js + * region_tag:bigquery_v2_generated_RoutineService_DeleteRoutine_async + */ + deleteRoutine( + request?: protos.google.cloud.bigquery.v2.IDeleteRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|undefined, {}|undefined + ]>; + deleteRoutine( + request: protos.google.cloud.bigquery.v2.IDeleteRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|null|undefined, + {}|null|undefined>): void; + deleteRoutine( + request: protos.google.cloud.bigquery.v2.IDeleteRoutineRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|null|undefined, + {}|null|undefined>): void; + deleteRoutine( + request?: protos.google.cloud.bigquery.v2.IDeleteRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'routine_id': request.routineId ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteRoutine(request, options, callback); + } + + /** + * Lists all routines in the specified dataset. Requires the READER dataset + * role. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routines to list + * @param {string} request.datasetId + * Required. Dataset ID of the routines to list + * @param {google.protobuf.UInt32Value| number } request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results + * @param {string} request.filter + * If set, then only the Routines matching this filter are returned. + * The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + * is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.Routine|Routine}. 
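+ *
+ *   A minimal paged-call sketch (placeholder IDs; all pages are fetched and
+ *   merged into the returned array):
+ *   ```
+ *   const [routines] = await client.listRoutines({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *   });
+ *   ```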
+ *   The client library will perform auto-pagination by default: it will call the API as many
+ *   times as needed and will merge results from all the pages into this array.
+ *   Note that it can affect your quota.
+ *   We recommend using `listRoutinesAsync()`
+ *   method described below for async iteration which you can stop as needed.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ */
+  listRoutines(
+      request?: protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+      options?: CallOptions):
+    Promise<[
+      protos.google.cloud.bigquery.v2.IRoutine[],
+      protos.google.cloud.bigquery.v2.IListRoutinesRequest|null,
+      protos.google.cloud.bigquery.v2.IListRoutinesResponse
+    ]>;
+  listRoutines(
+      request: protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+      options: CallOptions,
+      callback: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+          protos.google.cloud.bigquery.v2.IListRoutinesResponse|null|undefined,
+          protos.google.cloud.bigquery.v2.IRoutine>): void;
+  listRoutines(
+      request: protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+      callback: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+          protos.google.cloud.bigquery.v2.IListRoutinesResponse|null|undefined,
+          protos.google.cloud.bigquery.v2.IRoutine>): void;
+  listRoutines(
+      request?: protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+      optionsOrCallback?: CallOptions|PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+          protos.google.cloud.bigquery.v2.IListRoutinesResponse|null|undefined,
+          protos.google.cloud.bigquery.v2.IRoutine>,
+      callback?: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+          protos.google.cloud.bigquery.v2.IListRoutinesResponse|null|undefined,
+          protos.google.cloud.bigquery.v2.IRoutine>):
+    Promise<[
+      protos.google.cloud.bigquery.v2.IRoutine[],
+      protos.google.cloud.bigquery.v2.IListRoutinesRequest|null,
+      protos.google.cloud.bigquery.v2.IListRoutinesResponse
+    ]>|void {
+    request = request || {};
+    // Converts number to UInt32 or Int32 value for non-compliant APIs.
+    if (request.maxResults && typeof request.maxResults === "number") {
+      const maxResultsObject = {"value": request.maxResults};
+      request.maxResults = maxResultsObject;
+    }
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    this.initialize();
+    return this.innerApiCalls.listRoutines(request, options, callback);
+  }
+
+/**
+ * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object.
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the routines to list
+ * @param {string} request.datasetId
+ *   Required. Dataset ID of the routines to list
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ *   The maximum number of results to return in a single response page.
+ *   Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results + * @param {string} request.filter + * If set, then only the Routines matching this filter are returned. + * The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + * is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. + * We recommend using `listRoutinesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + */ + listRoutinesStream( + request?: protos.google.cloud.bigquery.v2.IListRoutinesRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + const defaultCallSettings = this._defaults['listRoutines']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listRoutines.createStream( + this.innerApiCalls.listRoutines as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listRoutines`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routines to list + * @param {string} request.datasetId + * Required. Dataset ID of the routines to list + * @param {google.protobuf.UInt32Value} request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results + * @param {string} request.filter + * If set, then only the Routines matching this filter are returned. + * The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + * is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Object} + * An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }. + * When you iterate the returned iterable, each element will be an object representing + * {@link protos.google.cloud.bigquery.v2.Routine|Routine}. The API will be called under the hood as needed, once per the page, + * so you can stop the iteration when you don't need more results. 
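+ *
+ *   A minimal iteration sketch (placeholder IDs):
+ *   ```
+ *   for await (const routine of client.listRoutinesAsync({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *   })) {
+ *     console.log(routine.routineReference?.routineId);
+ *   }
+ *   ```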
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ * @example include:samples/generated/v2/routine_service.list_routines.js
+ * region_tag:bigquery_v2_generated_RoutineService_ListRoutines_async
+ */
+  listRoutinesAsync(
+      request?: protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+      options?: CallOptions):
+    AsyncIterable<protos.google.cloud.bigquery.v2.IRoutine>{
+    request = request || {};
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    const defaultCallSettings = this._defaults['listRoutines'];
+    const callSettings = defaultCallSettings.merge(options);
+    this.initialize();
+    return this.descriptors.page.listRoutines.asyncIterate(
+      this.innerApiCalls['listRoutines'] as GaxCall,
+      request as {},
+      callSettings
+    ) as AsyncIterable<protos.google.cloud.bigquery.v2.IRoutine>;
+  }
+
+  /**
+   * Terminate the gRPC channel and close the client.
+   *
+   * The client will no longer be usable and all future behavior is undefined.
+   * @returns {Promise} A promise that resolves when the client is closed.
+   */
+  close(): Promise<void> {
+    if (this.routineServiceStub && !this._terminated) {
+      return this.routineServiceStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/routine_service_client_config.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/routine_service_client_config.json.baseline
new file mode 100644
index 000000000..7d0cea2ba
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/routine_service_client_config.json.baseline
@@ -0,0 +1,50 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.v2.RoutineService": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "GetRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "InsertRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "UpdateRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "PatchRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "DeleteRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "ListRoutines": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/routine_service_proto_list.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/routine_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/routine_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+  "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+  "../../protos/google/cloud/bigquery/v2/clustering.proto",
+  "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+
"../../protos/google/cloud/bigquery/v2/dataset.proto", + "../../protos/google/cloud/bigquery/v2/dataset_reference.proto", + "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto", + "../../protos/google/cloud/bigquery/v2/encryption_config.proto", + "../../protos/google/cloud/bigquery/v2/error.proto", + "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto", + "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto", + "../../protos/google/cloud/bigquery/v2/external_data_config.proto", + "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto", + "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto", + "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/job.proto", + "../../protos/google/cloud/bigquery/v2/job_config.proto", + "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto", + "../../protos/google/cloud/bigquery/v2/job_reference.proto", + "../../protos/google/cloud/bigquery/v2/job_stats.proto", + "../../protos/google/cloud/bigquery/v2/job_status.proto", + "../../protos/google/cloud/bigquery/v2/json_extension.proto", + "../../protos/google/cloud/bigquery/v2/location_metadata.proto", + "../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2-esm/esm/src/v2/row_access_policy_service_client.ts.baseline b/baselines/bigquery-v2-esm/esm/src/v2/row_access_policy_service_client.ts.baseline new file mode 100644 index 000000000..2f358bd3d --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/row_access_policy_service_client.ts.baseline @@ -0,0 +1,558 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +// @ts-ignore +import type * as protos from '../../../protos/protos.js'; +import * as row_access_policy_service_client_config from './row_access_policy_service_client_config.json'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; +import {getJSON} from '../json-helper.cjs'; +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); + +/** + * Client JSON configuration object, loaded from + * `src/v2/row_access_policy_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +const gapicConfig = getJSON( + path.join(dirname, 'row_access_policy_service_client_config.json') +); + +const jsonProtos = getJSON( + path.join(dirname, '..', '..', '..', 'protos/protos.json') +); + +const version = getJSON( + path.join(dirname, '..', '..', '..', '..', 'package.json') +).version; + +/** + * Service for interacting with row access policies. + * @class + * @memberof v2 + */ +export class RowAccessPolicyServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + rowAccessPolicyServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of RowAccessPolicyServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. 
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode. + * Pass "rest" to use HTTP/1.1 REST API instead of gRPC. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new RowAccessPolicyServiceClient({fallback: 'rest'}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof RowAccessPolicyServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = gax as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = this._servicePath; + + // Set the default scopes in auth client if needed. 
+    if (servicePath === this._servicePath) {
+      this.auth.defaultScopes = staticMembers.scopes;
+    }
+
+    // Add ESM headers
+    const isEsm = true;
+    const isEsmString = isEsm ? '-esm' : '-cjs';
+    // Determine the client header string.
+    const clientHeader = [
+      `gax/${this._gaxModule.version}`,
+      `gapic/${version}`,
+    ];
+    if (typeof process === 'object' && 'versions' in process) {
+      clientHeader.push(`gl-node/${process.versions.node}${isEsmString}`);
+    } else {
+      clientHeader.push(`gl-web/${this._gaxModule.version}`);
+    }
+    if (!opts.fallback) {
+      clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`);
+    } else if (opts.fallback === 'rest' ) {
+      clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`);
+    }
+    if (opts.libName && opts.libVersion) {
+      clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+    }
+
+    // Load the applicable protos.
+    this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos as gax.protobuf.INamespace);
+
+    // Some of the methods on this service return "paged" results,
+    // (e.g. 50 results at a time, with tokens to get subsequent
+    // pages). Denote the keys used for pagination and results.
+    this.descriptors.page = {
+      listRowAccessPolicies:
+          new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'rowAccessPolicies')
+    };
+
+    // Put together the default options sent with requests.
+    this._defaults = this._gaxGrpc.constructSettings(
+        'google.cloud.bigquery.v2.RowAccessPolicyService', gapicConfig as gax.ClientConfig,
+        opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')});
+
+    // Set up a dictionary of "inner API calls"; the core implementation
+    // of calling the API is handled in `google-gax`, with this code
+    // merely providing the destination and request information.
+    this.innerApiCalls = {};
+
+    // Add a warn function to the client constructor so it can be easily tested.
+    this.warn = this._gaxModule.warn;
+  }
+
+  /**
+   * Initialize the client.
+   * Performs asynchronous operations (such as authentication) and prepares the client.
+   * This function will be called automatically when any class method is called for the
+   * first time, but if you need to initialize it before calling an actual method,
+   * feel free to call initialize() directly.
+   *
+   * You can await on this method if you want to make sure the client is initialized.
+   *
+   * @returns {Promise} A promise that resolves to an authenticated service stub.
+   */
+  initialize() {
+    // If the client stub promise is already initialized, return immediately.
+    if (this.rowAccessPolicyServiceStub) {
+      return this.rowAccessPolicyServiceStub;
+    }
+
+    // Put together the "service stub" for
+    // google.cloud.bigquery.v2.RowAccessPolicyService.
+    this.rowAccessPolicyServiceStub = this._gaxGrpc.createStub(
+        this._opts.fallback ?
+          (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.RowAccessPolicyService') :
+          // eslint-disable-next-line @typescript-eslint/no-explicit-any
+          (this._protos as any).google.cloud.bigquery.v2.RowAccessPolicyService,
+        this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>;
+
+    // Iterate over each of the methods that the service provides
+    // and create an API call method for each.
+    const rowAccessPolicyServiceStubMethods =
+        ['listRowAccessPolicies'];
+    for (const methodName of rowAccessPolicyServiceStubMethods) {
+      const callPromise = this.rowAccessPolicyServiceStub.then(
+        stub => (...args: Array<{}>) => {
+          if (this._terminated) {
+            return Promise.reject('The client has already been closed.');
+          }
+          const func = stub[methodName];
+          return func.apply(stub, args);
+        },
+        (err: Error|null|undefined) => () => {
+          throw err;
+        });
+
+      const descriptor =
+        this.descriptors.page[methodName] ||
+        undefined;
+      const apiCall = this._gaxModule.createApiCall(
+        callPromise,
+        this._defaults[methodName],
+        descriptor,
+        this._opts.fallback
+      );
+
+      this.innerApiCalls[methodName] = apiCall;
+    }
+
+    return this.rowAccessPolicyServiceStub;
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @deprecated Use the apiEndpoint method of the client instance.
+   * @returns {string} The DNS address for this service.
+   */
+  static get servicePath() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service - same as servicePath,
+   * exists for compatibility reasons.
+   * @deprecated Use the apiEndpoint method of the client instance.
+   * @returns {string} The DNS address for this service.
+   */
+  static get apiEndpoint() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  get apiEndpoint() {
+    return this._servicePath;
+  }
+
+  get universeDomain() {
+    return this._universeDomain;
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+
+  /**
+   * Lists all row access policies on the specified table.
+   *
+   * @param {Object} request
+   *   The request object that will be sent.
+   * @param {string} request.projectId
+   *   Required. Project ID of the row access policies to list.
+   * @param {string} request.datasetId
+   *   Required. Dataset ID of row access policies to list.
+   * @param {string} request.tableId
+   *   Required. Table ID of the table to list row access policies.
+   * @param {string} request.pageToken
+   *   Page token, returned by a previous call, to request the next page of
+   *   results.
+ * @param {number} request.pageSize + * The maximum number of results to return in a single response page. Leverage + * the page tokens to iterate through the entire collection. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.RowAccessPolicy|RowAccessPolicy}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listRowAccessPoliciesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + */ + listRowAccessPolicies( + request?: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRowAccessPolicy[], + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest|null, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse + ]>; + listRowAccessPolicies( + request: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRowAccessPolicy>): void; + listRowAccessPolicies( + request: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRowAccessPolicy>): void; + listRowAccessPolicies( + request?: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRowAccessPolicy>, + callback?: PaginationCallback< + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRowAccessPolicy>): + Promise<[ + protos.google.cloud.bigquery.v2.IRowAccessPolicy[], + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest|null, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.listRowAccessPolicies(request, options, callback); + } + +/** + * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the row access policies to list. + * @param {string} request.datasetId + * Required. Dataset ID of row access policies to list. + * @param {string} request.tableId + * Required. Table ID of the table to list row access policies. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results. + * @param {number} request.pageSize + * The maximum number of results to return in a single response page. Leverage + * the page tokens to iterate through the entire collection. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.RowAccessPolicy|RowAccessPolicy} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. + * We recommend using `listRowAccessPoliciesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + */ + listRowAccessPoliciesStream( + request?: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? '', + }); + const defaultCallSettings = this._defaults['listRowAccessPolicies']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listRowAccessPolicies.createStream( + this.innerApiCalls.listRowAccessPolicies as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listRowAccessPolicies`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the row access policies to list. + * @param {string} request.datasetId + * Required. Dataset ID of row access policies to list. + * @param {string} request.tableId + * Required. Table ID of the table to list row access policies. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results. + * @param {number} request.pageSize + * The maximum number of results to return in a single response page. Leverage + * the page tokens to iterate through the entire collection. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. 
+ * @returns {Object}
+ * An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }.
+ * When you iterate the returned iterable, each element will be an object representing
+ * {@link protos.google.cloud.bigquery.v2.RowAccessPolicy|RowAccessPolicy}. The API will be called under the hood as needed, once per the page,
+ * so you can stop the iteration when you don't need more results.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ * @example include:samples/generated/v2/row_access_policy_service.list_row_access_policies.js
+ * region_tag:bigquery_v2_generated_RowAccessPolicyService_ListRowAccessPolicies_async
+ */
+ listRowAccessPoliciesAsync(
+ request?: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest,
+ options?: CallOptions):
+ AsyncIterable<protos.google.cloud.bigquery.v2.IRowAccessPolicy>{
+ request = request || {};
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ?? '',
+ 'table_id': request.tableId ?? '',
+ });
+ const defaultCallSettings = this._defaults['listRowAccessPolicies'];
+ const callSettings = defaultCallSettings.merge(options);
+ this.initialize();
+ return this.descriptors.page.listRowAccessPolicies.asyncIterate(
+ this.innerApiCalls['listRowAccessPolicies'] as GaxCall,
+ request as {},
+ callSettings
+ ) as AsyncIterable<protos.google.cloud.bigquery.v2.IRowAccessPolicy>;
+ }
+
+ /**
+ * Terminate the gRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ * @returns {Promise} A promise that resolves when the client is closed.
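+ * @example
+ * // A minimal sketch: close the client once no further calls will be made;
+ * // any calls issued after this point have undefined behavior.
+ * await client.close();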
+ */
+ close(): Promise<void> {
+ if (this.rowAccessPolicyServiceStub && !this._terminated) {
+ return this.rowAccessPolicyServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/row_access_policy_service_client_config.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/row_access_policy_service_client_config.json.baseline
new file mode 100644
index 000000000..6f88ac20f
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/row_access_policy_service_client_config.json.baseline
@@ -0,0 +1,30 @@
+{
+ "interfaces": {
+ "google.cloud.bigquery.v2.RowAccessPolicyService": {
+ "retry_codes": {
+ "non_idempotent": [],
+ "idempotent": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ]
+ },
+ "retry_params": {
+ "default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ }
+ },
+ "methods": {
+ "ListRowAccessPolicies": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ }
+ }
+ }
+ }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/row_access_policy_service_proto_list.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/row_access_policy_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/row_access_policy_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+ "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+ "../../protos/google/cloud/bigquery/v2/clustering.proto",
+ "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+ "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+ "../../protos/google/cloud/bigquery/v2/error.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+ "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+ "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+ "../../protos/google/cloud/bigquery/v2/job.proto",
+ "../../protos/google/cloud/bigquery/v2/job_config.proto",
+ "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto",
+ "../../protos/google/cloud/bigquery/v2/job_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/job_stats.proto",
+ "../../protos/google/cloud/bigquery/v2/job_status.proto",
+ "../../protos/google/cloud/bigquery/v2/json_extension.proto",
+ "../../protos/google/cloud/bigquery/v2/location_metadata.proto",
+ "../../protos/google/cloud/bigquery/v2/map_target_type.proto",
+ "../../protos/google/cloud/bigquery/v2/model.proto",
+ "../../protos/google/cloud/bigquery/v2/model_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto",
+ "../../protos/google/cloud/bigquery/v2/privacy_policy.proto",
+ "../../protos/google/cloud/bigquery/v2/project.proto",
+ "../../protos/google/cloud/bigquery/v2/query_parameter.proto",
+ "../../protos/google/cloud/bigquery/v2/range_partitioning.proto",
"../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2-esm/esm/src/v2/table_service_client.ts.baseline b/baselines/bigquery-v2-esm/esm/src/v2/table_service_client.ts.baseline new file mode 100644 index 000000000..0a13f9ce7 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/src/v2/table_service_client.ts.baseline @@ -0,0 +1,964 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +// @ts-ignore +import type * as protos from '../../../protos/protos.js'; +import * as table_service_client_config from './table_service_client_config.json'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; +import {getJSON} from '../json-helper.cjs'; +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); + +/** + * Client JSON configuration object, loaded from + * `src/v2/table_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +const gapicConfig = getJSON( + path.join(dirname, 'table_service_client_config.json') +); + +const jsonProtos = getJSON( + path.join(dirname, '..', '..', '..', 'protos/protos.json') +); + +const version = getJSON( + path.join(dirname, '..', '..', '..', '..', 'package.json') +).version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Table Service. + * + * It should not be relied on for production use cases at this time. 
+ * @class
+ * @memberof v2
+ */
+export class TableServiceClient {
+ private _terminated = false;
+ private _opts: ClientOptions;
+ private _providedCustomServicePath: boolean;
+ private _gaxModule: typeof gax | typeof gax.fallback;
+ private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient;
+ private _protos: {};
+ private _defaults: {[method: string]: gax.CallSettings};
+ private _universeDomain: string;
+ private _servicePath: string;
+ auth: gax.GoogleAuth;
+ descriptors: Descriptors = {
+ page: {},
+ stream: {},
+ longrunning: {},
+ batching: {},
+ };
+ warn: (code: string, message: string, warnType?: string) => void;
+ innerApiCalls: {[name: string]: Function};
+ tableServiceStub?: Promise<{[name: string]: Function}>;
+
+ /**
+ * Construct an instance of TableServiceClient.
+ *
+ * @param {object} [options] - The configuration object.
+ * The options accepted by the constructor are described in detail
+ * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance).
+ * The common options are:
+ * @param {object} [options.credentials] - Credentials object.
+ * @param {string} [options.credentials.client_email]
+ * @param {string} [options.credentials.private_key]
+ * @param {string} [options.email] - Account email address. Required when
+ * using a .pem or .p12 keyFilename.
+ * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+ * .p12 key downloaded from the Google Developers Console. If you provide
+ * a path to a JSON file, the projectId option below is not necessary.
+ * NOTE: .pem and .p12 require you to specify options.email as well.
+ * @param {number} [options.port] - The port on which to connect to
+ * the remote host.
+ * @param {string} [options.projectId] - The project ID from the Google
+ * Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+ * the environment variable GCLOUD_PROJECT for your project ID. If your
+ * app is running in an environment which supports
+ * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+ * your project ID will be detected automatically.
+ * @param {string} [options.apiEndpoint] - The domain name of the
+ * API remote host.
+ * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override.
+ * Follows the structure of {@link gapicConfig}.
+ * @param {boolean | "rest"} [options.fallback] - Use HTTP fallback mode.
+ * Pass "rest" to use HTTP/1.1 REST API instead of gRPC.
+ * For more information, please check the
+ * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}.
+ * @param {gax} [gaxInstance] - Loaded instance of `google-gax`. Useful if you
+ * need to avoid loading the default gRPC version and want to use the fallback
+ * HTTP implementation. Load only fallback version and pass it to the constructor:
+ * ```
+ * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC
+ * const client = new TableServiceClient({fallback: 'rest'}, gax);
+ * ```
+ */
+ constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) {
+ // Ensure that options include all the required fields.
+ const staticMembers = this.constructor as typeof TableServiceClient;
+ if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) {
+ throw new Error('Please set either universe_domain or universeDomain, but not both.');
+ }
+ const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined;
+ this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com';
+ this._servicePath = 'bigquery.' + this._universeDomain;
+ const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath;
+ this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint);
+ const port = opts?.port || staticMembers.port;
+ const clientConfig = opts?.clientConfig ?? {};
+ const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function');
+ opts = Object.assign({servicePath, port, clientConfig, fallback}, opts);
+
+ // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case.
+ if (servicePath !== this._servicePath && !('scopes' in opts)) {
+ opts['scopes'] = staticMembers.scopes;
+ }
+
+ // Load google-gax module synchronously if needed
+ if (!gaxInstance) {
+ gaxInstance = gax as typeof gax;
+ }
+
+ // Choose either gRPC or proto-over-HTTP implementation of google-gax.
+ this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance;
+
+ // Create a `gaxGrpc` object, with any grpc-specific options sent to the client.
+ this._gaxGrpc = new this._gaxModule.GrpcClient(opts);
+
+ // Save options to use in initialize() method.
+ this._opts = opts;
+
+ // Save the auth object to the client, for use by other methods.
+ this.auth = (this._gaxGrpc.auth as gax.GoogleAuth);
+
+ // Set useJWTAccessWithScope on the auth object.
+ this.auth.useJWTAccessWithScope = true;
+
+ // Set defaultServicePath on the auth object.
+ this.auth.defaultServicePath = this._servicePath;
+
+ // Set the default scopes in auth client if needed.
+ if (servicePath === this._servicePath) {
+ this.auth.defaultScopes = staticMembers.scopes;
+ }
+
+ // Add ESM headers
+ const isEsm = true;
+ const isEsmString = isEsm ? '-esm' : '-cjs';
+ // Determine the client header string.
+ const clientHeader = [
+ `gax/${this._gaxModule.version}`,
+ `gapic/${version}`,
+ ];
+ if (typeof process === 'object' && 'versions' in process) {
+ clientHeader.push(`gl-node/${process.versions.node}${isEsmString}`);
+ } else {
+ clientHeader.push(`gl-web/${this._gaxModule.version}`);
+ }
+ if (!opts.fallback) {
+ clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`);
+ } else if (opts.fallback === 'rest') {
+ clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`);
+ }
+ if (opts.libName && opts.libVersion) {
+ clientHeader.push(`${opts.libName}/${opts.libVersion}`);
+ }
+
+ // Load the applicable protos.
+ this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos as gax.protobuf.INamespace);
+
+ // Some of the methods on this service return "paged" results,
+ // (e.g. 50 results at a time, with tokens to get subsequent
+ // pages). Denote the keys used for pagination and results.
+ this.descriptors.page = {
+ listTables:
+ new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'tables')
+ };
+
+ // Put together the default options sent with requests.
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.TableService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.tableServiceStub) { + return this.tableServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.TableService. + this.tableServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.TableService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.TableService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const tableServiceStubMethods = + ['getTable', 'insertTable', 'patchTable', 'updateTable', 'deleteTable', 'listTables']; + for (const methodName of tableServiceStubMethods) { + const callPromise = this.tableServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.tableServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath, + * exists for compatibility reasons. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. 
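+ * @example
+ * // Sketch: prefer the instance property over this deprecated static getter.
+ * const endpoint = new TableServiceClient().apiEndpoint;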
+ */
+ static get apiEndpoint() {
+ if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+ process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning');
+ }
+ return 'bigquery.googleapis.com';
+ }
+
+ /**
+ * The DNS address for this API service.
+ * @returns {string} The DNS address for this service.
+ */
+ get apiEndpoint() {
+ return this._servicePath;
+ }
+
+ get universeDomain() {
+ return this._universeDomain;
+ }
+
+ /**
+ * The port for this API service.
+ * @returns {number} The default port for this service.
+ */
+ static get port() {
+ return 443;
+ }
+
+ /**
+ * The scopes needed to make gRPC calls for every method defined
+ * in this service.
+ * @returns {string[]} List of default scopes.
+ */
+ static get scopes() {
+ return [
+ 'https://www.googleapis.com/auth/bigquery',
+ 'https://www.googleapis.com/auth/cloud-platform',
+ 'https://www.googleapis.com/auth/cloud-platform.read-only'
+ ];
+ }
+
+ getProjectId(): Promise<string>;
+ getProjectId(callback: Callback<string, undefined, undefined>): void;
+ /**
+ * Return the project ID used by this class.
+ * @returns {Promise} A promise that resolves to string containing the project ID.
+ */
+ getProjectId(callback?: Callback<string, undefined, undefined>):
+ Promise<string>|void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
+ }
+
+ // -------------------
+ // -- Service calls --
+ // -------------------
+/**
+ * Gets the specified table resource by table ID.
+ * This method does not return the data in the table, it only returns the
+ * table resource, which describes the structure of this table.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Required. Project ID of the requested table
+ * @param {string} request.datasetId
+ * Required. Dataset ID of the requested table
+ * @param {string} request.tableId
+ * Required. Table ID of the requested table
+ * @param {string} request.selectedFields
+ * List of table schema fields to return (comma-separated).
+ * If unspecified, all fields are returned.
+ * A fieldMask cannot be used here because the fields will automatically be
+ * converted from camelCase to snake_case and the conversion will fail if
+ * there are underscores. Since these are fields in BigQuery table schemas,
+ * underscores are allowed.
+ * @param {google.cloud.bigquery.v2.GetTableRequest.TableMetadataView} [request.view]
+ * Optional. Specifies the view that determines which table information is
+ * returned. By default, basic table information and storage statistics
+ * (STORAGE_STATS) are returned.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Table|Table}.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ * for more details and examples.
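+ * @example
+ * // A hedged sketch with hypothetical IDs; getTable returns the table
+ * // resource (schema and metadata), never the row data itself.
+ * const [table] = await client.getTable({
+ *   projectId: 'my-project',
+ *   datasetId: 'my_dataset',
+ *   tableId: 'my_table',
+ * });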
+ * @example include:samples/generated/v2/table_service.get_table.js + * region_tag:bigquery_v2_generated_TableService_GetTable_async + */ + getTable( + request?: protos.google.cloud.bigquery.v2.IGetTableRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|undefined, {}|undefined + ]>; + getTable( + request: protos.google.cloud.bigquery.v2.IGetTableRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|null|undefined, + {}|null|undefined>): void; + getTable( + request: protos.google.cloud.bigquery.v2.IGetTableRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|null|undefined, + {}|null|undefined>): void; + getTable( + request?: protos.google.cloud.bigquery.v2.IGetTableRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getTable(request, options, callback); + } +/** + * Creates a new, empty table in the dataset. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the new table + * @param {string} request.datasetId + * Required. Dataset ID of the new table + * @param {google.cloud.bigquery.v2.Table} request.table + * Required. A tables resource to insert + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Table|Table}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
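+ * @example
+ * // Sketch with hypothetical IDs and a minimal single-field schema.
+ * const [table] = await client.insertTable({
+ *   projectId: 'my-project',
+ *   datasetId: 'my_dataset',
+ *   table: {
+ *     tableReference: {projectId: 'my-project', datasetId: 'my_dataset', tableId: 'new_table'},
+ *     schema: {fields: [{name: 'id', type: 'STRING'}]},
+ *   },
+ * });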
+ * @example include:samples/generated/v2/table_service.insert_table.js + * region_tag:bigquery_v2_generated_TableService_InsertTable_async + */ + insertTable( + request?: protos.google.cloud.bigquery.v2.IInsertTableRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|undefined, {}|undefined + ]>; + insertTable( + request: protos.google.cloud.bigquery.v2.IInsertTableRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|null|undefined, + {}|null|undefined>): void; + insertTable( + request: protos.google.cloud.bigquery.v2.IInsertTableRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|null|undefined, + {}|null|undefined>): void; + insertTable( + request?: protos.google.cloud.bigquery.v2.IInsertTableRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.insertTable(request, options, callback); + } +/** + * Updates information in an existing table. The update method replaces the + * entire table resource, whereas the patch method only replaces fields that + * are provided in the submitted table resource. + * This method supports RFC5789 patch semantics. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the table to update + * @param {string} request.datasetId + * Required. Dataset ID of the table to update + * @param {string} request.tableId + * Required. Table ID of the table to update + * @param {google.cloud.bigquery.v2.Table} request.table + * Required. A tables resource which will replace or patch the specified table + * @param {boolean} [request.autodetectSchema] + * Optional. When true will autodetect schema, else will keep original schema. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Table|Table}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
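+ * @example
+ * // Sketch: patch only the description; unspecified fields are preserved
+ * // (unlike updateTable, which replaces the whole resource). IDs are hypothetical.
+ * const [table] = await client.patchTable({
+ *   projectId: 'my-project',
+ *   datasetId: 'my_dataset',
+ *   tableId: 'my_table',
+ *   table: {description: 'Nightly load of clickstream events'},
+ * });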
+ * @example include:samples/generated/v2/table_service.patch_table.js + * region_tag:bigquery_v2_generated_TableService_PatchTable_async + */ + patchTable( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|undefined, {}|undefined + ]>; + patchTable( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): void; + patchTable( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): void; + patchTable( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? '', + }); + this.initialize(); + return this.innerApiCalls.patchTable(request, options, callback); + } +/** + * Updates information in an existing table. The update method replaces the + * entire Table resource, whereas the patch method only replaces fields that + * are provided in the submitted Table resource. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the table to update + * @param {string} request.datasetId + * Required. Dataset ID of the table to update + * @param {string} request.tableId + * Required. Table ID of the table to update + * @param {google.cloud.bigquery.v2.Table} request.table + * Required. A tables resource which will replace or patch the specified table + * @param {boolean} [request.autodetectSchema] + * Optional. When true will autodetect schema, else will keep original schema. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Table|Table}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
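+ * @example
+ * // Sketch: update replaces the entire table resource, so fetch it first,
+ * // modify it, and send the full object back. IDs are hypothetical.
+ * const [existing] = await client.getTable({projectId: 'my-project', datasetId: 'my_dataset', tableId: 'my_table'});
+ * existing.description = 'Refreshed description';
+ * const [updated] = await client.updateTable({
+ *   projectId: 'my-project',
+ *   datasetId: 'my_dataset',
+ *   tableId: 'my_table',
+ *   table: existing,
+ * });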
+ * @example include:samples/generated/v2/table_service.update_table.js + * region_tag:bigquery_v2_generated_TableService_UpdateTable_async + */ + updateTable( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|undefined, {}|undefined + ]>; + updateTable( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): void; + updateTable( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): void; + updateTable( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? '', + }); + this.initialize(); + return this.innerApiCalls.updateTable(request, options, callback); + } +/** + * Deletes the table specified by tableId from the dataset. + * If the table contains data, all the data will be deleted. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the table to delete + * @param {string} request.datasetId + * Required. Dataset ID of the table to delete + * @param {string} request.tableId + * Required. Table ID of the table to delete + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
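+ * @example
+ * // Sketch: deletes the table and all of the data it contains. IDs are hypothetical.
+ * await client.deleteTable({
+ *   projectId: 'my-project',
+ *   datasetId: 'my_dataset',
+ *   tableId: 'my_table',
+ * });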
+ * @example include:samples/generated/v2/table_service.delete_table.js
+ * region_tag:bigquery_v2_generated_TableService_DeleteTable_async
+ */
+ deleteTable(
+ request?: protos.google.cloud.bigquery.v2.IDeleteTableRequest,
+ options?: CallOptions):
+ Promise<[
+ protos.google.protobuf.IEmpty,
+ protos.google.cloud.bigquery.v2.IDeleteTableRequest|undefined, {}|undefined
+ ]>;
+ deleteTable(
+ request: protos.google.cloud.bigquery.v2.IDeleteTableRequest,
+ options: CallOptions,
+ callback: Callback<
+ protos.google.protobuf.IEmpty,
+ protos.google.cloud.bigquery.v2.IDeleteTableRequest|null|undefined,
+ {}|null|undefined>): void;
+ deleteTable(
+ request: protos.google.cloud.bigquery.v2.IDeleteTableRequest,
+ callback: Callback<
+ protos.google.protobuf.IEmpty,
+ protos.google.cloud.bigquery.v2.IDeleteTableRequest|null|undefined,
+ {}|null|undefined>): void;
+ deleteTable(
+ request?: protos.google.cloud.bigquery.v2.IDeleteTableRequest,
+ optionsOrCallback?: CallOptions|Callback<
+ protos.google.protobuf.IEmpty,
+ protos.google.cloud.bigquery.v2.IDeleteTableRequest|null|undefined,
+ {}|null|undefined>,
+ callback?: Callback<
+ protos.google.protobuf.IEmpty,
+ protos.google.cloud.bigquery.v2.IDeleteTableRequest|null|undefined,
+ {}|null|undefined>):
+ Promise<[
+ protos.google.protobuf.IEmpty,
+ protos.google.cloud.bigquery.v2.IDeleteTableRequest|undefined, {}|undefined
+ ]>|void {
+ request = request || {};
+ let options: CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
+ options = {};
+ }
+ else {
+ options = optionsOrCallback as CallOptions;
+ }
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ?? '',
+ 'table_id': request.tableId ?? '',
+ });
+ this.initialize();
+ return this.innerApiCalls.deleteTable(request, options, callback);
+ }
+
+ /**
+ * Lists all tables in the specified dataset. Requires the READER dataset
+ * role.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Required. Project ID of the tables to list
+ * @param {string} request.datasetId
+ * Required. Dataset ID of the tables to list
+ * @param {google.protobuf.UInt32Value|number} request.maxResults
+ * The maximum number of results to return in a single response page.
+ * Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ * Page token, returned by a previous call, to request the next page of
+ * results.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.ListFormatTable|ListFormatTable}.
+ * The client library will perform auto-pagination by default: it will call the API as many
+ * times as needed and will merge results from all the pages into this array.
+ * Note that it can affect your quota.
+ * We recommend using `listTablesAsync()`
+ * method described below for async iteration which you can stop as needed.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ */
+ listTables(
+ request?: protos.google.cloud.bigquery.v2.IListTablesRequest,
+ options?: CallOptions):
+ Promise<[
+ protos.google.cloud.bigquery.v2.IListFormatTable[],
+ protos.google.cloud.bigquery.v2.IListTablesRequest|null,
+ protos.google.cloud.bigquery.v2.ITableList
+ ]>;
+ listTables(
+ request: protos.google.cloud.bigquery.v2.IListTablesRequest,
+ options: CallOptions,
+ callback: PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListTablesRequest,
+ protos.google.cloud.bigquery.v2.ITableList|null|undefined,
+ protos.google.cloud.bigquery.v2.IListFormatTable>): void;
+ listTables(
+ request: protos.google.cloud.bigquery.v2.IListTablesRequest,
+ callback: PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListTablesRequest,
+ protos.google.cloud.bigquery.v2.ITableList|null|undefined,
+ protos.google.cloud.bigquery.v2.IListFormatTable>): void;
+ listTables(
+ request?: protos.google.cloud.bigquery.v2.IListTablesRequest,
+ optionsOrCallback?: CallOptions|PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListTablesRequest,
+ protos.google.cloud.bigquery.v2.ITableList|null|undefined,
+ protos.google.cloud.bigquery.v2.IListFormatTable>,
+ callback?: PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListTablesRequest,
+ protos.google.cloud.bigquery.v2.ITableList|null|undefined,
+ protos.google.cloud.bigquery.v2.IListFormatTable>):
+ Promise<[
+ protos.google.cloud.bigquery.v2.IListFormatTable[],
+ protos.google.cloud.bigquery.v2.IListTablesRequest|null,
+ protos.google.cloud.bigquery.v2.ITableList
+ ]>|void {
+ request = request || {};
+ // Converts a plain number to a Uint32 wrapper value for non-compliant APIs.
+ if (request.maxResults && typeof request.maxResults === 'number') {
+ const maxResultsObject = {value: request.maxResults};
+ request.maxResults = maxResultsObject;
+ }
+ let options: CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
+ options = {};
+ }
+ else {
+ options = optionsOrCallback as CallOptions;
+ }
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ?? '',
+ });
+ this.initialize();
+ return this.innerApiCalls.listTables(request, options, callback);
+ }
+
+/**
+ * Equivalent to `listTables`, but returns a NodeJS Stream object.
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Required. Project ID of the tables to list
+ * @param {string} request.datasetId
+ * Required. Dataset ID of the tables to list
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ * The maximum number of results to return in a single response page.
+ * Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ * Page token, returned by a previous call, to request the next page of
+ * results.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ * An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.ListFormatTable|ListFormatTable} on 'data' event.
+ * The client library will perform auto-pagination by default: it will call the API as many
+ * times as needed. Note that it can affect your quota.
+ * We recommend using `listTablesAsync()`
+ * method described below for async iteration which you can stop as needed.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ */
+ listTablesStream(
+ request?: protos.google.cloud.bigquery.v2.IListTablesRequest,
+ options?: CallOptions):
+ Transform{
+ request = request || {};
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ?? '',
+ });
+ const defaultCallSettings = this._defaults['listTables'];
+ const callSettings = defaultCallSettings.merge(options);
+ this.initialize();
+ return this.descriptors.page.listTables.createStream(
+ this.innerApiCalls.listTables as GaxCall,
+ request,
+ callSettings
+ );
+ }
+
+/**
+ * Equivalent to `listTables`, but returns an iterable object.
+ *
+ * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand.
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Required. Project ID of the tables to list
+ * @param {string} request.datasetId
+ * Required. Dataset ID of the tables to list
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ * The maximum number of results to return in a single response page.
+ * Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ * Page token, returned by a previous call, to request the next page of
+ * results.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Object}
+ * An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }.
+ * When you iterate the returned iterable, each element will be an object representing
+ * {@link protos.google.cloud.bigquery.v2.ListFormatTable|ListFormatTable}. The API will be called under the hood as needed, once per the page,
+ * so you can stop the iteration when you don't need more results.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ * @example include:samples/generated/v2/table_service.list_tables.js
+ * region_tag:bigquery_v2_generated_TableService_ListTables_async
+ */
+ listTablesAsync(
+ request?: protos.google.cloud.bigquery.v2.IListTablesRequest,
+ options?: CallOptions):
+ AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatTable>{
+ request = request || {};
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ??
'',
+ });
+ const defaultCallSettings = this._defaults['listTables'];
+ const callSettings = defaultCallSettings.merge(options);
+ this.initialize();
+ return this.descriptors.page.listTables.asyncIterate(
+ this.innerApiCalls['listTables'] as GaxCall,
+ request as {},
+ callSettings
+ ) as AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatTable>;
+ }
+
+ /**
+ * Terminate the gRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ * @returns {Promise} A promise that resolves when the client is closed.
+ */
+ close(): Promise<void> {
+ if (this.tableServiceStub && !this._terminated) {
+ return this.tableServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/table_service_client_config.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/table_service_client_config.json.baseline
new file mode 100644
index 000000000..3141de741
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/table_service_client_config.json.baseline
@@ -0,0 +1,50 @@
+{
+ "interfaces": {
+ "google.cloud.bigquery.v2.TableService": {
+ "retry_codes": {
+ "non_idempotent": [],
+ "idempotent": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ]
+ },
+ "retry_params": {
+ "default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ }
+ },
+ "methods": {
+ "GetTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "InsertTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "PatchTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "UpdateTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "DeleteTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "ListTables": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ }
+ }
+ }
+ }
+}
diff --git a/baselines/bigquery-v2-esm/esm/src/v2/table_service_proto_list.json.baseline b/baselines/bigquery-v2-esm/esm/src/v2/table_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/src/v2/table_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+ "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+ "../../protos/google/cloud/bigquery/v2/clustering.proto",
+ "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+ "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+ "../../protos/google/cloud/bigquery/v2/error.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+ "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+ "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+ "../../protos/google/cloud/bigquery/v2/job.proto",
+ "../../protos/google/cloud/bigquery/v2/job_config.proto",
"../../protos/google/cloud/bigquery/v2/job_creation_reason.proto", + "../../protos/google/cloud/bigquery/v2/job_reference.proto", + "../../protos/google/cloud/bigquery/v2/job_stats.proto", + "../../protos/google/cloud/bigquery/v2/job_status.proto", + "../../protos/google/cloud/bigquery/v2/json_extension.proto", + "../../protos/google/cloud/bigquery/v2/location_metadata.proto", + "../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2-esm/esm/system-test/fixtures/sample/src/index.cjs.baseline b/baselines/bigquery-v2-esm/esm/system-test/fixtures/sample/src/index.cjs.baseline new file mode 100644 index 000000000..0a07e236d --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/system-test/fixtures/sample/src/index.cjs.baseline @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + +/* eslint-disable node/no-missing-require, no-unused-vars, no-undef */ +const bigquery = require('bigquery'); + +function main() { + const datasetServiceClient = new bigquery.DatasetServiceClient(); + const jobServiceClient = new bigquery.JobServiceClient(); + const modelServiceClient = new bigquery.ModelServiceClient(); + const projectServiceClient = new bigquery.ProjectServiceClient(); + const routineServiceClient = new bigquery.RoutineServiceClient(); + const rowAccessPolicyServiceClient = new bigquery.RowAccessPolicyServiceClient(); + const tableServiceClient = new bigquery.TableServiceClient(); +} + +main(); diff --git a/baselines/bigquery-v2-esm/esm/system-test/fixtures/sample/src/index.js.baseline b/baselines/bigquery-v2-esm/esm/system-test/fixtures/sample/src/index.js.baseline new file mode 100644 index 000000000..02f5b44e1 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/system-test/fixtures/sample/src/index.js.baseline @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + +/* eslint-disable node/no-missing-require, no-unused-vars, no-undef */ +import * as bigquery from 'bigquery'; + +function main() { + const datasetServiceClient = new bigquery.DatasetServiceClient(); + const jobServiceClient = new bigquery.JobServiceClient(); + const modelServiceClient = new bigquery.ModelServiceClient(); + const projectServiceClient = new bigquery.ProjectServiceClient(); + const routineServiceClient = new bigquery.RoutineServiceClient(); + const rowAccessPolicyServiceClient = new bigquery.RowAccessPolicyServiceClient(); + const tableServiceClient = new bigquery.TableServiceClient(); +} + +main(); diff --git a/baselines/bigquery-v2-esm/esm/system-test/fixtures/sample/src/index.ts.baseline b/baselines/bigquery-v2-esm/esm/system-test/fixtures/sample/src/index.ts.baseline new file mode 100644 index 000000000..86325aa90 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/system-test/fixtures/sample/src/index.ts.baseline @@ -0,0 +1,69 @@ +/* eslint-disable node/no-missing-require, no-unused-vars, no-undef */ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. 
** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {DatasetServiceClient, JobServiceClient, ModelServiceClient, ProjectServiceClient, RoutineServiceClient, RowAccessPolicyServiceClient, TableServiceClient} from 'bigquery'; + +// check that the client class type name can be used +function doStuffWithDatasetServiceClient(client: DatasetServiceClient) { + client.close(); +} +function doStuffWithJobServiceClient(client: JobServiceClient) { + client.close(); +} +function doStuffWithModelServiceClient(client: ModelServiceClient) { + client.close(); +} +function doStuffWithProjectServiceClient(client: ProjectServiceClient) { + client.close(); +} +function doStuffWithRoutineServiceClient(client: RoutineServiceClient) { + client.close(); +} +function doStuffWithRowAccessPolicyServiceClient(client: RowAccessPolicyServiceClient) { + client.close(); +} +function doStuffWithTableServiceClient(client: TableServiceClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const datasetServiceClient = new DatasetServiceClient(); + doStuffWithDatasetServiceClient(datasetServiceClient); + // check that the client instance can be created + const jobServiceClient = new JobServiceClient(); + doStuffWithJobServiceClient(jobServiceClient); + // check that the client instance can be created + const modelServiceClient = new ModelServiceClient(); + doStuffWithModelServiceClient(modelServiceClient); + // check that the client instance can be created + const projectServiceClient = new ProjectServiceClient(); + doStuffWithProjectServiceClient(projectServiceClient); + // check that the client instance can be created + const routineServiceClient = new RoutineServiceClient(); + doStuffWithRoutineServiceClient(routineServiceClient); + // check that the client instance can be created + const rowAccessPolicyServiceClient = new RowAccessPolicyServiceClient(); + doStuffWithRowAccessPolicyServiceClient(rowAccessPolicyServiceClient); + // check that the client instance can be created + const tableServiceClient = new TableServiceClient(); + doStuffWithTableServiceClient(tableServiceClient); +} + +main(); diff --git a/baselines/bigquery-v2-esm/esm/system-test/install.ts.baseline b/baselines/bigquery-v2-esm/esm/system-test/install.ts.baseline new file mode 100644 index 000000000..e1ba14622 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/system-test/install.ts.baseline @@ -0,0 +1,55 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import {packNTest} from 'pack-n-play'; +import {readFileSync} from 'fs'; +import {describe, it} from 'mocha'; + +describe('📦 pack-n-play test', () => { + it('TypeScript', async function() { + this.timeout(300000); + await packNTest({ + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./esm/system-test/fixtures/sample/src/index.ts').toString() + } + }); + }); + + it('ESM module', async function() { + this.timeout(300000); + await packNTest({ + sample: { + description: 'Should be able to import using ESM', + esm: readFileSync('./esm/system-test/fixtures/sample/src/index.js').toString(), + }, + }); + }); + + it('CJS module', async function() { + this.timeout(300000); + await packNTest({ + sample: { + description: 'Should be able to import using CJS', + cjs: readFileSync('./esm/system-test/fixtures/sample/src/index.cjs').toString(), + }, + }); + }); + +}); diff --git a/baselines/bigquery-v2-esm/esm/test/gapic_dataset_service_v2.ts.baseline b/baselines/bigquery-v2-esm/esm/test/gapic_dataset_service_v2.ts.baseline new file mode 100644 index 000000000..1267b9552 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/test/gapic_dataset_service_v2.ts.baseline @@ -0,0 +1,1223 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +// @ts-ignore +import * as protos from '../../protos/protos.js'; +import assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as datasetserviceModule from '../src/index.js'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; + +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON( + JSON.parse( + fs.readFileSync(path.join(dirname, '..', '..', 'protos/protos.json'), 'utf8') + )) + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type?.fields[field]?.resolvedType as protobuf.Type; + } + return type?.fields[fields[fields.length - 1]]?.defaultValue ?? 
null; +} + +function generateSampleMessage<T extends object>(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubPageStreamingCall<ResponseType>(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall<ResponseType>(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v2.DatasetServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new datasetserviceModule.v2.DatasetServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new datasetserviceModule.v2.DatasetServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = datasetserviceModule.v2.DatasetServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = datasetserviceModule.v2.DatasetServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({universe_domain: 
'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new datasetserviceModule.v2.DatasetServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new datasetserviceModule.v2.DatasetServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new datasetserviceModule.v2.DatasetServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = datasetserviceModule.v2.DatasetServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new datasetserviceModule.v2.DatasetServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.datasetServiceStub, undefined); + await client.initialize(); + assert(client.datasetServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.datasetServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.datasetServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as 
SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getDataset', () => { + it('invokes getDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.getDataset = stubSimpleCall(expectedResponse); + const [response] = await client.getDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.getDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getDataset(request), expectedError); + }); + }); + + describe('insertDataset', () => { + it('invokes insertDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const 
expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.insertDataset = stubSimpleCall(expectedResponse); + const [response] = await client.insertDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.insertDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.insertDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.insertDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.insertDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.insertDataset(request), expectedError); + }); + }); + + describe('patchDataset', () => { + it('invokes patchDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.patchDataset = stubSimpleCall(expectedResponse); + const [response] = await client.patchDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.patchDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.patchDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.patchDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.patchDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.patchDataset(request), expectedError); + }); + }); + + describe('updateDataset', () => { + it('invokes updateDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + 
getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.updateDataset = stubSimpleCall(expectedResponse); + const [response] = await client.updateDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.updateDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.updateDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.updateDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.updateDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.updateDataset(request), expectedError); + }); + }); + + describe('deleteDataset', () => { + it('invokes deleteDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteDataset = stubSimpleCall(expectedResponse); + const [response] = await client.deleteDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteDataset( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteDataset(request), expectedError); + }); + }); + + describe('undeleteDataset', () => { + it('invokes undeleteDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UndeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.undeleteDataset = stubSimpleCall(expectedResponse); + const [response] = await client.undeleteDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes undeleteDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UndeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.undeleteDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.undeleteDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes undeleteDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UndeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.undeleteDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.undeleteDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes undeleteDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UndeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.undeleteDataset(request), expectedError); + }); + }); + + describe('listDatasets', () => { + it('invokes listDatasets without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + ]; + client.innerApiCalls.listDatasets = stubSimpleCall(expectedResponse); + const [response] = await client.listDatasets(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listDatasets without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + ]; + client.innerApiCalls.listDatasets = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listDatasets( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IListFormatDataset[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listDatasets with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listDatasets = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listDatasets(request), expectedError); + const actualRequest = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listDatasetsStream without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + ]; + client.descriptors.page.listDatasets.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listDatasetsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatDataset[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatDataset) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listDatasets.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listDatasets, request)); + assert( + (client.descriptors.page.listDatasets.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listDatasetsStream with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listDatasets.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listDatasetsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatDataset[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatDataset) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listDatasets.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listDatasets, request)); + assert( + (client.descriptors.page.listDatasets.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listDatasets without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + ]; + client.descriptors.page.listDatasets.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IListFormatDataset[] = []; + const iterable = client.listDatasetsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listDatasets.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listDatasets.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listDatasets with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listDatasets.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listDatasetsAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.bigquery.v2.IListFormatDataset[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listDatasets.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listDatasets.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); +}); diff --git a/baselines/bigquery-v2-esm/esm/test/gapic_job_service_v2.ts.baseline b/baselines/bigquery-v2-esm/esm/test/gapic_job_service_v2.ts.baseline new file mode 100644 index 000000000..f3573a7dc --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/test/gapic_job_service_v2.ts.baseline @@ -0,0 +1,1211 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +// @ts-ignore +import * as protos from '../../protos/protos.js'; +import assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as jobserviceModule from '../src/index.js'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; + +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON( + JSON.parse( + fs.readFileSync(path.join(dirname, '..', '..', 'protos/protos.json'), 'utf8') + )) + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type?.fields[field]?.resolvedType as protobuf.Type; + } + return type?.fields[fields[fields.length - 1]]?.defaultValue ?? null; +} + +function generateSampleMessage<T extends object>(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubPageStreamingCall<ResponseType>(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? 
sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall<ResponseType>(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v2.JobServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new jobserviceModule.v2.JobServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new jobserviceModule.v2.JobServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = jobserviceModule.v2.JobServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = jobserviceModule.v2.JobServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new jobserviceModule.v2.JobServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new jobserviceModule.v2.JobServiceClient({universe_domain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new jobserviceModule.v2.JobServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = 
new jobserviceModule.v2.JobServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new jobserviceModule.v2.JobServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = jobserviceModule.v2.JobServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new jobserviceModule.v2.JobServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new jobserviceModule.v2.JobServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.jobServiceStub, undefined); + await client.initialize(); + assert(client.jobServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.jobServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.jobServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('cancelJob', () => { + it('invokes cancelJob without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.CancelJobRequest() + ); + const defaultValue1 = + 
getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.JobCancelResponse() + ); + client.innerApiCalls.cancelJob = stubSimpleCall(expectedResponse); + const [response] = await client.cancelJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes cancelJob without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.JobCancelResponse() + ); + client.innerApiCalls.cancelJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.cancelJob( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IJobCancelResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes cancelJob with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.cancelJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.cancelJob(request), expectedError); + const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes cancelJob with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.cancelJob(request), expectedError); + }); + }); + + describe('getJob', () => { + it('invokes getJob without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Job() + ); + client.innerApiCalls.getJob = stubSimpleCall(expectedResponse); + const [response] = await client.getJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getJob without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Job() + ); + client.innerApiCalls.getJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getJob( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IJob|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getJob with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getJob(request), expectedError); + const actualRequest = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getJob with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getJob(request), expectedError); + }); + }); + + describe('insertJob', () => { + it('invokes insertJob without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertJobRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Job() + ); + client.innerApiCalls.insertJob = stubSimpleCall(expectedResponse); + const [response] = await client.insertJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertJob without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertJobRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Job() + ); + client.innerApiCalls.insertJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.insertJob( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IJob|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertJob with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertJobRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.insertJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.insertJob(request), expectedError); + const actualRequest = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertJob with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertJobRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.insertJob(request), expectedError); + }); + }); + + describe('deleteJob', () => { + it('invokes deleteJob without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteJob = stubSimpleCall(expectedResponse); + const [response] = await client.deleteJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteJob without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteJob( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteJob with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteJob(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteJob with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteJob(request), expectedError); + }); + }); + + describe('getQueryResults', () => { + it('invokes getQueryResults without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + 
getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsResponse() + ); + client.innerApiCalls.getQueryResults = stubSimpleCall(expectedResponse); + const [response] = await client.getQueryResults(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getQueryResults without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsResponse() + ); + client.innerApiCalls.getQueryResults = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getQueryResults( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IGetQueryResultsResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getQueryResults with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getQueryResults = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getQueryResults(request), expectedError); + const actualRequest = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getQueryResults with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getQueryResults(request), expectedError); + }); + }); + + describe('query', () => { + it('invokes query without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PostQueryRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PostQueryRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.QueryResponse() + ); + client.innerApiCalls.query = stubSimpleCall(expectedResponse); + const [response] = await client.query(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes query without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PostQueryRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PostQueryRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.QueryResponse() + ); + client.innerApiCalls.query = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.query( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IQueryResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes query with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PostQueryRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PostQueryRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.query = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.query(request), expectedError); + const actualRequest = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes query with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PostQueryRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PostQueryRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.query(request), expectedError); + }); + }); + + describe('listJobs', () => { + it('invokes listJobs without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`;
+      const expectedResponse = [
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()),
+      ];
+      client.innerApiCalls.listJobs = stubSimpleCall(expectedResponse);
+      const [response] = await client.listJobs(request);
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.listJobs as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes listJobs without error using callback', async () => {
+      const client = new jobserviceModule.v2.JobServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.ListJobsRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`;
+      const expectedResponse = [
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()),
+      ];
+      client.innerApiCalls.listJobs = stubSimpleCallWithCallback(expectedResponse);
+      const promise = new Promise((resolve, reject) => {
+        client.listJobs(
+          request,
+          (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IListFormatJob[]|null) => {
+            if (err) {
+              reject(err);
+            } else {
+              resolve(result);
+            }
+          });
+      });
+      const response = await promise;
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.listJobs as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes listJobs with error', async () => {
+      const client = new jobserviceModule.v2.JobServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.ListJobsRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ??
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listJobs = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listJobs(request), expectedError); + const actualRequest = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listJobsStream without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + ]; + client.descriptors.page.listJobs.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listJobsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatJob[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatJob) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listJobs, request)); + assert( + (client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listJobsStream with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listJobs.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listJobsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatJob[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatJob) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listJobs, request)); + assert( + (client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listJobs without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + ]; + client.descriptors.page.listJobs.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IListFormatJob[] = []; + const iterable = client.listJobsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listJobs.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listJobs.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listJobs with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`;
+      const expectedError = new Error('expected');
+      client.descriptors.page.listJobs.asyncIterate = stubAsyncIterationCall(undefined, expectedError);
+      const iterable = client.listJobsAsync(request);
+      await assert.rejects(async () => {
+        const responses: protos.google.cloud.bigquery.v2.IListFormatJob[] = [];
+        for await (const resource of iterable) {
+          responses.push(resource!);
+        }
+      });
+      assert.deepStrictEqual(
+        (client.descriptors.page.listJobs.asyncIterate as SinonStub)
+          .getCall(0).args[1], request);
+      assert(
+        (client.descriptors.page.listJobs.asyncIterate as SinonStub)
+          .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes(
+            expectedHeaderRequestParams
+          )
+      );
+    });
+  });
+});
diff --git a/baselines/bigquery-v2-esm/esm/test/gapic_model_service_v2.ts.baseline b/baselines/bigquery-v2-esm/esm/test/gapic_model_service_v2.ts.baseline
new file mode 100644
index 000000000..f87b9456d
--- /dev/null
+++ b/baselines/bigquery-v2-esm/esm/test/gapic_model_service_v2.ts.baseline
@@ -0,0 +1,932 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+// @ts-ignore
+import * as protos from '../../protos/protos.js';
+import assert from 'assert';
+import * as sinon from 'sinon';
+import {SinonStub} from 'sinon';
+import {describe, it} from 'mocha';
+import * as modelserviceModule from '../src/index.js';
+
+import {PassThrough} from 'stream';
+
+import {protobuf} from 'google-gax';
+import fs from 'fs';
+import path from 'path';
+import {fileURLToPath} from 'url';
+
+// @ts-ignore
+const dirname = path.dirname(fileURLToPath(import.meta.url));
+// Dynamically loaded proto JSON is needed to get the type information
+// to fill in default values for request objects
+const root = protobuf.Root.fromJSON(
+  JSON.parse(
+    fs.readFileSync(path.join(dirname, '..', '..', 'protos/protos.json'), 'utf8')
+  ));
+
+// eslint-disable-next-line @typescript-eslint/no-unused-vars
+function getTypeDefaultValue(typeName: string, fields: string[]) {
+  let type = root.lookupType(typeName) as protobuf.Type;
+  for (const field of fields.slice(0, -1)) {
+    type = type?.fields[field]?.resolvedType as protobuf.Type;
+  }
+  return type?.fields[fields[fields.length - 1]]?.defaultValue ?? null;
+}
+
+// Round-trips a message through toObject/fromObject so that every field
+// carries its default value.
+function generateSampleMessage<T extends object>(instance: T) {
+  const filledObject = (instance.constructor as typeof protobuf.Message)
+    .toObject(instance as protobuf.Message, {defaults: true});
+  return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T;
+}
+
+// Stubs a promise-based unary call: resolves with [response] or rejects with error.
+function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) {
+  return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]);
+}
+
+// Stubs a callback-based unary call: invokes the third argument as a
+// node-style callback.
+function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) {
+  return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
+}
+
+// Emits the given responses through a mock object-mode stream, for testing
+// the page-streaming surface of paginated methods.
+function stubPageStreamingCall<ResponseType>(responses?: ResponseType[], error?: Error) {
+  const pagingStub = sinon.stub();
+  if (responses) {
+    for (let i = 0; i < responses.length; ++i) {
+      pagingStub.onCall(i).callsArgWith(2, null, responses[i]);
+    }
+  }
+  const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub;
+  const mockStream = new PassThrough({
+    objectMode: true,
+    transform: transformStub,
+  });
+  // trigger as many responses as needed
+  if (responses) {
+    for (let i = 0; i < responses.length; ++i) {
+      setImmediate(() => { mockStream.write({}); });
+    }
+    setImmediate(() => { mockStream.end(); });
+  } else {
+    setImmediate(() => { mockStream.write({}); });
+    setImmediate(() => { mockStream.end(); });
+  }
+  return sinon.stub().returns(mockStream);
+}
+
+// Yields the given responses through an async iterable, for testing the
+// async-iteration surface of paginated methods.
+function stubAsyncIterationCall<ResponseType>(responses?: ResponseType[], error?: Error) {
+  let counter = 0;
+  const asyncIterable = {
+    [Symbol.asyncIterator]() {
+      return {
+        async next() {
+          if (error) {
+            return Promise.reject(error);
+          }
+          if (counter >= responses!.length) {
+            return Promise.resolve({done: true, value: undefined});
+          }
+          return Promise.resolve({done: false, value: responses![counter++]});
+        }
+      };
+    }
+  };
+  return sinon.stub().returns(asyncIterable);
+}
+
+describe('v2.ModelServiceClient', () => {
+  describe('Common methods', () => {
+    it('has apiEndpoint', () => {
+      const client = new modelserviceModule.v2.ModelServiceClient();
+      const apiEndpoint = client.apiEndpoint;
+      assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com');
+    });
+
+    it('has universeDomain', () => {
+      const client = new modelserviceModule.v2.ModelServiceClient();
+      const universeDomain = client.universeDomain;
+      assert.strictEqual(universeDomain, "googleapis.com");
+    });
+
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      it('throws DeprecationWarning if static servicePath is used', () => {
+        const stub = sinon.stub(process, 'emitWarning');
+        const servicePath = modelserviceModule.v2.ModelServiceClient.servicePath;
+        assert.strictEqual(servicePath, 'bigquery.googleapis.com');
+        assert(stub.called);
+        stub.restore();
+      });
+
+      it('throws DeprecationWarning if static apiEndpoint is used', () => {
+        const stub = sinon.stub(process, 'emitWarning');
+        const apiEndpoint = modelserviceModule.v2.ModelServiceClient.apiEndpoint;
+        assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com');
+        assert(stub.called);
+        stub.restore();
+      });
+    }
+    it('sets apiEndpoint according to universe domain camelCase', () => {
+      const client = new modelserviceModule.v2.ModelServiceClient({universeDomain: 'example.com'});
+      const servicePath = client.apiEndpoint;
+      assert.strictEqual(servicePath, 'bigquery.example.com');
+    });
+
+    it('sets apiEndpoint according to universe domain snakeCase', () => {
+      const client = new modelserviceModule.v2.ModelServiceClient({universe_domain: 'example.com'});
+      const servicePath = client.apiEndpoint;
+      assert.strictEqual(servicePath, 'bigquery.example.com');
+    });
+
+    if (typeof process === 'object' && 'env' in process) {
+      describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => {
+        it('sets apiEndpoint from environment variable', () => {
+          const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'];
+          process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com';
+          const client = new modelserviceModule.v2.ModelServiceClient();
+          const servicePath = client.apiEndpoint;
+
assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new modelserviceModule.v2.ModelServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new modelserviceModule.v2.ModelServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = modelserviceModule.v2.ModelServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new modelserviceModule.v2.ModelServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.modelServiceStub, undefined); + await client.initialize(); + assert(client.modelServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.modelServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.modelServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + 
assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getModel', () => { + it('invokes getModel without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Model() + ); + client.innerApiCalls.getModel = stubSimpleCall(expectedResponse); + const [response] = await client.getModel(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getModel without error using callback', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Model() + ); + client.innerApiCalls.getModel = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getModel( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IModel|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getModel with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getModel = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getModel(request), expectedError); + const actualRequest = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getModel with closed client', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getModel(request), expectedError); + }); + }); + + describe('patchModel', () => { + it('invokes patchModel without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Model() + ); + client.innerApiCalls.patchModel = stubSimpleCall(expectedResponse); + const [response] = await client.patchModel(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchModel without error using callback', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Model() + ); + client.innerApiCalls.patchModel = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.patchModel( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IModel|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchModel with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.patchModel = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.patchModel(request), expectedError); + const actualRequest = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchModel with closed client', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.patchModel(request), expectedError); + }); + }); + + describe('deleteModel', () => { + it('invokes deleteModel without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
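// [Editor's note, not generator output] On the error paths, stubSimpleCall(undefined,
// expectedError) installs sinon.stub().rejects(expectedError), so the promise returned
// by the client method rejects and assert.rejects can match it. A minimal sketch:
//   client.innerApiCalls.deleteModel = sinon.stub().rejects(new Error('expected'));
//   await assert.rejects(client.deleteModel(request), /expected/);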
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteModel = stubSimpleCall(expectedResponse); + const [response] = await client.deleteModel(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteModel without error using callback', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteModel = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteModel( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteModel with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteModel = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteModel(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteModel with closed client', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteModel(request), expectedError); + }); + }); + + describe('listModels', () => { + it('invokes listModels without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
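// [Editor's note, not generator output] The "with closed client" tests above install no
// stub at all: client.close() tears the transport down, so the call must reject with
// 'The client has already been closed.' before any RPC layer is reached. Sketch:
//   client.close();
//   await assert.rejects(client.deleteModel(request), /already been closed/);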
'' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + ]; + client.innerApiCalls.listModels = stubSimpleCall(expectedResponse); + const [response] = await client.listModels(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listModels without error using callback', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + ]; + client.innerApiCalls.listModels = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listModels( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IModel[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listModels with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
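// [Editor's note, not generator output] stubSimpleCall resolves to [response], mirroring
// the gax convention that unary and non-paged list calls resolve to a tuple whose first
// element is the payload; hence the destructuring used in these list tests:
//   const [models] = await client.listModels(request);  // models: the canned Model[]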
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listModels = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listModels(request), expectedError); + const actualRequest = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listModelsStream without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + ]; + client.descriptors.page.listModels.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listModelsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.Model[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.Model) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listModels.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listModels, request)); + assert( + (client.descriptors.page.listModels.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listModelsStream with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
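// [Editor's note, not generator output] The stream tests replace the page descriptor's
// createStream with a stub returning a PassThrough that replays the canned pages, then
// collect 'data' events until 'end' (or reject on 'error'). A sketch of the collection
// pattern, assuming any object-mode readable `stream`:
//   const rows: unknown[] = [];
//   stream.on('data', d => rows.push(d));
//   await new Promise((res, rej) => { stream.on('end', res); stream.on('error', rej); });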
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listModels.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listModelsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.Model[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.Model) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listModels.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listModels, request)); + assert( + (client.descriptors.page.listModels.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listModels without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + ]; + client.descriptors.page.listModels.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IModel[] = []; + const iterable = client.listModelsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listModels.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listModels.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listModels with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
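// [Editor's note, not generator output] listModelsAsync returns an async iterable backed
// by the page descriptor's asyncIterate stub; the test drains it with for-await and then
// inspects the stub's recorded arguments. Sketch:
//   for await (const model of client.listModelsAsync(request)) { /* consume page items */ }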
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listModels.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listModelsAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.bigquery.v2.IModel[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listModels.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listModels.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); +}); diff --git a/baselines/bigquery-v2-esm/esm/test/gapic_project_service_v2.ts.baseline b/baselines/bigquery-v2-esm/esm/test/gapic_project_service_v2.ts.baseline new file mode 100644 index 000000000..4de717609 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/test/gapic_project_service_v2.ts.baseline @@ -0,0 +1,331 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +// @ts-ignore +import * as protos from '../../protos/protos.js'; +import assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as projectserviceModule from '../src/index.js'; + +import {protobuf} from 'google-gax'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; + +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON( + JSON.parse( + fs.readFileSync(path.join(dirname, '..', '..', 'protos/protos.json'), 'utf8') + )) + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type?.fields[field]?.resolvedType as protobuf.Type; + } + return type?.fields[fields[fields.length - 1]]?.defaultValue ?? null; +} + +function generateSampleMessage<T extends object>(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) { + return error ? 
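// [Editor's note, not generator output] callsArgWith(2, ...) fires the third positional
// argument as a node-style callback, matching the innerApiCalls signature
// (request, options, callback): index 2 is the callback, invoked with (error) on the
// failure branch and (null, response) on the success branch below.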
sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +describe('v2.ProjectServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new projectserviceModule.v2.ProjectServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new projectserviceModule.v2.ProjectServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = projectserviceModule.v2.ProjectServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = projectserviceModule.v2.ProjectServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new projectserviceModule.v2.ProjectServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new projectserviceModule.v2.ProjectServiceClient({universe_domain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new projectserviceModule.v2.ProjectServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new projectserviceModule.v2.ProjectServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new projectserviceModule.v2.ProjectServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = projectserviceModule.v2.ProjectServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new 
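// [Editor's note, not generator output] The endpoint tests above pin down the resolution
// order: a universeDomain (or universe_domain) passed in code wins over the
// GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable, which in turn wins over the default
// 'googleapis.com'; the service prefix 'bigquery.' is always prepended.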
projectserviceModule.v2.ProjectServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.projectServiceStub, undefined); + await client.initialize(); + assert(client.projectServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.projectServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.projectServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getServiceAccount', () => { + it('invokes getServiceAccount without error', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetServiceAccountRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
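// [Editor's note, not generator output] Throughout these suites, client.initialize() is
// what actually builds the gRPC stub: before it runs, projectServiceStub is undefined,
// and close() resolves whether or not initialization ever happened, as the Common
// methods tests above verify.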
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountResponse() + ); + client.innerApiCalls.getServiceAccount = stubSimpleCall(expectedResponse); + const [response] = await client.getServiceAccount(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getServiceAccount without error using callback', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetServiceAccountRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountResponse() + ); + client.innerApiCalls.getServiceAccount = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getServiceAccount( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IGetServiceAccountResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getServiceAccount with error', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetServiceAccountRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
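// [Editor's note, not generator output] generateSampleMessage round-trips a protobuf
// message through toObject({defaults: true}) and fromObject, so every field of the
// sample request/response is populated with its proto3 default rather than left
// undefined. Sketch:
//   const req = generateSampleMessage(new protos.google.cloud.bigquery.v2.GetServiceAccountRequest());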
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getServiceAccount = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getServiceAccount(request), expectedError); + const actualRequest = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getServiceAccount with closed client', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetServiceAccountRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getServiceAccount(request), expectedError); + }); + }); +}); diff --git a/baselines/bigquery-v2-esm/esm/test/gapic_routine_service_v2.ts.baseline b/baselines/bigquery-v2-esm/esm/test/gapic_routine_service_v2.ts.baseline new file mode 100644 index 000000000..80853c361 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/test/gapic_routine_service_v2.ts.baseline @@ -0,0 +1,1127 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +// @ts-ignore +import * as protos from '../../protos/protos.js'; +import assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as routineserviceModule from '../src/index.js'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; + +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON( + JSON.parse( + fs.readFileSync(path.join(dirname, '..', '..', 'protos/protos.json'), 'utf8') + )) + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type?.fields[field]?.resolvedType as protobuf.Type; + } + return type?.fields[fields[fields.length - 1]]?.defaultValue ?? 
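// [Editor's note, not generator output] getTypeDefaultValue resolves a field's default
// by reflection over the bundled protos.json: it looks up the message type, walks any
// intermediate message-typed fields in the path, and returns the final field's
// defaultValue (or null). Sketch, using the request type below:
//   getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['projectId']); // '' for a string field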
null; +} + +function generateSampleMessage<T extends object>(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubPageStreamingCall<ResponseType>(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall<ResponseType>(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v2.RoutineServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new routineserviceModule.v2.RoutineServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new routineserviceModule.v2.RoutineServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = routineserviceModule.v2.RoutineServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = routineserviceModule.v2.RoutineServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new routineserviceModule.v2.RoutineServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new routineserviceModule.v2.RoutineServiceClient({universe_domain: 
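// [Editor's note, not generator output] stubPageStreamingCall, defined above, emulates a
// paginated server stream: an object-mode PassThrough whose transform hands back one
// canned page per write, with setImmediate() scheduling one write per page followed by
// end(). The error variant swaps in a transform that fails the first write instead.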
'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new routineserviceModule.v2.RoutineServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new routineserviceModule.v2.RoutineServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new routineserviceModule.v2.RoutineServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = routineserviceModule.v2.RoutineServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new routineserviceModule.v2.RoutineServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.routineServiceStub, undefined); + await client.initialize(); + assert(client.routineServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.routineServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.routineServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as 
SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getRoutine', () => { + it('invokes getRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.getRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.getRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.getRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getRoutine( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getRoutine = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getRoutine(request), expectedError); + const actualRequest = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getRoutine(request), expectedError); + }); + }); + + describe('insertRoutine', () => { + it('invokes insertRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.insertRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.insertRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.insertRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.insertRoutine( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.insertRoutine = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.insertRoutine(request), expectedError); + const actualRequest = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.insertRoutine(request), expectedError); + }); + }); + + describe('updateRoutine', () => { + it('invokes updateRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
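// [Editor's note, not generator output] Note that every header assertion in these suites
// uses includes(expectedHeaderRequestParams) rather than strict equality: the client may
// append additional routing parameters, so the tests only require that the expected
// key=value pairs appear somewhere in the header string.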
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.updateRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.updateRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.updateRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.updateRoutine( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.updateRoutine = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.updateRoutine(request), expectedError); + const actualRequest = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.updateRoutine(request), expectedError); + }); + }); + + describe('patchRoutine', () => { + it('invokes patchRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchRoutineRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.patchRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.patchRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes patchRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchRoutineRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.patchRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.patchRoutine( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes patchRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchRoutineRequest() + ); + const expectedError = new Error('expected'); + client.innerApiCalls.patchRoutine = 
stubSimpleCall(undefined, expectedError); + await assert.rejects(client.patchRoutine(request), expectedError); + }); + + it('invokes patchRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchRoutineRequest() + ); + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.patchRoutine(request), expectedError); + }); + }); + + describe('deleteRoutine', () => { + it('invokes deleteRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.deleteRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteRoutine( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteRoutine = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteRoutine(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteRoutine(request), expectedError); + }); + }); + + describe('listRoutines', () => { + it('invokes listRoutines without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + ]; + client.innerApiCalls.listRoutines = stubSimpleCall(expectedResponse); + const [response] = await client.listRoutines(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRoutines without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + ]; + client.innerApiCalls.listRoutines = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listRoutines( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRoutines with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listRoutines = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listRoutines(request), expectedError); + const actualRequest = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRoutinesStream without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + ]; + client.descriptors.page.listRoutines.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listRoutinesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.Routine[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.Routine) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listRoutines.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listRoutines, request)); + assert( + (client.descriptors.page.listRoutines.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listRoutinesStream with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listRoutines.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listRoutinesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.Routine[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.Routine) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listRoutines.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listRoutines, request)); + assert( + (client.descriptors.page.listRoutines.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listRoutines without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + ]; + client.descriptors.page.listRoutines.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IRoutine[] = []; + const iterable = client.listRoutinesAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listRoutines.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listRoutines.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listRoutines with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listRoutines.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listRoutinesAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.bigquery.v2.IRoutine[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listRoutines.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listRoutines.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); +}); diff --git a/baselines/bigquery-v2-esm/esm/test/gapic_row_access_policy_service_v2.ts.baseline b/baselines/bigquery-v2-esm/esm/test/gapic_row_access_policy_service_v2.ts.baseline new file mode 100644 index 000000000..f625d5d4b --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/test/gapic_row_access_policy_service_v2.ts.baseline @@ -0,0 +1,557 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +// @ts-ignore +import * as protos from '../../protos/protos.js'; +import assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as rowaccesspolicyserviceModule from '../src/index.js'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; + +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON( + JSON.parse( + fs.readFileSync(path.join(dirname, '..', '..', 'protos/protos.json'), 'utf8') + )) + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type?.fields[field]?.resolvedType as protobuf.Type; + } + return type?.fields[fields[fields.length - 1]]?.defaultValue ?? null; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? 
sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v2.RowAccessPolicyServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({universe_domain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const 
client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.rowAccessPolicyServiceStub, undefined); + await client.initialize(); + assert(client.rowAccessPolicyServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.rowAccessPolicyServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.rowAccessPolicyServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', 
private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('listRowAccessPolicies', () => { + it('invokes listRowAccessPolicies without error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + ]; + client.innerApiCalls.listRowAccessPolicies = stubSimpleCall(expectedResponse); + const [response] = await client.listRowAccessPolicies(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRowAccessPolicies without error using callback', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + ]; + client.innerApiCalls.listRowAccessPolicies = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listRowAccessPolicies( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRowAccessPolicy[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRowAccessPolicies with error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listRowAccessPolicies = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listRowAccessPolicies(request), expectedError); + const actualRequest = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRowAccessPoliciesStream without error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + ]; + client.descriptors.page.listRowAccessPolicies.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listRowAccessPoliciesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.RowAccessPolicy[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.RowAccessPolicy) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listRowAccessPolicies.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listRowAccessPolicies, request)); + assert( + (client.descriptors.page.listRowAccessPolicies.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listRowAccessPoliciesStream with error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + 
request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listRowAccessPolicies.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listRowAccessPoliciesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.RowAccessPolicy[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.RowAccessPolicy) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listRowAccessPolicies.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listRowAccessPolicies, request)); + assert( + (client.descriptors.page.listRowAccessPolicies.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listRowAccessPolicies without error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + ]; + client.descriptors.page.listRowAccessPolicies.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IRowAccessPolicy[] = []; + const iterable = client.listRowAccessPoliciesAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listRowAccessPolicies.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listRowAccessPolicies.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listRowAccessPolicies with error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listRowAccessPolicies.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listRowAccessPoliciesAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.bigquery.v2.IRowAccessPolicy[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listRowAccessPolicies.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listRowAccessPolicies.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); +}); diff --git a/baselines/bigquery-v2-esm/esm/test/gapic_table_service_v2.ts.baseline b/baselines/bigquery-v2-esm/esm/test/gapic_table_service_v2.ts.baseline new file mode 100644 index 000000000..3fae1f092 --- /dev/null +++ b/baselines/bigquery-v2-esm/esm/test/gapic_table_service_v2.ts.baseline @@ -0,0 +1,1184 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +// @ts-ignore +import * as protos from '../../protos/protos.js'; +import assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as tableserviceModule from '../src/index.js'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; +import fs from 'fs'; +import path from 'path'; +import {fileURLToPath} from 'url'; + +// @ts-ignore +const dirname = path.dirname(fileURLToPath(import.meta.url)); +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON( + JSON.parse( + fs.readFileSync(path.join(dirname, '..', '..', 'protos/protos.json'), 'utf8') + )) + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type?.fields[field]?.resolvedType as protobuf.Type; + } + return type?.fields[fields[fields.length - 1]]?.defaultValue ?? null; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? 
sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v2.TableServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new tableserviceModule.v2.TableServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new tableserviceModule.v2.TableServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = tableserviceModule.v2.TableServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = tableserviceModule.v2.TableServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new tableserviceModule.v2.TableServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new tableserviceModule.v2.TableServiceClient({universe_domain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new tableserviceModule.v2.TableServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 
'example.com'; + const client = new tableserviceModule.v2.TableServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new tableserviceModule.v2.TableServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = tableserviceModule.v2.TableServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new tableserviceModule.v2.TableServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new tableserviceModule.v2.TableServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.tableServiceStub, undefined); + await client.initialize(); + assert(client.tableServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.tableServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.tableServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getTable', () => { + it('invokes getTable without error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new 
+
+  describe('getTable', () => {
+    it('invokes getTable without error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.GetTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.Table()
+      );
+      client.innerApiCalls.getTable = stubSimpleCall(expectedResponse);
+      const [response] = await client.getTable(request);
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.getTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.getTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes getTable without error using callback', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.GetTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.Table()
+      );
+      client.innerApiCalls.getTable = stubSimpleCallWithCallback(expectedResponse);
+      const promise = new Promise((resolve, reject) => {
+        client.getTable(
+          request,
+          (err?: Error|null, result?: protos.google.cloud.bigquery.v2.ITable|null) => {
+            if (err) {
+              reject(err);
+            } else {
+              resolve(result);
+            }
+          });
+      });
+      const response = await promise;
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.getTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.getTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes getTable with error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.GetTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedError = new Error('expected');
+      client.innerApiCalls.getTable = stubSimpleCall(undefined, expectedError);
+      await assert.rejects(client.getTable(request), expectedError);
+      const actualRequest = (client.innerApiCalls.getTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.getTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes getTable with closed client', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.GetTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedError = new Error('The client has already been closed.');
+      client.close();
+      await assert.rejects(client.getTable(request), expectedError);
+    });
+  });
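+
+  // Editorial note, not generator output: each unary RPC is exercised through
+  // both of its generated surfaces: the promise form shown above and the
+  // Node-style callback form, which the tests wrap in a Promise themselves,
+  // roughly:
+  //
+  //   client.getTable(request, (err, table) => err ? reject(err) : resolve(table));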
+
+  describe('insertTable', () => {
+    it('invokes insertTable without error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.InsertTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.Table()
+      );
+      client.innerApiCalls.insertTable = stubSimpleCall(expectedResponse);
+      const [response] = await client.insertTable(request);
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.insertTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.insertTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes insertTable without error using callback', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.InsertTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.Table()
+      );
+      client.innerApiCalls.insertTable = stubSimpleCallWithCallback(expectedResponse);
+      const promise = new Promise((resolve, reject) => {
+        client.insertTable(
+          request,
+          (err?: Error|null, result?: protos.google.cloud.bigquery.v2.ITable|null) => {
+            if (err) {
+              reject(err);
+            } else {
+              resolve(result);
+            }
+          });
+      });
+      const response = await promise;
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.insertTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.insertTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes insertTable with error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.InsertTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedError = new Error('expected');
+      client.innerApiCalls.insertTable = stubSimpleCall(undefined, expectedError);
+      await assert.rejects(client.insertTable(request), expectedError);
+      const actualRequest = (client.innerApiCalls.insertTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.insertTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes insertTable with closed client', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.InsertTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedError = new Error('The client has already been closed.');
+      client.close();
+      await assert.rejects(client.insertTable(request), expectedError);
+    });
+  });
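+
+  // Editorial note, not generator output: the "closed client" cases rely on
+  // google-gax rejecting calls made after close() with the exact message
+  // asserted above ('The client has already been closed.'), so they need no
+  // innerApiCalls stubbing at all.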
+
+  describe('patchTable', () => {
+    it('invokes patchTable without error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.Table()
+      );
+      client.innerApiCalls.patchTable = stubSimpleCall(expectedResponse);
+      const [response] = await client.patchTable(request);
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.patchTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.patchTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes patchTable without error using callback', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.Table()
+      );
+      client.innerApiCalls.patchTable = stubSimpleCallWithCallback(expectedResponse);
+      const promise = new Promise((resolve, reject) => {
+        client.patchTable(
+          request,
+          (err?: Error|null, result?: protos.google.cloud.bigquery.v2.ITable|null) => {
+            if (err) {
+              reject(err);
+            } else {
+              resolve(result);
+            }
+          });
+      });
+      const response = await promise;
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.patchTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.patchTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes patchTable with error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedError = new Error('expected');
+      client.innerApiCalls.patchTable = stubSimpleCall(undefined, expectedError);
+      await assert.rejects(client.patchTable(request), expectedError);
+      const actualRequest = (client.innerApiCalls.patchTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.patchTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes patchTable with closed client', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedError = new Error('The client has already been closed.');
+      client.close();
+      await assert.rejects(client.patchTable(request), expectedError);
+    });
+  });
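+
+  // Editorial note, not generator output: patchTable and updateTable share
+  // the UpdateOrPatchTableRequest message, so the updateTable suite below is
+  // structurally identical to the patchTable suite above; only the stubbed
+  // innerApiCalls entry changes.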
+
+  describe('updateTable', () => {
+    it('invokes updateTable without error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.Table()
+      );
+      client.innerApiCalls.updateTable = stubSimpleCall(expectedResponse);
+      const [response] = await client.updateTable(request);
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.updateTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.updateTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes updateTable without error using callback', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.Table()
+      );
+      client.innerApiCalls.updateTable = stubSimpleCallWithCallback(expectedResponse);
+      const promise = new Promise((resolve, reject) => {
+        client.updateTable(
+          request,
+          (err?: Error|null, result?: protos.google.cloud.bigquery.v2.ITable|null) => {
+            if (err) {
+              reject(err);
+            } else {
+              resolve(result);
+            }
+          });
+      });
+      const response = await promise;
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.updateTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.updateTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes updateTable with error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedError = new Error('expected');
+      client.innerApiCalls.updateTable = stubSimpleCall(undefined, expectedError);
+      await assert.rejects(client.updateTable(request), expectedError);
+      const actualRequest = (client.innerApiCalls.updateTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.updateTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes updateTable with closed client', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedError = new Error('The client has already been closed.');
+      client.close();
+      await assert.rejects(client.updateTable(request), expectedError);
+    });
+  });
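+
+  // Editorial note, not generator output: deleteTable resolves with
+  // google.protobuf.Empty, so the suite below mainly verifies request
+  // pass-through and the routing header rather than a meaningful response
+  // body.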
+
+  describe('deleteTable', () => {
+    it('invokes deleteTable without error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.DeleteTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.protobuf.Empty()
+      );
+      client.innerApiCalls.deleteTable = stubSimpleCall(expectedResponse);
+      const [response] = await client.deleteTable(request);
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.deleteTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.deleteTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes deleteTable without error using callback', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.DeleteTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedResponse = generateSampleMessage(
+        new protos.google.protobuf.Empty()
+      );
+      client.innerApiCalls.deleteTable = stubSimpleCallWithCallback(expectedResponse);
+      const promise = new Promise((resolve, reject) => {
+        client.deleteTable(
+          request,
+          (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => {
+            if (err) {
+              reject(err);
+            } else {
+              resolve(result);
+            }
+          });
+      });
+      const response = await promise;
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.deleteTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.deleteTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes deleteTable with error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.DeleteTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;
+      const expectedError = new Error('expected');
+      client.innerApiCalls.deleteTable = stubSimpleCall(undefined, expectedError);
+      await assert.rejects(client.deleteTable(request), expectedError);
+      const actualRequest = (client.innerApiCalls.deleteTable as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.deleteTable as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes deleteTable with closed client', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.DeleteTableRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const defaultValue3 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['tableId']);
+      request.tableId = defaultValue3;
+      const expectedError = new Error('The client has already been closed.');
+      client.close();
+      await assert.rejects(client.deleteTable(request), expectedError);
+    });
+  });
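+
+  // Editorial note, not generator output: listTables is a paginated method,
+  // so the suite below covers all three generated surfaces: listTables (one
+  // page as an array), listTablesStream (an object-mode stream), and
+  // listTablesAsync (an async iterable). A minimal sketch of the iterable
+  // form:
+  //
+  //   for await (const table of client.listTablesAsync(request)) {
+  //     // each `table` is a ListFormatTable resource
+  //   }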
+
+  describe('listTables', () => {
+    it('invokes listTables without error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.ListTablesRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedResponse = [
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+      ];
+      client.innerApiCalls.listTables = stubSimpleCall(expectedResponse);
+      const [response] = await client.listTables(request);
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.listTables as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.listTables as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes listTables without error using callback', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.ListTablesRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedResponse = [
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+      ];
+      client.innerApiCalls.listTables = stubSimpleCallWithCallback(expectedResponse);
+      const promise = new Promise((resolve, reject) => {
+        client.listTables(
+          request,
+          (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IListFormatTable[]|null) => {
+            if (err) {
+              reject(err);
+            } else {
+              resolve(result);
+            }
+          });
+      });
+      const response = await promise;
+      assert.deepStrictEqual(response, expectedResponse);
+      const actualRequest = (client.innerApiCalls.listTables as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.listTables as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes listTables with error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.ListTablesRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedError = new Error('expected');
+      client.innerApiCalls.listTables = stubSimpleCall(undefined, expectedError);
+      await assert.rejects(client.listTables(request), expectedError);
+      const actualRequest = (client.innerApiCalls.listTables as SinonStub)
+        .getCall(0).args[0];
+      assert.deepStrictEqual(actualRequest, request);
+      const actualHeaderRequestParams = (client.innerApiCalls.listTables as SinonStub)
+        .getCall(0).args[1].otherArgs.headers['x-goog-request-params'];
+      assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams));
+    });
+
+    it('invokes listTablesStream without error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.ListTablesRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedResponse = [
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+      ];
+      client.descriptors.page.listTables.createStream = stubPageStreamingCall(expectedResponse);
+      const stream = client.listTablesStream(request);
+      const promise = new Promise((resolve, reject) => {
+        const responses: protos.google.cloud.bigquery.v2.ListFormatTable[] = [];
+        stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatTable) => {
+          responses.push(response);
+        });
+        stream.on('end', () => {
+          resolve(responses);
+        });
+        stream.on('error', (err: Error) => {
+          reject(err);
+        });
+      });
+      const responses = await promise;
+      assert.deepStrictEqual(responses, expectedResponse);
+      assert((client.descriptors.page.listTables.createStream as SinonStub)
+        .getCall(0).calledWith(client.innerApiCalls.listTables, request));
+      assert(
+        (client.descriptors.page.listTables.createStream as SinonStub)
+          .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes(
+            expectedHeaderRequestParams
+          )
+      );
+    });
+
+    it('invokes listTablesStream with error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.ListTablesRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedError = new Error('expected');
+      client.descriptors.page.listTables.createStream = stubPageStreamingCall(undefined, expectedError);
+      const stream = client.listTablesStream(request);
+      const promise = new Promise((resolve, reject) => {
+        const responses: protos.google.cloud.bigquery.v2.ListFormatTable[] = [];
+        stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatTable) => {
+          responses.push(response);
+        });
+        stream.on('end', () => {
+          resolve(responses);
+        });
+        stream.on('error', (err: Error) => {
+          reject(err);
+        });
+      });
+      await assert.rejects(promise, expectedError);
+      assert((client.descriptors.page.listTables.createStream as SinonStub)
+        .getCall(0).calledWith(client.innerApiCalls.listTables, request));
+      assert(
+        (client.descriptors.page.listTables.createStream as SinonStub)
+          .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes(
+            expectedHeaderRequestParams
+          )
+      );
+    });
+
+    it('uses async iteration with listTables without error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.ListTablesRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedResponse = [
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+        generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()),
+      ];
+      client.descriptors.page.listTables.asyncIterate = stubAsyncIterationCall(expectedResponse);
+      const responses: protos.google.cloud.bigquery.v2.IListFormatTable[] = [];
+      const iterable = client.listTablesAsync(request);
+      for await (const resource of iterable) {
+        responses.push(resource!);
+      }
+      assert.deepStrictEqual(responses, expectedResponse);
+      assert.deepStrictEqual(
+        (client.descriptors.page.listTables.asyncIterate as SinonStub)
+          .getCall(0).args[1], request);
+      assert(
+        (client.descriptors.page.listTables.asyncIterate as SinonStub)
+          .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes(
+            expectedHeaderRequestParams
+          )
+      );
+    });
+
+    it('uses async iteration with listTables with error', async () => {
+      const client = new tableserviceModule.v2.TableServiceClient({
+        credentials: {client_email: 'bogus', private_key: 'bogus'},
+        projectId: 'bogus',
+      });
+      client.initialize();
+      const request = generateSampleMessage(
+        new protos.google.cloud.bigquery.v2.ListTablesRequest()
+      );
+      const defaultValue1 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']);
+      request.projectId = defaultValue1;
+      const defaultValue2 =
+        getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']);
+      request.datasetId = defaultValue2;
+      const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;
+      const expectedError = new Error('expected');
+      client.descriptors.page.listTables.asyncIterate = stubAsyncIterationCall(undefined, expectedError);
+      const iterable = client.listTablesAsync(request);
+      await assert.rejects(async () => {
+        const responses: protos.google.cloud.bigquery.v2.IListFormatTable[] = [];
+        for await (const resource of iterable) {
+          responses.push(resource!);
+        }
+      });
+      assert.deepStrictEqual(
+        (client.descriptors.page.listTables.asyncIterate as SinonStub)
+          .getCall(0).args[1], request);
+      assert(
+        (client.descriptors.page.listTables.asyncIterate as SinonStub)
+          .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes(
+            expectedHeaderRequestParams
+          )
+      );
+    });
+  });
+});
diff --git a/baselines/bigquery-v2-esm/package.json b/baselines/bigquery-v2-esm/package.json
new file mode 100644
index 000000000..d7efb1646
--- /dev/null
+++ b/baselines/bigquery-v2-esm/package.json
@@ -0,0 +1,111 @@
+{
+  "name": "bigquery",
+  "version": "0.1.0",
+  "description": "Bigquery client for Node.js",
+  "repository": "googleapis/nodejs-bigquery",
+  "license": "Apache-2.0",
+  "author": "Google LLC",
+  "main": "./build/cjs/src/index.cjs",
+  "types": "./build/cjs/src/index.d.ts",
+  "type": "module",
+  "exports": {
+    ".": {
+      "import": {
+        "types": "./build/esm/src/index.d.ts",
+        "default": "./build/esm/src/index.js"
+      },
+      "require": {
+        "types": "./build/cjs/src/index.d.ts",
+        "default": "./build/cjs/src/index.cjs"
+      }
+    },
+    "./build/protos/protos": {
+      "import": {
+        "types": "./build/protos/protos/protos.d.ts",
+        "default": "./build/protos/protos/protos.js"
+      },
+      "require": {
+        "types": "./build/protos/protos/protos.d.ts",
+        "default": "./build/protos/protos/protos.cjs"
+      }
+    }
+  },
+  "files": [
+    "build/esm",
+    "build/cjs",
+    "build/protos",
+    "!build/esm/**/*.map",
+    "!build/cjs/**/*.map"
+  ],
+  "keywords": [
+    "google apis client",
+    "google api client",
+    "google apis",
+    "google api",
+    "google",
+    "google cloud platform",
+    "google cloud",
+    "cloud",
+    "google bigquery",
+    "bigquery",
+    "dataset service",
+    "job service",
+    "model service",
+    "project service",
+    "routine service",
+    "row access policy service",
+    "table service"
+  ],
+  "scripts": {
+    "clean": "gts clean",
+    "compile-protos": "compileProtos esm/src --esm ",
+    "docs": "jsdoc -c .jsdoc.cjs",
+    "postpack": "minifyProtoJson build/cjs && minifyProtoJson build/esm",
+    "predocs-test": "npm run docs",
+    "docs-test": "linkinator docs",
+    "fix": "gts fix",
+    "lint": "gts check",
+    "prepare": "npm run compile-protos && npm run compile",
+    "system-test:cjs": "c8 mocha build/cjs/system-test",
+    "system-test:esm": "c8 mocha build/esm/system-test",
+    "system-test": "npm run system-test:esm && npm run system-test:cjs",
+    "test:cjs": "c8 mocha build/cjs/test",
+    "test:esm": "c8 mocha build/esm/test",
+    "test": "npm run test:cjs && npm run test:esm",
+    "compile:esm": "tsc -p ./tsconfig.esm.json && cp -r esm/src/json-helper.cjs build/esm/src/json-helper.cjs",
+    "babel": "babel esm --out-dir build/cjs --ignore \"esm/**/*.d.ts\" --extensions \".ts\" --out-file-extension .cjs --copy-files",
+    "compile:cjs": "tsc -p ./tsconfig.json && npm run babel",
+    "compile": "npm run compile:esm && npm run compile:cjs && rm -rf build/protos && cp -r protos build/protos",
+    "samples-test": "cd samples/ && npm link ../ && npm i && npm test"
+  },
+  "dependencies": {
+    "google-gax": "^4.4.0"
+  },
+  "devDependencies": {
+    "@babel/cli": "^7.25.6",
+    "@babel/core": "^7.25.2",
+    "@babel/preset-env": "^7.25.4",
"@babel/preset-typescript": "^7.24.7", + "@types/mocha": "^10.0.7", + "@types/node": "^20.16.4", + "@types/sinon": "^17.0.0", + "babel-plugin-replace-import-extension": "^1.1.4", + "c8": "^10.1.2", + "gapic-tools": "^0.4.6", + "gts": "^5.3.1", + "jsdoc": "^4.0.3", + "jsdoc-region-tag": "^3.0.0", + "jsdoc-fresh": "^3.0.0", + "long": "^5.2.3", + "mocha": "^10.7.3", + "pack-n-play": "^2.0.3", + "sinon": "^18.0.0", + "typescript": "5.1.6", + "ts-loader": "^8.4.0", + "webpack": "^5.94.0", + "webpack-cli": "^4.10.0" + }, + "engines": { + "node": ">=v14" + } +} diff --git a/baselines/bigquery-v2-esm/package.json.baseline b/baselines/bigquery-v2-esm/package.json.baseline new file mode 120000 index 000000000..2ff8622f1 --- /dev/null +++ b/baselines/bigquery-v2-esm/package.json.baseline @@ -0,0 +1 @@ +package.json \ No newline at end of file diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/biglake_config.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/biglake_config.proto.baseline new file mode 100755 index 000000000..fd076037b --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/biglake_config.proto.baseline @@ -0,0 +1,62 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "BigLakeConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Configuration for BigLake managed tables. +message BigLakeConfiguration { + // Supported file formats for BigLake tables. + enum FileFormat { + // Default Value. + FILE_FORMAT_UNSPECIFIED = 0; + + // Apache Parquet format. + PARQUET = 1; + } + + // Supported table formats for BigLake tables. + enum TableFormat { + // Default Value. + TABLE_FORMAT_UNSPECIFIED = 0; + + // Apache Iceberg format. + ICEBERG = 1; + } + + // Required. The connection specifying the credentials to be used to read and + // write to external storage, such as Cloud Storage. The connection_id can + // have the form `{project}.{location}.{connection_id}` or + // `projects/{project}/locations/{location}/connections/{connection_id}". + string connection_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The fully qualified location prefix of the external folder where + // table data is stored. The '*' wildcard character is not allowed. The URI + // should be in the format `gs://bucket/path_to_table/` + string storage_uri = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The file format the table data is stored in. + FileFormat file_format = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The table format the metadata only snapshots are stored in. 
+  TableFormat table_format = 4 [(google.api.field_behavior) = REQUIRED];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/clustering.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/clustering.proto.baseline
new file mode 100755
index 000000000..b871f41ec
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/clustering.proto.baseline
@@ -0,0 +1,33 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "ClusteringProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Configures table clustering.
+message Clustering {
+  // One or more fields on which data should be clustered. Only top-level,
+  // non-repeated, simple-type fields are supported. The ordering of the
+  // clustering fields should be prioritized from most to least important
+  // for filtering purposes.
+  //
+  // Additional information on limitations can be found here:
+  // https://cloud.google.com/bigquery/docs/creating-clustered-tables#limitations
+  repeated string fields = 1;
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/data_format_options.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/data_format_options.proto.baseline
new file mode 100755
index 000000000..e2c6fb67d
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/data_format_options.proto.baseline
@@ -0,0 +1,29 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "DataFormatOptionsProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Options for data format adjustments.
+message DataFormatOptions {
+  // Optional. Output timestamp as usec int64. Default is false.
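+  // (Editorial example, not generator output: with this enabled, the
+  // timestamp 2024-01-01T00:00:00Z is returned as the int64 value
+  // 1704067200000000, i.e. microseconds since the Unix epoch.)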
+  bool use_int64_timestamp = 1 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/dataset.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/dataset.proto.baseline
new file mode 100755
index 000000000..62968e292
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/dataset.proto.baseline
@@ -0,0 +1,625 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/dataset_reference.proto";
+import "google/cloud/bigquery/v2/encryption_config.proto";
+import "google/cloud/bigquery/v2/external_catalog_dataset_options.proto";
+import "google/cloud/bigquery/v2/external_dataset_reference.proto";
+import "google/cloud/bigquery/v2/restriction_config.proto";
+import "google/cloud/bigquery/v2/routine_reference.proto";
+import "google/cloud/bigquery/v2/table_reference.proto";
+import "google/cloud/bigquery/v2/table_schema.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "DatasetProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// This is an experimental RPC service definition for the BigQuery
+// Dataset Service.
+//
+// It should not be relied on for production use cases at this time.
+service DatasetService {
+  option (google.api.default_host) = "bigquery.googleapis.com";
+  option (google.api.oauth_scopes) =
+      "https://www.googleapis.com/auth/bigquery,"
+      "https://www.googleapis.com/auth/cloud-platform,"
+      "https://www.googleapis.com/auth/cloud-platform.read-only";
+
+  // Returns the dataset specified by datasetID.
+  rpc GetDataset(GetDatasetRequest) returns (Dataset) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}"
+    };
+  }
+
+  // Creates a new empty dataset.
+  rpc InsertDataset(InsertDatasetRequest) returns (Dataset) {
+    option (google.api.http) = {
+      post: "/bigquery/v2/projects/{project_id=*}/datasets"
+      body: "dataset"
+    };
+  }
+
+  // Updates information in an existing dataset. The update method replaces the
+  // entire dataset resource, whereas the patch method only replaces fields that
+  // are provided in the submitted dataset resource.
+  // This method supports RFC5789 patch semantics.
+  rpc PatchDataset(UpdateOrPatchDatasetRequest) returns (Dataset) {
+    option (google.api.http) = {
+      patch: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}"
+      body: "dataset"
+    };
+  }
+
+  // Updates information in an existing dataset. The update method replaces the
+  // entire dataset resource, whereas the patch method only replaces fields that
+  // are provided in the submitted dataset resource.
+  rpc UpdateDataset(UpdateOrPatchDatasetRequest) returns (Dataset) {
+    option (google.api.http) = {
+      put: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}"
+      body: "dataset"
+    };
+  }
+
+  // Deletes the dataset specified by the datasetId value. Before you can delete
+  // a dataset, you must delete all its tables, either manually or by specifying
+  // deleteContents. Immediately after deletion, you can create another dataset
+  // with the same name.
+  rpc DeleteDataset(DeleteDatasetRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}"
+    };
+  }
+
+  // Lists all datasets in the specified project to which the user has been
+  // granted the READER dataset role.
+  rpc ListDatasets(ListDatasetsRequest) returns (DatasetList) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/datasets"
+    };
+  }
+
+  // Undeletes a dataset which is within time travel window based on datasetId.
+  // If a time is specified, the dataset version deleted at that time is
+  // undeleted, else the last live version is undeleted.
+  rpc UndeleteDataset(UndeleteDatasetRequest) returns (Dataset) {
+    option (google.api.http) = {
+      post: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}:undelete"
+      body: "*"
+    };
+  }
+}
+
+// Grants all resources of particular types in a particular dataset read access
+// to the current dataset.
+//
+// Similar to how individually authorized views work, updates to any resource
+// granted through its dataset (including creation of new resources) requires
+// read permission to referenced resources, plus write permission to the
+// authorizing dataset.
+message DatasetAccessEntry {
+  // Indicates the type of resources in a dataset that the entry applies to.
+  enum TargetType {
+    // Do not use. You must set a target type explicitly.
+    TARGET_TYPE_UNSPECIFIED = 0;
+
+    // This entry applies to views in the dataset.
+    VIEWS = 1;
+
+    // This entry applies to routines in the dataset.
+    ROUTINES = 2;
+  }
+
+  // The dataset this entry applies to
+  DatasetReference dataset = 1;
+
+  // Which resources in the dataset this entry applies to. Currently, only
+  // views are supported, but additional target types may be added in the
+  // future.
+  repeated TargetType target_types = 2;
+}
+
+// An object that defines dataset access for an entity.
+message Access {
+  // An IAM role ID that should be granted to the user, group,
+  // or domain specified in this access entry.
+  // The following legacy mappings will be applied:
+  //
+  // * `OWNER`: `roles/bigquery.dataOwner`
+  // * `WRITER`: `roles/bigquery.dataEditor`
+  // * `READER`: `roles/bigquery.dataViewer`
+  //
+  // This field will accept any of the above formats, but will return only
+  // the legacy format. For example, if you set this field to
+  // "roles/bigquery.dataOwner", it will be returned back as "OWNER".
+  string role = 1;
+
+  // [Pick one] An email address of a user to grant access to. For example:
+  // fred@example.com. Maps to IAM policy member "user:EMAIL" or
+  // "serviceAccount:EMAIL".
+  string user_by_email = 2;
+
+  // [Pick one] An email address of a Google Group to grant access to.
+  // Maps to IAM policy member "group:GROUP".
+  string group_by_email = 3;
+
+  // [Pick one] A domain to grant access to. Any users signed in with the domain
+  // specified will be granted the specified access. Example: "example.com".
+  // Maps to IAM policy member "domain:DOMAIN".
+  string domain = 4;
+
+  // [Pick one] A special group to grant access to. Possible values include:
+  //
+  // * projectOwners: Owners of the enclosing project.
+  // * projectReaders: Readers of the enclosing project.
+  // * projectWriters: Writers of the enclosing project.
+  // * allAuthenticatedUsers: All authenticated BigQuery users.
+  //
+  // Maps to similarly-named IAM members.
+  string special_group = 5;
+
+  // [Pick one] Some other type of member that appears in the IAM Policy but
+  // isn't a user, group, domain, or special group.
+  string iam_member = 7;
+
+  // [Pick one] A view from a different dataset to grant access to. Queries
+  // executed against that view will have read access to views/tables/routines
+  // in this dataset.
+  // The role field is not required when this field is set. If that view is
+  // updated by any user, access to the view needs to be granted again via an
+  // update operation.
+  TableReference view = 6;
+
+  // [Pick one] A routine from a different dataset to grant access to. Queries
+  // executed against that routine will have read access to
+  // views/tables/routines in this dataset. Only UDF is supported for now.
+  // The role field is not required when this field is set. If that routine is
+  // updated by any user, access to the routine needs to be granted again via
+  // an update operation.
+  RoutineReference routine = 8;
+
+  // [Pick one] A grant authorizing all resources of a particular type in a
+  // particular dataset access to this dataset. Only views are supported for
+  // now. The role field is not required when this field is set. If that dataset
+  // is deleted and re-created, its access needs to be granted again via an
+  // update operation.
+  DatasetAccessEntry dataset = 9;
+}
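+
+// (Editorial note, not generator output: a typical Access entry pairs a role
+// with exactly one principal field, e.g. in the JSON API representation:
+//   { "role": "READER", "specialGroup": "projectReaders" }
+// which is one of the default entries BigQuery adds at dataset creation, as
+// described on Dataset.access below.)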
The minimum lifetime value is 3600000 milliseconds (one
+  // hour). To clear an existing default expiration with a PATCH request, set to
+  // 0. Once this property is set, all newly-created tables in the dataset will
+  // have an expirationTime property set to the creation time plus the value in
+  // this property, and changing the value will only affect new tables, not
+  // existing ones. When the expirationTime for a given table is reached, that
+  // table will be deleted automatically.
+  // If a table's expirationTime is modified or removed before the table
+  // expires, or if you provide an explicit expirationTime when creating a
+  // table, that value takes precedence over the default expiration time
+  // indicated by this property.
+  google.protobuf.Int64Value default_table_expiration_ms = 8
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // The default partition expiration for all partitioned tables in the
+  // dataset, expressed in milliseconds.
+
+  // When new time-partitioned tables are created in a dataset where this
+  // property is set, the table will inherit this value, propagated as the
+  // `TimePartitioning.expirationMs` property on the new table. If you set
+  // `TimePartitioning.expirationMs` explicitly when creating a table,
+  // the `defaultPartitionExpirationMs` of the containing dataset is ignored.
+  //
+  // When creating a partitioned table, if `defaultPartitionExpirationMs`
+  // is set, the `defaultTableExpirationMs` value is ignored and the table
+  // will not inherit a table expiration deadline.
+  google.protobuf.Int64Value default_partition_expiration_ms = 14;
+
+  // The labels associated with this dataset. You can use these
+  // to organize and group your datasets.
+  // You can set this property when inserting or updating a dataset.
+  // See [Creating and Updating Dataset
+  // Labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#creating_and_updating_dataset_labels)
+  // for more information.
+  map<string, string> labels = 9;
+
+  // Optional. An array of objects that define dataset access for one or more
+  // entities. You can set this property when inserting or updating a dataset in
+  // order to control who is allowed to access the data. If unspecified at
+  // dataset creation time, BigQuery adds default dataset access for the
+  // following entities: access.specialGroup: projectReaders; access.role:
+  // READER; access.specialGroup: projectWriters; access.role: WRITER;
+  // access.specialGroup: projectOwners; access.role: OWNER;
+  // access.userByEmail: [dataset creator email]; access.role: OWNER;
+  // If you patch a dataset, then this field is overwritten by the patched
+  // dataset's access field. To add entities, you must supply the entire
+  // existing access array in addition to any new entities that you want to add.
+  repeated Access access = 10 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The time when this dataset was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The date when this dataset was last modified, in milliseconds
+  // since the epoch.
+  int64 last_modified_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The geographic location where the dataset should reside. See
+  // https://cloud.google.com/bigquery/docs/locations for supported
+  // locations.
+  string location = 13;
+
+  // The default encryption key for all tables in the dataset.
+ // After this property is set, the encryption key of all newly-created tables + // in the dataset is set to this value unless the table creation request or + // query explicitly overrides the key. + EncryptionConfiguration default_encryption_configuration = 16; + + // Output only. Reserved for future use. + google.protobuf.BoolValue satisfies_pzs = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Reserved for future use. + google.protobuf.BoolValue satisfies_pzi = 31 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Same as `type` in `ListFormatDataset`. + // The type of the dataset, one of: + // + // * DEFAULT - only accessible by owner and authorized accounts, + // * PUBLIC - accessible by everyone, + // * LINKED - linked dataset, + // * EXTERNAL - dataset with definition in external metadata catalog. + string type = 18 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The source dataset reference when the dataset is of type LINKED. + // For all other dataset types it is not set. This field cannot be updated + // once it is set. Any attempt to update this field using Update and Patch API + // Operations will be ignored. + LinkedDatasetSource linked_dataset_source = 19 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Metadata about the LinkedDataset. Filled out when the dataset + // type is LINKED. + LinkedDatasetMetadata linked_dataset_metadata = 29 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Reference to a read-only external dataset defined in data + // catalogs outside of BigQuery. Filled out when the dataset type is EXTERNAL. + ExternalDatasetReference external_dataset_reference = 20 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Options defining open source compatible datasets living in the + // BigQuery catalog. Contains metadata of open source database, schema or + // namespace represented by the current dataset. + ExternalCatalogDatasetOptions external_catalog_dataset_options = 32 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. TRUE if the dataset and its table names are case-insensitive, + // otherwise FALSE. By default, this is FALSE, which means the dataset and its + // table names are case-sensitive. This field does not affect routine + // references. + google.protobuf.BoolValue is_case_insensitive = 21 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Defines the default collation specification of future tables + // created in the dataset. If a table is created in this dataset without + // table-level default collation, then the table inherits the dataset default + // collation, which is applied to the string fields that do not have explicit + // collation specified. A change to this field affects only tables created + // afterwards, and does not alter the existing tables. + // The following values are supported: + // + // * 'und:ci': undetermined locale, case insensitive. + // * '': empty string. Default to case-sensitive behavior. + google.protobuf.StringValue default_collation = 22 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Defines the default rounding mode specification of new tables + // created within this dataset. During table creation, if this field is + // specified, the table within this dataset will inherit the default rounding + // mode of the dataset. Setting the default rounding mode on a table overrides + // this option. Existing tables in the dataset are unaffected. 
+  // If columns are defined during that table creation,
+  // they will immediately inherit the table's default rounding mode,
+  // unless otherwise specified.
+  TableFieldSchema.RoundingMode default_rounding_mode = 26
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Defines the time travel window in hours. The value can be from 48
+  // to 168 hours (2 to 7 days). The default value is 168 hours if this is not
+  // set.
+  google.protobuf.Int64Value max_time_travel_hours = 23
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. Tags for the dataset. To provide tags as inputs, use the
+  // `resourceTags` field.
+  repeated GcpTag tags = 24
+      [deprecated = true, (google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Updates storage_billing_model for the dataset.
+  StorageBillingModel storage_billing_model = 25
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. Restriction config for all tables and dataset. If
+  // set, restrict certain accesses on the dataset and all its tables based on
+  // the config. See [Data
+  // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress)
+  // for more details.
+  RestrictionConfig restrictions = 27 [
+    (google.api.field_behavior) = OPTIONAL,
+    (google.api.field_behavior) = OUTPUT_ONLY
+  ];
+
+  // Optional. The [tags](https://cloud.google.com/bigquery/docs/tags) attached
+  // to this dataset. Tag keys are globally unique. Tag key is expected to be in
+  // the namespaced format, for example "123456789012/environment" where
+  // 123456789012 is the ID of the parent organization or project resource for
+  // this tag key. Tag value is expected to be the short name, for example
+  // "Production". See [Tag
+  // definitions](https://cloud.google.com/iam/docs/tags-access-control#definitions)
+  // for more details.
+  map<string, string> resource_tags = 30
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A global tag managed by Resource Manager.
+// https://cloud.google.com/iam/docs/tags-access-control#definitions
+message GcpTag {
+  // Required. The namespaced friendly name of the tag key, e.g.
+  // "12345/environment" where 12345 is org id.
+  string tag_key = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The friendly short name of the tag value, e.g. "production".
+  string tag_value = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// A dataset source type which refers to another BigQuery dataset.
+message LinkedDatasetSource {
+  // The source dataset reference contains project numbers and not project ids.
+  DatasetReference source_dataset = 1;
+}
+
+// Metadata about the Linked Dataset.
+message LinkedDatasetMetadata {
+  // Specifies whether Linked Dataset is currently in a linked state or not.
+  enum LinkState {
+    // The default value.
+    // Default to the LINKED state.
+    LINK_STATE_UNSPECIFIED = 0;
+
+    // Normal Linked Dataset state. Data is queryable via the Linked Dataset.
+    LINKED = 1;
+
+    // Data publisher or owner has unlinked this Linked Dataset. It means you
+    // can no longer query or see the data in the Linked Dataset.
+    UNLINKED = 2;
+  }
+
+  // Output only. Specifies whether Linked Dataset is currently in a linked
+  // state or not.
+  LinkState link_state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Request format for getting information about a dataset.
+message GetDatasetRequest {
+  // DatasetView specifies which dataset information is returned.
+  enum DatasetView {
+    // The default value.
+    // Default to the FULL view.
DATASET_VIEW_UNSPECIFIED = 0;
+
+    // Includes metadata information for the dataset, such as location,
+    // etag, lastModifiedTime, etc.
+    METADATA = 1;
+
+    // Includes ACL information for the dataset, which defines dataset access
+    // for one or more entities.
+    ACL = 2;
+
+    // Includes both dataset metadata and ACL information.
+    FULL = 3;
+  }
+
+  // Required. Project ID of the requested dataset
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the requested dataset
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Specifies the view that determines which dataset information is
+  // returned. By default, metadata and ACL information are returned.
+  DatasetView dataset_view = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for inserting a dataset.
+message InsertDatasetRequest {
+  // Required. Project ID of the new dataset
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Datasets resource to use for the new dataset
+  Dataset dataset = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Message for updating or patching a dataset.
+message UpdateOrPatchDatasetRequest {
+  // Required. Project ID of the dataset being updated
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the dataset being updated
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Datasets resource which will replace or patch the specified
+  // dataset.
+  Dataset dataset = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for deleting a dataset.
+message DeleteDatasetRequest {
+  // Required. Project ID of the dataset being deleted
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of dataset being deleted
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // If True, delete all the tables in the dataset.
+  // If False and the dataset contains tables, the request will fail.
+  // Default is False
+  bool delete_contents = 3;
+}
+
+message ListDatasetsRequest {
+  // Required. Project ID of the datasets to be listed
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // The maximum number of results to return in a single response page.
+  // Leverage the page tokens to iterate through the entire collection.
+  google.protobuf.UInt32Value max_results = 2;
+
+  // Page token, returned by a previous call, to request the next page of
+  // results
+  string page_token = 3;
+
+  // Whether to list all datasets, including hidden ones
+  bool all = 4;
+
+  // An expression for filtering the results of the request by label.
+  // The syntax is `labels.<name>[:<value>]`.
+  // Multiple filters can be ANDed together by connecting with a space.
+  // Example: `labels.department:receiving labels.active`.
+  // See [Filtering datasets using
+  // labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+  // for details.
+  string filter = 5;
+}
+
+// A dataset resource with only a subset of fields, to be returned in a list of
+// datasets.
+message ListFormatDataset {
+  // The resource type.
+  // This property always returns the value "bigquery#dataset"
+  string kind = 1;
+
+  // The fully-qualified, unique, opaque ID of the dataset.
+  string id = 2;
+
+  // The dataset reference.
+  // Use this property to access specific parts of the dataset's ID, such as
+  // project ID or dataset ID.
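+  // For example (illustrative values only, not taken from this API's docs), a
+  // dataset reference rendered in JSON form looks like:
+  //
+  //     {"projectId": "my-project", "datasetId": "my_dataset"}
+  //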
DatasetReference dataset_reference = 3;
+
+  // The labels associated with this dataset.
+  // You can use these to organize and group your datasets.
+  map<string, string> labels = 4;
+
+  // An alternate name for the dataset. The friendly name is purely
+  // decorative in nature.
+  google.protobuf.StringValue friendly_name = 5;
+
+  // The geographic location where the dataset resides.
+  string location = 6;
+}
+
+// Response format for a page of results when listing datasets.
+message DatasetList {
+  // Output only. The resource type.
+  // This property always returns the value "bigquery#datasetList"
+  string kind = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A hash value of the results page. You can use this property to
+  // determine if the page has changed since the last request.
+  string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // A token that can be used to request the next results page. This property is
+  // omitted on the final results page.
+  string next_page_token = 3;
+
+  // An array of the dataset resources in the project.
+  // Each resource contains basic information.
+  // For full information about a particular dataset resource, use the Datasets:
+  // get method. This property is omitted when there are no datasets in the
+  // project.
+  repeated ListFormatDataset datasets = 4;
+
+  // A list of skipped locations that were unreachable. For more information
+  // about BigQuery locations, see:
+  // https://cloud.google.com/bigquery/docs/locations. Example: "europe-west5"
+  repeated string unreachable = 5;
+}
+
+// Request format for undeleting a dataset.
+message UndeleteDatasetRequest {
+  // Required. Project ID of the dataset to be undeleted
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the dataset being undeleted
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The exact time when the dataset was deleted. If not specified,
+  // the most recently deleted version is undeleted. Undeleting a dataset
+  // using deletion time is not supported.
+  google.protobuf.Timestamp deletion_time = 3
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/dataset_reference.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/dataset_reference.proto.baseline
new file mode 100755
index 000000000..03695a4c4
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/dataset_reference.proto.baseline
@@ -0,0 +1,34 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "DatasetReferenceProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Identifier for a dataset.
+message DatasetReference {
+  // Required.
A unique ID for this dataset, without the project name. The ID + // must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + // The maximum length is 1,024 characters. + string dataset_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The ID of the project containing this dataset. + string project_id = 2 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/decimal_target_types.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/decimal_target_types.proto.baseline new file mode 100755 index 000000000..72266b110 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/decimal_target_types.proto.baseline @@ -0,0 +1,40 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "DecimalTargetTypesProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// The data types that could be used as a target type when converting decimal +// values. +enum DecimalTargetType { + // Invalid type. + DECIMAL_TARGET_TYPE_UNSPECIFIED = 0; + + // Decimal values could be converted to NUMERIC + // type. + NUMERIC = 1; + + // Decimal values could be converted to BIGNUMERIC + // type. + BIGNUMERIC = 2; + + // Decimal values could be converted to STRING type. + STRING = 3; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/encryption_config.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/encryption_config.proto.baseline new file mode 100755 index 000000000..ac7ee1679 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/encryption_config.proto.baseline @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "EncryptionConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Configuration for Cloud KMS encryption settings. +message EncryptionConfiguration { + // Optional. 
Describes the Cloud KMS encryption key that will be used to + // protect destination BigQuery table. The BigQuery Service Account associated + // with your project requires access to this encryption key. + google.protobuf.StringValue kms_key_name = 1 + [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/error.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/error.proto.baseline new file mode 100755 index 000000000..9cab21c4f --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/error.proto.baseline @@ -0,0 +1,36 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Error details. +message ErrorProto { + // A short error code that summarizes the error. + string reason = 1; + + // Specifies where the error occurred, if present. + string location = 2; + + // Debugging information. This property is internal to Google and should not + // be used. + string debug_info = 3; + + // A human-readable description of the error. + string message = 4; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto.baseline new file mode 100755 index 000000000..70d0f1f40 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto.baseline @@ -0,0 +1,39 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "ExternalCatalogDatasetOptionsProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Options defining open source compatible datasets living in the BigQuery +// catalog. Contains metadata of open source database, schema +// or namespace represented by the current dataset. +message ExternalCatalogDatasetOptions { + // Optional. A map of key value pairs defining the parameters and properties + // of the open source schema. Maximum size of 2Mib. 
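+  // For example, a Hive-style property bag serialized to JSON might look like
+  // this (the keys and values here are illustrative, not defined by this API):
+  //
+  //     {"owner": "data-platform", "comment": "raw events database"}
+  //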
map<string, string> parameters = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The storage location URI for all tables in the dataset.
+  // Equivalent to hive metastore's database locationUri. Maximum length of 1024
+  // characters.
+  string default_storage_location_uri = 2
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_catalog_table_options.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_catalog_table_options.proto.baseline
new file mode 100755
index 000000000..b0833d441
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_catalog_table_options.proto.baseline
@@ -0,0 +1,87 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "ExternalCatalogTableOptionsProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Metadata about open source compatible table. The fields contained in
+// these options correspond to hive metastore's table level properties.
+message ExternalCatalogTableOptions {
+  // Optional. A map of key value pairs defining the parameters and properties
+  // of the open source table. Corresponds with hive meta store table
+  // parameters. Maximum size of 4Mib.
+  map<string, string> parameters = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A storage descriptor containing information about the physical
+  // storage of this table.
+  StorageDescriptor storage_descriptor = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The connection specifying the credentials to be used to read
+  // external storage, such as Azure Blob, Cloud Storage, or S3. The connection
+  // is needed to read the open source table from BigQuery Engine. The
+  // connection_id can have the form
+  // `<project_id>.<location_id>.<connection_id>` or
+  // `projects/<project_id>/locations/<location_id>/connections/<connection_id>`.
+  string connection_id = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Contains information about how a table's data is stored and accessed by open
+// source query engines.
+message StorageDescriptor {
+  // Optional. The physical location of the table
+  // (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+  // `gs://spark-dataproc-data/pangea-data/*`).
+  // The maximum length is 2056 bytes.
+  string location_uri = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the fully qualified class name of the InputFormat
+  // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+  // The maximum length is 128 characters.
+  string input_format = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the fully qualified class name of the OutputFormat
+  // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+  // The maximum length is 128 characters.
string output_format = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Serializer and deserializer information.
+  SerDeInfo serde_info = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Serializer and deserializer information.
+message SerDeInfo {
+  // Optional. Name of the SerDe.
+  // The maximum length is 256 characters.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. Specifies a fully-qualified class name of the serialization
+  // library that is responsible for the translation of data between table
+  // representation and the underlying low-level input and output format
+  // structures. The maximum length is 256 characters.
+  string serialization_library = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Key-value pairs that define the initialization parameters for the
+  // serialization library.
+  // Maximum size 10 Kib.
+  map<string, string> parameters = 3 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_data_config.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_data_config.proto.baseline
new file mode 100755
index 000000000..610af982a
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_data_config.proto.baseline
@@ -0,0 +1,499 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/decimal_target_types.proto";
+import "google/cloud/bigquery/v2/file_set_specification_type.proto";
+import "google/cloud/bigquery/v2/hive_partitioning.proto";
+import "google/cloud/bigquery/v2/json_extension.proto";
+import "google/cloud/bigquery/v2/map_target_type.proto";
+import "google/cloud/bigquery/v2/table_schema.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "ExternalDataConfigProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Options for external data sources.
+message AvroOptions {
+  // Optional. If sourceFormat is set to "AVRO", indicates whether to interpret
+  // logical types as the corresponding BigQuery data type (for example,
+  // TIMESTAMP), instead of using the raw type (for example, INTEGER).
+  google.protobuf.BoolValue use_avro_logical_types = 1
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Parquet Options for load and make external tables.
+message ParquetOptions {
+  // Optional. Indicates whether to infer Parquet ENUM logical type as STRING
+  // instead of BYTES by default.
+  google.protobuf.BoolValue enum_as_string = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Indicates whether to use schema inference specifically for
+  // Parquet LIST logical type.
+  google.protobuf.BoolValue enable_list_inference = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional.
Indicates how to represent a Parquet map if present. + MapTargetType map_target_type = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Information related to a CSV data source. +message CsvOptions { + // Optional. The separator character for fields in a CSV file. The separator + // is interpreted as a single byte. For files encoded in ISO-8859-1, any + // single character can be used as a separator. For files encoded in UTF-8, + // characters represented in decimal range 1-127 (U+0001-U+007F) can be used + // without any modification. UTF-8 characters encoded with multiple bytes + // (i.e. U+0080 and above) will have only the first byte used for separating + // fields. The remaining bytes will be treated as a part of the field. + // BigQuery also supports the escape sequence "\t" (U+0009) to specify a tab + // separator. The default value is comma (",", U+002C). + string field_delimiter = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The number of rows at the top of a CSV file that BigQuery will + // skip when reading the data. The default value is 0. This property is + // useful if you have header rows in the file that should be skipped. + // When autodetect is on, the behavior is the following: + // + // * skipLeadingRows unspecified - Autodetect tries to detect headers in the + // first row. If they are not detected, the row is read as data. Otherwise + // data is read starting from the second row. + // * skipLeadingRows is 0 - Instructs autodetect that there are no headers and + // data should be read starting from the first row. + // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect + // headers in row N. If headers are not detected, row N is just skipped. + // Otherwise row N is used to extract column names for the detected schema. + google.protobuf.Int64Value skip_leading_rows = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The value that is used to quote data sections in a CSV file. + // BigQuery converts the string to ISO-8859-1 encoding, and then uses the + // first byte of the encoded string to split the data in its raw, binary + // state. + // The default value is a double-quote ("). + // If your data does not contain quoted sections, + // set the property value to an empty string. + // If your data contains quoted newline characters, you must also set the + // allowQuotedNewlines property to true. + // To include the specific quote character within a quoted value, precede it + // with an additional matching quote character. For example, if you want to + // escape the default character ' " ', use ' "" '. + google.protobuf.StringValue quote = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates if BigQuery should allow quoted data sections that + // contain newline characters in a CSV file. The default value is false. + google.protobuf.BoolValue allow_quoted_newlines = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates if BigQuery should accept rows that are missing + // trailing optional columns. If true, BigQuery treats missing trailing + // columns as null values. + // If false, records with missing trailing columns are treated as bad records, + // and if there are too many bad records, an invalid error is returned in the + // job result. The default value is false. + google.protobuf.BoolValue allow_jagged_rows = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The character encoding of the data. 
+  // The supported values are UTF-8, ISO-8859-1, UTF-16BE, UTF-16LE, UTF-32BE,
+  // and UTF-32LE. The default value is UTF-8.
+  // BigQuery decodes the data after the raw, binary data has been split using
+  // the values of the quote and fieldDelimiter properties.
+  string encoding = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Indicates if the embedded ASCII control characters (the first 32
+  // characters in the ASCII-table, from '\x00' to '\x1F') are preserved.
+  google.protobuf.BoolValue preserve_ascii_control_characters = 7
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies a string that represents a null value in a CSV file.
+  // For example, if you specify "\N", BigQuery interprets "\N" as a null value
+  // when querying a CSV file.
+  // The default value is the empty string. If you set this property to a custom
+  // value, BigQuery throws an error if an empty string is present for all data
+  // types except for STRING and BYTE. For STRING and BYTE columns, BigQuery
+  // interprets the empty string as an empty value.
+  google.protobuf.StringValue null_marker = 8
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Json Options for load and make external tables.
+message JsonOptions {
+  // Optional. The character encoding of the data.
+  // The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE,
+  // and UTF-32LE. The default value is UTF-8.
+  string encoding = 1 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Information related to a Bigtable column.
+message BigtableColumn {
+  // [Required] Qualifier of the column.
+  // Columns in the parent column family that have this exact qualifier are
+  // exposed as `<family field name>.<column field name>` field.
+  // If the qualifier is a valid UTF-8 string, it can be specified in the
+  // qualifier_string field. Otherwise, a base-64 encoded value must be set to
+  // qualifier_encoded.
+  // The column field name is the same as the column qualifier. However, if the
+  // qualifier is not a valid BigQuery field identifier i.e. does not match
+  // [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as field_name.
+  google.protobuf.BytesValue qualifier_encoded = 1;
+
+  // Qualifier string.
+  google.protobuf.StringValue qualifier_string = 2;
+
+  // Optional. If the qualifier is not a valid BigQuery field identifier i.e.
+  // does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided
+  // as the column field name and is used as field name in queries.
+  string field_name = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The type to convert the value in cells of this column.
+  // The values are expected to be encoded using HBase Bytes.toBytes function
+  // when using the BINARY encoding value.
+  // Following BigQuery types are allowed (case-sensitive):
+  //
+  // * BYTES
+  // * STRING
+  // * INTEGER
+  // * FLOAT
+  // * BOOLEAN
+  // * JSON
+  //
+  // Default type is BYTES.
+  // 'type' can also be set at the column family level. However, the setting at
+  // this level takes precedence if 'type' is set at both levels.
+  string type = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The encoding of the values when the type is not STRING.
+  // Acceptable encoding values are:
+  // TEXT - indicates values are alphanumeric text strings.
+  // BINARY - indicates values are encoded using HBase Bytes.toBytes family of
+  // functions.
+  // 'encoding' can also be set at the column family level. However, the setting
+  // at this level takes precedence if 'encoding' is set at both levels.
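+  // As an illustrative pairing (an assumption about typical usage, not a rule
+  // stated by this API): a column written with HBase Bytes.toBytes(long) would
+  // be declared with type INTEGER and encoding BINARY, while a plain text
+  // qualifier would use type STRING and encoding TEXT.
+  //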
string encoding = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this is set, only the latest version of each value in this
+  // column is exposed.
+  // 'onlyReadLatest' can also be set at the column family level. However, the
+  // setting at this level takes precedence if 'onlyReadLatest' is set at both
+  // levels.
+  google.protobuf.BoolValue only_read_latest = 6
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Information related to a Bigtable column family.
+message BigtableColumnFamily {
+  // Identifier of the column family.
+  string family_id = 1;
+
+  // Optional. The type to convert the value in cells of this column family.
+  // The values are expected to be encoded using HBase Bytes.toBytes function
+  // when using the BINARY encoding value.
+  // Following BigQuery types are allowed (case-sensitive):
+  //
+  // * BYTES
+  // * STRING
+  // * INTEGER
+  // * FLOAT
+  // * BOOLEAN
+  // * JSON
+  //
+  // Default type is BYTES.
+  // This can be overridden for a specific column by listing that column in
+  // 'columns' and specifying a type for it.
+  string type = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The encoding of the values when the type is not STRING.
+  // Acceptable encoding values are:
+  // TEXT - indicates values are alphanumeric text strings.
+  // BINARY - indicates values are encoded using HBase Bytes.toBytes family of
+  // functions.
+  // This can be overridden for a specific column by listing that column in
+  // 'columns' and specifying an encoding for it.
+  string encoding = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. List of columns that should be exposed as individual fields as
+  // opposed to a list of (column name, value) pairs.
+  // All columns whose qualifier matches a qualifier in this list can be
+  // accessed as `<family field name>.<column field name>`.
+  // Other columns can be accessed as a list through
+  // the `<family field name>.Column` field.
+  repeated BigtableColumn columns = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this is set, only the latest version of each value is exposed
+  // for all columns in this column family.
+  // This can be overridden for a specific column by listing that column in
+  // 'columns' and specifying a different setting
+  // for that column.
+  google.protobuf.BoolValue only_read_latest = 5
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Options specific to Google Cloud Bigtable data sources.
+message BigtableOptions {
+  // Optional. List of column families to expose in the table schema along with
+  // their types.
+  // This list restricts the column families that can be referenced in queries
+  // and specifies their value types.
+  // You can use this list to do type conversions - see the 'type' field for
+  // more details.
+  // If you leave this list empty, all column families are present in the table
+  // schema and their values are read as BYTES.
+  // During a query only the column families referenced in that query are read
+  // from Bigtable.
+  repeated BigtableColumnFamily column_families = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If field is true, then the column families that are not
+  // specified in columnFamilies list are not exposed in the table schema.
+  // Otherwise, they are read with BYTES type values.
+  // The default value is false.
+  google.protobuf.BoolValue ignore_unspecified_column_families = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If field is true, then the rowkey column families will be read
+  // and converted to string.
Otherwise they are read with BYTES type values and
+  // users need to manually cast them with CAST if necessary.
+  // The default value is false.
+  google.protobuf.BoolValue read_rowkey_as_string = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If field is true, then each column family will be read as a
+  // single JSON column. Otherwise they are read as a repeated cell structure
+  // containing timestamp/value tuples. The default value is false.
+  google.protobuf.BoolValue output_column_families_as_json = 4
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Options specific to Google Sheets data sources.
+message GoogleSheetsOptions {
+  // Optional. The number of rows at the top of a sheet that BigQuery will skip
+  // when reading the data. The default value is 0. This property is useful if
+  // you have header rows that should be skipped. When autodetect is on,
+  // the behavior is the following:
+  // * skipLeadingRows unspecified - Autodetect tries to detect headers in the
+  //   first row. If they are not detected, the row is read as data. Otherwise
+  //   data is read starting from the second row.
+  // * skipLeadingRows is 0 - Instructs autodetect that there are no headers and
+  //   data should be read starting from the first row.
+  // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect
+  //   headers in row N. If headers are not detected, row N is just skipped.
+  //   Otherwise row N is used to extract column names for the detected schema.
+  google.protobuf.Int64Value skip_leading_rows = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Range of a sheet to query from. Only used when non-empty.
+  // Typical format: sheet_name!top_left_cell_id:bottom_right_cell_id
+  // For example: sheet1!A1:B20
+  string range = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+message ExternalDataConfiguration {
+  // Supported Object Metadata Types.
+  enum ObjectMetadata {
+    // Unspecified by default.
+    OBJECT_METADATA_UNSPECIFIED = 0;
+
+    // A synonym for `SIMPLE`.
+    DIRECTORY = 1;
+
+    // Directory listing of objects.
+    SIMPLE = 2;
+  }
+
+  // MetadataCacheMode identifies if the table should use metadata caching for
+  // files from an external source (e.g. Google Cloud Storage).
+  enum MetadataCacheMode {
+    // Unspecified metadata cache mode.
+    METADATA_CACHE_MODE_UNSPECIFIED = 0;
+
+    // Set this mode to trigger automatic background refresh of metadata cache
+    // from the external source. Queries will use the latest available cache
+    // version within the table's maxStaleness interval.
+    AUTOMATIC = 1;
+
+    // Set this mode to enable triggering manual refresh of the metadata cache
+    // from external source. Queries will use the latest manually triggered
+    // cache version within the table's maxStaleness interval.
+    MANUAL = 2;
+  }
+
+  // [Required] The fully-qualified URIs that point to your data in Google
+  // Cloud. For Google Cloud Storage URIs:
+  // Each URI can contain one '*' wildcard character and it must come after
+  // the 'bucket' name.
+  // Size limits related to load jobs apply to external data sources.
+  // For Google Cloud Bigtable URIs:
+  // Exactly one URI can be specified and it has to be a fully specified and
+  // valid HTTPS URL for a Google Cloud Bigtable table.
+  // For Google Cloud Datastore backups, exactly one URI can be specified. Also,
+  // the '*' wildcard character is not allowed.
+  repeated string source_uris = 1;
+
+  // Optional. Specifies how source URIs are interpreted for constructing the
+  // file set to load.
By default source URIs are expanded against the + // underlying storage. Other options include specifying manifest files. Only + // applicable to object storage systems. + FileSetSpecType file_set_spec_type = 25 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The schema for the data. + // Schema is required for CSV and JSON formats if autodetect is not on. + // Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, + // Avro, ORC and Parquet formats. + TableSchema schema = 2 [(google.api.field_behavior) = OPTIONAL]; + + // [Required] The data format. + // For CSV files, specify "CSV". + // For Google sheets, specify "GOOGLE_SHEETS". + // For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". + // For Avro files, specify "AVRO". + // For Google Cloud Datastore backups, specify "DATASTORE_BACKUP". + // For Apache Iceberg tables, specify "ICEBERG". + // For ORC files, specify "ORC". + // For Parquet files, specify "PARQUET". + // [Beta] For Google Cloud Bigtable, specify "BIGTABLE". + string source_format = 3; + + // Optional. The maximum number of bad records that BigQuery can ignore when + // reading data. If the number of bad records exceeds this value, an invalid + // error is returned in the job result. The default value is 0, which requires + // that all records are valid. This setting is ignored for Google Cloud + // Bigtable, Google Cloud Datastore backups, Avro, ORC and Parquet formats. + google.protobuf.Int32Value max_bad_records = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Try to detect schema and format options automatically. + // Any option specified explicitly will be honored. + google.protobuf.BoolValue autodetect = 5; + + // Optional. Indicates if BigQuery should allow extra values that are not + // represented in the table schema. + // If true, the extra values are ignored. + // If false, records with extra columns are treated as bad records, and if + // there are too many bad records, an invalid error is returned in the job + // result. + // The default value is false. + // The sourceFormat property determines what BigQuery treats as an extra + // value: + // CSV: Trailing columns + // JSON: Named values that don't match any column names + // Google Cloud Bigtable: This setting is ignored. + // Google Cloud Datastore backups: This setting is ignored. + // Avro: This setting is ignored. + // ORC: This setting is ignored. + // Parquet: This setting is ignored. + google.protobuf.BoolValue ignore_unknown_values = 6 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The compression type of the data source. + // Possible values include GZIP and NONE. The default value is NONE. + // This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore + // backups, Avro, ORC and Parquet + // formats. An empty string is an invalid value. + string compression = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional properties to set if sourceFormat is set to CSV. + CsvOptions csv_options = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional properties to set if sourceFormat is set to JSON. + JsonOptions json_options = 26 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional options if sourceFormat is set to BIGTABLE. + BigtableOptions bigtable_options = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional options if sourceFormat is set to GOOGLE_SHEETS. 
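+  // A minimal sketch of a Sheets-backed configuration in JSON form, using only
+  // fields defined in this message (the URI and range values are illustrative):
+  //
+  //     {
+  //       "sourceFormat": "GOOGLE_SHEETS",
+  //       "sourceUris": ["https://docs.google.com/spreadsheets/d/..."],
+  //       "googleSheetsOptions": {"range": "sheet1!A1:B20"}
+  //     }
+  //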
GoogleSheetsOptions google_sheets_options = 10
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. When set, configures hive partitioning support. Not all storage
+  // formats support hive partitioning -- requesting hive partitioning on an
+  // unsupported format will lead to an error, as will providing an invalid
+  // specification.
+  HivePartitioningOptions hive_partitioning_options = 13
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The connection specifying the credentials to be used to read
+  // external storage, such as Azure Blob, Cloud Storage, or S3. The
+  // connection_id can have the form
+  // `{project_id}.{location_id};{connection_id}` or
+  // `projects/{project_id}/locations/{location_id}/connections/{connection_id}`.
+  string connection_id = 14 [(google.api.field_behavior) = OPTIONAL];
+
+  // Defines the list of possible SQL data types to which the source decimal
+  // values are converted. This list and the precision and the scale parameters
+  // of the decimal field determine the target type. In the order of NUMERIC,
+  // BIGNUMERIC, and STRING, a
+  // type is picked if it is in the specified list and if it supports the
+  // precision and the scale. STRING supports all precision and scale values.
+  // If none of the listed types supports the precision and the scale, the type
+  // supporting the widest range in the specified list is picked, and if a value
+  // exceeds the supported range when reading the data, an error will be thrown.
+  //
+  // Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"].
+  // If (precision,scale) is:
+  //
+  // * (38,9) -> NUMERIC;
+  // * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits);
+  // * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits);
+  // * (76,38) -> BIGNUMERIC;
+  // * (77,38) -> BIGNUMERIC (error if value exceeds supported range).
+  //
+  // This field cannot contain duplicate types. The order of the types in this
+  // field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as
+  // ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over
+  // BIGNUMERIC.
+  //
+  // Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other
+  // file formats.
+  repeated DecimalTargetType decimal_target_types = 16;
+
+  // Optional. Additional properties to set if sourceFormat is set to AVRO.
+  AvroOptions avro_options = 17 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Load option to be used together with source_format
+  // newline-delimited JSON to indicate that a variant of JSON is being loaded.
+  // To load newline-delimited GeoJSON, specify GEOJSON (and source_format must
+  // be set to NEWLINE_DELIMITED_JSON).
+  JsonExtension json_extension = 18 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Additional properties to set if sourceFormat is set to PARQUET.
+  ParquetOptions parquet_options = 19 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. ObjectMetadata is used to create Object Tables. Object Tables
+  // contain a listing of objects (with their metadata) found at the
+  // source_uris. If ObjectMetadata is set, source_format should be omitted.
+  //
+  // Currently SIMPLE is the only supported Object Metadata type.
+  optional ObjectMetadata object_metadata = 22
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. When creating an external table, the user can provide a reference
+  // file with the table schema. This is enabled for the following formats:
+  // AVRO, PARQUET, ORC.
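+  // For example (an illustrative URI), "gs://mybucket/schemas/reference.parquet"
+  // would point at an existing file of the same format whose schema is reused
+  // for the new table.
+  //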
+ google.protobuf.StringValue reference_file_schema_uri = 23 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Metadata Cache Mode for the table. Set this to enable caching of + // metadata from external data source. + MetadataCacheMode metadata_cache_mode = 24 + [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_dataset_reference.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_dataset_reference.proto.baseline new file mode 100755 index 000000000..8d3a3b4c9 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/external_dataset_reference.proto.baseline @@ -0,0 +1,46 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "ExternalDatasetReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; +option (google.api.resource_definition) = { + type: "bigqueryconnection.googleapis.com/Connection" + pattern: "projects/{project}/locations/{location}/connections/{connection}" +}; + +// Configures the access a dataset defined in an external metadata storage. +message ExternalDatasetReference { + // Required. External source that backs this dataset. + string external_source = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The connection id that is used to access the external_source. + // + // Format: + // projects/{project_id}/locations/{location_id}/connections/{connection_id} + string connection = 3 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigqueryconnection.googleapis.com/Connection" + } + ]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/file_set_specification_type.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/file_set_specification_type.proto.baseline new file mode 100755 index 000000000..1068d20eb --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/file_set_specification_type.proto.baseline @@ -0,0 +1,34 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
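+
+// Illustrative note (an assumption about manifest layout, consistent with the
+// enum comments below): with FILE_SET_SPEC_TYPE_NEW_LINE_DELIMITED_MANIFEST,
+// each source URI points at a plain-text manifest that lists one object URI
+// per line, with no wildcards, for example:
+//
+//     gs://mybucket/data/part-000.parquet
+//     gs://mybucket/data/part-001.parquet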
+ +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "FileSetSpecificationTypeProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This enum defines how to interpret source URIs for load jobs and external +// tables. +enum FileSetSpecType { + // This option expands source URIs by listing files from the object store. It + // is the default behavior if FileSetSpecType is not set. + FILE_SET_SPEC_TYPE_FILE_SYSTEM_MATCH = 0; + + // This option indicates that the provided URIs are newline-delimited manifest + // files, with one URI per line. Wildcard URIs are not supported. + FILE_SET_SPEC_TYPE_NEW_LINE_DELIMITED_MANIFEST = 1; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/hive_partitioning.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/hive_partitioning.proto.baseline new file mode 100755 index 000000000..76872bd1a --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/hive_partitioning.proto.baseline @@ -0,0 +1,86 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "HivePartitioningProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Options for configuring hive partitioning detect. +message HivePartitioningOptions { + // Optional. When set, what mode of hive partitioning to use when reading + // data. The following modes are supported: + // + // * AUTO: automatically infer partition key name(s) and type(s). + // + // * STRINGS: automatically infer partition key name(s). All types are + // strings. + // + // * CUSTOM: partition key schema is encoded in the source URI prefix. + // + // Not all storage formats support hive partitioning. Requesting hive + // partitioning on an unsupported format will lead to an error. + // Currently supported formats are: JSON, CSV, ORC, Avro and Parquet. + string mode = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. When hive partition detection is requested, a common prefix for + // all source uris must be required. The prefix must end immediately before + // the partition key encoding begins. For example, consider files following + // this data layout: + // + // gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro + // + // gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro + // + // When hive partitioning is requested with either AUTO or STRINGS detection, + // the common prefix can be either of gs://bucket/path_to_table or + // gs://bucket/path_to_table/. 
+ // + // CUSTOM detection requires encoding the partitioning schema immediately + // after the common prefix. For CUSTOM, any of + // + // * gs://bucket/path_to_table/{dt:DATE}/{country:STRING}/{id:INTEGER} + // + // * gs://bucket/path_to_table/{dt:STRING}/{country:STRING}/{id:INTEGER} + // + // * gs://bucket/path_to_table/{dt:DATE}/{country:STRING}/{id:STRING} + // + // would all be valid source URI prefixes. + string source_uri_prefix = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set to true, queries over this table require a partition + // filter that can be used for partition elimination to be specified. + // + // Note that this field should only be true when creating a permanent + // external table or querying a temporary external table. + // + // Hive-partitioned loads with require_partition_filter explicitly set to + // true will fail. + google.protobuf.BoolValue require_partition_filter = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. For permanent external tables, this field is populated with + // the hive partition keys in the order they were inferred. The types of the + // partition keys can be deduced by checking the table schema (which will + // include the partition keys). Not every API will populate this field in the + // output. For example, Tables.Get will populate it, but Tables.List will not + // contain this field. + repeated string fields = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job.proto.baseline new file mode 100755 index 000000000..b15e1fb42 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job.proto.baseline @@ -0,0 +1,738 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
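To make the mode/prefix interaction above concrete, here is a minimal sketch of a `HivePartitioningOptions` payload as the generated TypeScript types might accept it. It assumes the usual GAPIC camelCase field mapping and `{value}` wrapper objects for `google.protobuf` wrapper types; the bucket path is a placeholder.

```typescript
// Hypothetical google.cloud.bigquery.v2.HivePartitioningOptions payload.
const hivePartitioningOptions = {
  mode: 'AUTO', // or 'STRINGS' / 'CUSTOM', as documented above
  // Common prefix that ends immediately before the partition key encoding;
  // with mode 'CUSTOM' the key schema would be encoded in the prefix instead,
  // e.g. 'gs://bucket/path_to_table/{dt:DATE}/{country:STRING}'.
  sourceUriPrefix: 'gs://bucket/path_to_table',
  // google.protobuf.BoolValue wrapper: require a partition filter on queries.
  requirePartitionFilter: {value: true},
  // `fields` is OUTPUT_ONLY and is populated by the service, not the caller.
};
```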
+ +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/v2/data_format_options.proto"; +import "google/cloud/bigquery/v2/dataset_reference.proto"; +import "google/cloud/bigquery/v2/error.proto"; +import "google/cloud/bigquery/v2/job_config.proto"; +import "google/cloud/bigquery/v2/job_creation_reason.proto"; +import "google/cloud/bigquery/v2/job_reference.proto"; +import "google/cloud/bigquery/v2/job_stats.proto"; +import "google/cloud/bigquery/v2/job_status.proto"; +import "google/cloud/bigquery/v2/query_parameter.proto"; +import "google/cloud/bigquery/v2/session_info.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Job Service. +// +// It should not be relied on for production use cases at this time. +service JobService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only," + "https://www.googleapis.com/auth/devstorage.full_control," + "https://www.googleapis.com/auth/devstorage.read_only," + "https://www.googleapis.com/auth/devstorage.read_write"; + + // Requests that a job be cancelled. This call will return immediately, and + // the client will need to poll for the job status to see if the cancel + // completed successfully. Cancelled jobs may still incur costs. + rpc CancelJob(CancelJobRequest) returns (JobCancelResponse) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/jobs/{job_id=*}/cancel" + }; + } + + // Returns information about a specific job. Job information is available for + // a six month period after creation. Requires that you're the person who ran + // the job, or have the Is Owner project role. + rpc GetJob(GetJobRequest) returns (Job) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/jobs/{job_id=*}" + }; + } + + // Starts a new asynchronous job. + // + // This API has two different kinds of endpoint URIs, as this method supports + // a variety of use cases. + // + // * The *Metadata* URI is used for most interactions, as it accepts the job + // configuration directly. + // * The *Upload* URI is ONLY for the case when you're sending both a load job + // configuration and a data stream together. In this case, the Upload URI + // accepts the job configuration and the data as two distinct multipart MIME + // parts. + rpc InsertJob(InsertJobRequest) returns (Job) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/jobs" + body: "job" + }; + } + + // Requests the deletion of the metadata of a job. This call returns when the + // job's metadata is deleted. + rpc DeleteJob(DeleteJobRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/bigquery/v2/projects/{project_id=*}/jobs/{job_id=*}/delete" + }; + } + + // Lists all jobs that you started in the specified project. 
Job information + // is available for a six month period after creation. The job list is sorted + // in reverse chronological order, by job creation time. Requires the Can View + // project role, or the Is Owner project role if you set the allUsers + // property. + rpc ListJobs(ListJobsRequest) returns (JobList) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/jobs" + }; + } + + // RPC to get the results of a query job. + rpc GetQueryResults(GetQueryResultsRequest) + returns (GetQueryResultsResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/queries/{job_id=*}" + }; + } + + // Runs a BigQuery SQL query synchronously and returns query results if the + // query completes within a specified timeout. + rpc Query(PostQueryRequest) returns (QueryResponse) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/queries" + body: "query_request" + }; + } +} + +message Job { + // Output only. The type of the resource. + string kind = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A hash of this resource. + string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Opaque ID field of the job. + string id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A URL that can be used to access the resource again. + string self_link = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Email address of the user who ran the job. + string user_email = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Describes the job configuration. + JobConfiguration configuration = 6 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Reference describing the unique-per-user name of the job. + JobReference job_reference = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Information about the job, including starting time and ending + // time of the job. + JobStatistics statistics = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The status of this job. Examine this value when polling an + // asynchronous job to see if the job is complete. + JobStatus status = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. [Full-projection-only] String representation of identity of + // requesting party. Populated for both first- and third-party identities. + // Only present for APIs that support third-party identities. + string principal_subject = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The reason why a Job was created. + // [Preview](https://cloud.google.com/products/#product-launch-stages) + JobCreationReason job_creation_reason = 14 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Describes format of a jobs cancellation request. +message CancelJobRequest { + // Required. Project ID of the job to cancel + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the job to cancel + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The geographic location of the job. You must specify the location to run + // the job for the following scenarios: + // + // * If the location to run a job is not in the `us` or + // the `eu` multi-regional location + // * If the job's location is in a single region (for example, + // `us-central1`) + // + // For more information, see + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 3; +} + +// Describes format of a jobs cancellation response. 
+message JobCancelResponse { + // The resource type of the response. + string kind = 1; + + // The final state of the job. + Job job = 2; +} + +// Describes format of a jobs get request. +message GetJobRequest { + // Required. Project ID of the requested job. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the requested job. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The geographic location of the job. You must specify the location to run + // the job for the following scenarios: + // + // * If the location to run a job is not in the `us` or + // the `eu` multi-regional location + // * If the job's location is in a single region (for example, + // `us-central1`) + // + // For more information, see + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 3; +} + +// Describes format of a job insertion request. +message InsertJobRequest { + // Project ID of project that will be billed for the job. + string project_id = 1; + + // Jobs resource to insert. + Job job = 3; +} + +// Describes the format of a jobs deletion request. +message DeleteJobRequest { + // Required. Project ID of the job for which metadata is to be deleted. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the job for which metadata is to be deleted. If this is + // a parent job which has child jobs, the metadata from all child jobs will be + // deleted as well. Direct deletion of the metadata of child jobs is not + // allowed. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The geographic location of the job. Required. + // See details at: + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 3; +} + +// Describes the format of the list jobs request. +message ListJobsRequest { + // Projection is used to control what job information is returned. + enum Projection { + option allow_alias = true; + + // Does not include the job configuration + minimal = 0; + + // Does not include the job configuration + MINIMAL = 0; + + // Includes all job data + full = 1; + + // Includes all job data + FULL = 1; + } + + // StateFilter allows filtration by job execution state. + enum StateFilter { + option allow_alias = true; + + // Finished jobs + done = 0; + + // Finished jobs + DONE = 0; + + // Pending jobs + pending = 1; + + // Pending jobs + PENDING = 1; + + // Running jobs + running = 2; + + // Running jobs. + RUNNING = 2; + } + + // Project ID of the jobs to list. + string project_id = 1; + + // Whether to display jobs owned by all users in the project. Default False. + bool all_users = 2; + + // The maximum number of results to return in a single response page. + // Leverage the page tokens to iterate through the entire collection. + google.protobuf.Int32Value max_results = 3; + + // Min value for job creation time, in milliseconds since the POSIX epoch. + // If set, only jobs created after or at this timestamp are returned. + uint64 min_creation_time = 4; + + // Max value for job creation time, in milliseconds since the POSIX epoch. + // If set, only jobs created before or at this timestamp are returned. + google.protobuf.UInt64Value max_creation_time = 5; + + // Page token, returned by a previous call, to request the next page of + // results. 
+  string page_token = 6;
+
+  // Restrict information returned to a set of selected fields.
+  Projection projection = 7;
+
+  // Filter for job state.
+  repeated StateFilter state_filter = 8;
+
+  // If set, show only child jobs of the specified parent. Otherwise, show all
+  // top-level jobs.
+  string parent_job_id = 9;
+}
+
+// ListFormatJob is a partial projection of job information returned as part
+// of a jobs.list response.
+message ListFormatJob {
+  // Unique opaque ID of the job.
+  string id = 1;
+
+  // The resource type.
+  string kind = 2;
+
+  // A job reference uniquely identifying the job.
+  JobReference job_reference = 3;
+
+  // Running state of the job. When the state is DONE, errorResult can be
+  // checked to determine whether the job succeeded or failed.
+  string state = 4;
+
+  // A result object that will be present only if the job has failed.
+  ErrorProto error_result = 5;
+
+  // Output only. Information about the job, including starting time and ending
+  // time of the job.
+  JobStatistics statistics = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. Describes the job configuration.
+  JobConfiguration configuration = 7 [(google.api.field_behavior) = REQUIRED];
+
+  // [Full-projection-only] Describes the status of this job.
+  JobStatus status = 8;
+
+  // [Full-projection-only] Email address of the user who ran the job.
+  string user_email = 9;
+
+  // [Full-projection-only] String representation of identity of requesting
+  // party. Populated for both first- and third-party identities. Only present
+  // for APIs that support third-party identities.
+  string principal_subject = 10;
+}
+
+// JobList is the response format for a jobs.list call.
+message JobList {
+  // A hash of this page of results.
+  string etag = 1;
+
+  // The resource type of the response.
+  string kind = 2;
+
+  // A token to request the next page of results.
+  string next_page_token = 3;
+
+  // List of jobs that were requested.
+  repeated ListFormatJob jobs = 4;
+
+  // A list of skipped locations that were unreachable. For more information
+  // about BigQuery locations, see:
+  // https://cloud.google.com/bigquery/docs/locations. Example: "europe-west5"
+  repeated string unreachable = 5;
+}
+
+// Request object of GetQueryResults.
+message GetQueryResultsRequest {
+  // Required. Project ID of the query job.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Job ID of the query job.
+  string job_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Zero-based index of the starting row.
+  google.protobuf.UInt64Value start_index = 3;
+
+  // Page token, returned by a previous call, to request the next page of
+  // results.
+  string page_token = 4;
+
+  // Maximum number of results to read.
+  google.protobuf.UInt32Value max_results = 5;
+
+  // Optional: Specifies the maximum amount of time, in milliseconds, that the
+  // client is willing to wait for the query to complete. By default, this limit
+  // is 10 seconds (10,000 milliseconds). If the query is complete, the
+  // jobComplete field in the response is true. If the query has not yet
+  // completed, jobComplete is false.
+  //
+  // You can request a longer timeout period in the timeoutMs field. However,
+  // the call is not guaranteed to wait for the specified timeout; it typically
+  // returns after around 200 seconds (200,000 milliseconds), even if the query
+  // is not complete.
+ // + // If jobComplete is false, you can continue to wait for the query to complete + // by calling the getQueryResults method until the jobComplete field in the + // getQueryResults response is true. + google.protobuf.UInt32Value timeout_ms = 6; + + // The geographic location of the job. You must specify the location to run + // the job for the following scenarios: + // + // * If the location to run a job is not in the `us` or + // the `eu` multi-regional location + // * If the job's location is in a single region (for example, + // `us-central1`) + // + // For more information, see + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 7; + + // Optional. Output format adjustments. + DataFormatOptions format_options = 8 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response object of GetQueryResults. +message GetQueryResultsResponse { + // The resource type of the response. + string kind = 1; + + // A hash of this response. + string etag = 2; + + // The schema of the results. Present only when the query completes + // successfully. + TableSchema schema = 3; + + // Reference to the BigQuery Job that was created to run the query. This field + // will be present even if the original request timed out, in which case + // GetQueryResults can be used to read the results once the query has + // completed. Since this API only returns the first page of results, + // subsequent pages can be fetched via the same mechanism (GetQueryResults). + JobReference job_reference = 4; + + // The total number of rows in the complete query result set, which can be + // more than the number of rows in this single page of results. Present only + // when the query completes successfully. + google.protobuf.UInt64Value total_rows = 5; + + // A token used for paging results. When this token is non-empty, it + // indicates additional results are available. + string page_token = 6; + + // An object with as many results as can be contained within the maximum + // permitted reply size. To get any additional rows, you can call + // GetQueryResults and specify the jobReference returned above. Present only + // when the query completes successfully. + // + // The REST-based representation of this data leverages a series of + // JSON f,v objects for indicating fields and values. + repeated google.protobuf.Struct rows = 7; + + // The total number of bytes processed for this query. + google.protobuf.Int64Value total_bytes_processed = 8; + + // Whether the query has completed or not. If rows or totalRows are present, + // this will always be true. If this is false, totalRows will not be + // available. + google.protobuf.BoolValue job_complete = 9; + + // Output only. The first errors or warnings encountered during the running + // of the job. The final message includes the number of errors that caused the + // process to stop. Errors here do not necessarily mean that the job has + // completed or was unsuccessful. For more information about error messages, + // see [Error + // messages](https://cloud.google.com/bigquery/docs/error-messages). + repeated ErrorProto errors = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Whether the query result was fetched from the query cache. + google.protobuf.BoolValue cache_hit = 11; + + // Output only. The number of rows affected by a DML statement. Present only + // for DML statements INSERT, UPDATE or DELETE. 
+  google.protobuf.Int64Value num_dml_affected_rows = 12
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Request format for the query request.
+message PostQueryRequest {
+  // Required. Project ID of the query request.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // The query request body.
+  QueryRequest query_request = 2;
+}
+
+// Describes the format of the jobs.query request.
+message QueryRequest {
+  // Job Creation Mode provides different options on job creation.
+  enum JobCreationMode {
+    // If unspecified, JOB_CREATION_REQUIRED is the default.
+    JOB_CREATION_MODE_UNSPECIFIED = 0;
+
+    // Default. Job creation is always required.
+    JOB_CREATION_REQUIRED = 1;
+
+    // Job creation is optional. Returning immediate results is prioritized.
+    // BigQuery will automatically determine if a Job needs to be created.
+    // The conditions under which BigQuery can decide to not create a Job are
+    // subject to change. If Job creation is required, JOB_CREATION_REQUIRED
+    // mode should be used, which is the default.
+    JOB_CREATION_OPTIONAL = 2;
+  }
+
+  // The resource type of the request.
+  string kind = 2;
+
+  // Required. A query string to execute, using Google Standard SQL or legacy
+  // SQL syntax. Example: "SELECT COUNT(f1) FROM
+  // myProjectId.myDatasetId.myTableId".
+  string query = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The maximum number of rows of data to return per page of
+  // results. Setting this flag to a small value such as 1000 and then paging
+  // through results might improve reliability when the query result set is
+  // large. In addition to this limit, responses are also limited to 10 MB. By
+  // default, there is no maximum row count, and only the byte limit applies.
+  google.protobuf.UInt32Value max_results = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the default datasetId and projectId to assume for any
+  // unqualified table names in the query. If not set, all table names in the
+  // query string must be qualified in the format 'datasetId.tableId'.
+  DatasetReference default_dataset = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the maximum amount of time, in milliseconds,
+  // that the client is willing to wait for the query to complete. By default,
+  // this limit is 10 seconds (10,000 milliseconds). If the query is complete,
+  // the jobComplete field in the response is true. If the query has not yet
+  // completed, jobComplete is false.
+  //
+  // You can request a longer timeout period in the timeoutMs field. However,
+  // the call is not guaranteed to wait for the specified timeout; it typically
+  // returns after around 200 seconds (200,000 milliseconds), even if the query
+  // is not complete.
+  //
+  // If jobComplete is false, you can continue to wait for the query to complete
+  // by calling the getQueryResults method until the jobComplete field in the
+  // getQueryResults response is true.
+  google.protobuf.UInt32Value timeout_ms = 6
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If set to true, BigQuery doesn't run the job. Instead, if the
+  // query is valid, BigQuery returns statistics about the job such as how many
+  // bytes would be processed. If the query is invalid, an error is returned.
+  // The default value is false.
+  bool dry_run = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Whether to look for the result in the query cache. The query
+  // cache is a best-effort cache that will be flushed whenever tables in the
+  // query are modified. The default value is true.
+  google.protobuf.BoolValue use_query_cache = 9
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Specifies whether to use BigQuery's legacy SQL dialect for this query. The
+  // default value is true. If set to false, the query will use BigQuery's
+  // GoogleSQL: https://cloud.google.com/bigquery/sql-reference/ When
+  // useLegacySql is set to false, the value of flattenResults is ignored; query
+  // will be run as if flattenResults is false.
+  google.protobuf.BoolValue use_legacy_sql = 10;
+
+  // GoogleSQL only. Set to POSITIONAL to use positional (?) query parameters
+  // or to NAMED to use named (@myparam) query parameters in this query.
+  string parameter_mode = 11;
+
+  // Query parameters for GoogleSQL queries.
+  repeated QueryParameter query_parameters = 12;
+
+  // The geographic location where the job should run. See details at
+  // https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
+  string location = 13;
+
+  // Optional. Output format adjustments.
+  DataFormatOptions format_options = 15
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Connection properties which can modify the query behavior.
+  repeated ConnectionProperty connection_properties = 16
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The labels associated with this query.
+  // Labels can be used to organize and group query jobs.
+  // Label keys and values can be no longer than 63 characters, can only contain
+  // lowercase letters, numeric characters, underscores and dashes.
+  // International characters are allowed. Label keys must start with a letter
+  // and each label in the list must have a different key.
+  map<string, string> labels = 17 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Limits the bytes billed for this query. Queries with
+  // bytes billed above this limit will fail (without incurring a charge).
+  // If unspecified, the project default is used.
+  google.protobuf.Int64Value maximum_bytes_billed = 18
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A unique user provided identifier to ensure idempotent behavior
+  // for queries. Note that this is different from the job_id. It has the
+  // following properties:
+  //
+  // 1. It is case-sensitive, limited to up to 36 ASCII characters. A UUID is
+  // recommended.
+  //
+  // 2. Read-only queries can ignore this token since they are nullipotent by
+  // definition.
+  //
+  // 3. For the purposes of idempotency ensured by the request_id, a request
+  // is considered duplicate of another only if they have the same request_id
+  // and are actually duplicates. When determining whether a request is a
+  // duplicate of another request, all parameters in the request that
+  // may affect the result are considered. For example, query,
+  // connection_properties, query_parameters, use_legacy_sql are parameters
+  // that affect the result and are considered when determining whether a
+  // request is a duplicate, but properties like timeout_ms don't
+  // affect the result and are thus not considered. Dry run query
+  // requests are never considered duplicate of another request.
+  //
+  // 4. When a duplicate mutating query request is detected, it returns:
+  //    a. the results of the mutation if it completes successfully within
+  //       the timeout.
+  //    b. the running operation if it is still in progress at the end of the
+  //       timeout.
+  //
+  // 5. Its lifetime is limited to 15 minutes. In other words, if two
+  // requests are sent with the same request_id, but more than 15 minutes
+  // apart, idempotency is not guaranteed.
+  string request_id = 19 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If true, creates a new session using a randomly generated
+  // session_id. If false, runs query with an existing session_id passed in
+  // ConnectionProperty, otherwise runs query in non-session mode.
+  //
+  // The session location will be set to QueryRequest.location if it is present,
+  // otherwise it's set to the default location based on existing routing logic.
+  google.protobuf.BoolValue create_session = 20
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If not set, jobs are always required.
+  //
+  // If set, the query request will follow the behavior described in
+  // JobCreationMode.
+  // [Preview](https://cloud.google.com/products/#product-launch-stages)
+  JobCreationMode job_creation_mode = 22
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+message QueryResponse {
+  // The resource type.
+  string kind = 1;
+
+  // The schema of the results. Present only when the query completes
+  // successfully.
+  TableSchema schema = 2;
+
+  // Reference to the Job that was created to run the query. This field will be
+  // present even if the original request timed out, in which case
+  // GetQueryResults can be used to read the results once the query has
+  // completed. Since this API only returns the first page of results,
+  // subsequent pages can be fetched via the same mechanism (GetQueryResults).
+  //
+  // If job_creation_mode was set to `JOB_CREATION_OPTIONAL` and the query
+  // completes without creating a job, this field will be empty.
+  JobReference job_reference = 3;
+
+  // Optional. The reason why a Job was created.
+  //
+  // Only relevant when a job_reference is present in the response.
+  // If job_reference is not present it will always be unset.
+  // [Preview](https://cloud.google.com/products/#product-launch-stages)
+  JobCreationReason job_creation_reason = 15
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Auto-generated ID for the query.
+  // [Preview](https://cloud.google.com/products/#product-launch-stages)
+  string query_id = 14;
+
+  // The total number of rows in the complete query result set, which can be
+  // more than the number of rows in this single page of results.
+  google.protobuf.UInt64Value total_rows = 4;
+
+  // A token used for paging results. A non-empty token indicates that
+  // additional results are available. To see additional results,
+  // query the
+  // [`jobs.getQueryResults`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults)
+  // method. For more information, see [Paging through table
+  // data](https://cloud.google.com/bigquery/docs/paging-results).
+  string page_token = 5;
+
+  // An object with as many results as can be contained within the maximum
+  // permitted reply size. To get any additional rows, you can call
+  // GetQueryResults and specify the jobReference returned above.
+  repeated google.protobuf.Struct rows = 6;
+
+  // The total number of bytes processed for this query. If this query was a dry
+  // run, this is the number of bytes that would be processed if the query were
+  // run.
+  google.protobuf.Int64Value total_bytes_processed = 7;
+
+  // Whether the query has completed or not. If rows or totalRows are present,
+  // this will always be true. If this is false, totalRows will not be
+  // available.
+ google.protobuf.BoolValue job_complete = 8; + + // Output only. The first errors or warnings encountered during the running of + // the job. The final message includes the number of errors that caused the + // process to stop. Errors here do not necessarily mean that the job has + // completed or was unsuccessful. For more information about error messages, + // see [Error + // messages](https://cloud.google.com/bigquery/docs/error-messages). + repeated ErrorProto errors = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Whether the query result was fetched from the query cache. + google.protobuf.BoolValue cache_hit = 10; + + // Output only. The number of rows affected by a DML statement. Present only + // for DML statements INSERT, UPDATE or DELETE. + google.protobuf.Int64Value num_dml_affected_rows = 11 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Information of the session if this job is part of one. + SessionInfo session_info = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Detailed statistics for DML statements INSERT, UPDATE, DELETE, + // MERGE or TRUNCATE. + DmlStats dml_stats = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_config.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_config.proto.baseline new file mode 100755 index 000000000..9f42488c8 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_config.proto.baseline @@ -0,0 +1,814 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
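As a usage sketch for the request/response messages above: the generated client for this service would be driven roughly as follows. This assumes the standard GAPIC TypeScript surface (a `JobServiceClient` class whose methods mirror the RPCs and take camelCase request objects) and a hypothetical import path; the project ID and SQL are placeholders.

```typescript
// Hypothetical import path for the generated bigquery-v2 client.
import {JobServiceClient} from '@google-cloud/bigquery';

async function runQuery(): Promise<void> {
  const client = new JobServiceClient();

  // jobs.query: synchronous query, waiting up to timeoutMs server-side.
  const [response] = await client.query({
    projectId: 'my-project',
    queryRequest: {
      query: 'SELECT 17 AS answer',
      useLegacySql: {value: false}, // google.protobuf.BoolValue wrapper
      timeoutMs: {value: 10_000},   // google.protobuf.UInt32Value wrapper
    },
  });

  if (response.jobComplete?.value) {
    console.log(response.rows);
  } else if (response.jobReference) {
    // The wait timed out: poll jobs.getQueryResults with the returned job
    // reference until jobComplete is true.
    const [results] = await client.getQueryResults({
      projectId: response.jobReference.projectId!,
      jobId: response.jobReference.jobId!,
    });
    console.log(results.jobComplete?.value, results.rows);
  }
}
```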
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/bigquery/v2/clustering.proto";
+import "google/cloud/bigquery/v2/dataset_reference.proto";
+import "google/cloud/bigquery/v2/decimal_target_types.proto";
+import "google/cloud/bigquery/v2/encryption_config.proto";
+import "google/cloud/bigquery/v2/external_data_config.proto";
+import "google/cloud/bigquery/v2/file_set_specification_type.proto";
+import "google/cloud/bigquery/v2/hive_partitioning.proto";
+import "google/cloud/bigquery/v2/json_extension.proto";
+import "google/cloud/bigquery/v2/model_reference.proto";
+import "google/cloud/bigquery/v2/query_parameter.proto";
+import "google/cloud/bigquery/v2/range_partitioning.proto";
+import "google/cloud/bigquery/v2/system_variable.proto";
+import "google/cloud/bigquery/v2/table_reference.proto";
+import "google/cloud/bigquery/v2/table_schema.proto";
+import "google/cloud/bigquery/v2/time_partitioning.proto";
+import "google/cloud/bigquery/v2/udf_resource.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "JobConfigProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Properties for the destination table.
+message DestinationTableProperties {
+  // Optional. Friendly name for the destination table. If the table already
+  // exists, it should be the same as the existing friendly name.
+  google.protobuf.StringValue friendly_name = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The description for the destination table.
+  // This will only be used if the destination table is newly created.
+  // If the table already exists and a value different than the current
+  // description is provided, the job will fail.
+  google.protobuf.StringValue description = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The labels associated with this table. You can use these to
+  // organize and group your tables. This will only be used if the destination
+  // table is newly created. If the table already exists and labels different
+  // from the current labels are provided, the job will fail.
+  map<string, string> labels = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A connection-level property to customize query behavior. Under JDBC, these
+// correspond directly to connection properties passed to the DriverManager.
+// Under ODBC, these correspond to properties in the connection string.
+//
+// Currently supported connection properties:
+//
+// * **dataset_project_id**: represents the default project for datasets that
+// are used in the query. Setting the
+// system variable `@@dataset_project_id` achieves the same behavior. For
+// more information about system variables, see:
+// https://cloud.google.com/bigquery/docs/reference/system-variables
+//
+// * **time_zone**: represents the default timezone used to run the query.
+//
+// * **session_id**: associates the query with a given session.
+//
+// * **query_label**: associates the query with a given job label. If set,
+// all subsequent queries in a script or session will have this label.
+// For the format in which you can specify a query label, see labels
+// in the JobConfiguration resource type:
+// https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfiguration
+//
+// * **service_account**: indicates the service account to use to run a
+// continuous query. If set, the query job uses the service account to access
+// Google Cloud resources. Service account access is bounded by the IAM
+// permissions that you have granted to the service account.
+//
+// Additional properties are allowed, but ignored. Specifying multiple
+// connection properties with the same key returns an error.
+message ConnectionProperty {
+  // The key of the property to set.
+  string key = 1;
+
+  // The value of the property to set.
+  string value = 2;
+}
+
+// JobConfigurationQuery configures a BigQuery query job.
+message JobConfigurationQuery {
+  // [Required] SQL query text to execute. The useLegacySql field can be used
+  // to indicate whether the query uses legacy SQL or GoogleSQL.
+  string query = 1;
+
+  // Optional. Describes the table where the query results should be stored.
+  // This property must be set for large results that exceed the maximum
+  // response size. For queries that produce anonymous (cached) results, this
+  // field will be populated by BigQuery.
+  TableReference destination_table = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. You can specify external table definitions, which operate as
+  // ephemeral tables that can be queried. These definitions are configured
+  // using a JSON map, where the string key represents the table identifier, and
+  // the value is the corresponding external data configuration object.
+  map<string, ExternalDataConfiguration> external_table_definitions = 23
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Describes user-defined function resources used in the query.
+  repeated UserDefinedFunctionResource user_defined_function_resources = 4;
+
+  // Optional. Specifies whether the job is allowed to create new tables.
+  // The following values are supported:
+  //
+  // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
+  // table.
+  // * CREATE_NEVER: The table must already exist. If it does not,
+  // a 'notFound' error is returned in the job result.
+  //
+  // The default value is CREATE_IF_NEEDED.
+  // Creation, truncation and append actions occur as one atomic update
+  // upon job completion.
+  string create_disposition = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the action that occurs if the destination table
+  // already exists. The following values are supported:
+  //
+  // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
+  // data, removes the constraints, and uses the schema from the query result.
+  // * WRITE_APPEND: If the table already exists, BigQuery appends the data to
+  // the table.
+  // * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate'
+  // error is returned in the job result.
+  //
+  // The default value is WRITE_EMPTY. Each action is atomic and only occurs if
+  // BigQuery is able to complete the job successfully. Creation, truncation and
+  // append actions occur as one atomic update upon job completion.
+  string write_disposition = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the default dataset to use for unqualified
+  // table names in the query. This setting does not alter behavior of
+  // unqualified dataset names. Setting the system variable
+  // `@@dataset_id` achieves the same behavior.
See + // https://cloud.google.com/bigquery/docs/reference/system-variables for more + // information on system variables. + DatasetReference default_dataset = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies a priority for the query. Possible values include + // INTERACTIVE and BATCH. The default value is INTERACTIVE. + string priority = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true and query uses legacy SQL dialect, allows the query + // to produce arbitrarily large result tables at a slight cost in performance. + // Requires destinationTable to be set. + // For GoogleSQL queries, this flag is ignored and large results are + // always allowed. However, you must still set destinationTable when result + // size exceeds the allowed maximum response size. + google.protobuf.BoolValue allow_large_results = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Whether to look for the result in the query cache. The query + // cache is a best-effort cache that will be flushed whenever tables in the + // query are modified. Moreover, the query cache is only available when a + // query does not have a destination table specified. The default value is + // true. + google.protobuf.BoolValue use_query_cache = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true and query uses legacy SQL dialect, flattens all nested + // and repeated fields in the query results. + // allowLargeResults must be true if this is set to false. + // For GoogleSQL queries, this flag is ignored and results are never + // flattened. + google.protobuf.BoolValue flatten_results = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Limits the bytes billed for this job. Queries that will have + // bytes billed beyond this limit will fail (without incurring a charge). + // If unspecified, this will be set to your project default. + google.protobuf.Int64Value maximum_bytes_billed = 14; + + // Optional. Specifies whether to use BigQuery's legacy SQL dialect for this + // query. The default value is true. If set to false, the query will use + // BigQuery's GoogleSQL: + // https://cloud.google.com/bigquery/sql-reference/ + // + // When useLegacySql is set to false, the value of flattenResults is ignored; + // query will be run as if flattenResults is false. + google.protobuf.BoolValue use_legacy_sql = 15 + [(google.api.field_behavior) = OPTIONAL]; + + // GoogleSQL only. Set to POSITIONAL to use positional (?) query parameters + // or to NAMED to use named (@myparam) query parameters in this query. + string parameter_mode = 16; + + // Query parameters for GoogleSQL queries. + repeated QueryParameter query_parameters = 17; + + // Output only. System variables for GoogleSQL queries. A system variable is + // output if the variable is settable and its value differs from the system + // default. + // "@@" prefix is not included in the name of the System variables. + optional SystemVariables system_variables = 35 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Allows the schema of the destination table to be updated as a side effect + // of the query job. Schema update options are supported in two cases: + // when writeDisposition is WRITE_APPEND; + // when writeDisposition is WRITE_TRUNCATE and the destination table is a + // partition of a table, specified by partition decorators. For normal tables, + // WRITE_TRUNCATE will always overwrite the schema. 
+ // One or more of the following values are specified: + // + // * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. + // * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original + // schema to nullable. + repeated string schema_update_options = 18; + + // Time-based partitioning specification for the destination table. Only one + // of timePartitioning and rangePartitioning should be specified. + TimePartitioning time_partitioning = 19; + + // Range partitioning specification for the destination table. + // Only one of timePartitioning and rangePartitioning should be specified. + RangePartitioning range_partitioning = 22; + + // Clustering specification for the destination table. + Clustering clustering = 20; + + // Custom encryption configuration (e.g., Cloud KMS keys) + EncryptionConfiguration destination_encryption_configuration = 21; + + // Options controlling the execution of scripts. + ScriptOptions script_options = 24; + + // Connection properties which can modify the query behavior. + repeated ConnectionProperty connection_properties = 33; + + // If this property is true, the job creates a new session using a randomly + // generated session_id. To continue using a created session with + // subsequent queries, pass the existing session identifier as a + // `ConnectionProperty` value. The session identifier is returned as part of + // the `SessionInfo` message within the query statistics. + // + // The new session's location will be set to `Job.JobReference.location` if it + // is present, otherwise it's set to the default location based on existing + // routing logic. + google.protobuf.BoolValue create_session = 34; + + // Optional. Whether to run the query as continuous or a regular query. + // Continuous query is currently in experimental stage and not ready for + // general usage. + google.protobuf.BoolValue continuous = 36 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Options related to script execution. +message ScriptOptions { + // KeyResultStatementKind controls how the key result is determined. + enum KeyResultStatementKind { + // Default value. + KEY_RESULT_STATEMENT_KIND_UNSPECIFIED = 0; + + // The last result determines the key result. + LAST = 1; + + // The first SELECT statement determines the key result. + FIRST_SELECT = 2; + } + + // Timeout period for each statement in a script. + google.protobuf.Int64Value statement_timeout_ms = 1; + + // Limit on the number of bytes billed per statement. Exceeding this budget + // results in an error. + google.protobuf.Int64Value statement_byte_budget = 2; + + // Determines which statement in the script represents the "key result", + // used to populate the schema and query results of the script job. + // Default is LAST. + KeyResultStatementKind key_result_statement = 4; +} + +// JobConfigurationLoad contains the configuration properties for loading data +// into a destination table. +message JobConfigurationLoad { + // Indicates the character map used for column names. + enum ColumnNameCharacterMap { + // Unspecified column name character map. + COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED = 0; + + // Support flexible column name and reject invalid column names. + STRICT = 1; + + // Support alphanumeric + underscore characters and names must start with a + // letter or underscore. Invalid column names will be normalized. + V1 = 2; + + // Support flexible column name. Invalid column names will be normalized. 
+    V2 = 3;
+  }
+
+  // [Required] The fully-qualified URIs that point to your data in Google
+  // Cloud.
+  // For Google Cloud Storage URIs:
+  // Each URI can contain one '*' wildcard character and it must come after
+  // the 'bucket' name. Size limits related to load jobs apply to external
+  // data sources.
+  // For Google Cloud Bigtable URIs:
+  // Exactly one URI can be specified and it has to be a fully specified and
+  // valid HTTPS URL for a Google Cloud Bigtable table.
+  // For Google Cloud Datastore backups:
+  // Exactly one URI can be specified. Also, the '*' wildcard character is not
+  // allowed.
+  repeated string source_uris = 1;
+
+  // Optional. Specifies how source URIs are interpreted for constructing the
+  // file set to load. By default, source URIs are expanded against the
+  // underlying storage. You can also specify manifest files to control how the
+  // file set is constructed. This option is only applicable to object storage
+  // systems.
+  FileSetSpecType file_set_spec_type = 49
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The schema for the destination table. The schema can be
+  // omitted if the destination table already exists, or if you're loading data
+  // from Google Cloud Datastore.
+  TableSchema schema = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // [Required] The destination table to load the data into.
+  TableReference destination_table = 3;
+
+  // Optional. [Experimental] Properties with which to create the destination
+  // table if it is new.
+  DestinationTableProperties destination_table_properties = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies whether the job is allowed to create new tables.
+  // The following values are supported:
+  //
+  // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
+  // table.
+  // * CREATE_NEVER: The table must already exist. If it does not,
+  // a 'notFound' error is returned in the job result.
+  // The default value is CREATE_IF_NEEDED.
+  // Creation, truncation and append actions occur as one atomic update
+  // upon job completion.
+  string create_disposition = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the action that occurs if the destination table
+  // already exists. The following values are supported:
+  //
+  // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
+  // data, removes the constraints and uses the schema from the load job.
+  // * WRITE_APPEND: If the table already exists, BigQuery appends the data to
+  // the table.
+  // * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate'
+  // error is returned in the job result.
+  //
+  // The default value is WRITE_APPEND.
+  // Each action is atomic and only occurs if BigQuery is able to complete the
+  // job successfully.
+  // Creation, truncation and append actions occur as one atomic update
+  // upon job completion.
+  string write_disposition = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies a string that represents a null value in a CSV file.
+  // For example, if you specify "\N", BigQuery interprets "\N" as a null value
+  // when loading a CSV file.
+  // The default value is the empty string. If you set this property to a custom
+  // value, BigQuery throws an error if an empty string is present for all data
+  // types except for STRING and BYTE. For STRING and BYTE columns, BigQuery
+  // interprets the empty string as an empty value.
+ google.protobuf.StringValue null_marker = 7 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The separator character for fields in a CSV file. The separator + // is interpreted as a single byte. For files encoded in ISO-8859-1, any + // single character can be used as a separator. For files encoded in UTF-8, + // characters represented in decimal range 1-127 (U+0001-U+007F) can be used + // without any modification. UTF-8 characters encoded with multiple bytes + // (i.e. U+0080 and above) will have only the first byte used for separating + // fields. The remaining bytes will be treated as a part of the field. + // BigQuery also supports the escape sequence "\t" (U+0009) to specify a tab + // separator. The default value is comma (",", U+002C). + string field_delimiter = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The number of rows at the top of a CSV file that BigQuery will + // skip when loading the data. The default value is 0. This property is useful + // if you have header rows in the file that should be skipped. When autodetect + // is on, the behavior is the following: + // + // * skipLeadingRows unspecified - Autodetect tries to detect headers in the + // first row. If they are not detected, the row is read as data. Otherwise + // data is read starting from the second row. + // * skipLeadingRows is 0 - Instructs autodetect that there are no headers and + // data should be read starting from the first row. + // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect + // headers in row N. If headers are not detected, row N is just skipped. + // Otherwise row N is used to extract column names for the detected schema. + google.protobuf.Int32Value skip_leading_rows = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The character encoding of the data. + // The supported values are UTF-8, ISO-8859-1, UTF-16BE, UTF-16LE, UTF-32BE, + // and UTF-32LE. The default value is UTF-8. BigQuery decodes the data after + // the raw, binary data has been split using the values of the `quote` and + // `fieldDelimiter` properties. + // + // If you don't specify an encoding, or if you specify a UTF-8 encoding when + // the CSV file is not UTF-8 encoded, BigQuery attempts to convert the data to + // UTF-8. Generally, your data loads successfully, but it may not match + // byte-for-byte what you expect. To avoid this, specify the correct encoding + // by using the `--encoding` flag. + // + // If BigQuery can't convert a character other than the ASCII `0` character, + // BigQuery converts the character to the standard Unicode replacement + // character: �. + string encoding = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The value that is used to quote data sections in a CSV file. + // BigQuery converts the string to ISO-8859-1 encoding, and then uses the + // first byte of the encoded string to split the data in its raw, binary + // state. + // The default value is a double-quote ('"'). + // If your data does not contain quoted sections, set the property value to an + // empty string. + // If your data contains quoted newline characters, you must also set the + // allowQuotedNewlines property to true. + // To include the specific quote character within a quoted value, precede it + // with an additional matching quote character. For example, if you want to + // escape the default character ' " ', use ' "" '. + // @default " + google.protobuf.StringValue quote = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The maximum number of bad records that BigQuery can ignore when + // running the job. If the number of bad records exceeds this value, an + // invalid error is returned in the job result. + // The default value is 0, which requires that all records are valid. + // This is only supported for CSV and NEWLINE_DELIMITED_JSON file formats. + google.protobuf.Int32Value max_bad_records = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Indicates if BigQuery should allow quoted data sections that contain + // newline characters in a CSV file. The default value is false. + google.protobuf.BoolValue allow_quoted_newlines = 15; + + // Optional. The format of the data files. + // For CSV files, specify "CSV". For datastore backups, + // specify "DATASTORE_BACKUP". For newline-delimited JSON, + // specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". + // For parquet, specify "PARQUET". For orc, specify "ORC". + // The default value is CSV. + string source_format = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Accept rows that are missing trailing optional columns. + // The missing values are treated as nulls. + // If false, records with missing trailing columns are treated as bad records, + // and if there are too many bad records, an invalid error is returned in the + // job result. + // The default value is false. + // Only applicable to CSV, ignored for other formats. + google.protobuf.BoolValue allow_jagged_rows = 17 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates if BigQuery should allow extra values that are not + // represented in the table schema. + // If true, the extra values are ignored. + // If false, records with extra columns are treated as bad records, and if + // there are too many bad records, an invalid error is returned in the job + // result. The default value is false. + // The sourceFormat property determines what BigQuery treats as an extra + // value: + // CSV: Trailing columns + // JSON: Named values that don't match any column names in the table schema + // Avro, Parquet, ORC: Fields in the file schema that don't exist in the + // table schema. + google.protobuf.BoolValue ignore_unknown_values = 18 + [(google.api.field_behavior) = OPTIONAL]; + + // If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity + // properties to load into BigQuery from a Cloud Datastore backup. Property + // names are case sensitive and must be top-level properties. If no properties + // are specified, BigQuery loads all properties. If any named property isn't + // found in the Cloud Datastore backup, an invalid error is returned in the + // job result. + repeated string projection_fields = 19; + + // Optional. Indicates if we should automatically infer the options and + // schema for CSV and JSON sources. + google.protobuf.BoolValue autodetect = 20 + [(google.api.field_behavior) = OPTIONAL]; + + // Allows the schema of the destination table to be updated as a side effect + // of the load job if a schema is autodetected or supplied in the job + // configuration. + // Schema update options are supported in two cases: + // when writeDisposition is WRITE_APPEND; + // when writeDisposition is WRITE_TRUNCATE and the destination table is a + // partition of a table, specified by partition decorators. For normal tables, + // WRITE_TRUNCATE will always overwrite the schema. + // One or more of the following values are specified: + // + // * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. 
+  // * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
+  // schema to nullable.
+  repeated string schema_update_options = 21;
+
+  // Time-based partitioning specification for the destination table. Only one
+  // of timePartitioning and rangePartitioning should be specified.
+  TimePartitioning time_partitioning = 22;
+
+  // Range partitioning specification for the destination table.
+  // Only one of timePartitioning and rangePartitioning should be specified.
+  RangePartitioning range_partitioning = 26;
+
+  // Clustering specification for the destination table.
+  Clustering clustering = 23;
+
+  // Custom encryption configuration (e.g., Cloud KMS keys)
+  EncryptionConfiguration destination_encryption_configuration = 24;
+
+  // Optional. If sourceFormat is set to "AVRO", indicates whether to interpret
+  // logical types as the corresponding BigQuery data type (for example,
+  // TIMESTAMP), instead of using the raw type (for example, INTEGER).
+  google.protobuf.BoolValue use_avro_logical_types = 25
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The user can provide a reference file with the reader schema.
+  // This file is only loaded if it is part of source URIs, but is not loaded
+  // otherwise. It is enabled for the following formats: AVRO, PARQUET, ORC.
+  google.protobuf.StringValue reference_file_schema_uri = 45
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. When set, configures hive partitioning support.
+  // Not all storage formats support hive partitioning -- requesting hive
+  // partitioning on an unsupported format will lead to an error, as will
+  // providing an invalid specification.
+  HivePartitioningOptions hive_partitioning_options = 37
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Defines the list of possible SQL data types to which the source decimal
+  // values are converted. This list and the precision and the scale parameters
+  // of the decimal field determine the target type. In the order of NUMERIC,
+  // BIGNUMERIC, and STRING, a
+  // type is picked if it is in the specified list and if it supports the
+  // precision and the scale. STRING supports all precision and scale values.
+  // If none of the listed types supports the precision and the scale, the type
+  // supporting the widest range in the specified list is picked, and if a value
+  // exceeds the supported range when reading the data, an error will be thrown.
+  //
+  // Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"].
+  // If (precision,scale) is:
+  //
+  // * (38,9) -> NUMERIC;
+  // * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits);
+  // * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits);
+  // * (76,38) -> BIGNUMERIC;
+  // * (77,38) -> BIGNUMERIC (error if value exceeds supported range).
+  //
+  // This field cannot contain duplicate types. The order of the types in this
+  // field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as
+  // ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over
+  // BIGNUMERIC.
+  //
+  // Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other
+  // file formats.
+  repeated DecimalTargetType decimal_target_types = 39;
+
+  // Optional. Load option to be used together with source_format
+  // newline-delimited JSON to indicate that a variant of JSON is being loaded.
+  // To load newline-delimited GeoJSON, specify GEOJSON (and source_format must
+  // be set to NEWLINE_DELIMITED_JSON).
+ JsonExtension json_extension = 41 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional properties to set if sourceFormat is set to PARQUET. + ParquetOptions parquet_options = 42 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. When sourceFormat is set to "CSV", this indicates whether the + // embedded ASCII control characters (the first 32 characters in the + // ASCII-table, from + // '\x00' to '\x1F') are preserved. + google.protobuf.BoolValue preserve_ascii_control_characters = 44 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Connection properties which can modify the load job behavior. + // Currently, only the 'session_id' connection property is supported, and is + // used to resolve _SESSION appearing as the dataset id. + repeated ConnectionProperty connection_properties = 46 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If this property is true, the job creates a new session using a + // randomly generated session_id. To continue using a created session with + // subsequent queries, pass the existing session identifier as a + // `ConnectionProperty` value. The session identifier is returned as part of + // the `SessionInfo` message within the query statistics. + // + // The new session's location will be set to `Job.JobReference.location` if it + // is present, otherwise it's set to the default location based on existing + // routing logic. + google.protobuf.BoolValue create_session = 47 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Character map supported for column names in CSV/Parquet loads. + // Defaults to STRICT and can be overridden by Project Config Service. Using + // this option with unsupported load formats will result in an error. + ColumnNameCharacterMap column_name_character_map = 50 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. [Experimental] Configures the load job to copy files directly to + // the destination BigLake managed table, bypassing file content reading and + // rewriting. + // + // Copying files only is supported when all the following are true: + // + // * `source_uris` are located in the same Cloud Storage location as the + // destination table's `storage_uri` location. + // * `source_format` is `PARQUET`. + // * `destination_table` is an existing BigLake managed table. The table's + // schema does not have flexible column names. The table's columns do not + // have type parameters other than precision and scale. + // * No options other than the above are specified. + google.protobuf.BoolValue copy_files_only = 51 + [(google.api.field_behavior) = OPTIONAL]; +} + +// JobConfigurationTableCopy configures a job that copies data from one table +// to another. +// For more information on copying tables, see [Copy a +// table](https://cloud.google.com/bigquery/docs/managing-tables#copy-table). +message JobConfigurationTableCopy { + // Indicates different operation types supported in table copy job. + enum OperationType { + // Unspecified operation type. + OPERATION_TYPE_UNSPECIFIED = 0; + + // The source and destination table have the same table type. + COPY = 1; + + // The source table type is TABLE and + // the destination table type is SNAPSHOT. + SNAPSHOT = 2; + + // The source table type is SNAPSHOT and + // the destination table type is TABLE. + RESTORE = 3; + + // The source and destination table have the same table type, + // but only bill for unique data. + CLONE = 4; + } + + // [Pick one] Source table to copy.
+ TableReference source_table = 1; + + // [Pick one] Source tables to copy. + repeated TableReference source_tables = 2; + + // [Required] The destination table. + TableReference destination_table = 3; + + // Optional. Specifies whether the job is allowed to create new tables. + // The following values are supported: + // + // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the + // table. + // * CREATE_NEVER: The table must already exist. If it does not, + // a 'notFound' error is returned in the job result. + // + // The default value is CREATE_IF_NEEDED. + // Creation, truncation and append actions occur as one atomic update + // upon job completion. + string create_disposition = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the action that occurs if the destination table + // already exists. The following values are supported: + // + // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the + // table data and uses the schema and table constraints from the source table. + // * WRITE_APPEND: If the table already exists, BigQuery appends the data to + // the table. + // * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' + // error is returned in the job result. + // + // The default value is WRITE_EMPTY. Each action is atomic and only occurs if + // BigQuery is able to complete the job successfully. Creation, truncation and + // append actions occur as one atomic update upon job completion. + string write_disposition = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Custom encryption configuration (e.g., Cloud KMS keys). + EncryptionConfiguration destination_encryption_configuration = 6; + + // Optional. Supported operation types in table copy job. + OperationType operation_type = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The time when the destination table expires. Expired tables will + // be deleted and their storage reclaimed. + google.protobuf.Timestamp destination_expiration_time = 9 + [(google.api.field_behavior) = OPTIONAL]; +} + +// JobConfigurationExtract configures a job that exports data from a BigQuery +// table into Google Cloud Storage. +message JobConfigurationExtract { + // Options related to model extraction. + message ModelExtractOptions { + // The 1-based ID of the trial to be exported from a hyperparameter tuning + // model. If not specified, the trial with id = + // [Model](https://cloud.google.com/bigquery/docs/reference/rest/v2/models#resource:-model).defaultTrialId + // is exported. This field is ignored for models not trained with + // hyperparameter tuning. + google.protobuf.Int64Value trial_id = 1; + } + + // Required. Source reference for the export. + oneof source { + // A reference to the table being exported. + TableReference source_table = 1; + + // A reference to the model being exported. + ModelReference source_model = 9; + } + + // [Pick one] A list of fully-qualified Google Cloud Storage URIs where the + // extracted table should be written. + repeated string destination_uris = 3; + + // Optional. Whether to print out a header row in the results. + // Default is true. Not applicable when extracting models. + google.protobuf.BoolValue print_header = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. When extracting data in CSV format, this defines the + // delimiter to use between fields in the exported data. + // Default is ','. Not applicable when extracting models. 
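To make the copy dispositions and operation types concrete, here is a speculative TypeScript sketch of a snapshot copy configuration; the interfaces mirror the proto fields above under the assumed camelCase mapping, and the project, dataset, and table names are placeholders.

```ts
// Hand-written stand-ins for the generated reference and copy-config types.
interface TableReferenceLite {
  projectId: string;
  datasetId: string;
  tableId: string;
}

interface JobConfigurationTableCopyLite {
  sourceTable?: TableReferenceLite;
  destinationTable?: TableReferenceLite;
  createDisposition?: 'CREATE_IF_NEEDED' | 'CREATE_NEVER';
  writeDisposition?: 'WRITE_TRUNCATE' | 'WRITE_APPEND' | 'WRITE_EMPTY';
  operationType?: 'COPY' | 'SNAPSHOT' | 'RESTORE' | 'CLONE';
}

// Snapshot a TABLE into a SNAPSHOT, per OperationType.SNAPSHOT above.
const snapshotCopy: JobConfigurationTableCopyLite = {
  sourceTable: {projectId: 'my-project', datasetId: 'my_dataset', tableId: 'orders'},
  destinationTable: {projectId: 'my-project', datasetId: 'my_dataset', tableId: 'orders_snapshot'},
  createDisposition: 'CREATE_IF_NEEDED', // the documented default
  writeDisposition: 'WRITE_EMPTY',       // default: fail if data already exists
  operationType: 'SNAPSHOT',
};
```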
+ string field_delimiter = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The exported file format. Possible values include CSV, + // NEWLINE_DELIMITED_JSON, PARQUET, or AVRO for tables and ML_TF_SAVED_MODEL + // or ML_XGBOOST_BOOSTER for models. The default value for tables is CSV. + // Tables with nested or repeated fields cannot be exported as CSV. The + // default value for models is ML_TF_SAVED_MODEL. + string destination_format = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The compression type to use for exported files. Possible values + // include DEFLATE, GZIP, NONE, SNAPPY, and ZSTD. The default value is NONE. + // Not all compression formats are supported for all file formats. DEFLATE is + // only supported for Avro. ZSTD is only supported for Parquet. Not applicable + // when extracting models. + string compression = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Whether to use logical types when extracting to AVRO format. Not applicable + // when extracting models. + google.protobuf.BoolValue use_avro_logical_types = 13; + + // Optional. Model extract options only applicable when extracting models. + ModelExtractOptions model_extract_options = 14 + [(google.api.field_behavior) = OPTIONAL]; +} + +message JobConfiguration { + // Output only. The type of the job. Can be QUERY, LOAD, EXTRACT, COPY or + // UNKNOWN. + string job_type = 8; + + // [Pick one] Configures a query job. + JobConfigurationQuery query = 1; + + // [Pick one] Configures a load job. + JobConfigurationLoad load = 2; + + // [Pick one] Copies a table. + JobConfigurationTableCopy copy = 3; + + // [Pick one] Configures an extract job. + JobConfigurationExtract extract = 4; + + // Optional. If set, don't actually run this job. A valid query will return + // a mostly empty response with some processing statistics, while an invalid + // query will return the same error it would if it wasn't a dry run. Behavior + // of non-query jobs is undefined. + google.protobuf.BoolValue dry_run = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job timeout in milliseconds. If this time limit is exceeded, + // BigQuery will attempt to stop a longer job, but may not always succeed in + // canceling it before the job completes. For example, a job that takes more + // than 60 seconds to complete has a better chance of being stopped than a job + // that takes 10 seconds to complete. + google.protobuf.Int64Value job_timeout_ms = 6 + [(google.api.field_behavior) = OPTIONAL]; + + // The labels associated with this job. You can use these to organize and + // group your jobs. + // Label keys and values can be no longer than 63 characters, can only contain + // lowercase letters, numeric characters, underscores and dashes. + // International characters are allowed. Label values are optional. Label + // keys must start with a letter and each label in the list must have a + // different key. + map<string, string> labels = 7; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_creation_reason.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_creation_reason.proto.baseline new file mode 100755 index 000000000..0cede032b --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_creation_reason.proto.baseline @@ -0,0 +1,60 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "JobCreationReasonProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Reason about why a Job was created from a +// [`jobs.query`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query) +// method when used with `JOB_CREATION_OPTIONAL` Job creation mode. +// +// For +// [`jobs.insert`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert) +// method calls it will always be `REQUESTED`. +// +// [Preview](https://cloud.google.com/products/#product-launch-stages) +message JobCreationReason { + // Indicates the high level reason why a job was created. + enum Code { + // Reason is not specified. + CODE_UNSPECIFIED = 0; + + // Job creation was requested. + REQUESTED = 1; + + // The query request ran beyond a system defined timeout specified by the + // [timeoutMs field in the + // QueryRequest](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#queryrequest). + // As a result it was considered a long running operation for which a job + // was created. + LONG_RUNNING = 2; + + // The results from the query cannot fit in the response. + LARGE_RESULTS = 3; + + // BigQuery has determined that the query needs to be executed as a Job. + OTHER = 4; + } + + // Output only. Specifies the high level reason why a Job was created. + Code code = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_reference.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_reference.proto.baseline new file mode 100755 index 000000000..d7f3ece6f --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_reference.proto.baseline @@ -0,0 +1,45 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// A job reference is a fully qualified identifier for referring to a job. +message JobReference { + // Required. The ID of the project containing this job. 
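As a small consumer-side illustration of the JobCreationReason codes defined above, the TypeScript sketch below maps each code to a human-readable note; representing the enum as string literals is an assumption about the JSON mapping.

```ts
// Codes mirror JobCreationReason.Code above.
type JobCreationCode =
  | 'CODE_UNSPECIFIED'
  | 'REQUESTED'
  | 'LONG_RUNNING'
  | 'LARGE_RESULTS'
  | 'OTHER';

function describeJobCreation(code: JobCreationCode): string {
  switch (code) {
    case 'REQUESTED':
      return 'Job creation was explicitly requested.';
    case 'LONG_RUNNING':
      return 'The query ran past the request timeoutMs, so a job was created.';
    case 'LARGE_RESULTS':
      return 'The results could not fit in the response, so a job was created.';
    case 'OTHER':
      return 'BigQuery determined the query needed to run as a job.';
    default:
      return 'No creation reason recorded.';
  }
}
```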
+ string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the job. The ID must contain only letters (a-z, A-Z), + // numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 + // characters. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The geographic location of the job. The default value is US. + // + // For more information about BigQuery locations, see: + // https://cloud.google.com/bigquery/docs/locations + google.protobuf.StringValue location = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // This field should not be used. + repeated string location_alternative = 5; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_stats.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_stats.proto.baseline new file mode 100755 index 000000000..877e00392 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_stats.proto.baseline @@ -0,0 +1,1439 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/v2/dataset_reference.proto"; +import "google/cloud/bigquery/v2/model.proto"; +import "google/cloud/bigquery/v2/query_parameter.proto"; +import "google/cloud/bigquery/v2/routine_reference.proto"; +import "google/cloud/bigquery/v2/row_access_policy_reference.proto"; +import "google/cloud/bigquery/v2/session_info.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobStatsProto"; +option java_package = "com.google.cloud.bigquery.v2"; +option (google.api.resource_definition) = { + type: "cloudkms.googleapis.com/CryptoKey" + pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}" +}; +option (google.api.resource_definition) = { + type: "storage.googleapis.com/Bucket" + pattern: "*" +}; + +// An operation within a stage. +message ExplainQueryStep { + // Machine-readable operation type. + string kind = 1; + + // Human-readable description of the step(s). + repeated string substeps = 2; +} + +// A single stage of query execution. +message ExplainQueryStage { + // Indicates the type of compute mode. + enum ComputeMode { + // ComputeMode type not specified. + COMPUTE_MODE_UNSPECIFIED = 0; + + // This stage was processed using BigQuery slots. + BIGQUERY = 1; + + // This stage was processed using BI Engine compute. + BI_ENGINE = 2; + } + + // Human-readable name for the stage. + string name = 1; + + // Unique ID for the stage within the plan. 
+ google.protobuf.Int64Value id = 2; + + // Stage start time represented as milliseconds since the epoch. + int64 start_ms = 3; + + // Stage end time represented as milliseconds since the epoch. + int64 end_ms = 4; + + // IDs for stages that are inputs to this stage. + repeated int64 input_stages = 5; + + // Relative amount of time the average shard spent waiting to be + // scheduled. + google.protobuf.DoubleValue wait_ratio_avg = 6; + + // Milliseconds the average shard spent waiting to be scheduled. + google.protobuf.Int64Value wait_ms_avg = 7; + + // Relative amount of time the slowest shard spent waiting to be + // scheduled. + google.protobuf.DoubleValue wait_ratio_max = 8; + + // Milliseconds the slowest shard spent waiting to be scheduled. + google.protobuf.Int64Value wait_ms_max = 9; + + // Relative amount of time the average shard spent reading input. + google.protobuf.DoubleValue read_ratio_avg = 10; + + // Milliseconds the average shard spent reading input. + google.protobuf.Int64Value read_ms_avg = 11; + + // Relative amount of time the slowest shard spent reading input. + google.protobuf.DoubleValue read_ratio_max = 12; + + // Milliseconds the slowest shard spent reading input. + google.protobuf.Int64Value read_ms_max = 13; + + // Relative amount of time the average shard spent on CPU-bound tasks. + google.protobuf.DoubleValue compute_ratio_avg = 14; + + // Milliseconds the average shard spent on CPU-bound tasks. + google.protobuf.Int64Value compute_ms_avg = 15; + + // Relative amount of time the slowest shard spent on CPU-bound tasks. + google.protobuf.DoubleValue compute_ratio_max = 16; + + // Milliseconds the slowest shard spent on CPU-bound tasks. + google.protobuf.Int64Value compute_ms_max = 17; + + // Relative amount of time the average shard spent on writing output. + google.protobuf.DoubleValue write_ratio_avg = 18; + + // Milliseconds the average shard spent on writing output. + google.protobuf.Int64Value write_ms_avg = 19; + + // Relative amount of time the slowest shard spent on writing output. + google.protobuf.DoubleValue write_ratio_max = 20; + + // Milliseconds the slowest shard spent on writing output. + google.protobuf.Int64Value write_ms_max = 21; + + // Total number of bytes written to shuffle. + google.protobuf.Int64Value shuffle_output_bytes = 22; + + // Total number of bytes written to shuffle and spilled to disk. + google.protobuf.Int64Value shuffle_output_bytes_spilled = 23; + + // Number of records read into the stage. + google.protobuf.Int64Value records_read = 24; + + // Number of records written by the stage. + google.protobuf.Int64Value records_written = 25; + + // Number of parallel input segments to be processed + google.protobuf.Int64Value parallel_inputs = 26; + + // Number of parallel input segments completed. + google.protobuf.Int64Value completed_parallel_inputs = 27; + + // Current status for this stage. + string status = 28; + + // List of operations within the stage in dependency order (approximately + // chronological). + repeated ExplainQueryStep steps = 29; + + // Slot-milliseconds used by the stage. + google.protobuf.Int64Value slot_ms = 30; + + // Output only. Compute mode for this stage. + ComputeMode compute_mode = 31 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Summary of the state of query execution at a given time. +message QueryTimelineSample { + // Milliseconds elapsed since the start of query execution. + google.protobuf.Int64Value elapsed_ms = 1; + + // Cumulative slot-ms consumed by the query. 
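The per-stage wait/read/compute/write metrics above support simple plan analysis on the client. A minimal TypeScript sketch, assuming camelCase JSON field names and that Int64 wrapper values may surface as strings:

```ts
// Tiny subset of ExplainQueryStage, enough to rank stages by CPU pressure.
interface ExplainQueryStageLite {
  name?: string;
  computeRatioMax?: number;
  waitRatioMax?: number;
  slotMs?: number | string;
}

// Returns the stage whose slowest shard spent the most relative time on
// CPU-bound work; a rough way to find the plan's compute hot spot.
function hottestStage(
  plan: ExplainQueryStageLite[],
): ExplainQueryStageLite | undefined {
  return plan.reduce<ExplainQueryStageLite | undefined>(
    (best, stage) =>
      (stage.computeRatioMax ?? 0) > (best?.computeRatioMax ?? 0) ? stage : best,
    undefined,
  );
}
```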
+ google.protobuf.Int64Value total_slot_ms = 2; + + // Total units of work remaining for the query. This number can be revised + // (increased or decreased) while the query is running. + google.protobuf.Int64Value pending_units = 3; + + // Total parallel units of work completed by this query. + google.protobuf.Int64Value completed_units = 4; + + // Total number of active workers. This does not correspond directly to + // slot usage. This is the largest value observed since the last sample. + google.protobuf.Int64Value active_units = 5; + + // Units of work that can be scheduled immediately. Providing additional slots + // for these units of work will accelerate the query, if no other query in + // the reservation needs additional slots. + google.protobuf.Int64Value estimated_runnable_units = 7; +} + +// The external service cost is a portion of the total cost; these costs are not +// additive with total_bytes_billed. Moreover, this field only tracks external +// service costs that will show up as BigQuery costs (e.g. training BigQuery +// ML job with Google Cloud CAIP or AutoML Tables services), not other costs +// which may be accrued by running the query (e.g. reading from Bigtable or +// Cloud Storage). The external service costs with different billing SKU (e.g. +// CAIP job is charged based on VM usage) are converted to BigQuery +// billed_bytes and slot_ms with equivalent amount of US dollars. Services may +// not directly correlate to these metrics, but these are the equivalents for +// billing purposes. +// Output only. +message ExternalServiceCost { + // External service name. + string external_service = 1; + + // External service cost in terms of bigquery bytes processed. + google.protobuf.Int64Value bytes_processed = 2; + + // External service cost in terms of bigquery bytes billed. + google.protobuf.Int64Value bytes_billed = 3; + + // External service cost in terms of bigquery slot milliseconds. + google.protobuf.Int64Value slot_ms = 4; + + // Non-preemptable reserved slots used for external job. + // For example, reserved slots for Cloud AI Platform job are the VM usages + // converted to BigQuery slot with equivalent amount of price. + int64 reserved_slot_count = 5; +} + +// Statistics for the EXPORT DATA statement as part of Query Job. EXTRACT +// JOB statistics are populated in JobStatistics4. +message ExportDataStatistics { + // Number of destination files generated in case of EXPORT DATA + // statement only. + google.protobuf.Int64Value file_count = 1; + + // [Alpha] Number of destination rows generated in case of EXPORT DATA + // statement only. + google.protobuf.Int64Value row_count = 2; +} + +// Reason why BI Engine didn't accelerate the query (or sub-query). +message BiEngineReason { + // Indicates the high-level reason for no/partial acceleration + enum Code { + // BiEngineReason not specified. + CODE_UNSPECIFIED = 0; + + // No reservation available for BI Engine acceleration. + NO_RESERVATION = 1; + + // Not enough memory available for BI Engine acceleration. + INSUFFICIENT_RESERVATION = 2; + + // This particular SQL text is not supported for acceleration by BI Engine. + UNSUPPORTED_SQL_TEXT = 4; + + // Input too large for acceleration by BI Engine. + INPUT_TOO_LARGE = 5; + + // Catch-all code for all other cases for partial or disabled acceleration. + OTHER_REASON = 6; + + // One or more tables were not eligible for BI Engine acceleration. + TABLE_EXCLUDED = 7; + } + + // Output only.
High-level BI Engine reason for partial or disabled + // acceleration + Code code = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Free form human-readable reason for partial or disabled + // acceleration. + string message = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a BI Engine specific query. +// Populated as part of JobStatistics2 +message BiEngineStatistics { + // Indicates the type of BI Engine acceleration. + enum BiEngineMode { + // BiEngineMode type not specified. + ACCELERATION_MODE_UNSPECIFIED = 0; + + // BI Engine disabled the acceleration. bi_engine_reasons + // specifies a more detailed reason. + DISABLED = 1; + + // Part of the query was accelerated using BI Engine. + // See bi_engine_reasons for why parts of the query were not + // accelerated. + PARTIAL = 2; + + // All of the query was accelerated using BI Engine. + FULL = 3; + } + + // Indicates the type of BI Engine acceleration. + enum BiEngineAccelerationMode { + // BiEngineMode type not specified. + BI_ENGINE_ACCELERATION_MODE_UNSPECIFIED = 0; + + // BI Engine acceleration was attempted but disabled. bi_engine_reasons + // specifies a more detailed reason. + BI_ENGINE_DISABLED = 1; + + // Some inputs were accelerated using BI Engine. + // See bi_engine_reasons for why parts of the query were not + // accelerated. + PARTIAL_INPUT = 2; + + // All of the query inputs were accelerated using BI Engine. + FULL_INPUT = 3; + + // All of the query was accelerated using BI Engine. + FULL_QUERY = 4; + } + + // Output only. Specifies which mode of BI Engine acceleration was performed + // (if any). + BiEngineMode bi_engine_mode = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Specifies which mode of BI Engine acceleration was performed + // (if any). + BiEngineAccelerationMode acceleration_mode = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // In case of DISABLED or PARTIAL bi_engine_mode, these contain the + // explanatory reasons as to why BI Engine could not accelerate. + // In case the full query was accelerated, this field is not populated. + repeated BiEngineReason bi_engine_reasons = 2; +} + +// Reason about why no search index was used in the search query (or +// sub-query). +message IndexUnusedReason { + // Indicates the high-level reason for the scenario when no search index was + // used. + enum Code { + // Code not specified. + CODE_UNSPECIFIED = 0; + + // Indicates the search index configuration has not been created. + INDEX_CONFIG_NOT_AVAILABLE = 1; + + // Indicates the search index creation has not been completed. + PENDING_INDEX_CREATION = 2; + + // Indicates the base table has been truncated (rows have been removed + // from table with TRUNCATE TABLE statement) since the last time the search + // index was refreshed. + BASE_TABLE_TRUNCATED = 3; + + // Indicates the search index configuration has been changed since the last + // time the search index was refreshed. + INDEX_CONFIG_MODIFIED = 4; + + // Indicates the search query accesses data at a timestamp before the last + // time the search index was refreshed. + TIME_TRAVEL_QUERY = 5; + + // Indicates the usage of search index will not contribute to any pruning + // improvement for the search function, e.g. when the search predicate is in + // a disjunction with other non-search predicates. + NO_PRUNING_POWER = 6; + + // Indicates the search index does not cover all fields in the search + // function. 
+ UNINDEXED_SEARCH_FIELDS = 7; + + // Indicates the search index does not support the given search query + // pattern. + UNSUPPORTED_SEARCH_PATTERN = 8; + + // Indicates the query has been optimized by using a materialized view. + OPTIMIZED_WITH_MATERIALIZED_VIEW = 9; + + // Indicates the query has been secured by data masking, and thus search + // indexes are not applicable. + SECURED_BY_DATA_MASKING = 11; + + // Indicates that the search index and the search function call do not + // have the same text analyzer. + MISMATCHED_TEXT_ANALYZER = 12; + + // Indicates the base table is too small (below a certain threshold). + // The index does not provide noticeable search performance gains + // when the base table is too small. + BASE_TABLE_TOO_SMALL = 13; + + // Indicates that the total size of indexed base tables in your organization + // exceeds your region's limit and the index is not used in the query. To + // index larger base tables, you can + // use + // your own reservation for index-management jobs. + BASE_TABLE_TOO_LARGE = 14; + + // Indicates that the estimated performance gain from using the search index + // is too low for the given search query. + ESTIMATED_PERFORMANCE_GAIN_TOO_LOW = 15; + + // Indicates that search indexes can not be used for search query with + // STANDARD edition. + NOT_SUPPORTED_IN_STANDARD_EDITION = 17; + + // Indicates that an option in the search function that cannot make use of + // the index has been selected. + INDEX_SUPPRESSED_BY_FUNCTION_OPTION = 18; + + // Indicates that the query was cached, and thus the search index was not + // used. + QUERY_CACHE_HIT = 19; + + // The index cannot be used in the search query because it is stale. + STALE_INDEX = 20; + + // Indicates an internal error that causes the search index to be unused. + INTERNAL_ERROR = 10; + + // Indicates that the reason search indexes cannot be used in the query is + // not covered by any of the other IndexUnusedReason options. + OTHER_REASON = 16; + } + + // Specifies the high-level reason for the scenario when no search index was + // used. + optional Code code = 1; + + // Free form human-readable reason for the scenario when no search index was + // used. + optional string message = 2; + + // Specifies the base table involved in the reason that no search index was + // used. + optional TableReference base_table = 3; + + // Specifies the name of the unused search index, if available. + optional string index_name = 4; +} + +// Statistics for a search query. +// Populated as part of JobStatistics2. +message SearchStatistics { + // Indicates the type of search index usage in the entire search query. + enum IndexUsageMode { + // Index usage mode not specified. + INDEX_USAGE_MODE_UNSPECIFIED = 0; + + // No search indexes were used in the search query. See + // [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for detailed reasons. + UNUSED = 1; + + // Part of the search query used search indexes. See [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for why other parts of the query did not use search indexes. + PARTIALLY_USED = 2; + + // The entire search query used search indexes. + FULLY_USED = 4; + } + + // Specifies the index usage mode for the query. + IndexUsageMode index_usage_mode = 1; + + // When `indexUsageMode` is `UNUSED` or `PARTIALLY_USED`, this field explains + // why indexes were not used in all or part of the search query. If + // `indexUsageMode` is `FULLY_USED`, this field is not populated. 
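One plausible way to consume the index-usage information above is sketched below in TypeScript; the lightweight interfaces are illustrative stand-ins for the generated SearchStatistics and IndexUnusedReason types.

```ts
interface IndexUnusedReasonLite {
  code?: string;
  message?: string;
  indexName?: string;
}

interface SearchStatisticsLite {
  indexUsageMode?: 'INDEX_USAGE_MODE_UNSPECIFIED' | 'UNUSED' | 'PARTIALLY_USED' | 'FULLY_USED';
  indexUnusedReasons?: IndexUnusedReasonLite[];
}

function reportIndexUsage(stats: SearchStatisticsLite): void {
  if (stats.indexUsageMode === 'FULLY_USED') {
    // Per the comment above, indexUnusedReasons is not populated here.
    console.log('Search index fully used.');
    return;
  }
  for (const reason of stats.indexUnusedReasons ?? []) {
    console.log(`${reason.code ?? 'UNKNOWN'}: ${reason.message ?? ''}`);
  }
}
```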
+ repeated IndexUnusedReason index_unused_reasons = 2; +} + +// Statistics for a vector search query. +// Populated as part of JobStatistics2. +message VectorSearchStatistics { + // Indicates the type of vector index usage in the entire vector search query. + enum IndexUsageMode { + // Index usage mode not specified. + INDEX_USAGE_MODE_UNSPECIFIED = 0; + + // No vector indexes were used in the vector search query. See + // [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for detailed reasons. + UNUSED = 1; + + // Part of the vector search query used vector indexes. See + // [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for why other parts of the query did not use vector indexes. + PARTIALLY_USED = 2; + + // The entire vector search query used vector indexes. + FULLY_USED = 4; + } + + // Specifies the index usage mode for the query. + IndexUsageMode index_usage_mode = 1; + + // When `indexUsageMode` is `UNUSED` or `PARTIALLY_USED`, this field explains + // why indexes were not used in all or part of the vector search query. If + // `indexUsageMode` is `FULLY_USED`, this field is not populated. + repeated IndexUnusedReason index_unused_reasons = 2; +} + +// Query optimization information for a QUERY job. +message QueryInfo { + // Output only. Information about query optimizations. + google.protobuf.Struct optimization_details = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a LOAD query. +message LoadQueryStatistics { + // Output only. Number of source files in a LOAD query. + google.protobuf.Int64Value input_files = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of bytes of source data in a LOAD query. + google.protobuf.Int64Value input_file_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of rows imported in a LOAD query. + // Note that while a LOAD query is in the running state, this value may + // change. + google.protobuf.Int64Value output_rows = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Size of the loaded data in bytes. Note that while a LOAD query + // is in the running state, this value may change. + google.protobuf.Int64Value output_bytes = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of bad records encountered while processing a LOAD + // query. Note that if the job has failed because of more bad records + // encountered than the maximum allowed in the load job configuration, then + // this number can be less than the total number of bad records present in the + // input data. + google.protobuf.Int64Value bad_records = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a query job. +message JobStatistics2 { + // Output only. Describes execution plan for the query. + repeated ExplainQueryStage query_plan = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The original estimate of bytes processed for the job. + google.protobuf.Int64Value estimated_bytes_processed = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes a timeline of job execution. + repeated QueryTimelineSample timeline = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total number of partitions processed from all partitioned + // tables referenced in the job. + google.protobuf.Int64Value total_partitions_processed = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
Total bytes processed for the job. + google.protobuf.Int64Value total_bytes_processed = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. For dry-run jobs, totalBytesProcessed is an estimate and this + // field specifies the accuracy of the estimate. Possible values can be: + // UNKNOWN: accuracy of the estimate is unknown. + // PRECISE: estimate is precise. + // LOWER_BOUND: estimate is lower bound of what the query would cost. + // UPPER_BOUND: estimate is upper bound of what the query would cost. + string total_bytes_processed_accuracy = 21 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If the project is configured to use on-demand pricing, + // then this field contains the total bytes billed for the job. + // If the project is configured to use flat-rate pricing, then you are + // not billed for bytes and this field is informational only. + google.protobuf.Int64Value total_bytes_billed = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Billing tier for the job. This is a BigQuery-specific concept + // which is not related to the Google Cloud notion of "free tier". The value + // here is a measure of the query's resource consumption relative to the + // amount of data scanned. For on-demand queries, the limit is 100, and all + // queries within this limit are billed at the standard on-demand rates. + // On-demand queries that exceed this limit will fail with a + // billingTierLimitExceeded error. + google.protobuf.Int32Value billing_tier = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Slot-milliseconds for the job. + google.protobuf.Int64Value total_slot_ms = 8 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Whether the query result was fetched from the query cache. + google.protobuf.BoolValue cache_hit = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced tables for the job. Queries that reference more + // than 50 tables will not have a complete list. + repeated TableReference referenced_tables = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced routines for the job. + repeated RoutineReference referenced_routines = 24 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The schema of the results. Present only for successful dry + // run of non-legacy SQL queries. + TableSchema schema = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of rows affected by a DML statement. Present + // only for DML statements INSERT, UPDATE or DELETE. + google.protobuf.Int64Value num_dml_affected_rows = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Detailed statistics for DML statements INSERT, UPDATE, DELETE, + // MERGE or TRUNCATE. + DmlStats dml_stats = 32 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. GoogleSQL only: list of undeclared query + // parameters detected during a dry run validation. + repeated QueryParameter undeclared_query_parameters = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The type of query statement, if valid. + // Possible values: + // + // * `SELECT`: + // [`SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_list) + // statement. + // * `ASSERT`: + // [`ASSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements#assert) + // statement. 
+ // * `INSERT`: + // [`INSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement) + // statement. + // * `UPDATE`: + // [`UPDATE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#update_statement) + // statement. + // * `DELETE`: + // [`DELETE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) + // statement. + // * `MERGE`: + // [`MERGE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) + // statement. + // * `CREATE_TABLE`: [`CREATE + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) + // statement, without `AS SELECT`. + // * `CREATE_TABLE_AS_SELECT`: [`CREATE TABLE AS + // SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#query_statement) + // statement. + // * `CREATE_VIEW`: [`CREATE + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_view_statement) + // statement. + // * `CREATE_MODEL`: [`CREATE + // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_model_statement) + // statement. + // * `CREATE_MATERIALIZED_VIEW`: [`CREATE MATERIALIZED + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement) + // statement. + // * `CREATE_FUNCTION`: [`CREATE + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement) + // statement. + // * `CREATE_TABLE_FUNCTION`: [`CREATE TABLE + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement) + // statement. + // * `CREATE_PROCEDURE`: [`CREATE + // PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure) + // statement. + // * `CREATE_ROW_ACCESS_POLICY`: [`CREATE ROW ACCESS + // POLICY`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement) + // statement. + // * `CREATE_SCHEMA`: [`CREATE + // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement) + // statement. + // * `CREATE_SNAPSHOT_TABLE`: [`CREATE SNAPSHOT + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement) + // statement. + // * `CREATE_SEARCH_INDEX`: [`CREATE SEARCH + // INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement) + // statement. + // * `DROP_TABLE`: [`DROP + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement) + // statement. + // * `DROP_EXTERNAL_TABLE`: [`DROP EXTERNAL + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement) + // statement. + // * `DROP_VIEW`: [`DROP + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_statement) + // statement. + // * `DROP_MODEL`: [`DROP + // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model) + // statement. 
+ // * `DROP_MATERIALIZED_VIEW`: [`DROP MATERIALIZED + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement) + // statement. + // * `DROP_FUNCTION` : [`DROP + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement) + // statement. + // * `DROP_TABLE_FUNCTION` : [`DROP TABLE + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function) + // statement. + // * `DROP_PROCEDURE`: [`DROP + // PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement) + // statement. + // * `DROP_SEARCH_INDEX`: [`DROP SEARCH + // INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index) + // statement. + // * `DROP_SCHEMA`: [`DROP + // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_statement) + // statement. + // * `DROP_SNAPSHOT_TABLE`: [`DROP SNAPSHOT + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement) + // statement. + // * `DROP_ROW_ACCESS_POLICY`: [`DROP [ALL] ROW ACCESS + // POLICY|POLICIES`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement) + // statement. + // * `ALTER_TABLE`: [`ALTER + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement) + // statement. + // * `ALTER_VIEW`: [`ALTER + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement) + // statement. + // * `ALTER_MATERIALIZED_VIEW`: [`ALTER MATERIALIZED + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement) + // statement. + // * `ALTER_SCHEMA`: [`ALTER + // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#aalter_schema_set_options_statement) + // statement. + // * `SCRIPT`: + // [`SCRIPT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language). + // * `TRUNCATE_TABLE`: [`TRUNCATE + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement) + // statement. + // * `CREATE_EXTERNAL_TABLE`: [`CREATE EXTERNAL + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement) + // statement. + // * `EXPORT_DATA`: [`EXPORT + // DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement) + // statement. + // * `EXPORT_MODEL`: [`EXPORT + // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model) + // statement. + // * `LOAD_DATA`: [`LOAD + // DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#load_data_statement) + // statement. + // * `CALL`: + // [`CALL`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call) + // statement. + string statement_type = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL operation performed, possibly + // dependent on the pre-existence of the DDL target. 
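Because `statement_type` surfaces as a plain string drawn from the catalog above, a client might bucket it coarsely, as in this TypeScript sketch; the grouping heuristic is ours, not part of the API.

```ts
// Coarse classification of JobStatistics2.statementType values.
function classifyStatement(statementType: string): 'read' | 'dml' | 'ddl' | 'other' {
  if (statementType === 'SELECT') {
    return 'read';
  }
  if (['INSERT', 'UPDATE', 'DELETE', 'MERGE', 'TRUNCATE_TABLE'].includes(statementType)) {
    return 'dml';
  }
  if (
    statementType.startsWith('CREATE_') ||
    statementType.startsWith('DROP_') ||
    statementType.startsWith('ALTER_')
  ) {
    return 'ddl';
  }
  return 'other'; // e.g. SCRIPT, CALL, EXPORT_DATA
}
```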
+ string ddl_operation_performed = 15 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL target table. Present only for + // CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. + TableReference ddl_target_table = 16 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The table after rename. Present only for ALTER TABLE RENAME TO + // query. + TableReference ddl_destination_table = 31 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL target row access policy. Present only for + // CREATE/DROP ROW ACCESS POLICY queries. + RowAccessPolicyReference ddl_target_row_access_policy = 26 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of row access policies affected by a DDL statement. + // Present only for DROP ALL ROW ACCESS POLICIES queries. + google.protobuf.Int64Value ddl_affected_row_access_policy_count = 27 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. [Beta] The DDL target routine. Present only for + // CREATE/DROP FUNCTION/PROCEDURE queries. + RoutineReference ddl_target_routine = 22 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL target dataset. Present only for CREATE/ALTER/DROP + // SCHEMA(dataset) queries. + DatasetReference ddl_target_dataset = 30 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of a BigQuery ML training job. + MlStatistics ml_statistics = 23 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Stats for EXPORT DATA statement. + ExportDataStatistics export_data_statistics = 25 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Job cost breakdown as bigquery internal cost and external + // service costs. + repeated ExternalServiceCost external_service_costs = 28 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. BI Engine specific Statistics. + BiEngineStatistics bi_engine_statistics = 29 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for a LOAD query. + LoadQueryStatistics load_query_statistics = 33 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced table for DCL statement. + TableReference dcl_target_table = 34 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced view for DCL statement. + TableReference dcl_target_view = 35 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced dataset for DCL statement. + DatasetReference dcl_target_dataset = 36 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Search query specific statistics. + SearchStatistics search_statistics = 37 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Vector Search query specific statistics. + VectorSearchStatistics vector_search_statistics = 44 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Performance insights. + PerformanceInsights performance_insights = 38 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Query optimization information for a QUERY job. + QueryInfo query_info = 39 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of a Spark procedure job. + SparkStatistics spark_statistics = 40 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total bytes transferred for cross-cloud queries such as Cross + // Cloud Transfer and CREATE TABLE AS SELECT (CTAS). 
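A short TypeScript sketch of summarizing a few of the query statistics above; it assumes camelCase JSON field names and that Int64 wrapper values may arrive as strings.

```ts
// Tiny subset of JobStatistics2 used for a one-line cost summary.
interface QueryStatsLite {
  cacheHit?: boolean;
  totalBytesProcessed?: string | number;
  totalSlotMs?: string | number;
}

function summarizeQueryStats(stats: QueryStatsLite): string {
  if (stats.cacheHit) {
    return 'Served from the query cache: no bytes processed.';
  }
  const gib = Number(stats.totalBytesProcessed ?? 0) / 2 ** 30;
  const slotMs = Number(stats.totalSlotMs ?? 0);
  return `${gib.toFixed(2)} GiB processed, ${slotMs} slot-ms consumed.`;
}
```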
+ google.protobuf.Int64Value transferred_bytes = 41 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of materialized views of a query job. + MaterializedViewStatistics materialized_view_statistics = 42 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of metadata cache usage in a query for BigLake + // tables. + MetadataCacheStatistics metadata_cache_statistics = 43 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a load job. +message JobStatistics3 { + // Output only. Number of source files in a load job. + google.protobuf.Int64Value input_files = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of bytes of source data in a load job. + google.protobuf.Int64Value input_file_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of rows imported in a load job. + // Note that while an import job is in the running state, this + // value may change. + google.protobuf.Int64Value output_rows = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Size of the loaded data in bytes. Note + // that while a load job is in the running state, this value may change. + google.protobuf.Int64Value output_bytes = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of bad records encountered. Note that if the job + // has failed because of more bad records encountered than the maximum + // allowed in the load job configuration, then this number can be less than + // the total number of bad records present in the input data. + google.protobuf.Int64Value bad_records = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes a timeline of job execution. + repeated QueryTimelineSample timeline = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for an extract job. +message JobStatistics4 { + // Output only. Number of files per destination URI or URI pattern + // specified in the extract configuration. These values will be in the same + // order as the URIs specified in the 'destinationUris' field. + repeated int64 destination_uri_file_counts = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of user bytes extracted into the result. This is the + // byte count as computed by BigQuery for billing purposes + // and doesn't have any relationship with the number of actual + // result bytes extracted in the desired format. + google.protobuf.Int64Value input_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes a timeline of job execution. + repeated QueryTimelineSample timeline = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a copy job. +message CopyJobStatistics { + // Output only. Number of rows copied to the destination table. + google.protobuf.Int64Value copied_rows = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of logical bytes copied to the destination table. + google.protobuf.Int64Value copied_logical_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Job statistics specific to a BigQuery ML training job. +message MlStatistics { + // Training type. + enum TrainingType { + // Unspecified training type. + TRAINING_TYPE_UNSPECIFIED = 0; + + // Single training with fixed parameter space. + SINGLE_TRAINING = 1; + + // [Hyperparameter tuning + // training](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview). 
+ HPARAM_TUNING = 2; + } + + // Output only. Maximum number of iterations specified as max_iterations in + // the 'CREATE MODEL' query. The actual number of iterations may be less than + // this number due to early stop. + int64 max_iterations = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Results for all completed iterations. + // Empty for [hyperparameter tuning + // jobs](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview). + repeated Model.TrainingRun.IterationResult iteration_results = 2; + + // Output only. The type of the model that is being trained. + Model.ModelType model_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Training type of the job. + TrainingType training_type = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Trials of a [hyperparameter tuning + // job](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) + // sorted by trial_id. + repeated Model.HparamTuningTrial hparam_trials = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Job statistics specific to the child job of a script. +message ScriptStatistics { + // Describes how the job is evaluated. + enum EvaluationKind { + // Default value. + EVALUATION_KIND_UNSPECIFIED = 0; + + // The statement appears directly in the script. + STATEMENT = 1; + + // The statement evaluates an expression that appears in the script. + EXPRESSION = 2; + } + + // Represents the location of the statement/expression being evaluated. + // Line and column numbers are defined as follows: + // + // - Line and column numbers start with one. That is, line 1 column 1 denotes + // the start of the script. + // - When inside a stored procedure, all line/column numbers are relative + // to the procedure body, not the script in which the procedure was defined. + // - Start/end positions exclude leading/trailing comments and whitespace. + // The end position always ends with a ";", when present. + // - Multi-byte Unicode characters are treated as just one column. + // - If the original script (or procedure definition) contains TAB characters, + // a tab "snaps" the indentation forward to the nearest multiple of 8 + // characters, plus 1. For example, a TAB on column 1, 2, 3, 4, 5, 6, 7, or 8 + // will advance the next character to column 9. A TAB on column 9, 10, 11, + // 12, 13, 14, 15, or 16 will advance the next character to column 17. + message ScriptStackFrame { + // Output only. One-based start line. + int32 start_line = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. One-based start column. + int32 start_column = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. One-based end line. + int32 end_line = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. One-based end column. + int32 end_column = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Name of the active procedure, empty if in a top-level + // script. + string procedure_id = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Text of the current statement/expression. + string text = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Whether this child job was a statement or expression. + EvaluationKind evaluation_kind = 1; + + // Stack trace showing the line/column/procedure name of each frame on the + // stack at the point where the current evaluation happened. The leaf frame + // is first, the primary script is last. Never empty.
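The TAB-snapping rule in the ScriptStackFrame comment above reduces to a one-line formula: snap to one past the smallest multiple of 8 at or beyond the tab's column. A TypeScript sketch (the helper name is ours, not part of the API):

```ts
// A TAB at column c advances the next character to one past the smallest
// multiple of 8 that is >= c.
function columnAfterTab(c: number): number {
  return Math.ceil(c / 8) * 8 + 1;
}

console.log(columnAfterTab(1));  // 9
console.log(columnAfterTab(8));  // 9
console.log(columnAfterTab(9));  // 17
console.log(columnAfterTab(16)); // 17
```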
+ repeated ScriptStackFrame stack_frames = 2; +} + +// Statistics for row-level security. +message RowLevelSecurityStatistics { + // Whether any accessed data was protected by row access policies. + bool row_level_security_applied = 1; +} + +// Statistics for data-masking. +message DataMaskingStatistics { + // Whether any accessed data was protected by the data masking. + bool data_masking_applied = 1; +} + +// Statistics for a single job execution. +message JobStatistics { + // [Alpha] Information of a multi-statement transaction. + message TransactionInfo { + // Output only. [Alpha] Id of the transaction. + string transaction_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Output only. Creation time of this job, in milliseconds since the epoch. + // This field will be present on all jobs. + int64 creation_time = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Start time of this job, in milliseconds since the epoch. + // This field will be present when the job transitions from the PENDING state + // to either RUNNING or DONE. + int64 start_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. End time of this job, in milliseconds since the epoch. This + // field will be present whenever a job is in the DONE state. + int64 end_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total bytes processed for the job. + google.protobuf.Int64Value total_bytes_processed = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. [TrustedTester] Job progress (0.0 -> 1.0) for LOAD and + // EXTRACT jobs. + google.protobuf.DoubleValue completion_ratio = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Quotas which delayed this job's start time. + repeated string quota_deferments = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for a query job. + JobStatistics2 query = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for a load job. + JobStatistics3 load = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for an extract job. + JobStatistics4 extract = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for a copy job. + CopyJobStatistics copy = 21 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Slot-milliseconds for the job. + google.protobuf.Int64Value total_slot_ms = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Name of the primary reservation assigned to this job. Note + // that this could be different than reservations reported in the reservation + // usage field if parent reservations were used to execute this job. + string reservation_id = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of child jobs executed. + int64 num_child_jobs = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If this is a child job, specifies the job ID of the parent. + string parent_job_id = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If this is a child job of a script, specifies information about + // the context of this job within the script. + ScriptStatistics script_statistics = 14 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for row-level security. Present only for query and + // extract jobs. + RowLevelSecurityStatistics row_level_security_statistics = 16 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for data-masking.
+ // extract jobs.
+ DataMaskingStatistics data_masking_statistics = 20
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. [Alpha] Information of the multi-statement transaction if this
+ // job is part of one.
+ //
+ // This property is only expected on a child job or a job that is in a
+ // session. A script parent job is not part of the transaction started in the
+ // script.
+ TransactionInfo transaction_info = 17
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Information of the session if this job is part of one.
+ SessionInfo session_info = 18 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The duration in milliseconds of the execution of the final
+ // attempt of this job, as BigQuery may internally re-attempt to execute the
+ // job.
+ int64 final_execution_duration_ms = 22
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Name of edition corresponding to the reservation for this job
+ // at the time of this update.
+ ReservationEdition edition = 24 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Detailed statistics for DML statements.
+message DmlStats {
+ // Output only. Number of inserted rows. Populated by DML INSERT and MERGE
+ // statements.
+ google.protobuf.Int64Value inserted_row_count = 1
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Number of deleted rows. Populated by DML DELETE, MERGE and
+ // TRUNCATE statements.
+ google.protobuf.Int64Value deleted_row_count = 2
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Number of updated rows. Populated by DML UPDATE and MERGE
+ // statements.
+ google.protobuf.Int64Value updated_row_count = 3
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Performance insights for the job.
+message PerformanceInsights {
+ // Output only. Average execution ms of previous runs. Indicates the job ran
+ // slow compared to previous executions. To find previous executions, use
+ // INFORMATION_SCHEMA tables and filter jobs with the same query hash.
+ int64 avg_previous_execution_ms = 1
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Standalone query stage performance insights, for exploring
+ // potential improvements.
+ repeated StagePerformanceStandaloneInsight
+ stage_performance_standalone_insights = 2
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Query stage performance insights compared to previous runs,
+ // for diagnosing performance regression.
+ repeated StagePerformanceChangeInsight stage_performance_change_insights = 3
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Performance insights compared to the previous executions for a specific
+// stage.
+message StagePerformanceChangeInsight {
+ // Output only. The stage id that the insight mapped to.
+ int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Input data change insight of the query stage.
+ optional InputDataChange input_data_change = 2
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Details about the input data change insight.
+message InputDataChange {
+ // Output only. Records read difference percentage compared to a previous run.
+ float records_read_diff_percentage = 1
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Standalone performance insights for a specific stage.
+message StagePerformanceStandaloneInsight {
+ // Output only. The stage id that the insight mapped to.
+ int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. True if the stage has a slot contention issue.
+ optional bool slot_contention = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. True if the stage has insufficient shuffle quota.
+ optional bool insufficient_shuffle_quota = 3
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. If present, the stage had the following reasons for being
+ // disqualified from BI Engine execution.
+ repeated BiEngineReason bi_engine_reasons = 5
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. High cardinality joins in the stage.
+ repeated HighCardinalityJoin high_cardinality_joins = 6
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Partition skew in the stage.
+ optional PartitionSkew partition_skew = 7
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// High cardinality join detailed information.
+message HighCardinalityJoin {
+ // Output only. Count of left input rows.
+ int64 left_rows = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Count of right input rows.
+ int64 right_rows = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Count of the output rows.
+ int64 output_rows = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The index of the join operator in the ExplainQueryStep lists.
+ int32 step_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Partition skew detailed information.
+message PartitionSkew {
+ // Details about source stages which produce skewed data.
+ message SkewSource {
+ // Output only. Stage id of the skew source stage.
+ int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+ }
+
+ // Output only. Source stages which produce skewed data.
+ repeated SkewSource skew_sources = 1
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Statistics for a BigSpark query.
+// Populated as part of JobStatistics2.
+message SparkStatistics {
+ // Spark job logs can be filtered by these fields in Cloud Logging.
+ message LoggingInfo {
+ // Output only. Resource type used for logging.
+ string resource_type = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Project ID where the Spark logs were written.
+ string project_id = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+ }
+
+ // Output only. Spark job ID if a Spark job is created successfully.
+ optional string spark_job_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Location where the Spark job is executed.
+ // A location is selected by BigQuery for jobs configured to run in a
+ // multi-region.
+ optional string spark_job_location = 2
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Endpoints returned from Dataproc.
+ // Key list:
+ // - history_server_endpoint: A link to Spark job UI.
+ map<string, string> endpoints = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Logging info is used to generate a link to Cloud Logging.
+ optional LoggingInfo logging_info = 4
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The Cloud KMS encryption key that is used to protect the
+ // resources created by the Spark job. If the Spark procedure uses the invoker
+ // security mode, the Cloud KMS encryption key is either inferred from the
+ // provided system variable,
+ // `@@spark_proc_properties.kms_key_name`, or the default key of the BigQuery
+ // job's project (if the CMEK organization policy is enforced). Otherwise, the
+ // Cloud KMS key is either inferred from the Spark connection associated with
+ // the procedure (if it is provided), or from the default key of the Spark
+ // connection's project if the CMEK organization policy is enforced.
+ //
+ // Example:
+ //
+ // * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`
+ optional string kms_key_name = 5 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
+
+ // Output only. The Google Cloud Storage bucket that is used as the default
+ // file system by the Spark application. This field is only filled when the
+ // Spark procedure uses the invoker security mode. The `gcsStagingBucket`
+ // bucket is inferred from the `@@spark_proc_properties.staging_bucket` system
+ // variable (if it is provided). Otherwise, BigQuery creates a default staging
+ // bucket for the job and returns the bucket name in this field.
+ //
+ // Example:
+ //
+ // * `gs://[bucket_name]`
+ optional string gcs_staging_bucket = 6 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" }
+ ];
+}
+
+// Statistics of materialized views considered in a query job.
+message MaterializedViewStatistics {
+ // Materialized views considered for the query job. Only certain materialized
+ // views are used. For a detailed list, see the child message.
+ //
+ // If many materialized views are considered, then the list might be
+ // incomplete.
+ repeated MaterializedView materialized_view = 1;
+}
+
+// A materialized view considered for a query job.
+message MaterializedView {
+ // Reason why a materialized view was not chosen for a query. For more
+ // information, see [Understand why materialized views were
+ // rejected](https://cloud.google.com/bigquery/docs/materialized-views-use#understand-rejected).
+ enum RejectedReason {
+ // Default unspecified value.
+ REJECTED_REASON_UNSPECIFIED = 0;
+
+ // View has no cached data because it has not refreshed yet.
+ NO_DATA = 1;
+
+ // The estimated cost of the view is more expensive than another view or the
+ // base table.
+ //
+ // Note: The estimated cost might not match the billed cost.
+ COST = 2;
+
+ // View has no cached data because a base table is truncated.
+ BASE_TABLE_TRUNCATED = 3;
+
+ // View is invalidated because of a data change in one or more base tables.
+ // It could be any recent change if the
+ // [`max_staleness`](https://cloud.google.com/bigquery/docs/materialized-views-create#max_staleness)
+ // option is not set for the view, or otherwise any change outside of the
+ // staleness window.
+ BASE_TABLE_DATA_CHANGE = 4;
+
+ // View is invalidated because a base table's partition expiration has
+ // changed.
+ BASE_TABLE_PARTITION_EXPIRATION_CHANGE = 5;
+
+ // View is invalidated because a base table's partition has expired.
+ BASE_TABLE_EXPIRED_PARTITION = 6;
+
+ // View is invalidated because a base table has an incompatible metadata
+ // change.
+ BASE_TABLE_INCOMPATIBLE_METADATA_CHANGE = 7;
+
+ // View is invalidated because it was refreshed with a time zone other than
+ // that of the current job.
+ TIME_ZONE = 8;
+
+ // View is outside the time travel window.
+ OUT_OF_TIME_TRAVEL_WINDOW = 9;
+
+ // View is inaccessible to the user because of a fine-grained security
+ // policy on one of its base tables.
+ BASE_TABLE_FINE_GRAINED_SECURITY_POLICY = 10;
+
+ // One of the view's base tables is too stale. For example, the cached
+ // metadata of a BigLake external table needs to be updated.
+ BASE_TABLE_TOO_STALE = 11;
+ }
+
+ // The candidate materialized view.
+ optional TableReference table_reference = 1;
+
+ // Whether the materialized view is chosen for the query.
+ //
+ // A materialized view can be chosen to rewrite multiple parts of the same
+ // query. If a materialized view is chosen to rewrite any part of the query,
+ // then this field is true, even if the materialized view was not chosen to
+ // rewrite other parts.
+ optional bool chosen = 2;
+
+ // If present, specifies a best-effort estimation of the bytes saved by using
+ // the materialized view rather than its base tables.
+ optional int64 estimated_bytes_saved = 3;
+
+ // If present, specifies the reason why the materialized view was not chosen
+ // for the query.
+ optional RejectedReason rejected_reason = 4;
+}
+
+// Table level detail on the usage of metadata caching. Only set for Metadata
+// caching eligible tables referenced in the query.
+message TableMetadataCacheUsage {
+ // Reasons for not using metadata caching.
+ enum UnusedReason {
+ // Unused reasons not specified.
+ UNUSED_REASON_UNSPECIFIED = 0;
+
+ // Metadata cache was outside the table's maxStaleness.
+ EXCEEDED_MAX_STALENESS = 1;
+
+ // Metadata caching feature is not enabled. [Update BigLake tables]
+ // (/bigquery/docs/create-cloud-storage-table-biglake#update-biglake-tables)
+ // to enable the metadata caching.
+ METADATA_CACHING_NOT_ENABLED = 3;
+
+ // Other unknown reason.
+ OTHER_REASON = 2;
+ }
+
+ // Metadata caching eligible table referenced in the query.
+ optional TableReference table_reference = 1;
+
+ // Reason for not using metadata caching for the table.
+ optional UnusedReason unused_reason = 2;
+
+ // Free form human-readable reason metadata caching was unused for
+ // the job.
+ optional string explanation = 3;
+
+ // Duration since last refresh as of this job for managed tables (indicates
+ // metadata cache staleness as seen by this job).
+ google.protobuf.Duration staleness = 5;
+
+ // [Table
+ // type](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.type).
+ string table_type = 6;
+}
+
+// Statistics for metadata caching in BigLake tables.
+message MetadataCacheStatistics {
+ // Set for the Metadata caching eligible tables referenced in the query.
+ repeated TableMetadataCacheUsage table_metadata_cache_usage = 1;
+}
+
+// The type of editions.
+// Different features and behaviors are provided to different editions.
+// Capacity commitments and reservations are linked to editions.
+enum ReservationEdition {
+ // Default value, which will be treated as ENTERPRISE.
+ RESERVATION_EDITION_UNSPECIFIED = 0;
+
+ // Standard edition.
+ STANDARD = 1;
+
+ // Enterprise edition.
+ ENTERPRISE = 2;
+
+ // Enterprise plus edition.
+ ENTERPRISE_PLUS = 3;
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_status.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_status.proto.baseline
new file mode 100755
index 000000000..71f0a33dc
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/job_status.proto.baseline
@@ -0,0 +1,40 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/error.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "JobStatusProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+message JobStatus {
+ // Output only. Final error result of the job. If present, indicates that the
+ // job has completed and was unsuccessful.
+ ErrorProto error_result = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The first errors encountered during the running of the job.
+ // The final message includes the number of errors that caused the process to
+ // stop. Errors here do not necessarily mean that the job has not completed or
+ // was unsuccessful.
+ repeated ErrorProto errors = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Running state of the job. Valid states include 'PENDING',
+ // 'RUNNING', and 'DONE'.
+ string state = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/json_extension.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/json_extension.proto.baseline
new file mode 100755
index 000000000..49338d746
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/json_extension.proto.baseline
@@ -0,0 +1,34 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "JsonExtensionProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Used to indicate that a JSON variant, rather than normal JSON, is being used
+// as the source_format. This should only be used in combination with the
+// JSON source format.
+enum JsonExtension {
+ // The default if the provided value is not one included in the enum, or the
+ // value is not specified. The source format is parsed without any
+ // modification.
+ JSON_EXTENSION_UNSPECIFIED = 0;
+
+ // Use GeoJSON variant of JSON. See https://tools.ietf.org/html/rfc7946.
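+ // For example, a GeoJSON geometry object is encoded as
+ // `{"type": "Point", "coordinates": [-122.35, 47.62]}`.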
+ GEOJSON = 1; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/location_metadata.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/location_metadata.proto.baseline new file mode 100755 index 000000000..391bd5ae4 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/location_metadata.proto.baseline @@ -0,0 +1,30 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "LocationMetadataProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// BigQuery-specific metadata about a location. This will be set on +// google.cloud.location.Location.metadata in Cloud Location API +// responses. +message LocationMetadata { + // The legacy BigQuery location ID, e.g. “EU” for the “europe” location. + // This is for any API consumers that need the legacy “US” and “EU” locations. + string legacy_location_id = 1; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/map_target_type.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/map_target_type.proto.baseline new file mode 100755 index 000000000..dc66e7d7a --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/map_target_type.proto.baseline @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "MapTargetTypeProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Indicates the map target type. Only applies to parquet maps. +enum MapTargetType { + // In this mode, the map will have the following schema: + // struct map_field_name { repeated struct key_value { key value } }. + MAP_TARGET_TYPE_UNSPECIFIED = 0; + + // In this mode, the map will have the following schema: + // repeated struct map_field_name { key value }. 
+ ARRAY_OF_STRUCT = 1; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/model.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/model.proto.baseline new file mode 100755 index 000000000..dc3311876 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/model.proto.baseline @@ -0,0 +1,2040 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/encryption_config.proto"; +import "google/cloud/bigquery/v2/model_reference.proto"; +import "google/cloud/bigquery/v2/standard_sql.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ModelProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Model Service. +// +// It should not be relied on for production use cases at this time. +service ModelService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Gets the specified model resource by model ID. + rpc GetModel(GetModelRequest) returns (Model) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models/{model_id=*}" + }; + option (google.api.method_signature) = "project_id,dataset_id,model_id"; + } + + // Lists all models in the specified dataset. Requires the READER dataset + // role. After retrieving the list of models, you can get information about a + // particular model by calling the models.get method. + rpc ListModels(ListModelsRequest) returns (ListModelsResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models" + }; + option (google.api.method_signature) = "project_id,dataset_id,max_results"; + } + + // Patch specific fields in the specified model. + rpc PatchModel(PatchModelRequest) returns (Model) { + option (google.api.http) = { + patch: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models/{model_id=*}" + body: "model" + }; + option (google.api.method_signature) = + "project_id,dataset_id,model_id,model"; + } + + // Deletes the model specified by modelId from the dataset. 
+ rpc DeleteModel(DeleteModelRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models/{model_id=*}"
+ };
+ option (google.api.method_signature) = "project_id,dataset_id,model_id";
+ }
+}
+
+// Remote Model Info
+message RemoteModelInfo {
+ // Supported service type for remote model.
+ enum RemoteServiceType {
+ // Unspecified remote service type.
+ REMOTE_SERVICE_TYPE_UNSPECIFIED = 0;
+
+ // V3 Cloud AI Translation API. See more details at [Cloud Translation API]
+ // (https://cloud.google.com/translate/docs/reference/rest).
+ CLOUD_AI_TRANSLATE_V3 = 1;
+
+ // V1 Cloud AI Vision API. See more details at [Cloud Vision API]
+ // (https://cloud.google.com/vision/docs/reference/rest).
+ CLOUD_AI_VISION_V1 = 2;
+
+ // V1 Cloud AI Natural Language API. See more details at [REST Resource:
+ // documents](https://cloud.google.com/natural-language/docs/reference/rest/v1/documents).
+ CLOUD_AI_NATURAL_LANGUAGE_V1 = 3;
+
+ // V2 Speech-to-Text API. See more details at [Google Cloud Speech-to-Text
+ // V2 API](https://cloud.google.com/speech-to-text/v2/docs)
+ CLOUD_AI_SPEECH_TO_TEXT_V2 = 7;
+ }
+
+ // Remote services are services outside of BigQuery used by remote models for
+ // predictions. A remote service is backed by either an arbitrary endpoint or
+ // a selected remote service type, but not both.
+ oneof remote_service {
+ // Output only. The endpoint for remote model.
+ string endpoint = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The remote service type for remote model.
+ RemoteServiceType remote_service_type = 2
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+ }
+
+ // Output only. Fully qualified name of the user-provided connection object of
+ // the remote model. Format:
+ // ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"```
+ string connection = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Max number of rows in each batch sent to the remote service.
+ // If unset, the number of rows in each batch is set dynamically.
+ int64 max_batching_rows = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The model version for LLM.
+ string remote_model_version = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The name of the speech recognizer to use for speech
+ // recognition. The expected format is
+ // `projects/{project}/locations/{location}/recognizers/{recognizer}`.
+ // Customers can specify this field at model creation. If not specified, a
+ // default recognizer `projects/{model
+ // project}/locations/global/recognizers/_` will be used. See more details at
+ // [recognizers](https://cloud.google.com/speech-to-text/v2/docs/reference/rest/v2/projects.locations.recognizers)
+ string speech_recognizer = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Information about a single transform column.
+message TransformColumn {
+ // Output only. Name of the column.
+ string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Data type of the column after the transform.
+ StandardSqlDataType type = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The SQL expression used in the column transform.
+ string transform_sql = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+message Model {
+ // Indicates the type of the Model.
+ enum ModelType {
+ // Default value.
+ MODEL_TYPE_UNSPECIFIED = 0;
+
+ // Linear regression model.
+ LINEAR_REGRESSION = 1;
+
+ // Logistic regression based classification model.
+ LOGISTIC_REGRESSION = 2;
+
+ // K-means clustering model.
+ KMEANS = 3;
+
+ // Matrix factorization model.
+ MATRIX_FACTORIZATION = 4;
+
+ // DNN classifier model.
+ DNN_CLASSIFIER = 5;
+
+ // An imported TensorFlow model.
+ TENSORFLOW = 6;
+
+ // DNN regressor model.
+ DNN_REGRESSOR = 7;
+
+ // An imported XGBoost model.
+ XGBOOST = 8;
+
+ // Boosted tree regressor model.
+ BOOSTED_TREE_REGRESSOR = 9;
+
+ // Boosted tree classifier model.
+ BOOSTED_TREE_CLASSIFIER = 10;
+
+ // ARIMA model.
+ ARIMA = 11;
+
+ // AutoML Tables regression model.
+ AUTOML_REGRESSOR = 12;
+
+ // AutoML Tables classification model.
+ AUTOML_CLASSIFIER = 13;
+
+ // Principal Component Analysis model.
+ PCA = 14;
+
+ // Wide-and-deep classifier model.
+ DNN_LINEAR_COMBINED_CLASSIFIER = 16;
+
+ // Wide-and-deep regressor model.
+ DNN_LINEAR_COMBINED_REGRESSOR = 17;
+
+ // Autoencoder model.
+ AUTOENCODER = 18;
+
+ // New name for the ARIMA model.
+ ARIMA_PLUS = 19;
+
+ // ARIMA with external regressors.
+ ARIMA_PLUS_XREG = 23;
+
+ // Random forest regressor model.
+ RANDOM_FOREST_REGRESSOR = 24;
+
+ // Random forest classifier model.
+ RANDOM_FOREST_CLASSIFIER = 25;
+
+ // An imported TensorFlow Lite model.
+ TENSORFLOW_LITE = 26;
+
+ // An imported ONNX model.
+ ONNX = 28;
+
+ // Model to capture the columns and logic in the TRANSFORM clause along with
+ // statistics useful for ML analytic functions.
+ TRANSFORM_ONLY = 29;
+ }
+
+ // Loss metric to evaluate model training performance.
+ enum LossType {
+ // Default value.
+ LOSS_TYPE_UNSPECIFIED = 0;
+
+ // Mean squared loss, used for linear regression.
+ MEAN_SQUARED_LOSS = 1;
+
+ // Mean log loss, used for logistic regression.
+ MEAN_LOG_LOSS = 2;
+ }
+
+ // Distance metric used to compute the distance between two points.
+ enum DistanceType {
+ // Default value.
+ DISTANCE_TYPE_UNSPECIFIED = 0;
+
+ // Euclidean distance.
+ EUCLIDEAN = 1;
+
+ // Cosine distance.
+ COSINE = 2;
+ }
+
+ // Indicates the method to split input data into multiple tables.
+ enum DataSplitMethod {
+ // Default value.
+ DATA_SPLIT_METHOD_UNSPECIFIED = 0;
+
+ // Splits data randomly.
+ RANDOM = 1;
+
+ // Splits data with the user provided tags.
+ CUSTOM = 2;
+
+ // Splits data sequentially.
+ SEQUENTIAL = 3;
+
+ // Data split will be skipped.
+ NO_SPLIT = 4;
+
+ // Splits data automatically: Uses NO_SPLIT if the data size is small.
+ // Otherwise uses RANDOM.
+ AUTO_SPLIT = 5;
+ }
+
+ // Type of supported data frequency for time series forecasting models.
+ enum DataFrequency {
+ // Default value.
+ DATA_FREQUENCY_UNSPECIFIED = 0;
+
+ // Automatically inferred from timestamps.
+ AUTO_FREQUENCY = 1;
+
+ // Yearly data.
+ YEARLY = 2;
+
+ // Quarterly data.
+ QUARTERLY = 3;
+
+ // Monthly data.
+ MONTHLY = 4;
+
+ // Weekly data.
+ WEEKLY = 5;
+
+ // Daily data.
+ DAILY = 6;
+
+ // Hourly data.
+ HOURLY = 7;
+
+ // Per-minute data.
+ PER_MINUTE = 8;
+ }
+
+ // Type of supported holiday regions for time series forecasting models.
+ enum HolidayRegion {
+ // Holiday region unspecified.
+ HOLIDAY_REGION_UNSPECIFIED = 0;
+
+ // Global.
+ GLOBAL = 1;
+
+ // North America.
+ NA = 2;
+
+ // Japan and Asia Pacific: Korea, Greater China, India, Australia, and New
+ // Zealand.
+ JAPAC = 3;
+
+ // Europe, the Middle East and Africa.
+ EMEA = 4;
+
+ // Latin America and the Caribbean.
+ LAC = 5; + + // United Arab Emirates + AE = 6; + + // Argentina + AR = 7; + + // Austria + AT = 8; + + // Australia + AU = 9; + + // Belgium + BE = 10; + + // Brazil + BR = 11; + + // Canada + CA = 12; + + // Switzerland + CH = 13; + + // Chile + CL = 14; + + // China + CN = 15; + + // Colombia + CO = 16; + + // Czechoslovakia + CS = 17; + + // Czech Republic + CZ = 18; + + // Germany + DE = 19; + + // Denmark + DK = 20; + + // Algeria + DZ = 21; + + // Ecuador + EC = 22; + + // Estonia + EE = 23; + + // Egypt + EG = 24; + + // Spain + ES = 25; + + // Finland + FI = 26; + + // France + FR = 27; + + // Great Britain (United Kingdom) + GB = 28; + + // Greece + GR = 29; + + // Hong Kong + HK = 30; + + // Hungary + HU = 31; + + // Indonesia + ID = 32; + + // Ireland + IE = 33; + + // Israel + IL = 34; + + // India + IN = 35; + + // Iran + IR = 36; + + // Italy + IT = 37; + + // Japan + JP = 38; + + // Korea (South) + KR = 39; + + // Latvia + LV = 40; + + // Morocco + MA = 41; + + // Mexico + MX = 42; + + // Malaysia + MY = 43; + + // Nigeria + NG = 44; + + // Netherlands + NL = 45; + + // Norway + NO = 46; + + // New Zealand + NZ = 47; + + // Peru + PE = 48; + + // Philippines + PH = 49; + + // Pakistan + PK = 50; + + // Poland + PL = 51; + + // Portugal + PT = 52; + + // Romania + RO = 53; + + // Serbia + RS = 54; + + // Russian Federation + RU = 55; + + // Saudi Arabia + SA = 56; + + // Sweden + SE = 57; + + // Singapore + SG = 58; + + // Slovenia + SI = 59; + + // Slovakia + SK = 60; + + // Thailand + TH = 61; + + // Turkey + TR = 62; + + // Taiwan + TW = 63; + + // Ukraine + UA = 64; + + // United States + US = 65; + + // Venezuela + VE = 66; + + // Viet Nam + VN = 67; + + // South Africa + ZA = 68; + } + + // Enums for seasonal period. + message SeasonalPeriod { + // Seasonal period type. + enum SeasonalPeriodType { + // Unspecified seasonal period. + SEASONAL_PERIOD_TYPE_UNSPECIFIED = 0; + + // No seasonality + NO_SEASONALITY = 1; + + // Daily period, 24 hours. + DAILY = 2; + + // Weekly period, 7 days. + WEEKLY = 3; + + // Monthly period, 30 days or irregular. + MONTHLY = 4; + + // Quarterly period, 90 days or irregular. + QUARTERLY = 5; + + // Yearly period, 365 days or irregular. + YEARLY = 6; + } + } + + // Enums for color space, used for processing images in Object Table. + // See more details at + // https://www.tensorflow.org/io/tutorials/colorspace. + enum ColorSpace { + // Unspecified color space + COLOR_SPACE_UNSPECIFIED = 0; + + // RGB + RGB = 1; + + // HSV + HSV = 2; + + // YIQ + YIQ = 3; + + // YUV + YUV = 4; + + // GRAYSCALE + GRAYSCALE = 5; + } + + // Enums for kmeans model type. + message KmeansEnums { + // Indicates the method used to initialize the centroids for KMeans + // clustering algorithm. + enum KmeansInitializationMethod { + // Unspecified initialization method. + KMEANS_INITIALIZATION_METHOD_UNSPECIFIED = 0; + + // Initializes the centroids randomly. + RANDOM = 1; + + // Initializes the centroids using data specified in + // kmeans_initialization_column. + CUSTOM = 2; + + // Initializes with kmeans++. + KMEANS_PLUS_PLUS = 3; + } + } + + // Enums for XGBoost model type. + message BoostedTreeOptionEnums { + // Booster types supported. Refer to booster parameter in XGBoost. + enum BoosterType { + // Unspecified booster type. + BOOSTER_TYPE_UNSPECIFIED = 0; + + // Gbtree booster. + GBTREE = 1; + + // Dart booster. + DART = 2; + } + + // Type of normalization algorithm for boosted tree models using dart + // booster. Refer to normalize_type in XGBoost. 
+ enum DartNormalizeType {
+ // Unspecified dart normalize type.
+ DART_NORMALIZE_TYPE_UNSPECIFIED = 0;
+
+ // New trees have the same weight of each of dropped trees.
+ TREE = 1;
+
+ // New trees have the same weight of sum of dropped trees.
+ FOREST = 2;
+ }
+
+ // Tree construction algorithm used in boosted tree models.
+ // Refer to tree_method in XGBoost.
+ enum TreeMethod {
+ // Unspecified tree method.
+ TREE_METHOD_UNSPECIFIED = 0;
+
+ // Use heuristic to choose the fastest method.
+ AUTO = 1;
+
+ // Exact greedy algorithm.
+ EXACT = 2;
+
+ // Approximate greedy algorithm using quantile sketch and gradient
+ // histogram.
+ APPROX = 3;
+
+ // Fast histogram optimized approximate greedy algorithm.
+ HIST = 4;
+ }
+ }
+
+ // Enums for hyperparameter tuning.
+ message HparamTuningEnums {
+ // Available evaluation metrics used as hyperparameter tuning objectives.
+ enum HparamTuningObjective {
+ // Unspecified evaluation metric.
+ HPARAM_TUNING_OBJECTIVE_UNSPECIFIED = 0;
+
+ // Mean absolute error.
+ // mean_absolute_error = AVG(ABS(label - predicted))
+ MEAN_ABSOLUTE_ERROR = 1;
+
+ // Mean squared error.
+ // mean_squared_error = AVG(POW(label - predicted, 2))
+ MEAN_SQUARED_ERROR = 2;
+
+ // Mean squared log error.
+ // mean_squared_log_error = AVG(POW(LN(1 + label) - LN(1 + predicted), 2))
+ MEAN_SQUARED_LOG_ERROR = 3;
+
+ // Median absolute error.
+ // median_absolute_error = APPROX_QUANTILES(absolute_error, 2)[OFFSET(1)]
+ MEDIAN_ABSOLUTE_ERROR = 4;
+
+ // R^2 score. This corresponds to r2_score in ML.EVALUATE.
+ // r_squared = 1 - SUM(squared_error)/(COUNT(label)*VAR_POP(label))
+ R_SQUARED = 5;
+
+ // Explained variance.
+ // explained_variance = 1 - VAR_POP(label_error)/VAR_POP(label)
+ EXPLAINED_VARIANCE = 6;
+
+ // Precision is the fraction of actual positive predictions that had
+ // positive actual labels. For multiclass this is a macro-averaged metric
+ // treating each class as a binary classifier.
+ PRECISION = 7;
+
+ // Recall is the fraction of actual positive labels that were given a
+ // positive prediction. For multiclass this is a macro-averaged metric.
+ RECALL = 8;
+
+ // Accuracy is the fraction of predictions given the correct label. For
+ // multiclass this is a globally micro-averaged metric.
+ ACCURACY = 9;
+
+ // The F1 score is an average of recall and precision. For multiclass this
+ // is a macro-averaged metric.
+ F1_SCORE = 10;
+
+ // Logarithmic Loss. For multiclass this is a macro-averaged metric.
+ LOG_LOSS = 11;
+
+ // Area Under an ROC Curve. For multiclass this is a macro-averaged
+ // metric.
+ ROC_AUC = 12;
+
+ // Davies-Bouldin Index.
+ DAVIES_BOULDIN_INDEX = 13;
+
+ // Mean Average Precision.
+ MEAN_AVERAGE_PRECISION = 14;
+
+ // Normalized Discounted Cumulative Gain.
+ NORMALIZED_DISCOUNTED_CUMULATIVE_GAIN = 15;
+
+ // Average Rank.
+ AVERAGE_RANK = 16;
+ }
+ }
+
+ // Indicates the learning rate optimization strategy to use.
+ enum LearnRateStrategy {
+ // Default value.
+ LEARN_RATE_STRATEGY_UNSPECIFIED = 0;
+
+ // Use line search to determine learning rate.
+ LINE_SEARCH = 1;
+
+ // Use a constant learning rate.
+ CONSTANT = 2;
+ }
+
+ // Indicates the optimization strategy used for training.
+ enum OptimizationStrategy {
+ // Default value.
+ OPTIMIZATION_STRATEGY_UNSPECIFIED = 0;
+
+ // Uses an iterative batch gradient descent algorithm.
+ BATCH_GRADIENT_DESCENT = 1;
+
+ // Uses a normal equation to solve linear regression problem.
+ NORMAL_EQUATION = 2;
+ }
+
+ // Indicates the training algorithm to use for matrix factorization models.
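+ // Implicit feedback is inferred from user behavior (for example, clicks
+ // or purchase history), while explicit feedback consists of direct item
+ // ratings.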
+ enum FeedbackType {
+ // Default value.
+ FEEDBACK_TYPE_UNSPECIFIED = 0;
+
+ // Use weighted-als for implicit feedback problems.
+ IMPLICIT = 1;
+
+ // Use nonweighted-als for explicit feedback problems.
+ EXPLICIT = 2;
+ }
+
+ // Evaluation metrics for regression and explicit feedback type matrix
+ // factorization models.
+ message RegressionMetrics {
+ // Mean absolute error.
+ google.protobuf.DoubleValue mean_absolute_error = 1;
+
+ // Mean squared error.
+ google.protobuf.DoubleValue mean_squared_error = 2;
+
+ // Mean squared log error.
+ google.protobuf.DoubleValue mean_squared_log_error = 3;
+
+ // Median absolute error.
+ google.protobuf.DoubleValue median_absolute_error = 4;
+
+ // R^2 score. This corresponds to r2_score in ML.EVALUATE.
+ google.protobuf.DoubleValue r_squared = 5;
+ }
+
+ // Aggregate metrics for classification/classifier models. For multi-class
+ // models, the metrics are either macro-averaged or micro-averaged. When
+ // macro-averaged, the metrics are calculated for each label and then an
+ // unweighted average is taken of those values. When micro-averaged, the
+ // metric is calculated globally by counting the total number of correctly
+ // predicted rows.
+ message AggregateClassificationMetrics {
+ // Precision is the fraction of actual positive predictions that had
+ // positive actual labels. For multiclass this is a macro-averaged
+ // metric treating each class as a binary classifier.
+ google.protobuf.DoubleValue precision = 1;
+
+ // Recall is the fraction of actual positive labels that were given a
+ // positive prediction. For multiclass this is a macro-averaged metric.
+ google.protobuf.DoubleValue recall = 2;
+
+ // Accuracy is the fraction of predictions given the correct label. For
+ // multiclass this is a micro-averaged metric.
+ google.protobuf.DoubleValue accuracy = 3;
+
+ // Threshold at which the metrics are computed. For binary
+ // classification models this is the positive class threshold.
+ // For multi-class classification models this is the confidence
+ // threshold.
+ google.protobuf.DoubleValue threshold = 4;
+
+ // The F1 score is an average of recall and precision. For multiclass
+ // this is a macro-averaged metric.
+ google.protobuf.DoubleValue f1_score = 5;
+
+ // Logarithmic Loss. For multiclass this is a macro-averaged metric.
+ google.protobuf.DoubleValue log_loss = 6;
+
+ // Area Under a ROC Curve. For multiclass this is a macro-averaged
+ // metric.
+ google.protobuf.DoubleValue roc_auc = 7;
+ }
+
+ // Evaluation metrics for binary classification/classifier models.
+ message BinaryClassificationMetrics {
+ // Confusion matrix for binary classification models.
+ message BinaryConfusionMatrix {
+ // Threshold value used when computing each of the following metrics.
+ google.protobuf.DoubleValue positive_class_threshold = 1;
+
+ // Number of true samples predicted as true.
+ google.protobuf.Int64Value true_positives = 2;
+
+ // Number of false samples predicted as true.
+ google.protobuf.Int64Value false_positives = 3;
+
+ // Number of true samples predicted as false.
+ google.protobuf.Int64Value true_negatives = 4;
+
+ // Number of false samples predicted as false.
+ google.protobuf.Int64Value false_negatives = 5;
+
+ // The fraction of actual positive predictions that had positive actual
+ // labels.
+ google.protobuf.DoubleValue precision = 6;
+
+ // The fraction of actual positive labels that were given a positive
+ // prediction.
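+ // Computed as true_positives / (true_positives + false_negatives).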
+ google.protobuf.DoubleValue recall = 7;
+
+ // The equally weighted average of recall and precision.
+ google.protobuf.DoubleValue f1_score = 8;
+
+ // The fraction of predictions given the correct label.
+ google.protobuf.DoubleValue accuracy = 9;
+ }
+
+ // Aggregate classification metrics.
+ AggregateClassificationMetrics aggregate_classification_metrics = 1;
+
+ // Binary confusion matrix at multiple thresholds.
+ repeated BinaryConfusionMatrix binary_confusion_matrix_list = 2;
+
+ // Label representing the positive class.
+ string positive_label = 3;
+
+ // Label representing the negative class.
+ string negative_label = 4;
+ }
+
+ // Evaluation metrics for multi-class classification/classifier models.
+ message MultiClassClassificationMetrics {
+ // Confusion matrix for multi-class classification models.
+ message ConfusionMatrix {
+ // A single entry in the confusion matrix.
+ message Entry {
+ // The predicted label. For confidence_threshold > 0, we will
+ // also add an entry indicating the number of items under the
+ // confidence threshold.
+ string predicted_label = 1;
+
+ // Number of items being predicted as this label.
+ google.protobuf.Int64Value item_count = 2;
+ }
+
+ // A single row in the confusion matrix.
+ message Row {
+ // The original label of this row.
+ string actual_label = 1;
+
+ // Info describing predicted label distribution.
+ repeated Entry entries = 2;
+ }
+
+ // Confidence threshold used when computing the entries of the
+ // confusion matrix.
+ google.protobuf.DoubleValue confidence_threshold = 1;
+
+ // One row per actual label.
+ repeated Row rows = 2;
+ }
+
+ // Aggregate classification metrics.
+ AggregateClassificationMetrics aggregate_classification_metrics = 1;
+
+ // Confusion matrix at different thresholds.
+ repeated ConfusionMatrix confusion_matrix_list = 2;
+ }
+
+ // Evaluation metrics for clustering models.
+ message ClusteringMetrics {
+ // Message containing the information about one cluster.
+ message Cluster {
+ // Representative value of a single feature within the cluster.
+ message FeatureValue {
+ // Representative value of a categorical feature.
+ message CategoricalValue {
+ // Represents the count of a single category within the cluster.
+ message CategoryCount {
+ // The name of the category.
+ string category = 1;
+
+ // The count of training samples matching the category within the
+ // cluster.
+ google.protobuf.Int64Value count = 2;
+ }
+
+ // Counts of all categories for the categorical feature. If there are
+ // more than ten categories, we return top ten (by count) and return
+ // one more CategoryCount with category "_OTHER_" and count as
+ // aggregate counts of remaining categories.
+ repeated CategoryCount category_counts = 1;
+ }
+
+ // The feature column name.
+ string feature_column = 1;
+
+ // Value.
+ oneof value {
+ // The numerical feature value. This is the centroid value for this
+ // feature.
+ google.protobuf.DoubleValue numerical_value = 2;
+
+ // The categorical feature value.
+ CategoricalValue categorical_value = 3;
+ }
+ }
+
+ // Centroid id.
+ int64 centroid_id = 1;
+
+ // Values of highly variant features for this cluster.
+ repeated FeatureValue feature_values = 2;
+
+ // Count of training data rows that were assigned to this cluster.
+ google.protobuf.Int64Value count = 3;
+ }
+
+ // Davies-Bouldin index.
+ google.protobuf.DoubleValue davies_bouldin_index = 1;
+
+ // Mean of squared distances from each sample to its cluster centroid.
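+ // Lower values indicate more compact clusters.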
+ google.protobuf.DoubleValue mean_squared_distance = 2;
+
+ // Information for all clusters.
+ repeated Cluster clusters = 3;
+ }
+
+ // Evaluation metrics used by weighted-ALS models specified by
+ // feedback_type=implicit.
+ message RankingMetrics {
+ // Calculates a precision per user for all the items by ranking them and
+ // then averages all the precisions across all the users.
+ google.protobuf.DoubleValue mean_average_precision = 1;
+
+ // Similar to the mean squared error computed in regression and explicit
+ // recommendation models except instead of computing the rating directly,
+ // the output from evaluate is computed against a preference which is 1 or 0
+ // depending on whether the rating exists.
+ google.protobuf.DoubleValue mean_squared_error = 2;
+
+ // A metric to determine the goodness of a ranking calculated from the
+ // predicted confidence by comparing it to an ideal rank measured by the
+ // original ratings.
+ google.protobuf.DoubleValue normalized_discounted_cumulative_gain = 3;
+
+ // Determines the goodness of a ranking by computing the percentile rank
+ // from the predicted confidence and dividing it by the original rank.
+ google.protobuf.DoubleValue average_rank = 4;
+ }
+
+ // Model evaluation metrics for ARIMA forecasting models.
+ message ArimaForecastingMetrics {
+ // Model evaluation metrics for a single ARIMA forecasting model.
+ message ArimaSingleModelForecastingMetrics {
+ // Non-seasonal order.
+ ArimaOrder non_seasonal_order = 1;
+
+ // Arima fitting metrics.
+ ArimaFittingMetrics arima_fitting_metrics = 2;
+
+ // Is arima model fitted with drift or not. It is always false when d
+ // is not 1.
+ google.protobuf.BoolValue has_drift = 3;
+
+ // The time_series_id value for this time series. It will be one of
+ // the unique values from the time_series_id_column specified during
+ // ARIMA model training. Only present when time_series_id_column
+ // training option was used.
+ string time_series_id = 4;
+
+ // The tuple of time_series_ids identifying this time series. It will
+ // be one of the unique tuples of values present in the
+ // time_series_id_columns specified during ARIMA model training. Only
+ // present when time_series_id_columns training option was used and
+ // the order of values here is the same as the order of
+ // time_series_id_columns.
+ repeated string time_series_ids = 9;
+
+ // Seasonal periods. Repeated because multiple periods are supported
+ // for one time series.
+ repeated SeasonalPeriod.SeasonalPeriodType seasonal_periods = 5;
+
+ // If true, holiday_effect is a part of time series decomposition result.
+ google.protobuf.BoolValue has_holiday_effect = 6;
+
+ // If true, spikes_and_dips is a part of time series decomposition result.
+ google.protobuf.BoolValue has_spikes_and_dips = 7;
+
+ // If true, step_changes is a part of time series decomposition result.
+ google.protobuf.BoolValue has_step_changes = 8;
+ }
+
+ // Repeated as there can be many metric sets (one for each model) in
+ // auto-arima and the large-scale case.
+ repeated ArimaSingleModelForecastingMetrics
+ arima_single_model_forecasting_metrics = 6;
+ }
+
+ // Model evaluation metrics for dimensionality reduction models.
+ message DimensionalityReductionMetrics {
+ // Total percentage of variance explained by the selected principal
+ // components.
+ google.protobuf.DoubleValue total_explained_variance_ratio = 1;
+ }
+
+ // Evaluation metrics of a model. These are either computed on all training
+ // data or just the eval data based on whether eval data was used during
+ // training. These are not present for imported models.
+ message EvaluationMetrics {
+ // Metrics.
+ oneof metrics {
+ // Populated for regression models and explicit feedback type matrix
+ // factorization models.
+ RegressionMetrics regression_metrics = 1;
+
+ // Populated for binary classification/classifier models.
+ BinaryClassificationMetrics binary_classification_metrics = 2;
+
+ // Populated for multi-class classification/classifier models.
+ MultiClassClassificationMetrics multi_class_classification_metrics = 3;
+
+ // Populated for clustering models.
+ ClusteringMetrics clustering_metrics = 4;
+
+ // Populated for implicit feedback type matrix factorization models.
+ RankingMetrics ranking_metrics = 5;
+
+ // Populated for ARIMA models.
+ ArimaForecastingMetrics arima_forecasting_metrics = 6;
+
+ // Evaluation metrics when the model is a dimensionality reduction model,
+ // which currently includes PCA.
+ DimensionalityReductionMetrics dimensionality_reduction_metrics = 7;
+ }
+ }
+
+ // Data split result. This contains references to the training and evaluation
+ // data tables that were used to train the model.
+ message DataSplitResult {
+ // Table reference of the training data after split.
+ TableReference training_table = 1;
+
+ // Table reference of the evaluation data after split.
+ TableReference evaluation_table = 2;
+
+ // Table reference of the test data after split.
+ TableReference test_table = 3;
+ }
+
+ // Arima order, can be used for both non-seasonal and seasonal parts.
+ message ArimaOrder {
+ // Order of the autoregressive part.
+ google.protobuf.Int64Value p = 1;
+
+ // Order of the differencing part.
+ google.protobuf.Int64Value d = 2;
+
+ // Order of the moving-average part.
+ google.protobuf.Int64Value q = 3;
+ }
+
+ // ARIMA model fitting metrics.
+ message ArimaFittingMetrics {
+ // Log-likelihood.
+ google.protobuf.DoubleValue log_likelihood = 1;
+
+ // AIC.
+ google.protobuf.DoubleValue aic = 2;
+
+ // Variance.
+ google.protobuf.DoubleValue variance = 3;
+ }
+
+ // Global explanations containing the top most important features
+ // after training.
+ message GlobalExplanation {
+ // Explanation for a single feature.
+ message Explanation {
+ // The full feature name. For non-numerical features, will be formatted
+ // like `<column_name>.<encoded_feature_name>`. Overall size of feature
+ // name will always be truncated to first 120 characters.
+ string feature_name = 1;
+
+ // Attribution of feature.
+ google.protobuf.DoubleValue attribution = 2;
+ }
+
+ // A list of the top global explanations. Sorted by absolute value of
+ // attribution in descending order.
+ repeated Explanation explanations = 1;
+
+ // Class label for this set of global explanations. Will be empty/null for
+ // binary logistic and linear regression models. Sorted alphabetically in
+ // descending order.
+ string class_label = 2;
+ }
+
+ // Encoding methods for categorical features.
+ message CategoryEncodingMethod {
+ // Supported encoding methods for categorical features.
+ enum EncodingMethod {
+ // Unspecified encoding method.
+ ENCODING_METHOD_UNSPECIFIED = 0;
+
+ // Applies one-hot encoding.
+ ONE_HOT_ENCODING = 1;
+
+ // Applies label encoding.
+ LABEL_ENCODING = 2;
+
+ // Applies dummy encoding.
+ DUMMY_ENCODING = 3;
+ }
+ }
+
+ // PCA solver options.
+ message PcaSolverOptionEnums {
+ // Enums for supported PCA solvers.
+ enum PcaSolver {
+ // Default value.
+ UNSPECIFIED = 0;
+
+ // Full eigen-decomposition.
+ FULL = 1;
+
+ // Randomized SVD.
+ RANDOMIZED = 2;
+
+ // Auto.
+ AUTO = 3;
+ }
+ }
+
+ // Model registry options.
+ message ModelRegistryOptionEnums {
+ // Enums for supported model registries.
+ enum ModelRegistry {
+ // Default value.
+ MODEL_REGISTRY_UNSPECIFIED = 0;
+
+ // Vertex AI.
+ VERTEX_AI = 1;
+ }
+ }
+
+ // Information about a single training query run for the model.
+ message TrainingRun {
+ // Options used in model training.
+ message TrainingOptions {
+ // The maximum number of iterations in training. Used only for iterative
+ // training algorithms.
+ int64 max_iterations = 1;
+
+ // Type of loss function used during training run.
+ LossType loss_type = 2;
+
+ // Learning rate in training. Used only for iterative training algorithms.
+ double learn_rate = 3;
+
+ // L1 regularization coefficient.
+ google.protobuf.DoubleValue l1_regularization = 4;
+
+ // L2 regularization coefficient.
+ google.protobuf.DoubleValue l2_regularization = 5;
+
+ // When early_stop is true, stops training when accuracy improvement is
+ // less than 'min_relative_progress'. Used only for iterative training
+ // algorithms.
+ google.protobuf.DoubleValue min_relative_progress = 6;
+
+ // Whether to train a model from the last checkpoint.
+ google.protobuf.BoolValue warm_start = 7;
+
+ // Whether to stop early when the loss doesn't improve significantly
+ // any more (compared to min_relative_progress). Used only for iterative
+ // training algorithms.
+ google.protobuf.BoolValue early_stop = 8;
+
+ // Name of input label columns in training data.
+ repeated string input_label_columns = 9;
+
+ // The data split type for training and evaluation, e.g. RANDOM.
+ DataSplitMethod data_split_method = 10;
+
+ // The fraction of evaluation data over the whole input data. The rest
+ // of data will be used as training data. The format should be double.
+ // Accurate to two decimal places.
+ // Default value is 0.2.
+ double data_split_eval_fraction = 11;
+
+ // The column to split data with. This column won't be used as a
+ // feature.
+ // 1. When data_split_method is CUSTOM, the corresponding column should
+ // be boolean. The rows with true value tag are eval data, and the false
+ // are training data.
+ // 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION
+ // rows (from smallest to largest) in the corresponding column are used
+ // as training data, and the rest are eval data. It respects the order
+ // in Orderable data types:
+ // https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
+ string data_split_column = 12;
+
+ // The strategy to determine learn rate for the current iteration.
+ LearnRateStrategy learn_rate_strategy = 13;
+
+ // Specifies the initial learning rate for the line search learn rate
+ // strategy.
+ double initial_learn_rate = 16;
+
+ // Weights associated with each label class, for rebalancing the
+ // training data. Only applicable for classification models.
+ map<string, double> label_class_weights = 17;
+
+ // User column specified for matrix factorization models.
+ string user_column = 18;
+
+ // Item column specified for matrix factorization models.
+ string item_column = 19;
+
+ // Distance type for clustering models.
+ DistanceType distance_type = 20;
+
+ // Number of clusters for clustering models.
+ int64 num_clusters = 21;
+
+ // Google Cloud Storage URI from which the model was imported. Only
+ // applicable for imported models.
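+ // Example: `gs://[bucket_name]/[path_to_saved_model]`.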
+ string model_uri = 22;
+
+ // Optimization strategy for training linear regression models.
+ OptimizationStrategy optimization_strategy = 23;
+
+ // Hidden units for dnn models.
+ repeated int64 hidden_units = 24;
+
+ // Batch size for dnn models.
+ int64 batch_size = 25;
+
+ // Dropout probability for dnn models.
+ google.protobuf.DoubleValue dropout = 26;
+
+ // Maximum depth of a tree for boosted tree models.
+ int64 max_tree_depth = 27;
+
+ // Subsample fraction of the training data to grow tree to prevent
+ // overfitting for boosted tree models.
+ double subsample = 28;
+
+ // Minimum split loss for boosted tree models.
+ google.protobuf.DoubleValue min_split_loss = 29;
+
+ // Booster type for boosted tree models.
+ BoostedTreeOptionEnums.BoosterType booster_type = 60;
+
+ // Number of parallel trees constructed during each iteration for boosted
+ // tree models.
+ google.protobuf.Int64Value num_parallel_tree = 61;
+
+ // Type of normalization algorithm for boosted tree models using
+ // dart booster.
+ BoostedTreeOptionEnums.DartNormalizeType dart_normalize_type = 62;
+
+ // Tree construction algorithm for boosted tree models.
+ BoostedTreeOptionEnums.TreeMethod tree_method = 63;
+
+ // Minimum sum of instance weight needed in a child for boosted tree
+ // models.
+ google.protobuf.Int64Value min_tree_child_weight = 64;
+
+ // Subsample ratio of columns when constructing each tree for boosted tree
+ // models.
+ google.protobuf.DoubleValue colsample_bytree = 65;
+
+ // Subsample ratio of columns for each level for boosted tree models.
+ google.protobuf.DoubleValue colsample_bylevel = 66;
+
+ // Subsample ratio of columns for each node(split) for boosted tree
+ // models.
+ google.protobuf.DoubleValue colsample_bynode = 67;
+
+ // Num factors specified for matrix factorization models.
+ int64 num_factors = 30;
+
+ // Feedback type that specifies which algorithm to run for matrix
+ // factorization.
+ FeedbackType feedback_type = 31;
+
+ // Hyperparameter for matrix factorization when implicit feedback type is
+ // specified.
+ google.protobuf.DoubleValue wals_alpha = 32;
+
+ // The method used to initialize the centroids for kmeans algorithm.
+ KmeansEnums.KmeansInitializationMethod kmeans_initialization_method = 33;
+
+ // The column used to provide the initial centroids for kmeans algorithm
+ // when kmeans_initialization_method is CUSTOM.
+ string kmeans_initialization_column = 34;
+
+ // Column to be designated as time series timestamp for ARIMA model.
+ string time_series_timestamp_column = 35;
+
+ // Column to be designated as time series data for ARIMA model.
+ string time_series_data_column = 36;
+
+ // Whether to enable auto ARIMA or not.
+ google.protobuf.BoolValue auto_arima = 37;
+
+ // A specification of the non-seasonal part of the ARIMA model: the three
+ // components (p, d, q) are the AR order, the degree of differencing, and
+ // the MA order.
+ ArimaOrder non_seasonal_order = 38;
+
+ // The data frequency of a time series.
+ DataFrequency data_frequency = 39;
+
+ // Whether or not p-value test should be computed for this model. Only
+ // available for linear and logistic regression models.
+ google.protobuf.BoolValue calculate_p_values = 40;
+
+ // Include drift when fitting an ARIMA model.
+ google.protobuf.BoolValue include_drift = 41;
+
+ // The geographical region based on which the holidays are considered in
+ // time series modeling. If a valid value is specified, then holiday
+ // effects modeling is enabled.
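+ // For example, specifying US enables modeling of United States holidays.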
+      // The geographical region based on which the holidays are considered in
+      // time series modeling. If a valid value is specified, then holiday
+      // effects modeling is enabled.
+      HolidayRegion holiday_region = 42;
+
+      // A list of geographical regions that are used for time series modeling.
+      repeated HolidayRegion holiday_regions = 71;
+
+      // The time series id column that was used during ARIMA model training.
+      string time_series_id_column = 43;
+
+      // The time series id columns that were used during ARIMA model training.
+      repeated string time_series_id_columns = 51;
+
+      // The number of periods ahead that need to be forecasted.
+      int64 horizon = 44;
+
+      // The max value of the sum of non-seasonal p and q.
+      int64 auto_arima_max_order = 46;
+
+      // The min value of the sum of non-seasonal p and q.
+      int64 auto_arima_min_order = 83;
+
+      // Number of trials to run this hyperparameter tuning job.
+      int64 num_trials = 47;
+
+      // Maximum number of trials to run in parallel.
+      int64 max_parallel_trials = 48;
+
+      // The target evaluation metrics to optimize the hyperparameters for.
+      repeated HparamTuningEnums.HparamTuningObjective
+          hparam_tuning_objectives = 54;
+
+      // If true, perform decompose time series and save the results.
+      google.protobuf.BoolValue decompose_time_series = 50;
+
+      // If true, clean spikes and dips in the input time series.
+      google.protobuf.BoolValue clean_spikes_and_dips = 52;
+
+      // If true, detect step changes and make data adjustment in the input time
+      // series.
+      google.protobuf.BoolValue adjust_step_changes = 53;
+
+      // If true, enable global explanation during training.
+      google.protobuf.BoolValue enable_global_explain = 55;
+
+      // Number of paths for the sampled Shapley explain method.
+      int64 sampled_shapley_num_paths = 56;
+
+      // Number of integral steps for the integrated gradients explain method.
+      int64 integrated_gradients_num_steps = 57;
+
+      // Categorical feature encoding method.
+      CategoryEncodingMethod.EncodingMethod category_encoding_method = 58;
+
+      // Based on the selected TF version, the corresponding docker image is
+      // used to train external models.
+      string tf_version = 70;
+
+      // Enums for color space, used for processing images in Object Table.
+      // See more details at
+      // https://www.tensorflow.org/io/tutorials/colorspace.
+      ColorSpace color_space = 72;
+
+      // Name of the instance weight column for training data.
+      // This column isn't used as a feature.
+      string instance_weight_column = 73;
+
+      // Smoothing window size for the trend component. When a positive value is
+      // specified, a center moving average smoothing is applied on the history
+      // trend. When the smoothing window is out of the boundary at the
+      // beginning or the end of the trend, the first element or the last
+      // element is padded to fill the smoothing window before the average is
+      // applied.
+      int64 trend_smoothing_window_size = 74;
+
+      // The fraction of the interpolated length of the time series that's used
+      // to model the time series trend component. All of the time points of the
+      // time series are used to model the non-trend component. This training
+      // option accelerates modeling training without sacrificing much
+      // forecasting accuracy. You can use this option with
+      // `minTimeSeriesLength` but not with `maxTimeSeriesLength`.
+      double time_series_length_fraction = 75;
+
+      // The minimum number of time points in a time series that are used in
+      // modeling the trend component of the time series. If you use this option
+      // you must also set the `timeSeriesLengthFraction` option. This training
+      // option ensures that enough time points are available when you use
+      // `timeSeriesLengthFraction` in trend modeling. This is particularly
+      // important when forecasting multiple time series in a single query using
+      // `timeSeriesIdColumn`. If the total number of time points is less than
+      // the `minTimeSeriesLength` value, then the query uses all available time
+      // points.
+      int64 min_time_series_length = 76;
+
+      // The maximum number of time points in a time series that can be used in
+      // modeling the trend component of the time series. Don't use this option
+      // with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options.
+      int64 max_time_series_length = 77;
+
+      // User-selected XGBoost versions for training of XGBoost models.
+      string xgboost_version = 78;
+
+      // Whether to use approximate feature contribution method in XGBoost model
+      // explanation for global explain.
+      google.protobuf.BoolValue approx_global_feature_contrib = 84;
+
+      // Whether the model should include intercept during model training.
+      google.protobuf.BoolValue fit_intercept = 85;
+
+      // Number of principal components to keep in the PCA model. Must be <= the
+      // number of features.
+      int64 num_principal_components = 86;
+
+      // The minimum ratio of cumulative explained variance that needs to be
+      // given by the PCA model.
+      double pca_explained_variance_ratio = 87;
+
+      // If true, scale the feature values by dividing the feature standard
+      // deviation. Currently only applies to PCA.
+      google.protobuf.BoolValue scale_features = 88;
+
+      // The solver for PCA.
+      PcaSolverOptionEnums.PcaSolver pca_solver = 89;
+
+      // Whether to calculate class weights automatically based on the
+      // popularity of each label.
+      google.protobuf.BoolValue auto_class_weights = 90;
+
+      // Activation function of the neural nets.
+      string activation_fn = 91;
+
+      // Optimizer used for training the neural nets.
+      string optimizer = 92;
+
+      // Budget in hours for AutoML training.
+      double budget_hours = 93;
+
+      // Whether to standardize numerical features. Defaults to true.
+      google.protobuf.BoolValue standardize_features = 94;
+
+      // L1 regularization coefficient to activations.
+      double l1_reg_activation = 95;
+
+      // The model registry.
+      ModelRegistryOptionEnums.ModelRegistry model_registry = 96;
+
+      // The version aliases to apply in Vertex AI model registry. Always
+      // overwrite if the version aliases exist in an existing model.
+      repeated string vertex_ai_model_version_aliases = 97;
+    }
+
+    // Information about a single iteration of the training run.
+    message IterationResult {
+      // Information about a single cluster for clustering model.
+      message ClusterInfo {
+        // Centroid id.
+        int64 centroid_id = 1;
+
+        // Cluster radius, the average distance from centroid
+        // to each point assigned to the cluster.
+        google.protobuf.DoubleValue cluster_radius = 2;
+
+        // Cluster size, the total number of points assigned to the cluster.
+        google.protobuf.Int64Value cluster_size = 3;
+      }
+
+      // (Auto-)arima fitting result. Wrap everything in ArimaResult for easier
+      // refactoring if we want to use model-specific iteration results.
+      message ArimaResult {
+        // Arima coefficients.
+        message ArimaCoefficients {
+          // Auto-regressive coefficients, an array of double.
+          repeated double auto_regressive_coefficients = 1;
+
+          // Moving-average coefficients, an array of double.
+          repeated double moving_average_coefficients = 2;
+
+          // Intercept coefficient, just a double not an array.
+          google.protobuf.DoubleValue intercept_coefficient = 3;
+        }
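A short TypeScript sketch of consuming the ClusterInfo entries defined above; the interface is a hand-written stand-in for the generated type, using proto3 JSON camelCase names:

```ts
// Stand-in for IterationResult.ClusterInfo; illustrative only.
interface ClusterInfo {
  centroidId: number;
  clusterRadius?: number; // average distance from centroid to its points
  clusterSize?: number;   // total points assigned to the cluster
}

// Pick the cluster with the most assigned points, if any.
function largestCluster(infos: ClusterInfo[]): ClusterInfo | undefined {
  return infos.reduce<ClusterInfo | undefined>(
    (best, c) => ((c.clusterSize ?? 0) > (best?.clusterSize ?? -1) ? c : best),
    undefined,
  );
}
```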
+        // Arima model information.
+        message ArimaModelInfo {
+          // Non-seasonal order.
+          ArimaOrder non_seasonal_order = 1;
+
+          // Arima coefficients.
+          ArimaCoefficients arima_coefficients = 2;
+
+          // Arima fitting metrics.
+          ArimaFittingMetrics arima_fitting_metrics = 3;
+
+          // Whether Arima model fitted with drift or not. It is always false
+          // when d is not 1.
+          google.protobuf.BoolValue has_drift = 4;
+
+          // The time_series_id value for this time series. It will be one of
+          // the unique values from the time_series_id_column specified during
+          // ARIMA model training. Only present when time_series_id_column
+          // training option was used.
+          string time_series_id = 5;
+
+          // The tuple of time_series_ids identifying this time series. It will
+          // be one of the unique tuples of values present in the
+          // time_series_id_columns specified during ARIMA model training. Only
+          // present when time_series_id_columns training option was used and
+          // the order of values here is the same as the order of
+          // time_series_id_columns.
+          repeated string time_series_ids = 10;
+
+          // Seasonal periods. Repeated because multiple periods are supported
+          // for one time series.
+          repeated SeasonalPeriod.SeasonalPeriodType seasonal_periods = 6;
+
+          // If true, holiday_effect is a part of time series decomposition
+          // result.
+          google.protobuf.BoolValue has_holiday_effect = 7;
+
+          // If true, spikes_and_dips is a part of time series decomposition
+          // result.
+          google.protobuf.BoolValue has_spikes_and_dips = 8;
+
+          // If true, step_changes is a part of time series decomposition
+          // result.
+          google.protobuf.BoolValue has_step_changes = 9;
+        }
+
+        // This message is repeated because there are multiple arima models
+        // fitted in auto-arima. For non-auto-arima model, its size is one.
+        repeated ArimaModelInfo arima_model_info = 1;
+
+        // Seasonal periods. Repeated because multiple periods are supported for
+        // one time series.
+        repeated SeasonalPeriod.SeasonalPeriodType seasonal_periods = 2;
+      }
+
+      // Principal component infos, used only for eigen decomposition based
+      // models, e.g., PCA. Ordered by explained_variance in the descending
+      // order.
+      message PrincipalComponentInfo {
+        // Id of the principal component.
+        google.protobuf.Int64Value principal_component_id = 1;
+
+        // Explained variance by this principal component, which is simply the
+        // eigenvalue.
+        google.protobuf.DoubleValue explained_variance = 2;
+
+        // Explained_variance over the total explained variance.
+        google.protobuf.DoubleValue explained_variance_ratio = 3;
+
+        // The explained_variance is pre-ordered in the descending order to
+        // compute the cumulative explained variance ratio.
+        google.protobuf.DoubleValue cumulative_explained_variance_ratio = 4;
+      }
+
+      // Index of the iteration, 0 based.
+      google.protobuf.Int32Value index = 1;
+
+      // Time taken to run the iteration in milliseconds.
+      google.protobuf.Int64Value duration_ms = 4;
+
+      // Loss computed on the training data at the end of iteration.
+      google.protobuf.DoubleValue training_loss = 5;
+
+      // Loss computed on the eval data at the end of iteration.
+      google.protobuf.DoubleValue eval_loss = 6;
+
+      // Learn rate used for this iteration.
+      double learn_rate = 7;
+
+      // Information about top clusters for clustering models.
+      repeated ClusterInfo cluster_infos = 8;
+
+      // Arima result.
+      ArimaResult arima_result = 9;
+
+      // The information of the principal components.
+      repeated PrincipalComponentInfo principal_component_infos = 10;
+    }
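Because `training_loss` and `eval_loss` are reported per iteration, a caller can watch for divergence between the two as a rough overfitting signal; a minimal sketch with hand-written stand-in types:

```ts
// Stand-in for a few IterationResult fields; illustrative only.
interface IterationResult {
  index: number;
  trainingLoss?: number;
  evalLoss?: number;
}

// Return the first iteration where eval loss exceeds training loss by
// more than `tolerance`, a crude divergence check.
function firstDivergingIteration(
  results: IterationResult[],
  tolerance = 0.05,
): number | undefined {
  return results.find(
    r =>
      r.trainingLoss !== undefined &&
      r.evalLoss !== undefined &&
      r.evalLoss - r.trainingLoss > tolerance,
  )?.index;
}
```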
+
+    // Output only. Options that were used for this training run, including
+    // user-specified and default options.
+    TrainingOptions training_options = 1
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The start time of this training run.
+    google.protobuf.Timestamp start_time = 8
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Output of each iteration run, results.size() <=
+    // max_iterations.
+    repeated IterationResult results = 6
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The evaluation metrics over training/eval data that were
+    // computed at the end of training.
+    EvaluationMetrics evaluation_metrics = 7
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Data split result of the training run. Only set when the
+    // input data is actually split.
+    DataSplitResult data_split_result = 9
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Global explanation contains the explanation of top features
+    // on the model level. Applies to both regression and classification models.
+    GlobalExplanation model_level_global_explanation = 11
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Global explanation contains the explanation of top features
+    // on the class level. Applies to classification models only.
+    repeated GlobalExplanation class_level_global_explanations = 12
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // The model id in the [Vertex AI Model
+    // Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction)
+    // for this training run.
+    string vertex_ai_model_id = 14;
+
+    // Output only. The model version in the [Vertex AI Model
+    // Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction)
+    // for this training run.
+    string vertex_ai_model_version = 15
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // Search space for a double hyperparameter.
+  message DoubleHparamSearchSpace {
+    // Range of a double hyperparameter.
+    message DoubleRange {
+      // Min value of the double parameter.
+      google.protobuf.DoubleValue min = 1;
+
+      // Max value of the double parameter.
+      google.protobuf.DoubleValue max = 2;
+    }
+
+    // Discrete candidates of a double hyperparameter.
+    message DoubleCandidates {
+      // Candidates for the double parameter in increasing order.
+      repeated google.protobuf.DoubleValue candidates = 1;
+    }
+
+    // Search space.
+    oneof search_space {
+      // Range of the double hyperparameter.
+      DoubleRange range = 1;
+
+      // Candidates of the double hyperparameter.
+      DoubleCandidates candidates = 2;
+    }
+  }
+
+  // Search space for an int hyperparameter.
+  message IntHparamSearchSpace {
+    // Range of an int hyperparameter.
+    message IntRange {
+      // Min value of the int parameter.
+      google.protobuf.Int64Value min = 1;
+
+      // Max value of the int parameter.
+      google.protobuf.Int64Value max = 2;
+    }
+
+    // Discrete candidates of an int hyperparameter.
+    message IntCandidates {
+      // Candidates for the int parameter in increasing order.
+      repeated google.protobuf.Int64Value candidates = 1;
+    }
+
+    // Search space.
+    oneof search_space {
+      // Range of the int hyperparameter.
+      IntRange range = 1;
+
+      // Candidates of the int hyperparameter.
+      IntCandidates candidates = 2;
+    }
+  }
+
+  // Search space for string and enum.
+  message StringHparamSearchSpace {
+    // Candidates for the string or enum parameter in lower case.
+    repeated string candidates = 1;
+  }
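The `search_space` oneof admits exactly one variant per field; a TypeScript discriminated union captures that mutual exclusion directly. The types below are illustrative stand-ins, not the generated interfaces:

```ts
// Either a continuous range or a discrete candidate list, never both,
// mirroring the search_space oneof above. Illustrative stand-in types.
type DoubleHparamSearchSpace =
  | {range: {min?: number; max?: number}}
  | {candidates: {candidates: number[]}};

const learnRateSpace: DoubleHparamSearchSpace = {
  range: {min: 0.01, max: 0.5},
};

const dropoutSpace: DoubleHparamSearchSpace = {
  candidates: {candidates: [0.1, 0.3, 0.5]},
};
```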
+  // Search space for int array.
+  message IntArrayHparamSearchSpace {
+    // An array of int.
+    message IntArray {
+      // Elements in the int array.
+      repeated int64 elements = 1;
+    }
+
+    // Candidates for the int array parameter.
+    repeated IntArray candidates = 1;
+  }
+
+  // Hyperparameter search spaces.
+  // These should be a subset of training_options.
+  message HparamSearchSpaces {
+    // Learning rate of training jobs.
+    DoubleHparamSearchSpace learn_rate = 2;
+
+    // L1 regularization coefficient.
+    DoubleHparamSearchSpace l1_reg = 3;
+
+    // L2 regularization coefficient.
+    DoubleHparamSearchSpace l2_reg = 4;
+
+    // Number of clusters for k-means.
+    IntHparamSearchSpace num_clusters = 26;
+
+    // Number of latent factors to train on.
+    IntHparamSearchSpace num_factors = 31;
+
+    // Hidden units for neural network models.
+    IntArrayHparamSearchSpace hidden_units = 34;
+
+    // Mini batch sample size.
+    IntHparamSearchSpace batch_size = 37;
+
+    // Dropout probability for dnn model training and boosted tree models
+    // using dart booster.
+    DoubleHparamSearchSpace dropout = 38;
+
+    // Maximum depth of a tree for boosted tree models.
+    IntHparamSearchSpace max_tree_depth = 41;
+
+    // Subsample the training data to grow tree to prevent overfitting for
+    // boosted tree models.
+    DoubleHparamSearchSpace subsample = 42;
+
+    // Minimum split loss for boosted tree models.
+    DoubleHparamSearchSpace min_split_loss = 43;
+
+    // Hyperparameter for matrix factorization when implicit feedback type is
+    // specified.
+    DoubleHparamSearchSpace wals_alpha = 49;
+
+    // Booster type for boosted tree models.
+    StringHparamSearchSpace booster_type = 56;
+
+    // Number of parallel trees for boosted tree models.
+    IntHparamSearchSpace num_parallel_tree = 57;
+
+    // Dart normalization type for boosted tree models.
+    StringHparamSearchSpace dart_normalize_type = 58;
+
+    // Tree construction algorithm for boosted tree models.
+    StringHparamSearchSpace tree_method = 59;
+
+    // Minimum sum of instance weight needed in a child for boosted tree models.
+    IntHparamSearchSpace min_tree_child_weight = 60;
+
+    // Subsample ratio of columns when constructing each tree for boosted tree
+    // models.
+    DoubleHparamSearchSpace colsample_bytree = 61;
+
+    // Subsample ratio of columns for each level for boosted tree models.
+    DoubleHparamSearchSpace colsample_bylevel = 62;
+
+    // Subsample ratio of columns for each node(split) for boosted tree models.
+    DoubleHparamSearchSpace colsample_bynode = 63;
+
+    // Activation functions of neural network models.
+    StringHparamSearchSpace activation_fn = 67;
+
+    // Optimizer of TF models.
+    StringHparamSearchSpace optimizer = 68;
+  }
+
+  // Training info of a trial in [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models.
+  message HparamTuningTrial {
+    // Current status of the trial.
+    enum TrialStatus {
+      // Default value.
+      TRIAL_STATUS_UNSPECIFIED = 0;
+
+      // Scheduled but not started.
+      NOT_STARTED = 1;
+
+      // Running state.
+      RUNNING = 2;
+
+      // The trial succeeded.
+      SUCCEEDED = 3;
+
+      // The trial failed.
+      FAILED = 4;
+
+      // The trial is infeasible due to the invalid params.
+      INFEASIBLE = 5;
+
+      // Trial stopped early because it's not promising.
+      STOPPED_EARLY = 6;
+    }
+
+    // 1-based index of the trial.
+    int64 trial_id = 1;
+
+    // Starting time of the trial.
+    int64 start_time_ms = 2;
+
+    // Ending time of the trial.
+    int64 end_time_ms = 3;
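Given the TrialStatus enum above, picking the best completed trial by `eval_loss` takes only a few lines; a sketch with hand-written stand-in types:

```ts
// Stand-ins for the trial shape; illustrative only.
type TrialStatus =
  | 'TRIAL_STATUS_UNSPECIFIED'
  | 'NOT_STARTED'
  | 'RUNNING'
  | 'SUCCEEDED'
  | 'FAILED'
  | 'INFEASIBLE'
  | 'STOPPED_EARLY';

interface HparamTuningTrial {
  trialId: number;
  status: TrialStatus;
  evalLoss?: number;
}

// Lowest eval loss among trials that actually finished successfully.
function bestTrial(trials: HparamTuningTrial[]): HparamTuningTrial | undefined {
  return trials
    .filter(t => t.status === 'SUCCEEDED' && t.evalLoss !== undefined)
    .sort((a, b) => a.evalLoss! - b.evalLoss!)[0];
}
```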
+    // The hyperparameters selected for this trial.
+    TrainingRun.TrainingOptions hparams = 4;
+
+    // Evaluation metrics of this trial calculated on the test data.
+    // Empty in Job API.
+    EvaluationMetrics evaluation_metrics = 5;
+
+    // The status of the trial.
+    TrialStatus status = 6;
+
+    // Error message for FAILED and INFEASIBLE trial.
+    string error_message = 7;
+
+    // Loss computed on the training data at the end of trial.
+    google.protobuf.DoubleValue training_loss = 8;
+
+    // Loss computed on the eval data at the end of trial.
+    google.protobuf.DoubleValue eval_loss = 9;
+
+    // Hyperparameter tuning evaluation metrics of this trial calculated on the
+    // eval data. Unlike evaluation_metrics, only the fields corresponding to
+    // the hparam_tuning_objectives are set.
+    EvaluationMetrics hparam_tuning_evaluation_metrics = 10;
+  }
+
+  // Output only. A hash of this resource.
+  string etag = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. Unique identifier for this model.
+  ModelReference model_reference = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The time when this model was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time when this model was last modified, in milliseconds
+  // since the epoch.
+  int64 last_modified_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. A user-friendly description of this model.
+  string description = 12 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A descriptive name for this model.
+  string friendly_name = 14 [(google.api.field_behavior) = OPTIONAL];
+
+  // The labels associated with this model. You can use these to organize
+  // and group your models. Label keys and values can be no longer
+  // than 63 characters, can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  // Label values are optional. Label keys must start with a letter and each
+  // label in the list must have a different key.
+  map<string, string> labels = 15;
+
+  // Optional. The time when this model expires, in milliseconds since the
+  // epoch. If not present, the model will persist indefinitely. Expired models
+  // will be deleted and their storage reclaimed. The defaultTableExpirationMs
+  // property of the encapsulating dataset can be used to set a default
+  // expirationTime on newly created models.
+  int64 expiration_time = 16 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The geographic location where the model resides. This value
+  // is inherited from the dataset.
+  string location = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Custom encryption configuration (e.g., Cloud KMS keys). This shows the
+  // encryption configuration of the model data while stored in BigQuery
+  // storage. This field can be used with PatchModel to update encryption key
+  // for an already encrypted model.
+  EncryptionConfiguration encryption_configuration = 17;
+
+  // Output only. Type of the model resource.
+  ModelType model_type = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Information for all training runs in increasing order of start_time.
+  repeated TrainingRun training_runs = 9;
+
+  // Output only. Input feature columns for the model inference. If the model is
+  // trained with TRANSFORM clause, these are the input of the TRANSFORM clause.
+  repeated StandardSqlField feature_columns = 10
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Label columns that were used to train this model.
+  // The output of the model will have a "predicted_" prefix to these columns.
+  repeated StandardSqlField label_columns = 11
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. This field will be populated if a TRANSFORM clause was used
+  // to train a model. TRANSFORM clause (if used) takes feature_columns as
+  // input and outputs transform_columns. transform_columns are then used to
+  // train the model.
+  repeated TransformColumn transform_columns = 26
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. All hyperparameter search spaces in this model.
+  HparamSearchSpaces hparam_search_spaces = 18
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The default trial_id to use in TVFs when the trial_id is not
+  // passed in. For single-objective [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, this is the best trial ID. For multi-objective [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, this is the smallest trial ID among all Pareto optimal trials.
+  int64 default_trial_id = 21 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Trials of a [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // model sorted by trial_id.
+  repeated HparamTuningTrial hparam_trials = 20
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. For single-objective [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, it only contains the best trial. For multi-objective
+  // [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, it contains all Pareto optimal trials sorted by trial_id.
+  repeated int64 optimal_trial_ids = 22
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Remote model info.
+  RemoteModelInfo remote_model_info = 25
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Request format for getting information about a BigQuery ML model.
+message GetModelRequest {
+  // Required. Project ID of the requested model.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the requested model.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Model ID of the requested model.
+  string model_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for patching BigQuery ML model metadata.
+message PatchModelRequest {
+  // Required. Project ID of the model to patch.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the model to patch.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Model ID of the model to patch.
+  string model_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Patched model.
+  // Follows RFC5789 patch semantics. Missing fields are not updated.
+  // To clear a field, explicitly set to default value.
+  Model model = 4 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for deleting BigQuery ML models.
+message DeleteModelRequest {
+  // Required. Project ID of the model to delete.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required.
Dataset ID of the model to delete. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Model ID of the model to delete. + string model_id = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Request format for listing BigQuery ML models. +message ListModelsRequest { + // Required. Project ID of the models to list. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the models to list. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The maximum number of results to return in a single response page. + // Leverage the page tokens to iterate through the entire collection. + google.protobuf.UInt32Value max_results = 3; + + // Page token, returned by a previous call to request the next page of + // results + string page_token = 4; +} + +// Response format for a single page when listing BigQuery ML models. +message ListModelsResponse { + // Models in the requested dataset. Only the following fields are populated: + // model_reference, model_type, creation_time, last_modified_time and + // labels. + repeated Model models = 1; + + // A token to request the next page of results. + string next_page_token = 2; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/model_reference.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/model_reference.proto.baseline new file mode 100755 index 000000000..9f190254e --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/model_reference.proto.baseline @@ -0,0 +1,37 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ModelReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Id path of a model. +message ModelReference { + // Required. The ID of the project containing this model. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the dataset containing this model. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the model. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 1,024 characters. 
+ string model_id = 3 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/partitioning_definition.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/partitioning_definition.proto.baseline new file mode 100755 index 000000000..f331cb56a --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/partitioning_definition.proto.baseline @@ -0,0 +1,49 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "PartitioningDefinitionProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// The partitioning information, which includes managed table, external table +// and metastore partitioned table partition information. +message PartitioningDefinition { + // Optional. Details about each partitioning column. This field is output only + // for all partitioning types other than metastore partitioned tables. + // BigQuery native tables only support 1 partitioning column. Other table + // types may support 0, 1 or more partitioning columns. + // For metastore partitioned tables, the order must match the definition order + // in the Hive Metastore, where it must match the physical layout of the + // table. For example, + // + // CREATE TABLE a_table(id BIGINT, name STRING) + // PARTITIONED BY (city STRING, state STRING). + // + // In this case the values must be ['city', 'state'] in that order. + repeated PartitionedColumn partitioned_column = 1 + [(google.api.field_behavior) = OPTIONAL]; +} + +// The partitioning column information. +message PartitionedColumn { + // Required. The name of the partition column. + optional string field = 1 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/privacy_policy.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/privacy_policy.proto.baseline new file mode 100755 index 000000000..35f40a0a0 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/privacy_policy.proto.baseline @@ -0,0 +1,169 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "PrivacyPolicyProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Represents privacy policy associated with "aggregation threshold" method. +message AggregationThresholdPolicy { + // Optional. The threshold for the "aggregation threshold" policy. + optional int64 threshold = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The privacy unit column(s) associated with this policy. + // For now, only one column per data source object (table, view) is allowed as + // a privacy unit column. + // Representing as a repeated field in metadata for extensibility to + // multiple columns in future. + // Duplicates and Repeated struct fields are not allowed. + // For nested fields, use dot notation ("outer.inner") + repeated string privacy_unit_columns = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Represents privacy policy associated with "differential privacy" method. +message DifferentialPrivacyPolicy { + // Optional. The maximum epsilon value that a query can consume. If the + // subscriber specifies epsilon as a parameter in a SELECT query, it must be + // less than or equal to this value. The epsilon parameter controls the amount + // of noise that is added to the groups — a higher epsilon means less noise. + optional double max_epsilon_per_query = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The delta value that is used per query. Delta represents the + // probability that any row will fail to be epsilon differentially private. + // Indicates the risk associated with exposing aggregate rows in the result of + // a query. + optional double delta_per_query = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The maximum groups contributed value that is used per query. + // Represents the maximum number of groups to which each protected entity can + // contribute. Changing this value does not improve or worsen privacy. The + // best value for accuracy and utility depends on the query and data. + optional int64 max_groups_contributed = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The privacy unit column associated with this policy. Differential + // privacy policies can only have one privacy unit column per data source + // object (table, view). + optional string privacy_unit_column = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The total epsilon budget for all queries against the + // privacy-protected view. Each subscriber query against this view charges the + // amount of epsilon they request in their query. If there is sufficient + // budget, then the subscriber query attempts to complete. It might still fail + // due to other reasons, in which case the charge is refunded. If there is + // insufficient budget the query is rejected. There might be multiple charge + // attempts if a single query references multiple views. In this case there + // must be sufficient budget for all charges or the query is rejected and + // charges are refunded in best effort. The budget does not have a refresh + // policy and can only be updated via ALTER VIEW or circumvented by creating a + // new view that can be queried with a fresh budget. + optional double epsilon_budget = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The total delta budget for all queries against the
+  // privacy-protected view. Each subscriber query against this view charges
+  // the amount of delta that is pre-defined by the contributor through the
+  // privacy policy delta_per_query field. If there is sufficient budget, then
+  // the subscriber query attempts to complete. It might still fail due to
+  // other reasons, in which case the charge is refunded. If there is
+  // insufficient budget the query is rejected. There might be multiple charge
+  // attempts if a single query references multiple views. In this case there
+  // must be sufficient budget for all charges or the query is rejected and
+  // charges are refunded on a best-effort basis. The budget does not have a
+  // refresh policy and can only be updated via ALTER VIEW or circumvented by
+  // creating a new view that can be queried with a fresh budget.
+  optional double delta_budget = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The epsilon budget remaining. If budget is exhausted, no more
+  // queries are allowed. Note that the budget for queries that are in progress
+  // is deducted before the query executes. If the query fails or is cancelled
+  // then the budget is refunded. In this case the amount of budget remaining
+  // can increase.
+  optional double epsilon_budget_remaining = 7
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The delta budget remaining. If budget is exhausted, no more
+  // queries are allowed. Note that the budget for queries that are in progress
+  // is deducted before the query executes. If the query fails or is cancelled
+  // then the budget is refunded. In this case the amount of budget remaining
+  // can increase.
+  optional double delta_budget_remaining = 8
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Represents privacy policy associated with "join restrictions". Join
+// restriction gives data providers the ability to enforce joins on the
+// 'join_allowed_columns' when data is queried from a privacy protected view.
+message JoinRestrictionPolicy {
+  // Enum for Join Restrictions policy.
+  enum JoinCondition {
+    // A join is neither required nor restricted on any column. Default value.
+    JOIN_CONDITION_UNSPECIFIED = 0;
+
+    // A join is required on at least one of the specified columns.
+    JOIN_ANY = 1;
+
+    // A join is required on all specified columns.
+    JOIN_ALL = 2;
+
+    // A join is not required, but if present it is only permitted on
+    // 'join_allowed_columns'.
+    JOIN_NOT_REQUIRED = 3;
+
+    // Joins are blocked for all queries.
+    JOIN_BLOCKED = 4;
+  }
+
+  // Optional. Specifies if a join is required or not on queries for the view.
+  // Default is JOIN_CONDITION_UNSPECIFIED.
+  optional JoinCondition join_condition = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The only columns that joins are allowed on.
+  // This field must be specified for join_conditions JOIN_ANY and JOIN_ALL
+  // and it cannot be set for JOIN_BLOCKED.
+  repeated string join_allowed_columns = 2
+      [(google.api.field_behavior) = OPTIONAL];
+}
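The charge-then-refund bookkeeping described for the epsilon and delta budgets can be summarized in a few lines; a hypothetical sketch of the semantics, not BigQuery's actual implementation:

```ts
// Hypothetical model of the budget semantics described above.
interface PrivacyBudget {
  epsilonBudget: number;          // total grant (epsilon_budget)
  epsilonBudgetRemaining: number; // epsilon_budget_remaining
}

// A query charges its requested epsilon before it executes; if the
// remaining budget is insufficient, the query is rejected outright.
function chargeQuery(budget: PrivacyBudget, requestedEpsilon: number): boolean {
  if (requestedEpsilon > budget.epsilonBudgetRemaining) {
    return false; // insufficient budget: rejected
  }
  budget.epsilonBudgetRemaining -= requestedEpsilon;
  return true;
}

// A failed or cancelled query is refunded, so the remaining budget can
// increase again (capped at the original grant in this sketch).
function refundQuery(budget: PrivacyBudget, requestedEpsilon: number): void {
  budget.epsilonBudgetRemaining = Math.min(
    budget.epsilonBudget,
    budget.epsilonBudgetRemaining + requestedEpsilon,
  );
}
```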
+// Represents privacy policy that contains the privacy requirements specified
+// by the data owner. Currently, this is only supported on views.
+message PrivacyPolicy {
+  // Privacy policy associated with this requirement specification. Only one of
+  // the privacy methods is allowed per data source object.
+  oneof privacy_policy {
+    // Optional. Policy used for aggregation thresholds.
+    AggregationThresholdPolicy aggregation_threshold_policy = 2
+        [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Policy used for differential privacy.
+    DifferentialPrivacyPolicy differential_privacy_policy = 3
+        [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Optional. Join restriction policy is outside of the oneof policies, since
+  // this policy can be set along with other policies. This policy gives data
+  // providers the ability to enforce joins on the 'join_allowed_columns' when
+  // data is queried from a privacy protected view.
+  optional JoinRestrictionPolicy join_restriction_policy = 1
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/project.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/project.proto.baseline
new file mode 100755
index 000000000..f04d3962e
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/project.proto.baseline
@@ -0,0 +1,61 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "ProjectProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// This is an experimental RPC service definition for the BigQuery
+// Project Service.
+//
+// It should not be relied on for production use cases at this time.
+service ProjectService {
+  option (google.api.default_host) = "bigquery.googleapis.com";
+  option (google.api.oauth_scopes) =
+      "https://www.googleapis.com/auth/bigquery,"
+      "https://www.googleapis.com/auth/cloud-platform,"
+      "https://www.googleapis.com/auth/cloud-platform.read-only";
+
+  // RPC to get the service account for a project used for interactions with
+  // Google Cloud KMS.
+  rpc GetServiceAccount(GetServiceAccountRequest)
+      returns (GetServiceAccountResponse) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/serviceAccount"
+    };
+  }
+}
+
+// Request object of GetServiceAccount.
+message GetServiceAccountRequest {
+  // Required. ID of the project.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+}
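A minimal usage sketch against the client this baseline would generate for ProjectService; the import path and tuple-style response are assumptions based on the generator's usual output, not something this diff pins down:

```ts
// Hypothetical usage of the generated ProjectServiceClient; the package
// name '@google-cloud/bigquery-v2' is an assumption for illustration.
import {ProjectServiceClient} from '@google-cloud/bigquery-v2';

async function showServiceAccount(projectId: string): Promise<void> {
  const client = new ProjectServiceClient();
  // GAPIC-generated methods conventionally resolve to a [response, ...] tuple.
  const [response] = await client.getServiceAccount({projectId});
  console.log(`KMS service account: ${response.email}`);
}
```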
+// Response object of GetServiceAccount.
+message GetServiceAccountResponse {
+  // The resource type of the response.
+  string kind = 1;
+
+  // The service account email address.
+  string email = 2;
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/query_parameter.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/query_parameter.proto.baseline
new file mode 100755
index 000000000..e65a95b80
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/query_parameter.proto.baseline
@@ -0,0 +1,101 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "QueryParameterProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// The type of a struct parameter.
+message QueryParameterStructType {
+  // Optional. The name of this field.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. The type of this field.
+  QueryParameterType type = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Human-oriented description of the field.
+  string description = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The type of a query parameter.
+message QueryParameterType {
+  // Required. The top level type of this field.
+  string type = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The type of the array's elements, if this is an array.
+  QueryParameterType array_type = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The types of the fields of this struct, in order, if this is a
+  // struct.
+  repeated QueryParameterStructType struct_types = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The element type of the range, if this is a range.
+  QueryParameterType range_element_type = 4
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Represents the value of a range.
+message RangeValue {
+  // Optional. The start value of the range. A missing value represents an
+  // unbounded start.
+  QueryParameterValue start = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The end value of the range. A missing value represents an
+  // unbounded end.
+  QueryParameterValue end = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The value of a query parameter.
+message QueryParameterValue {
+  // Optional. The value of this parameter, if a simple scalar type.
+  google.protobuf.StringValue value = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The array values, if this is an array type.
+  repeated QueryParameterValue array_values = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // The struct field values.
+  map<string, QueryParameterValue> struct_values = 3;
+
+  // Optional. The range value, if this is a range type.
+  RangeValue range_value = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // This field should not be used.
+  repeated google.protobuf.Value alt_struct_values = 5;
+}
+
+// A parameter given to a query.
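Putting the type and value messages together, an ARRAY&lt;INT64&gt; parameter would be expressed roughly as follows; the literal uses proto3 JSON camelCase names and is illustrative only:

```ts
// Illustrative QueryParameter for ARRAY<INT64>: arrayType describes the
// element type, and arrayValues carries the elements. INT64 values are
// conventionally JSON-encoded as strings.
const idsParam = {
  name: 'ids',
  parameterType: {type: 'ARRAY', arrayType: {type: 'INT64'}},
  parameterValue: {
    arrayValues: [{value: '1'}, {value: '2'}, {value: '3'}],
  },
};
```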
+message QueryParameter { + // Optional. If unset, this is a positional parameter. Otherwise, should be + // unique within a query. + string name = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. The type of this parameter. + QueryParameterType parameter_type = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Required. The value of this parameter. + QueryParameterValue parameter_value = 3 + [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/range_partitioning.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/range_partitioning.proto.baseline new file mode 100755 index 000000000..1cfded0c9 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/range_partitioning.proto.baseline @@ -0,0 +1,47 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "RangePartitioningProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message RangePartitioning { + // Defines the ranges for range partitioning. + message Range { + // Required. The start of range partitioning, inclusive. This field is an + // INT64 value represented as a string. + string start = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The end of range partitioning, exclusive. This field is an + // INT64 value represented as a string. + string end = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The width of each interval. This field is an INT64 value + // represented as a string. + string interval = 3 [(google.api.field_behavior) = REQUIRED]; + } + + // Required. The name of the column to partition the table on. It must be a + // top-level, INT64 column whose mode is NULLABLE or REQUIRED. + string field = 1 [(google.api.field_behavior) = REQUIRED]; + + // Defines the ranges for range partitioning. + Range range = 2; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/restriction_config.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/restriction_config.proto.baseline new file mode 100755 index 000000000..5d9422db3 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/restriction_config.proto.baseline @@ -0,0 +1,40 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "RestrictionConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message RestrictionConfig { + // RestrictionType specifies the type of dataset/table restriction. + enum RestrictionType { + // Should never be used. + RESTRICTION_TYPE_UNSPECIFIED = 0; + + // Restrict data egress. See [Data + // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress) + // for more details. + RESTRICTED_DATA_EGRESS = 1; + } + + // Output only. Specifies the type of dataset/table restriction. + RestrictionType type = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/routine.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/routine.proto.baseline new file mode 100755 index 000000000..352b74524 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/routine.proto.baseline @@ -0,0 +1,540 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/routine_reference.proto"; +import "google/cloud/bigquery/v2/standard_sql.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "RoutineProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Routine Service. +// +// It should not be relied on for production use cases at this time. +service RoutineService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Gets the specified routine resource by routine ID. + rpc GetRoutine(GetRoutineRequest) returns (Routine) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines/{routine_id=*}" + }; + } + + // Creates a new routine in the dataset. + rpc InsertRoutine(InsertRoutineRequest) returns (Routine) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines" + body: "routine" + }; + } + + // Updates information in an existing routine. 
The update method replaces the
+  // entire Routine resource.
+  rpc UpdateRoutine(UpdateRoutineRequest) returns (Routine) {
+    option (google.api.http) = {
+      put: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines/{routine_id=*}"
+      body: "routine"
+    };
+  }
+
+  // Patches information in an existing routine. The patch method does a
+  // partial update to an existing Routine resource.
+  rpc PatchRoutine(PatchRoutineRequest) returns (Routine) {}
+
+  // Deletes the routine specified by routineId from the dataset.
+  rpc DeleteRoutine(DeleteRoutineRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines/{routine_id=*}"
+    };
+  }
+
+  // Lists all routines in the specified dataset. Requires the READER dataset
+  // role.
+  rpc ListRoutines(ListRoutinesRequest) returns (ListRoutinesResponse) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines"
+    };
+  }
+}
+
+// A user-defined function or a stored procedure.
+message Routine {
+  // The fine-grained type of the routine.
+  enum RoutineType {
+    // Default value.
+    ROUTINE_TYPE_UNSPECIFIED = 0;
+
+    // Non-built-in persistent scalar function.
+    SCALAR_FUNCTION = 1;
+
+    // Stored procedure.
+    PROCEDURE = 2;
+
+    // Non-built-in persistent TVF.
+    TABLE_VALUED_FUNCTION = 3;
+
+    // Non-built-in persistent aggregate function.
+    AGGREGATE_FUNCTION = 4;
+  }
+
+  // The language of the routine.
+  enum Language {
+    // Default value.
+    LANGUAGE_UNSPECIFIED = 0;
+
+    // SQL language.
+    SQL = 1;
+
+    // JavaScript language.
+    JAVASCRIPT = 2;
+
+    // Python language.
+    PYTHON = 3;
+
+    // Java language.
+    JAVA = 4;
+
+    // Scala language.
+    SCALA = 5;
+  }
+
+  // Input/output argument of a function or a stored procedure.
+  message Argument {
+    // Represents the kind of a given argument.
+    enum ArgumentKind {
+      // Default value.
+      ARGUMENT_KIND_UNSPECIFIED = 0;
+
+      // The argument is a variable with fully specified type, which can be a
+      // struct or an array, but not a table.
+      FIXED_TYPE = 1;
+
+      // The argument is any type, including struct or array, but not a table.
+      // To be added: FIXED_TABLE, ANY_TABLE
+      ANY_TYPE = 2;
+    }
+
+    // The input/output mode of the argument.
+    enum Mode {
+      // Default value.
+      MODE_UNSPECIFIED = 0;
+
+      // The argument is input-only.
+      IN = 1;
+
+      // The argument is output-only.
+      OUT = 2;
+
+      // The argument is both an input and an output.
+      INOUT = 3;
+    }
+
+    // Optional. The name of this argument. Can be absent for function return
+    // argument.
+    string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Defaults to FIXED_TYPE.
+    ArgumentKind argument_kind = 2 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Specifies whether the argument is input or output.
+    // Can be set for procedures only.
+    Mode mode = 3;
+
+    // Required unless argument_kind = ANY_TYPE.
+    StandardSqlDataType data_type = 4;
+
+    // Optional. Whether the argument is an aggregate function parameter.
+    // Must be unset for routine types other than AGGREGATE_FUNCTION.
+    // For AGGREGATE_FUNCTION, if set to false, it is equivalent to adding "NOT
+    // AGGREGATE" clause in DDL; otherwise, it is equivalent to omitting "NOT
+    // AGGREGATE" clause in DDL.
+    google.protobuf.BoolValue is_aggregate = 6
+        [(google.api.field_behavior) = OPTIONAL];
+  }
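A sketch of how an Argument for a procedure might look on the wire, combining the ArgumentKind and Mode enums above; the field names follow proto3 JSON conventions and are not taken from generated code:

```ts
// Illustrative stand-in for Routine.Argument: an INOUT procedure
// argument with a fully specified INT64 type.
const totalArg = {
  name: 'total',
  argumentKind: 'FIXED_TYPE', // the type below is fully specified
  mode: 'INOUT',              // modes are meaningful for procedures only
  dataType: {typeKind: 'INT64'},
};
```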
+
+  // JavaScript UDF determinism levels.
+  //
+  // If all JavaScript UDFs are DETERMINISTIC, the query result is
+  // potentially cacheable (see below). If any JavaScript UDF is
+  // NOT_DETERMINISTIC, the query result is not cacheable.
+  //
+  // Even if a JavaScript UDF is deterministic, many other factors can prevent
+  // usage of cached query results. Example factors include, but are not
+  // limited to: DDL/DML, non-deterministic SQL function calls, update of
+  // referenced tables/views/UDFs or imported JavaScript libraries.
+  //
+  // SQL UDFs cannot have determinism specified. Their determinism is
+  // automatically determined.
+  enum DeterminismLevel {
+    // The determinism of the UDF is unspecified.
+    DETERMINISM_LEVEL_UNSPECIFIED = 0;
+
+    // The UDF is deterministic, meaning that 2 function calls with the same
+    // inputs always produce the same result, even across 2 query runs.
+    DETERMINISTIC = 1;
+
+    // The UDF is not deterministic.
+    NOT_DETERMINISTIC = 2;
+  }
+
+  // Security mode.
+  enum SecurityMode {
+    // The security mode of the routine is unspecified.
+    SECURITY_MODE_UNSPECIFIED = 0;
+
+    // The routine is to be executed with the privileges of the user who
+    // defines it.
+    DEFINER = 1;
+
+    // The routine is to be executed with the privileges of the user who
+    // invokes it.
+    INVOKER = 2;
+  }
+
+  // Options for a remote user-defined function.
+  message RemoteFunctionOptions {
+    // Endpoint of the user-provided remote service, e.g.
+    // ```https://us-east1-my_gcf_project.cloudfunctions.net/remote_add```
+    string endpoint = 1;
+
+    // Fully qualified name of the user-provided connection object which holds
+    // the authentication information to send requests to the remote service.
+    // Format:
+    // ```"projects/{projectId}/locations/{locationId}/connections/{connectionId}"```
+    string connection = 2;
+
+    // User-defined context as a set of key/value pairs, which will be sent as
+    // function invocation context together with batched arguments in the
+    // requests to the remote service. The total number of bytes of keys and
+    // values must be less than 8KB.
+    map<string, string> user_defined_context = 3;
+
+    // Max number of rows in each batch sent to the remote service.
+    // If absent or if 0, BigQuery dynamically decides the number of rows in a
+    // batch.
+    int64 max_batching_rows = 4;
+  }
+
+  // Data governance type values. Only supports `DATA_MASKING`.
+  enum DataGovernanceType {
+    // The data governance type is unspecified.
+    DATA_GOVERNANCE_TYPE_UNSPECIFIED = 0;
+
+    // The data governance type is data masking.
+    DATA_MASKING = 1;
+  }
+
+  // Output only. A hash of this resource.
+  string etag = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. Reference describing the ID of this routine.
+  RoutineReference routine_reference = 2
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The type of routine.
+  RoutineType routine_type = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The time when this routine was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time when this routine was last modified, in milliseconds
+  // since the epoch.
+  int64 last_modified_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Defaults to "SQL" if remote_function_options field is absent, not
+  // set otherwise.
+  Language language = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional.
+  repeated Argument arguments = 7;
+
+  // Optional if language = "SQL"; required otherwise.
+ // Cannot be set if routine_type = "TABLE_VALUED_FUNCTION". + // + // If absent, the return type is inferred from definition_body at query time + // in each query that references this routine. If present, then the evaluated + // result will be cast to the specified returned type at query time. + // + // For example, for the functions created with the following statements: + // + // * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);` + // + // * `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));` + // + // * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));` + // + // The return_type is `{type_kind: "FLOAT64"}` for `Add` and `Decrement`, and + // is absent for `Increment` (inferred as FLOAT64 at query time). + // + // Suppose the function `Add` is replaced by + // `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);` + // + // Then the inferred return type of `Increment` is automatically changed to + // INT64 at query time, while the return type of `Decrement` remains FLOAT64. + StandardSqlDataType return_type = 10; + + // Optional. Can be set only if routine_type = "TABLE_VALUED_FUNCTION". + // + // If absent, the return table type is inferred from definition_body at query + // time in each query that references this routine. If present, then the + // columns in the evaluated table result will be cast to match the column + // types specified in return table type, at query time. + StandardSqlTableType return_table_type = 13 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If language = "JAVASCRIPT", this field stores the path of the + // imported JAVASCRIPT libraries. + repeated string imported_libraries = 8; + + // Required. The body of the routine. + // + // For functions, this is the expression in the AS clause. + // + // If language=SQL, it is the substring inside (but excluding) the + // parentheses. For example, for the function created with the following + // statement: + // + // `CREATE FUNCTION JoinLines(x string, y string) as (concat(x, "\n", y))` + // + // The definition_body is `concat(x, "\n", y)` (\n is not replaced with + // linebreak). + // + // If language=JAVASCRIPT, it is the evaluated string in the AS clause. + // For example, for the function created with the following statement: + // + // `CREATE FUNCTION f() RETURNS STRING LANGUAGE js AS 'return "\n";\n'` + // + // The definition_body is + // + // `return "\n";\n` + // + // Note that both \n are replaced with linebreaks. + string definition_body = 9; + + // Optional. The description of the routine, if defined. + string description = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The determinism level of the JavaScript UDF, if defined. + DeterminismLevel determinism_level = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The security mode of the routine, if defined. If not defined, the + // security mode is automatically determined from the routine's configuration. + SecurityMode security_mode = 18 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Use this option to catch many common errors. Error checking is + // not exhaustive, and successfully creating a procedure doesn't guarantee + // that the procedure will successfully execute at runtime. If `strictMode` is + // set to `TRUE`, the procedure body is further checked for errors such as + // non-existent tables or columns. The `CREATE PROCEDURE` statement fails if + // the body fails any of these checks. 
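The return_type and definition_body comments above pin down exactly what an insert payload must carry. A minimal sketch of creating the `Add` function from the example, assuming the generator emits a `RoutineServiceClient` with camelCase request fields (the client and package names are assumptions, not taken from this baseline):

```ts
import {RoutineServiceClient} from '@google-cloud/bigquery-v2'; // assumed package name

// Sketch: create Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y)
// via InsertRoutineRequest, mirroring the return_type/definition_body docs above.
async function createAddFunction(projectId: string, datasetId: string) {
  const client = new RoutineServiceClient();
  const [routine] = await client.insertRoutine({
    projectId,
    datasetId,
    routine: {
      routineReference: {projectId, datasetId, routineId: 'Add'},
      routineType: 'SCALAR_FUNCTION',
      language: 'SQL',
      arguments: [
        {name: 'x', dataType: {typeKind: 'FLOAT64'}},
        {name: 'y', dataType: {typeKind: 'FLOAT64'}},
      ],
      returnType: {typeKind: 'FLOAT64'}, // omit to infer at query time instead
      definitionBody: 'x + y',           // the expression inside the AS clause
    },
  });
  return routine;
}
```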
+  //
+  // If `strictMode` is set to `FALSE`, the procedure body is checked only for
+  // syntax. For procedures that invoke themselves recursively, specify
+  // `strictMode=FALSE` to avoid non-existent procedure errors during
+  // validation.
+  //
+  // Default value is `TRUE`.
+  google.protobuf.BoolValue strict_mode = 14
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Remote function specific options.
+  RemoteFunctionOptions remote_function_options = 15
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Spark specific options.
+  SparkOptions spark_options = 16 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If set to `DATA_MASKING`, the function is validated and made
+  // available as a masking function. For more information, see [Create custom
+  // masking
+  // routines](https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask).
+  DataGovernanceType data_governance_type = 17
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Options for a user-defined Spark routine.
+message SparkOptions {
+  // Fully qualified name of the user-provided Spark connection object. Format:
+  // ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"```
+  string connection = 1;
+
+  // Runtime version. If not specified, the default runtime version is used.
+  string runtime_version = 2;
+
+  // Custom container image for the runtime environment.
+  string container_image = 3;
+
+  // Configuration properties as a set of key/value pairs, which will be passed
+  // on to the Spark application. For more information, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html) and the
+  // [procedure option
+  // list](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#procedure_option_list).
+  map<string, string> properties = 4;
+
+  // The main file/jar URI of the Spark application. Exactly one of the
+  // definition_body field and the main_file_uri field must be set for Python.
+  // Exactly one of the main_class and main_file_uri fields
+  // should be set for the Java/Scala language type.
+  string main_file_uri = 5;
+
+  // Python files to be placed on the PYTHONPATH for PySpark application.
+  // Supported file types: `.py`, `.egg`, and `.zip`. For more information
+  // about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string py_file_uris = 6;
+
+  // JARs to include on the driver and executor CLASSPATH.
+  // For more information about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string jar_uris = 7;
+
+  // Files to be placed in the working directory of each executor.
+  // For more information about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string file_uris = 8;
+
+  // Archive files to be extracted into the working directory of each executor.
+  // For more information about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string archive_uris = 9;
+
+  // The fully qualified name of a class in jar_uris, for example,
+  // com.example.wordcount. Exactly one of the main_class and main_jar_uri
+  // fields should be set for the Java/Scala language type.
+  string main_class = 10;
+}
+
+// Describes the format for getting information about a routine.
+message GetRoutineRequest {
+  // Required. Project ID of the requested routine
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required.
Dataset ID of the requested routine + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the requested routine + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for inserting a routine. +message InsertRoutineRequest { + // Required. Project ID of the new routine + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the new routine + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. A routine resource to insert + Routine routine = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for updating a routine. +message UpdateRoutineRequest { + // Required. Project ID of the routine to update + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routine to update + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the routine to update + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. A routine resource which will replace the specified routine + Routine routine = 4 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for the partial update (patch) of a routine. +message PatchRoutineRequest { + // Required. Project ID of the routine to update + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routine to update + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the routine to update + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. A routine resource which will be used to partially + // update the specified routine + Routine routine = 4 [(google.api.field_behavior) = REQUIRED]; + + // Only the Routine fields in the field mask are updated + // by the given routine. Repeated routine fields will be fully replaced + // if contained in the field mask. + google.protobuf.FieldMask field_mask = 5; +} + +// Describes the format for deleting a routine. +message DeleteRoutineRequest { + // Required. Project ID of the routine to delete + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routine to delete + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the routine to delete + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for listing routines. +message ListRoutinesRequest { + // Required. Project ID of the routines to list + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routines to list + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The maximum number of results to return in a single response page. + // Leverage the page tokens to iterate through the entire collection. + google.protobuf.UInt32Value max_results = 3; + + // Page token, returned by a previous call, to request the next page of + // results + string page_token = 4; + + // If set, then only the Routines matching this filter are returned. + // The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + // is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. + string filter = 6; +} + +// Describes the format of a single result page when listing routines. +message ListRoutinesResponse { + // Routines in the requested dataset. 
Unless read_mask is set in the request, + // only the following fields are populated: + // etag, project_id, dataset_id, routine_id, routine_type, creation_time, + // last_modified_time, language, and remote_function_options. + repeated Routine routines = 1; + + // A token to request the next page of results. + string next_page_token = 2; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/routine_reference.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/routine_reference.proto.baseline new file mode 100755 index 000000000..65ab1ae8d --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/routine_reference.proto.baseline @@ -0,0 +1,37 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "RoutineReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Id path of a routine. +message RoutineReference { + // Required. The ID of the project containing this routine. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the dataset containing this routine. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the routine. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 256 characters. + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/row_access_policy.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/row_access_policy.proto.baseline new file mode 100755 index 000000000..c6eb2e9b7 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/row_access_policy.proto.baseline @@ -0,0 +1,108 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
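Stepping back to the routine service for a moment: ListRoutinesRequest above documents token-based paging and the `routineType:{RoutineType}` filter format. A hedged sketch of how the generated surface is typically driven (the `listRoutinesAsync` iterator and the package name are conventional GAPIC assumptions, not taken from this baseline):

```ts
import {RoutineServiceClient} from '@google-cloud/bigquery-v2'; // assumed package name

// Sketch: list only scalar functions in a dataset, letting the generated
// paging helper follow next_page_token across response pages.
async function listScalarFunctions(projectId: string, datasetId: string) {
  const client = new RoutineServiceClient();
  const request = {projectId, datasetId, filter: 'routineType:SCALAR_FUNCTION'};
  for await (const routine of client.listRoutinesAsync(request)) {
    console.log(routine.routineReference?.routineId);
  }
}
```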
+ +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/row_access_policy_reference.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "RowAccessPolicyProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Service for interacting with row access policies. +service RowAccessPolicyService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Lists all row access policies on the specified table. + rpc ListRowAccessPolicies(ListRowAccessPoliciesRequest) + returns (ListRowAccessPoliciesResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}/rowAccessPolicies" + }; + } +} + +// Request message for the ListRowAccessPolicies method. +message ListRowAccessPoliciesRequest { + // Required. Project ID of the row access policies to list. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of row access policies to list. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Table ID of the table to list row access policies. + string table_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Page token, returned by a previous call, to request the next page of + // results. + string page_token = 4; + + // The maximum number of results to return in a single response page. Leverage + // the page tokens to iterate through the entire collection. + int32 page_size = 5; +} + +// Response message for the ListRowAccessPolicies method. +message ListRowAccessPoliciesResponse { + // Row access policies on the requested table. + repeated RowAccessPolicy row_access_policies = 1; + + // A token to request the next page of results. + string next_page_token = 2; +} + +// Represents access on a subset of rows on the specified table, defined by its +// filter predicate. Access to the subset of rows is controlled by its IAM +// policy. +message RowAccessPolicy { + // Output only. A hash of this resource. + string etag = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Reference describing the ID of this row access policy. + RowAccessPolicyReference row_access_policy_reference = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Required. A SQL boolean expression that represents the rows defined by this + // row access policy, similar to the boolean expression in a WHERE clause of a + // SELECT query on a table. + // References to other tables, routines, and temporary functions are not + // supported. + // + // Examples: region="EU" + // date_field = CAST('2019-9-27' as DATE) + // nullable_field is not NULL + // numeric_field BETWEEN 1.0 AND 5.0 + string filter_predicate = 3 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The time when this row access policy was created, in + // milliseconds since the epoch. + google.protobuf.Timestamp creation_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when this row access policy was last modified, in + // milliseconds since the epoch. 
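The filter_predicate examples above are what a client reads back from this API. A short sketch, assuming the generator derives a `RowAccessPolicyServiceClient` from the service definition (the client and package names are assumptions):

```ts
import {RowAccessPolicyServiceClient} from '@google-cloud/bigquery-v2'; // assumed

// Sketch: print the filter predicate of each row access policy on a table.
async function printPolicies(projectId: string, datasetId: string, tableId: string) {
  const client = new RowAccessPolicyServiceClient();
  const [policies] = await client.listRowAccessPolicies({
    projectId,
    datasetId,
    tableId,
    pageSize: 50, // page_size from the request message above
  });
  for (const policy of policies) {
    console.log(policy.rowAccessPolicyReference?.policyId, policy.filterPredicate);
  }
}
```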
+ google.protobuf.Timestamp last_modified_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/row_access_policy_reference.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/row_access_policy_reference.proto.baseline new file mode 100755 index 000000000..28028dab0 --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/row_access_policy_reference.proto.baseline @@ -0,0 +1,41 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "RowAccessPolicyReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Id path of a row access policy. +message RowAccessPolicyReference { + // Required. The ID of the project containing this row access policy. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the dataset containing this row access policy. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the table containing this row access policy. + string table_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the row access policy. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 256 characters. + string policy_id = 4 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/session_info.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/session_info.proto.baseline new file mode 100755 index 000000000..333ab3b7e --- /dev/null +++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/session_info.proto.baseline @@ -0,0 +1,30 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "SessionInfoProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// [Preview] Information related to sessions. +message SessionInfo { + // Output only. 
The id of the session.
+  string session_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/standard_sql.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/standard_sql.proto.baseline
new file mode 100755
index 000000000..0f63b2d5f
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/standard_sql.proto.baseline
@@ -0,0 +1,166 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "StandardSqlProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// The data type of a variable such as a function argument.
+// Examples include:
+//
+// * INT64: `{"typeKind": "INT64"}`
+//
+// * ARRAY<STRING>:
+//
+//     {
+//       "typeKind": "ARRAY",
+//       "arrayElementType": {"typeKind": "STRING"}
+//     }
+//
+// * STRUCT<x STRING, y ARRAY<DATE>>:
+//
+//     {
+//       "typeKind": "STRUCT",
+//       "structType":
+//       {
+//         "fields":
+//         [
+//           {
+//             "name": "x",
+//             "type": {"typeKind": "STRING"}
+//           },
+//           {
+//             "name": "y",
+//             "type":
+//             {
+//               "typeKind": "ARRAY",
+//               "arrayElementType": {"typeKind": "DATE"}
+//             }
+//           }
+//         ]
+//       }
+//     }
+//
+// * RANGE<DATE>:
+//
+//     {
+//       "typeKind": "RANGE",
+//       "rangeElementType": {"typeKind": "DATE"}
+//     }
+message StandardSqlDataType {
+  // The kind of the datatype.
+  enum TypeKind {
+    // Invalid type.
+    TYPE_KIND_UNSPECIFIED = 0;
+
+    // Encoded as a string in decimal format.
+    INT64 = 2;
+
+    // Encoded as a boolean "false" or "true".
+    BOOL = 5;
+
+    // Encoded as a number, or string "NaN", "Infinity" or "-Infinity".
+    FLOAT64 = 7;
+
+    // Encoded as a string value.
+    STRING = 8;
+
+    // Encoded as a base64 string per RFC 4648, section 4.
+    BYTES = 9;
+
+    // Encoded as an RFC 3339 timestamp with mandatory "Z" time zone string:
+    // 1985-04-12T23:20:50.52Z
+    TIMESTAMP = 19;
+
+    // Encoded as RFC 3339 full-date format string: 1985-04-12
+    DATE = 10;
+
+    // Encoded as RFC 3339 partial-time format string: 23:20:50.52
+    TIME = 20;
+
+    // Encoded as RFC 3339 full-date "T" partial-time: 1985-04-12T23:20:50.52
+    DATETIME = 21;
+
+    // Encoded as fully qualified 3 part: 0-5 15 2:30:45.6
+    INTERVAL = 26;
+
+    // Encoded as WKT
+    GEOGRAPHY = 22;
+
+    // Encoded as a decimal string.
+    NUMERIC = 23;
+
+    // Encoded as a decimal string.
+    BIGNUMERIC = 24;
+
+    // Encoded as a string.
+    JSON = 25;
+
+    // Encoded as a list with types matching Type.array_type.
+    ARRAY = 16;
+
+    // Encoded as a list with fields of type Type.struct_type[i]. List is used
+    // because a JSON object cannot have duplicate field names.
+    STRUCT = 17;
+
+    // Encoded as a pair with types matching range_element_type. Pairs must
+    // begin with "[", end with ")", and be separated by ", ".
+    RANGE = 29;
+  }
+
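The JSON examples in the comment above translate one-to-one into object literals on the generated surface. A sketch of the STRUCT example, with a hypothetical local alias standing in for the generated interface:

```ts
// Hypothetical alias approximating the generated StandardSqlDataType interface.
type SqlType = {
  typeKind: string;
  arrayElementType?: SqlType;
  structType?: {fields: Array<{name?: string; type?: SqlType}>};
  rangeElementType?: SqlType;
};

// STRUCT<x STRING, y ARRAY<DATE>>, built exactly as the comment above shows.
const structType: SqlType = {
  typeKind: 'STRUCT',
  structType: {
    fields: [
      {name: 'x', type: {typeKind: 'STRING'}},
      {
        name: 'y',
        type: {typeKind: 'ARRAY', arrayElementType: {typeKind: 'DATE'}},
      },
    ],
  },
};
```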
+  // Required. The top level type of this field.
+  // Can be any GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY<STRING>").
+  TypeKind type_kind = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // For complex types, the sub type information.
+  oneof sub_type {
+    // The type of the array's elements, if type_kind = "ARRAY".
+    StandardSqlDataType array_element_type = 2;
+
+    // The fields of this struct, in order, if type_kind = "STRUCT".
+    StandardSqlStructType struct_type = 3;
+
+    // The type of the range's elements, if type_kind = "RANGE".
+    StandardSqlDataType range_element_type = 4;
+  }
+}
+
+// A field or a column.
+message StandardSqlField {
+  // Optional. The name of this field. Can be absent for struct fields.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The type of this parameter. Absent if not explicitly
+  // specified (e.g., CREATE FUNCTION statement can omit the return type;
+  // in this case the output parameter does not have this "type" field).
+  StandardSqlDataType type = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The representation of a SQL STRUCT type.
+message StandardSqlStructType {
+  // Fields within the struct.
+  repeated StandardSqlField fields = 1;
+}
+
+// A table type.
+message StandardSqlTableType {
+  // The columns in this table type.
+  repeated StandardSqlField columns = 1;
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/system_variable.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/system_variable.proto.baseline
new file mode 100755
index 000000000..4437f0f4e
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/system_variable.proto.baseline
@@ -0,0 +1,36 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/standard_sql.proto";
+import "google/protobuf/struct.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "SystemVariableProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// System variables given to a query.
+message SystemVariables {
+  // Output only. Data type for each system variable.
+  map<string, StandardSqlDataType> types = 1
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Value for each system variable.
+  google.protobuf.Struct values = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table.proto.baseline
new file mode 100755
index 000000000..9f93ae5df
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table.proto.baseline
@@ -0,0 +1,730 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/biglake_config.proto"; +import "google/cloud/bigquery/v2/clustering.proto"; +import "google/cloud/bigquery/v2/encryption_config.proto"; +import "google/cloud/bigquery/v2/error.proto"; +import "google/cloud/bigquery/v2/external_catalog_table_options.proto"; +import "google/cloud/bigquery/v2/external_data_config.proto"; +import "google/cloud/bigquery/v2/partitioning_definition.proto"; +import "google/cloud/bigquery/v2/privacy_policy.proto"; +import "google/cloud/bigquery/v2/range_partitioning.proto"; +import "google/cloud/bigquery/v2/restriction_config.proto"; +import "google/cloud/bigquery/v2/table_constraints.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/cloud/bigquery/v2/time_partitioning.proto"; +import "google/cloud/bigquery/v2/udf_resource.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "TableProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Table Service. +// +// It should not be relied on for production use cases at this time. +service TableService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Gets the specified table resource by table ID. + // This method does not return the data in the table, it only returns the + // table resource, which describes the structure of this table. + rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}" + }; + } + + // Creates a new, empty table in the dataset. + rpc InsertTable(InsertTableRequest) returns (Table) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables" + body: "table" + }; + } + + // Updates information in an existing table. The update method replaces the + // entire table resource, whereas the patch method only replaces fields that + // are provided in the submitted table resource. + // This method supports RFC5789 patch semantics. + rpc PatchTable(UpdateOrPatchTableRequest) returns (Table) { + option (google.api.http) = { + patch: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}" + body: "table" + }; + } + + // Updates information in an existing table. The update method replaces the + // entire Table resource, whereas the patch method only replaces fields that + // are provided in the submitted Table resource. 
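Before the UpdateTable binding below, a sketch of the patch semantics just described, assuming a generated `TableServiceClient` (the client and package names are assumptions); `patchTable` replaces only the fields present in the submitted resource:

```ts
import {TableServiceClient} from '@google-cloud/bigquery-v2'; // assumed package name

// Sketch: PatchTable only replaces the fields provided in the request,
// so sending just `description` leaves the rest of the table untouched.
async function setDescription(projectId: string, datasetId: string, tableId: string) {
  const client = new TableServiceClient();
  const [table] = await client.patchTable({
    projectId,
    datasetId,
    tableId,
    table: {
      tableReference: {projectId, datasetId, tableId},
      description: {value: 'Nightly load target'}, // google.protobuf.StringValue wrapper
    },
  });
  return table;
}
```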
+  rpc UpdateTable(UpdateOrPatchTableRequest) returns (Table) {
+    option (google.api.http) = {
+      put: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}"
+      body: "table"
+    };
+  }
+
+  // Deletes the table specified by tableId from the dataset.
+  // If the table contains data, all the data will be deleted.
+  rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}"
+    };
+  }
+
+  // Lists all tables in the specified dataset. Requires the READER dataset
+  // role.
+  rpc ListTables(ListTablesRequest) returns (TableList) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables"
+    };
+  }
+}
+
+// Replication info of a table created using `AS REPLICA` DDL like:
+// `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`
+message TableReplicationInfo {
+  // Replication status of the table created using `AS REPLICA` like:
+  // `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`
+  enum ReplicationStatus {
+    // Default value.
+    REPLICATION_STATUS_UNSPECIFIED = 0;
+
+    // Replication is Active with no errors.
+    ACTIVE = 1;
+
+    // Source object is deleted.
+    SOURCE_DELETED = 2;
+
+    // Source revoked replication permissions.
+    PERMISSION_DENIED = 3;
+
+    // Source configuration doesn't allow replication.
+    UNSUPPORTED_CONFIGURATION = 4;
+  }
+
+  // Required. Source table reference that is replicated.
+  TableReference source_table = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Specifies the interval at which the source table is polled for
+  // updates. If not specified, the default replication interval is applied.
+  int64 replication_interval_ms = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. If source is a materialized view, this field
+  // signifies the last refresh time of the source.
+  int64 replicated_source_last_refresh_time = 3 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.field_behavior) = OPTIONAL
+  ];
+
+  // Optional. Output only. Replication status of configured replication.
+  ReplicationStatus replication_status = 4 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.field_behavior) = OPTIONAL
+  ];
+
+  // Optional. Output only. Replication error that permanently stopped
+  // table replication.
+  ErrorProto replication_error = 5 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.field_behavior) = OPTIONAL
+  ];
+}
+
+// Describes the definition of a logical view.
+message ViewDefinition {
+  // Required. A query that BigQuery executes when the view is referenced.
+  string query = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Describes user-defined function resources used in the query.
+  repeated UserDefinedFunctionResource user_defined_function_resources = 2;
+
+  // Specifies whether to use BigQuery's legacy SQL for this view.
+  // The default value is true. If set to false, the view will use
+  // BigQuery's GoogleSQL:
+  // https://cloud.google.com/bigquery/sql-reference/
+  //
+  // Queries and views that reference this view must use the same flag value.
+  // A wrapper is used here because the default value is true.
+  google.protobuf.BoolValue use_legacy_sql = 3;
+
+  // True if the column names are explicitly specified, for example by using
+  // the 'CREATE VIEW v(c1, c2) AS ...' syntax.
+  // Can only be set for GoogleSQL views.
+  bool use_explicit_column_names = 4;
+
+  // Optional. Specifies the privacy policy for the view.
+  PrivacyPolicy privacy_policy = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Foreign view representations.
+  repeated ForeignViewDefinition foreign_definitions = 6
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A view can be represented in multiple ways. Each representation has its own
+// dialect. This message stores the metadata required for these representations.
+message ForeignViewDefinition {
+  // Required. The query that defines the view.
+  string query = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Represents the dialect of the query.
+  string dialect = 7 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Definition and configuration of a materialized view.
+message MaterializedViewDefinition {
+  // Required. A query whose results are persisted.
+  string query = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The time when this materialized view was last refreshed, in
+  // milliseconds since the epoch.
+  int64 last_refresh_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Enable automatic refresh of the materialized view when the base
+  // table is updated. The default value is "true".
+  google.protobuf.BoolValue enable_refresh = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The maximum frequency at which this materialized view will be
+  // refreshed. The default value is "1800000" (30 minutes).
+  google.protobuf.UInt64Value refresh_interval_ms = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. This option declares the intention to construct a materialized
+  // view that isn't refreshed incrementally.
+  google.protobuf.BoolValue allow_non_incremental_definition = 6
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Status of a materialized view.
+// The last refresh timestamp status is omitted here, but is present in the
+// MaterializedViewDefinition message.
+message MaterializedViewStatus {
+  // Output only. Refresh watermark of materialized view. The base tables' data
+  // were collected into the materialized view cache until this time.
+  google.protobuf.Timestamp refresh_watermark = 1
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Error result of the last automatic refresh. If present,
+  // indicates that the last automatic refresh was unsuccessful.
+  ErrorProto last_refresh_status = 2
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Information about base table and snapshot time of the snapshot.
+message SnapshotDefinition {
+  // Required. Reference describing the ID of the table that was snapshot.
+  TableReference base_table_reference = 1
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The time at which the base table was snapshot. This value is
+  // reported in the JSON response using RFC3339 format.
+  google.protobuf.Timestamp snapshot_time = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+// Information about base table and clone time of a table clone.
+message CloneDefinition {
+  // Required. Reference describing the ID of the table that was cloned.
+  TableReference base_table_reference = 1
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The time at which the base table was cloned. This value is
+  // reported in the JSON response using RFC3339 format.
+  google.protobuf.Timestamp clone_time = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+message Streamingbuffer {
+  // Output only. A lower-bound estimate of the number of bytes currently in
+  // the streaming buffer.
+  uint64 estimated_bytes = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A lower-bound estimate of the number of rows currently in the
+  // streaming buffer.
+  uint64 estimated_rows = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Contains the timestamp of the oldest entry in the streaming
+  // buffer, in milliseconds since the epoch, if the streaming buffer is
+  // available.
+  fixed64 oldest_entry_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+message Table {
+  // The type of resource ID.
+  string kind = 1;
+
+  // Output only. A hash of this resource.
+  string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. An opaque ID uniquely identifying the table.
+  string id = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A URL that can be used to access this resource again.
+  string self_link = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. Reference describing the ID of this table.
+  TableReference table_reference = 5 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. A descriptive name for this table.
+  google.protobuf.StringValue friendly_name = 6
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A user-friendly description of this table.
+  google.protobuf.StringValue description = 7
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // The labels associated with this table. You can use these to organize and
+  // group your tables. Label keys and values can be no longer than 63
+  // characters, can only contain lowercase letters, numeric characters,
+  // underscores and dashes. International characters are allowed. Label values
+  // are optional. Label keys must start with a letter and each label in the
+  // list must have a different key.
+  map<string, string> labels = 8;
+
+  // Optional. Describes the schema of this table.
+  TableSchema schema = 9 [(google.api.field_behavior) = OPTIONAL];
+
+  // If specified, configures time-based partitioning for this table.
+  TimePartitioning time_partitioning = 10;
+
+  // If specified, configures range partitioning for this table.
+  RangePartitioning range_partitioning = 27;
+
+  // Clustering specification for the table. Must be specified with time-based
+  // partitioning; data in the table will be first partitioned and subsequently
+  // clustered.
+  Clustering clustering = 23;
+
+  // Optional. If set to true, queries over this table require
+  // a partition filter that can be used for partition elimination to be
+  // specified.
+  google.protobuf.BoolValue require_partition_filter = 28
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The partition information for all table formats, including
+  // managed partitioned tables, hive partitioned tables, iceberg partitioned,
+  // and metastore partitioned tables. This field is only populated for
+  // metastore partitioned tables. For other table formats, this is an output
+  // only field.
+  optional PartitioningDefinition partition_definition = 51
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The size of this table in logical bytes, excluding any data in
+  // the streaming buffer.
+  google.protobuf.Int64Value num_bytes = 11
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The physical size of this table in bytes. This includes
+  // storage used for time travel.
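The fields above (schema, time_partitioning, clustering, labels) are what a creation call populates. A hedged sketch, reusing the assumed `TableServiceClient`; the TimePartitioning and Clustering shapes come from the imported protos and are sketched from the public v2 API rather than this baseline:

```ts
import {TableServiceClient} from '@google-cloud/bigquery-v2'; // assumed package name

// Sketch: create a day-partitioned, clustered table, per the Table fields above.
async function createEventsTable(projectId: string, datasetId: string) {
  const client = new TableServiceClient();
  const [table] = await client.insertTable({
    projectId,
    datasetId,
    table: {
      tableReference: {projectId, datasetId, tableId: 'events'},
      schema: {
        fields: [
          {name: 'event_ts', type: 'TIMESTAMP'},
          {name: 'user_id', type: 'STRING'},
        ],
      },
      timePartitioning: {type: 'DAY', field: 'event_ts'},
      clustering: {fields: ['user_id']},
      labels: {team: 'data-platform'}, // map<string, string> labels
    },
  });
  return table;
}
```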
+ google.protobuf.Int64Value num_physical_bytes = 26 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of logical bytes in the table that are considered + // "long-term storage". + google.protobuf.Int64Value num_long_term_bytes = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of rows of data in this table, excluding any data + // in the streaming buffer. + google.protobuf.UInt64Value num_rows = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when this table was created, in milliseconds since + // the epoch. + int64 creation_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The time when this table expires, in milliseconds since the + // epoch. If not present, the table will persist indefinitely. Expired tables + // will be deleted and their storage reclaimed. The defaultTableExpirationMs + // property of the encapsulating dataset can be used to set a default + // expirationTime on newly created tables. + google.protobuf.Int64Value expiration_time = 15 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The time when this table was last modified, in milliseconds + // since the epoch. + fixed64 last_modified_time = 16 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes the table type. The following values are supported: + // + // * `TABLE`: A normal BigQuery table. + // * `VIEW`: A virtual table defined by a SQL query. + // * `EXTERNAL`: A table that references data stored in an external storage + // system, such as Google Cloud Storage. + // * `MATERIALIZED_VIEW`: A precomputed view defined by a SQL query. + // * `SNAPSHOT`: An immutable BigQuery table that preserves the contents of a + // base table at a particular time. See additional information on + // [table + // snapshots](https://cloud.google.com/bigquery/docs/table-snapshots-intro). + // + // The default value is `TABLE`. + string type = 17 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The view definition. + ViewDefinition view = 18 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The materialized view definition. + MaterializedViewDefinition materialized_view = 25 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The materialized view status. + MaterializedViewStatus materialized_view_status = 42 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Describes the data format, location, and other properties of + // a table stored outside of BigQuery. By defining these properties, the data + // source can then be queried as if it were a standard BigQuery table. + ExternalDataConfiguration external_data_configuration = 19 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the configuration of a BigLake managed table. + BigLakeConfiguration biglake_configuration = 45 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The geographic location where the table resides. This value + // is inherited from the dataset. + string location = 20 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Contains information regarding this table's streaming buffer, + // if one is present. This field will be absent if the table is not being + // streamed to or if there is no data in the streaming buffer. + Streamingbuffer streaming_buffer = 21 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Custom encryption configuration (e.g., Cloud KMS keys). 
+ EncryptionConfiguration encryption_configuration = 22; + + // Output only. Contains information about the snapshot. This value is set via + // snapshot creation. + SnapshotDefinition snapshot_definition = 29 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Defines the default collation specification of new STRING fields + // in the table. During table creation or update, if a STRING field is added + // to this table without explicit collation specified, then the table inherits + // the table default collation. A change to this field affects only fields + // added afterwards, and does not alter the existing fields. + // The following values are supported: + // + // * 'und:ci': undetermined locale, case insensitive. + // * '': empty string. Default to case-sensitive behavior. + google.protobuf.StringValue default_collation = 30 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Defines the default rounding mode specification of new decimal + // fields (NUMERIC OR BIGNUMERIC) in the table. During table creation or + // update, if a decimal field is added to this table without an explicit + // rounding mode specified, then the field inherits the table default + // rounding mode. Changing this field doesn't affect existing fields. + TableFieldSchema.RoundingMode default_rounding_mode = 44 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Contains information about the clone. This value is set via + // the clone operation. + CloneDefinition clone_definition = 31 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of physical bytes used by time travel storage (deleted + // or changed data). This data is not kept in real time, and might be delayed + // by a few seconds to a few minutes. + google.protobuf.Int64Value num_time_travel_physical_bytes = 33 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total number of logical bytes in the table or materialized + // view. + google.protobuf.Int64Value num_total_logical_bytes = 34 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of logical bytes that are less than 90 days old. + google.protobuf.Int64Value num_active_logical_bytes = 35 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of logical bytes that are more than 90 days old. + google.protobuf.Int64Value num_long_term_logical_bytes = 36 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of physical bytes used by current live data storage. + // This data is not kept in real time, and might be delayed by a few seconds + // to a few minutes. + google.protobuf.Int64Value num_current_physical_bytes = 53 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The physical size of this table in bytes. This also includes + // storage used for time travel. This data is not kept in real time, and might + // be delayed by a few seconds to a few minutes. + google.protobuf.Int64Value num_total_physical_bytes = 37 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of physical bytes less than 90 days old. This data is + // not kept in real time, and might be delayed by a few seconds to a few + // minutes. + google.protobuf.Int64Value num_active_physical_bytes = 38 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of physical bytes more than 90 days old. + // This data is not kept in real time, and might be delayed by a few seconds + // to a few minutes. 
+  google.protobuf.Int64Value num_long_term_physical_bytes = 39
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The number of partitions present in the table or materialized
+  // view. This data is not kept in real time, and might be delayed by a few
+  // seconds to a few minutes.
+  google.protobuf.Int64Value num_partitions = 40
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The maximum staleness of data that could be returned when the
+  // table (or stale MV) is queried. Staleness encoded as a string encoding
+  // of sql IntervalValue type.
+  string max_staleness = 41 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. Restriction config for table. If set, restrict
+  // certain accesses on the table based on the config. See [Data
+  // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress)
+  // for more details.
+  RestrictionConfig restrictions = 46 [
+    (google.api.field_behavior) = OPTIONAL,
+    (google.api.field_behavior) = OUTPUT_ONLY
+  ];
+
+  // Optional. The table's Primary Key and Foreign Key information.
+  TableConstraints table_constraints = 47
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The [tags](https://cloud.google.com/bigquery/docs/tags) attached
+  // to this table. Tag keys are globally unique. Tag key is expected to be in
+  // the namespaced format, for example "123456789012/environment" where
+  // 123456789012 is the ID of the parent organization or project resource for
+  // this tag key. Tag value is expected to be the short name, for example
+  // "Production". See [Tag
+  // definitions](https://cloud.google.com/iam/docs/tags-access-control#definitions)
+  // for more details.
+  map<string, string> resource_tags = 48
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Table replication info for a table created using `AS REPLICA`
+  // DDL like: `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`
+  TableReplicationInfo table_replication_info = 49
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. Table references of all replicas currently active on
+  // the table.
+  repeated TableReference replicas = 50 [
+    (google.api.field_behavior) = OPTIONAL,
+    (google.api.field_behavior) = OUTPUT_ONLY
+  ];
+
+  // Optional. Options defining an open source compatible table.
+  ExternalCatalogTableOptions external_catalog_table_options = 54
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for getting table metadata.
+message GetTableRequest {
+  // TableMetadataView specifies which table information is returned.
+  enum TableMetadataView {
+    // The default value.
+    // Default to the STORAGE_STATS view.
+    TABLE_METADATA_VIEW_UNSPECIFIED = 0;
+
+    // Includes basic table information including schema and
+    // partitioning specification. This view does not include storage statistics
+    // such as numRows or numBytes. This view is significantly more efficient
+    // and should be used to support high query rates.
+    BASIC = 1;
+
+    // Includes all information in the BASIC view as well as storage statistics
+    // (numBytes, numLongTermBytes, numRows and lastModifiedTime).
+    STORAGE_STATS = 2;
+
+    // Includes all table information, including storage statistics.
+    // It returns same information as STORAGE_STATS view, but may contain
+    // additional information in the future.
+    FULL = 3;
+  }
+
+  // Required. Project ID of the requested table
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the requested table
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Table ID of the requested table
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // List of table schema fields to return (comma-separated).
+  // If unspecified, all fields are returned.
+  // A fieldMask cannot be used here because the fields will automatically be
+  // converted from camelCase to snake_case and the conversion will fail if
+  // there are underscores. Since these are fields in BigQuery table schemas,
+  // underscores are allowed.
+  string selected_fields = 4;
+
+  // Optional. Specifies the view that determines which table information is
+  // returned. By default, basic table information and storage statistics
+  // (STORAGE_STATS) are returned.
+  TableMetadataView view = 5 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for inserting table metadata.
+message InsertTableRequest {
+  // Required. Project ID of the new table
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the new table
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. A tables resource to insert
+  Table table = 4 [(google.api.field_behavior) = REQUIRED];
+}
+
+message UpdateOrPatchTableRequest {
+  // Required. Project ID of the table to update
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the table to update
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Table ID of the table to update
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. A tables resource which will replace or patch the specified table
+  Table table = 4 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. When true, the schema is autodetected; otherwise, the original
+  // schema is kept.
+  bool autodetect_schema = 5 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for deleting a table.
+message DeleteTableRequest {
+  // Required. Project ID of the table to delete
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the table to delete
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Table ID of the table to delete
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for enumerating tables.
+message ListTablesRequest {
+  // Required. Project ID of the tables to list
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the tables to list
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // The maximum number of results to return in a single response page.
+  // Leverage the page tokens to iterate through the entire collection.
+  google.protobuf.UInt32Value max_results = 3;
+
+  // Page token, returned by a previous call, to request the next page of
+  // results
+  string page_token = 4;
+}
+
+// Information about a logical view.
+message ListFormatView {
+  // True if the view is defined in the legacy SQL dialect,
+  // false if in GoogleSQL.
+  google.protobuf.BoolValue use_legacy_sql = 1;
+
+  // Specifies the privacy policy for the view.
+  PrivacyPolicy privacy_policy = 2;
+}
+
+message ListFormatTable {
+  // The resource type.
+  string kind = 1;
+
+  // An opaque ID of the table.
+  string id = 2;
+
+  // A reference uniquely identifying the table.
+  TableReference table_reference = 3;
+
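A brief sketch of a metadata-only read using the TableMetadataView enum above, again with the assumed `TableServiceClient` and package name:

```ts
import {TableServiceClient} from '@google-cloud/bigquery-v2'; // assumed package name

// Sketch: fetch only basic metadata (no storage statistics), which the
// TableMetadataView docs above recommend for high query rates.
async function getTableBasics(projectId: string, datasetId: string, tableId: string) {
  const client = new TableServiceClient();
  const [table] = await client.getTable({
    projectId,
    datasetId,
    tableId,
    view: 'BASIC', // TableMetadataView; the default is STORAGE_STATS
  });
  console.log(table.schema?.fields?.map(f => f.name));
  return table;
}
```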
+  // The user-friendly name for this table.
+  google.protobuf.StringValue friendly_name = 4;
+
+  // The type of table.
+  string type = 5;
+
+  // The time-based partitioning for this table.
+  TimePartitioning time_partitioning = 6;
+
+  // The range partitioning for this table.
+  RangePartitioning range_partitioning = 12;
+
+  // Clustering specification for this table, if configured.
+  Clustering clustering = 11;
+
+  // The labels associated with this table. You can use these to organize
+  // and group your tables.
+  map<string, string> labels = 7;
+
+  // Additional details for a view.
+  ListFormatView view = 8;
+
+  // Output only. The time when this table was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The time when this table expires, in milliseconds since the
+  // epoch. If not present, the table will persist indefinitely. Expired tables
+  // will be deleted and their storage reclaimed.
+  int64 expiration_time = 10;
+
+  // Optional. If set to true, queries including this table must specify a
+  // partition filter. This filter is used for partition elimination.
+  google.protobuf.BoolValue require_partition_filter = 14
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Partial projection of the metadata for a given table in a list response.
+message TableList {
+  // The type of list.
+  string kind = 1;
+
+  // A hash of this page of results.
+  string etag = 2;
+
+  // A token to request the next page of results.
+  string next_page_token = 3;
+
+  // Tables in the requested dataset.
+  repeated ListFormatTable tables = 4;
+
+  // The total number of tables in the dataset.
+  google.protobuf.Int32Value total_items = 5;
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table_constraints.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table_constraints.proto.baseline
new file mode 100755
index 000000000..13edc6bb9
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table_constraints.proto.baseline
@@ -0,0 +1,66 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/table_reference.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "TableConstraintsProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Represents the primary key constraint on a table's columns.
+message PrimaryKey {
+  // Required. The columns that compose the primary key constraint.
+  repeated string columns = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The pair of the foreign key column and primary key column.
+message ColumnReference {
+  // Required. The column that composes the foreign key.
+  string referencing_column = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The column in the primary key that is referenced by the
+  // referencing_column.
+ string referenced_column = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Represents a foreign key constraint on a table's columns.
+message ForeignKey {
+ // Optional. Set only if the foreign key constraint is named.
+ string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+ // Required. The table that holds the primary key and is referenced by this
+ // foreign key.
+ TableReference referenced_table = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The columns that compose the foreign key.
+ repeated ColumnReference column_references = 3
+ [(google.api.field_behavior) = REQUIRED];
+}
+
+// TableConstraints defines the primary key and foreign key constraints on a
+// table.
+message TableConstraints {
+ // Optional. Represents a primary key constraint on a table's columns.
+ // Present only if the table has a primary key.
+ // The primary key is not enforced.
+ PrimaryKey primary_key = 1 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Present only if the table has a foreign key.
+ // The foreign key is not enforced.
+ repeated ForeignKey foreign_keys = 2 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table_reference.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table_reference.proto.baseline
new file mode 100755
index 000000000..e6e9a1b35
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table_reference.proto.baseline
@@ -0,0 +1,40 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "TableReferenceProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// A reference that uniquely identifies a table.
+message TableReference {
+ // Required. The ID of the project containing this table.
+ string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The ID of the dataset containing this table.
+ string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The ID of the table. The ID can contain Unicode characters in
+ // category L (letter), M (mark), N (number), Pc (connector, including
+ // underscore), Pd (dash), and Zs (space). For more information, see [General
+ // Category](https://wikipedia.org/wiki/Unicode_character_property#General_Category).
+ // The maximum length is 1,024 characters. Certain operations allow suffixing
+ // of the table ID with a partition decorator, such as
+ // `sample_table$20190123`.
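+ // As an illustration (hypothetical IDs), the reference
+ // {project_id: "my-project", dataset_id: "my_dataset", table_id: "my_table"}
+ // identifies the table `my-project.my_dataset.my_table`.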
+ string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table_schema.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table_schema.proto.baseline
new file mode 100755
index 000000000..8a56f8e87
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/table_schema.proto.baseline
@@ -0,0 +1,233 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "TableSchemaProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Schema of a table
+message TableSchema {
+ // Describes the fields in a table.
+ repeated TableFieldSchema fields = 1;
+
+ // Optional. Specifies metadata of the foreign data type definition in field
+ // schema
+ // ([TableFieldSchema.foreign_type_definition][google.cloud.bigquery.v2.TableFieldSchema.foreign_type_definition]).
+ ForeignTypeInfo foreign_type_info = 3
+ [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Metadata about the foreign data type definition such as the system
+// in which the type is defined.
+message ForeignTypeInfo {
+ // External systems, such as query engines or table formats, that have their
+ // own data types.
+ enum TypeSystem {
+ // TypeSystem not specified.
+ TYPE_SYSTEM_UNSPECIFIED = 0;
+
+ // Represents Hive data types.
+ HIVE = 1;
+ }
+
+ // Required. Specifies the system which defines the foreign data type.
+ TypeSystem type_system = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Data policy option proto. It currently supports name only and will support
+// precedence later.
+message DataPolicyOption {
+ // Data policy resource name in the form of
+ // projects/project_id/locations/location_id/dataPolicies/data_policy_id.
+ optional string name = 1;
+}
+
+// A field in TableSchema
+message TableFieldSchema {
+ message PolicyTagList {
+ // A list of policy tag resource names. For example,
+ // "projects/1/locations/eu/taxonomies/2/policyTags/3". At most 1 policy tag
+ // is currently allowed.
+ repeated string names = 1;
+ }
+
+ // Rounding mode options that can be used when storing NUMERIC
+ // or BIGNUMERIC values.
+ enum RoundingMode {
+ // Unspecified will default to using ROUND_HALF_AWAY_FROM_ZERO.
+ ROUNDING_MODE_UNSPECIFIED = 0;
+
+ // ROUND_HALF_AWAY_FROM_ZERO rounds half values away from zero
+ // when applying precision and scale upon writing of NUMERIC and BIGNUMERIC
+ // values.
+ // For Scale: 0
+ // 1.1, 1.2, 1.3, 1.4 => 1
+ // 1.5, 1.6, 1.7, 1.8, 1.9 => 2
+ ROUND_HALF_AWAY_FROM_ZERO = 1;
+
+ // ROUND_HALF_EVEN rounds half values to the nearest even value
+ // when applying precision and scale upon writing of NUMERIC and BIGNUMERIC
+ // values.
+ // For Scale: 0
+ // 1.1, 1.2, 1.3, 1.4 => 1
+ // 1.5 => 2
+ // 1.6, 1.7, 1.8, 1.9 => 2
+ // 2.5 => 2
+ ROUND_HALF_EVEN = 2;
+ }
+
+ // Represents the type of a field element.
+ message FieldElementType {
+ // Required. The type of a field element. For more information, see
+ // [TableFieldSchema.type][google.cloud.bigquery.v2.TableFieldSchema.type].
+ string type = 1 [(google.api.field_behavior) = REQUIRED];
+ }
+
+ // Required. The field name. The name must contain only letters (a-z, A-Z),
+ // numbers (0-9), or underscores (_), and must start with a letter or
+ // underscore. The maximum length is 300 characters.
+ string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The field data type. Possible values include:
+ //
+ // * STRING
+ // * BYTES
+ // * INTEGER (or INT64)
+ // * FLOAT (or FLOAT64)
+ // * BOOLEAN (or BOOL)
+ // * TIMESTAMP
+ // * DATE
+ // * TIME
+ // * DATETIME
+ // * GEOGRAPHY
+ // * NUMERIC
+ // * BIGNUMERIC
+ // * JSON
+ // * RECORD (or STRUCT)
+ // * RANGE
+ //
+ // Use of RECORD/STRUCT indicates that the field contains a nested schema.
+ string type = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional. The field mode. Possible values include NULLABLE, REQUIRED and
+ // REPEATED. The default value is NULLABLE.
+ string mode = 3 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Describes the nested schema fields if the type property is set
+ // to RECORD.
+ repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The field description. The maximum length is 1,024 characters.
+ google.protobuf.StringValue description = 6
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The policy tags attached to this field, used for field-level
+ // access control. If not set, defaults to empty policy_tags.
+ PolicyTagList policy_tags = 9 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Data policy options, will replace the data_policies.
+ repeated DataPolicyOption data_policies = 21
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Maximum length of values of this field for STRINGS or BYTES.
+ //
+ // If max_length is not specified, no maximum length constraint is imposed
+ // on this field.
+ //
+ // If type = "STRING", then max_length represents the maximum UTF-8
+ // length of strings in this field.
+ //
+ // If type = "BYTES", then max_length represents the maximum number of
+ // bytes in this field.
+ //
+ // It is invalid to set this field if type ≠ "STRING" and ≠ "BYTES".
+ int64 max_length = 10 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Precision (maximum number of total digits in base 10) and scale
+ // (maximum number of digits in the fractional part in base 10) constraints
+ // for values of this field for NUMERIC or BIGNUMERIC.
+ //
+ // It is invalid to set precision or scale if type ≠ "NUMERIC" and ≠
+ // "BIGNUMERIC".
+ //
+ // If precision and scale are not specified, no value range constraint is
+ // imposed on this field insofar as values are permitted by the type.
+ //
+ // Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+ //
+ // * Precision (P) and scale (S) are specified:
+ // [-10^(P-S) + 10^(-S),
+ // 10^(P-S) - 10^(-S)]
+ // * Precision (P) is specified but not scale (and thus scale is
+ // interpreted to be equal to zero):
+ // [-10^P + 1, 10^P - 1].
+ //
+ // Acceptable values for precision and scale if both are specified:
+ //
+ // * If type = "NUMERIC":
+ // 1 ≤ precision - scale ≤ 29 and 0 ≤ scale ≤ 9.
+ // * If type = "BIGNUMERIC":
+ // 1 ≤ precision - scale ≤ 38 and 0 ≤ scale ≤ 38.
+ //
+ // Acceptable values for precision if only precision is specified but not
+ // scale (and thus scale is interpreted to be equal to zero):
+ //
+ // * If type = "NUMERIC": 1 ≤ precision ≤ 29.
+ // * If type = "BIGNUMERIC": 1 ≤ precision ≤ 38.
+ //
+ // If scale is specified but not precision, then it is invalid.
+ int64 precision = 11 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. See documentation for precision.
+ int64 scale = 12 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Specifies the rounding mode to be used when storing values of
+ // NUMERIC and BIGNUMERIC type.
+ RoundingMode rounding_mode = 15 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Field collation can be set only when the type of the field is
+ // STRING. The following values are supported:
+ //
+ // * 'und:ci': undetermined locale, case insensitive.
+ // * '': empty string. Defaults to case-sensitive behavior.
+ google.protobuf.StringValue collation = 13
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. A SQL expression to specify the [default value]
+ // (https://cloud.google.com/bigquery/docs/default-values) for this field.
+ google.protobuf.StringValue default_value_expression = 14
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The subtype of the RANGE, if the type of this field is RANGE. If
+ // the type is RANGE, this field is required. Values for the field element
+ // type can be the following:
+ //
+ // * DATE
+ // * DATETIME
+ // * TIMESTAMP
+ FieldElementType range_element_type = 18
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Definition of the foreign data type.
+ // Only valid for top-level schema fields (not nested fields).
+ // If the type is FOREIGN, this field is required.
+ string foreign_type_definition = 23 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/time_partitioning.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/time_partitioning.proto.baseline
new file mode 100755
index 000000000..440a20983
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/time_partitioning.proto.baseline
@@ -0,0 +1,44 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "TimePartitioningProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+message TimePartitioning {
+ // Required. The supported types are DAY, HOUR, MONTH, and YEAR, which will
+ // generate one partition per day, hour, month, and year, respectively.
+ string type = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional.
Number of milliseconds for which to keep the storage for a
+ // partition.
+ // A wrapper is used here because 0 is an invalid value.
+ google.protobuf.Int64Value expiration_ms = 2
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. If not set, the table is partitioned by pseudo
+ // column '_PARTITIONTIME'; if set, the table is partitioned by this field.
+ // The field must be a top-level TIMESTAMP or DATE field. Its mode must be
+ // NULLABLE or REQUIRED.
+ // A wrapper is used here because an empty string is an invalid value.
+ google.protobuf.StringValue field = 3
+ [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/udf_resource.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/udf_resource.proto.baseline
new file mode 100755
index 000000000..d630bf9c7
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/bigquery/v2/udf_resource.proto.baseline
@@ -0,0 +1,42 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "UdfProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+//
+// This is used for defining User Defined Function (UDF) resources only when
+// using legacy SQL. Users of GoogleSQL should leverage either DDL (e.g.
+// CREATE [TEMPORARY] FUNCTION ... ) or the Routines API to define UDF
+// resources.
+//
+// For additional information on migrating, see:
+// https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions
+message UserDefinedFunctionResource {
+ // [Pick one] A code resource to load from a Google Cloud Storage URI
+ // (gs://bucket/path).
+ google.protobuf.StringValue resource_uri = 1;
+
+ // [Pick one] An inline resource that contains code for a user-defined
+ // function (UDF). Providing an inline code resource is equivalent to
+ // providing a URI for a file containing the same code.
+ google.protobuf.StringValue inline_code = 2;
+}
diff --git a/baselines/bigquery-v2-esm/protos/google/cloud/common_resources.proto.baseline b/baselines/bigquery-v2-esm/protos/google/cloud/common_resources.proto.baseline
new file mode 100755
index 000000000..a2f46cea3
--- /dev/null
+++ b/baselines/bigquery-v2-esm/protos/google/cloud/common_resources.proto.baseline
@@ -0,0 +1,52 @@
+// Copyright 2020 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file contains stub messages for common resources in GCP. +// It is not intended to be directly generated, and is instead used by +// other tooling to be able to match common resource patterns. +syntax = "proto3"; + +package google.cloud; + +import "google/api/resource.proto"; + + +option (google.api.resource_definition) = { + type: "cloudresourcemanager.googleapis.com/Project" + pattern: "projects/{project}" +}; + + +option (google.api.resource_definition) = { + type: "cloudresourcemanager.googleapis.com/Organization" + pattern: "organizations/{organization}" +}; + + +option (google.api.resource_definition) = { + type: "cloudresourcemanager.googleapis.com/Folder" + pattern: "folders/{folder}" +}; + + +option (google.api.resource_definition) = { + type: "cloudbilling.googleapis.com/BillingAccount" + pattern: "billingAccounts/{billing_account}" +}; + +option (google.api.resource_definition) = { + type: "locations.googleapis.com/Location" + pattern: "projects/{project}/locations/{location}" +}; + diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.delete_dataset.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.delete_dataset.js.baseline new file mode 100644 index 000000000..e54a23f56 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.delete_dataset.js.baseline @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_DatasetService_DeleteDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the dataset being deleted + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of dataset being deleted + */ + // const datasetId = 'abc123' + /** + * If True, delete all the tables in the dataset. + * If False and the dataset contains tables, the request will fail. 
+ * Default is False + */ + // const deleteContents = true + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callDeleteDataset() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const response = await bigqueryClient.deleteDataset(request); + console.log(response); + } + + callDeleteDataset(); + // [END bigquery_v2_generated_DatasetService_DeleteDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.get_dataset.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.get_dataset.js.baseline new file mode 100644 index 000000000..e722d561d --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.get_dataset.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_DatasetService_GetDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the requested dataset + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the requested dataset + */ + // const datasetId = 'abc123' + /** + * Optional. Specifies the view that determines which dataset information is + * returned. By default, metadata and ACL information are returned. 
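+   * For example (illustrative; check this API's generated DatasetView enum
+   * for the valid values): datasetView = 'FULL'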
+ */ + // const datasetView = {} + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callGetDataset() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const response = await bigqueryClient.getDataset(request); + console.log(response); + } + + callGetDataset(); + // [END bigquery_v2_generated_DatasetService_GetDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.insert_dataset.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.insert_dataset.js.baseline new file mode 100644 index 000000000..a87a9ae12 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.insert_dataset.js.baseline @@ -0,0 +1,66 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, dataset) { + // [START bigquery_v2_generated_DatasetService_InsertDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the new dataset + */ + // const projectId = 'abc123' + /** + * Required. 
Datasets resource to use for the new dataset
+   */
+  // const dataset = {}
+
+  // Imports the Bigquery library
+  const {DatasetServiceClient} = require('bigquery').v2;
+
+  // Instantiates a client
+  const bigqueryClient = new DatasetServiceClient();
+
+  async function callInsertDataset() {
+    // Construct request
+    const request = {
+      projectId,
+      dataset,
+    };
+
+    // Run request
+    const response = await bigqueryClient.insertDataset(request);
+    console.log(response);
+  }
+
+  callInsertDataset();
+  // [END bigquery_v2_generated_DatasetService_InsertDataset_async]
+}
+
+process.on('unhandledRejection', err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
+main(...process.argv.slice(2));
diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.list_datasets.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.list_datasets.js.baseline
new file mode 100644
index 000000000..cf5b38f50
--- /dev/null
+++ b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.list_datasets.js.baseline
@@ -0,0 +1,87 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+
+
+'use strict';
+
+function main(projectId) {
+  // [START bigquery_v2_generated_DatasetService_ListDatasets_async]
+  /**
+   * This snippet has been automatically generated and should be regarded as a code template only.
+   * It will require modifications to work.
+   * It may require correct/in-range values for request initialization.
+   * TODO(developer): Uncomment these variables before running the sample.
+   */
+  /**
+   * Required. Project ID of the datasets to be listed
+   */
+  // const projectId = 'abc123'
+  /**
+   * The maximum number of results to return in a single response page.
+   * Leverage the page tokens to iterate through the entire collection.
+   */
+  // const maxResults = 1234
+  /**
+   * Page token, returned by a previous call, to request the next page of
+   * results
+   */
+  // const pageToken = 'abc123'
+  /**
+   * Whether to list all datasets, including hidden ones
+   */
+  // const all = true
+  /**
+   * An expression for filtering the results of the request by label.
+   * The syntax is `labels.<name>[:<value>]`.
+   * Multiple filters can be ANDed together by connecting with a space.
+   * Example: `labels.department:receiving labels.active`.
+   * See Filtering datasets using
+   * labels (https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+   * for details.
+ */ + // const filter = 'abc123' + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callListDatasets() { + // Construct request + const request = { + projectId, + }; + + // Run request + const iterable = bigqueryClient.listDatasetsAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListDatasets(); + // [END bigquery_v2_generated_DatasetService_ListDatasets_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.patch_dataset.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.patch_dataset.js.baseline new file mode 100644 index 000000000..65948dcc5 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.patch_dataset.js.baseline @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, dataset) { + // [START bigquery_v2_generated_DatasetService_PatchDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the dataset being updated + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the dataset being updated + */ + // const datasetId = 'abc123' + /** + * Required. Datasets resource which will replace or patch the specified + * dataset. 
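+   * With patch semantics, only the fields that are provided are replaced; for
+   * example (illustrative value): dataset = {description: 'Nightly ETL staging'}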
+ */ + // const dataset = {} + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callPatchDataset() { + // Construct request + const request = { + projectId, + datasetId, + dataset, + }; + + // Run request + const response = await bigqueryClient.patchDataset(request); + console.log(response); + } + + callPatchDataset(); + // [END bigquery_v2_generated_DatasetService_PatchDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.undelete_dataset.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.undelete_dataset.js.baseline new file mode 100644 index 000000000..de5b4b62c --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.undelete_dataset.js.baseline @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_DatasetService_UndeleteDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the dataset to be undeleted + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of dataset being deleted + */ + // const datasetId = 'abc123' + /** + * Optional. The exact time when the dataset was deleted. If not specified, + * the most recently deleted version is undeleted. Undeleting a dataset + * using deletion time is not supported. 
+ */ + // const deletionTime = {} + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callUndeleteDataset() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const response = await bigqueryClient.undeleteDataset(request); + console.log(response); + } + + callUndeleteDataset(); + // [END bigquery_v2_generated_DatasetService_UndeleteDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.update_dataset.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.update_dataset.js.baseline new file mode 100644 index 000000000..f047b5fea --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/dataset_service.update_dataset.js.baseline @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, dataset) { + // [START bigquery_v2_generated_DatasetService_UpdateDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the dataset being updated + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the dataset being updated + */ + // const datasetId = 'abc123' + /** + * Required. Datasets resource which will replace or patch the specified + * dataset. 
+ */ + // const dataset = {} + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callUpdateDataset() { + // Construct request + const request = { + projectId, + datasetId, + dataset, + }; + + // Run request + const response = await bigqueryClient.updateDataset(request); + console.log(response); + } + + callUpdateDataset(); + // [END bigquery_v2_generated_DatasetService_UpdateDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/job_service.cancel_job.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.cancel_job.js.baseline new file mode 100644 index 000000000..7a36ce1cf --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.cancel_job.js.baseline @@ -0,0 +1,77 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, jobId) { + // [START bigquery_v2_generated_JobService_CancelJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the job to cancel + */ + // const projectId = 'abc123' + /** + * Required. Job ID of the job to cancel + */ + // const jobId = 'abc123' + /** + * The geographic location of the job. You must specify the location to run + * the job for the following scenarios: + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. 
+ */ + // const location = 'abc123' + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callCancelJob() { + // Construct request + const request = { + projectId, + jobId, + }; + + // Run request + const response = await bigqueryClient.cancelJob(request); + console.log(response); + } + + callCancelJob(); + // [END bigquery_v2_generated_JobService_CancelJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/job_service.delete_job.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.delete_job.js.baseline new file mode 100644 index 000000000..5693afaa9 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.delete_job.js.baseline @@ -0,0 +1,75 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, jobId) { + // [START bigquery_v2_generated_JobService_DeleteJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the job for which metadata is to be deleted. + */ + // const projectId = 'abc123' + /** + * Required. Job ID of the job for which metadata is to be deleted. If this is + * a parent job which has child jobs, the metadata from all child jobs will be + * deleted as well. Direct deletion of the metadata of child jobs is not + * allowed. + */ + // const jobId = 'abc123' + /** + * The geographic location of the job. Required. + * See details at: + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. 
+ */ + // const location = 'abc123' + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callDeleteJob() { + // Construct request + const request = { + projectId, + jobId, + }; + + // Run request + const response = await bigqueryClient.deleteJob(request); + console.log(response); + } + + callDeleteJob(); + // [END bigquery_v2_generated_JobService_DeleteJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/job_service.get_job.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.get_job.js.baseline new file mode 100644 index 000000000..1d5b9f2bf --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.get_job.js.baseline @@ -0,0 +1,77 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, jobId) { + // [START bigquery_v2_generated_JobService_GetJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the requested job. + */ + // const projectId = 'abc123' + /** + * Required. Job ID of the requested job. + */ + // const jobId = 'abc123' + /** + * The geographic location of the job. You must specify the location to run + * the job for the following scenarios: + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. 
+ */ + // const location = 'abc123' + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callGetJob() { + // Construct request + const request = { + projectId, + jobId, + }; + + // Run request + const response = await bigqueryClient.getJob(request); + console.log(response); + } + + callGetJob(); + // [END bigquery_v2_generated_JobService_GetJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/job_service.get_query_results.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.get_query_results.js.baseline new file mode 100644 index 000000000..cd708d171 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.get_query_results.js.baseline @@ -0,0 +1,109 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, jobId) { + // [START bigquery_v2_generated_JobService_GetQueryResults_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the query job. + */ + // const projectId = 'abc123' + /** + * Required. Job ID of the query job. + */ + // const jobId = 'abc123' + /** + * Zero-based index of the starting row. + */ + // const startIndex = {} + /** + * Page token, returned by a previous call, to request the next page of + * results. + */ + // const pageToken = 'abc123' + /** + * Maximum number of results to read. + */ + // const maxResults = {} + /** + * Optional: Specifies the maximum amount of time, in milliseconds, that the + * client is willing to wait for the query to complete. By default, this limit + * is 10 seconds (10,000 milliseconds). If the query is complete, the + * jobComplete field in the response is true. If the query has not yet + * completed, jobComplete is false. + * You can request a longer timeout period in the timeoutMs field. However, + * the call is not guaranteed to wait for the specified timeout; it typically + * returns after around 200 seconds (200,000 milliseconds), even if the query + * is not complete. + * If jobComplete is false, you can continue to wait for the query to complete + * by calling the getQueryResults method until the jobComplete field in the + * getQueryResults response is true. + */ + // const timeoutMs = {} + /** + * The geographic location of the job. 
You must specify the location to run + * the job for the following scenarios: + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + */ + // const location = 'abc123' + /** + * Optional. Output format adjustments. + */ + // const formatOptions = {} + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callGetQueryResults() { + // Construct request + const request = { + projectId, + jobId, + }; + + // Run request + const response = await bigqueryClient.getQueryResults(request); + console.log(response); + } + + callGetQueryResults(); + // [END bigquery_v2_generated_JobService_GetQueryResults_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/job_service.insert_job.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.insert_job.js.baseline new file mode 100644 index 000000000..544a889ae --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.insert_job.js.baseline @@ -0,0 +1,64 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main() { + // [START bigquery_v2_generated_JobService_InsertJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Project ID of project that will be billed for the job. + */ + // const projectId = 'abc123' + /** + * Jobs resource to insert. 
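+   * For example, a minimal query job might look like this (illustrative
+   * values only): job = {configuration: {query: {query: 'SELECT 1',
+   * useLegacySql: false}}}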
+ */ + // const job = {} + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callInsertJob() { + // Construct request + const request = { + }; + + // Run request + const response = await bigqueryClient.insertJob(request); + console.log(response); + } + + callInsertJob(); + // [END bigquery_v2_generated_JobService_InsertJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/job_service.list_jobs.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.list_jobs.js.baseline new file mode 100644 index 000000000..ed07adc02 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.list_jobs.js.baseline @@ -0,0 +1,99 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main() { + // [START bigquery_v2_generated_JobService_ListJobs_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Project ID of the jobs to list. + */ + // const projectId = 'abc123' + /** + * Whether to display jobs owned by all users in the project. Default False. + */ + // const allUsers = true + /** + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + */ + // const maxResults = 1234 + /** + * Min value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created after or at this timestamp are returned. + */ + // const minCreationTime = 1234 + /** + * Max value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created before or at this timestamp are returned. + */ + // const maxCreationTime = {} + /** + * Page token, returned by a previous call, to request the next page of + * results. + */ + // const pageToken = 'abc123' + /** + * Restrict information returned to a set of selected fields + */ + // const projection = {} + /** + * Filter for job state + */ + // const stateFilter = [1,2,3,4] + /** + * If set, show only child jobs of the specified parent. Otherwise, show all + * top-level jobs. 
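+   * For example, pass the job ID of a script job to list the child statements
+   * it spawned (the value below is a placeholder, as in the other samples).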
+ */ + // const parentJobId = 'abc123' + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callListJobs() { + // Construct request + const request = { + }; + + // Run request + const iterable = bigqueryClient.listJobsAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListJobs(); + // [END bigquery_v2_generated_JobService_ListJobs_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/job_service.query.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.query.js.baseline new file mode 100644 index 000000000..24c7baf73 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/job_service.query.js.baseline @@ -0,0 +1,65 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId) { + // [START bigquery_v2_generated_JobService_Query_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the query request. + */ + // const projectId = 'abc123' + /** + * The query request body. + */ + // const queryRequest = {} + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callQuery() { + // Construct request + const request = { + projectId, + }; + + // Run request + const response = await bigqueryClient.query(request); + console.log(response); + } + + callQuery(); + // [END bigquery_v2_generated_JobService_Query_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/model_service.delete_model.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/model_service.delete_model.js.baseline new file mode 100644 index 000000000..d857afc39 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/model_service.delete_model.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, modelId) { + // [START bigquery_v2_generated_ModelService_DeleteModel_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the model to delete. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the model to delete. + */ + // const datasetId = 'abc123' + /** + * Required. Model ID of the model to delete. + */ + // const modelId = 'abc123' + + // Imports the Bigquery library + const {ModelServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ModelServiceClient(); + + async function callDeleteModel() { + // Construct request + const request = { + projectId, + datasetId, + modelId, + }; + + // Run request + const response = await bigqueryClient.deleteModel(request); + console.log(response); + } + + callDeleteModel(); + // [END bigquery_v2_generated_ModelService_DeleteModel_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/model_service.get_model.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/model_service.get_model.js.baseline new file mode 100644 index 000000000..ab2dae8cc --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/model_service.get_model.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, modelId) { + // [START bigquery_v2_generated_ModelService_GetModel_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + /** + * Required. Project ID of the requested model. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the requested model. + */ + // const datasetId = 'abc123' + /** + * Required. Model ID of the requested model. + */ + // const modelId = 'abc123' + + // Imports the Bigquery library + const {ModelServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ModelServiceClient(); + + async function callGetModel() { + // Construct request + const request = { + projectId, + datasetId, + modelId, + }; + + // Run request + const response = await bigqueryClient.getModel(request); + console.log(response); + } + + callGetModel(); + // [END bigquery_v2_generated_ModelService_GetModel_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/model_service.list_models.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/model_service.list_models.js.baseline new file mode 100644 index 000000000..8bc0d3228 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/model_service.list_models.js.baseline @@ -0,0 +1,78 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_ModelService_ListModels_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the models to list. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the models to list. + */ + // const datasetId = 'abc123' + /** + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. 
+ */ + // const maxResults = 1234 + /** + * Page token, returned by a previous call to request the next page of + * results + */ + // const pageToken = 'abc123' + + // Imports the Bigquery library + const {ModelServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ModelServiceClient(); + + async function callListModels() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const iterable = bigqueryClient.listModelsAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListModels(); + // [END bigquery_v2_generated_ModelService_ListModels_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/model_service.patch_model.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/model_service.patch_model.js.baseline new file mode 100644 index 000000000..b0c0b4e54 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/model_service.patch_model.js.baseline @@ -0,0 +1,78 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, modelId, model) { + // [START bigquery_v2_generated_ModelService_PatchModel_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the model to patch. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the model to patch. + */ + // const datasetId = 'abc123' + /** + * Required. Model ID of the model to patch. + */ + // const modelId = 'abc123' + /** + * Required. Patched model. + * Follows RFC5789 patch semantics. Missing fields are not updated. + * To clear a field, explicitly set to default value. 
+ */ + // const model = {} + + // Imports the Bigquery library + const {ModelServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ModelServiceClient(); + + async function callPatchModel() { + // Construct request + const request = { + projectId, + datasetId, + modelId, + model, + }; + + // Run request + const response = await bigqueryClient.patchModel(request); + console.log(response); + } + + callPatchModel(); + // [END bigquery_v2_generated_ModelService_PatchModel_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/project_service.get_service_account.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/project_service.get_service_account.js.baseline new file mode 100644 index 000000000..3502dccac --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/project_service.get_service_account.js.baseline @@ -0,0 +1,61 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId) { + // [START bigquery_v2_generated_ProjectService_GetServiceAccount_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. ID of the project. + */ + // const projectId = 'abc123' + + // Imports the Bigquery library + const {ProjectServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ProjectServiceClient(); + + async function callGetServiceAccount() { + // Construct request + const request = { + projectId, + }; + + // Run request + const response = await bigqueryClient.getServiceAccount(request); + console.log(response); + } + + callGetServiceAccount(); + // [END bigquery_v2_generated_ProjectService_GetServiceAccount_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.delete_routine.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.delete_routine.js.baseline new file mode 100644 index 000000000..c5fcd7f41 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.delete_routine.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routineId) { + // [START bigquery_v2_generated_RoutineService_DeleteRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the routine to delete + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the routine to delete + */ + // const datasetId = 'abc123' + /** + * Required. Routine ID of the routine to delete + */ + // const routineId = 'abc123' + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callDeleteRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routineId, + }; + + // Run request + const response = await bigqueryClient.deleteRoutine(request); + console.log(response); + } + + callDeleteRoutine(); + // [END bigquery_v2_generated_RoutineService_DeleteRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.get_routine.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.get_routine.js.baseline new file mode 100644 index 000000000..af1f00d7f --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.get_routine.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routineId) { + // [START bigquery_v2_generated_RoutineService_GetRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. 
+ * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the requested routine + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the requested routine + */ + // const datasetId = 'abc123' + /** + * Required. Routine ID of the requested routine + */ + // const routineId = 'abc123' + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callGetRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routineId, + }; + + // Run request + const response = await bigqueryClient.getRoutine(request); + console.log(response); + } + + callGetRoutine(); + // [END bigquery_v2_generated_RoutineService_GetRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.insert_routine.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.insert_routine.js.baseline new file mode 100644 index 000000000..a6006cb1f --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.insert_routine.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routine) { + // [START bigquery_v2_generated_RoutineService_InsertRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the new routine + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the new routine + */ + // const datasetId = 'abc123' + /** + * Required. 
A routine resource to insert + */ + // const routine = {} + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callInsertRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routine, + }; + + // Run request + const response = await bigqueryClient.insertRoutine(request); + console.log(response); + } + + callInsertRoutine(); + // [END bigquery_v2_generated_RoutineService_InsertRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.list_routines.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.list_routines.js.baseline new file mode 100644 index 000000000..100f58e90 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.list_routines.js.baseline @@ -0,0 +1,84 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_RoutineService_ListRoutines_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the routines to list + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the routines to list + */ + // const datasetId = 'abc123' + /** + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + */ + // const maxResults = 1234 + /** + * Page token, returned by a previous call, to request the next page of + * results + */ + // const pageToken = 'abc123' + /** + * If set, then only the Routines matching this filter are returned. + * The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + * is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. 
+ */ + // const filter = 'abc123' + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callListRoutines() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const iterable = bigqueryClient.listRoutinesAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListRoutines(); + // [END bigquery_v2_generated_RoutineService_ListRoutines_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.patch_routine.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.patch_routine.js.baseline new file mode 100644 index 000000000..693c45faf --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.patch_routine.js.baseline @@ -0,0 +1,83 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routineId, routine) { + // [START bigquery_v2_generated_RoutineService_PatchRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the routine to update + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the routine to update + */ + // const datasetId = 'abc123' + /** + * Required. Routine ID of the routine to update + */ + // const routineId = 'abc123' + /** + * Required. A routine resource which will be used to partially + * update the specified routine + */ + // const routine = {} + /** + * Only the Routine fields in the field mask are updated + * by the given routine. Repeated routine fields will be fully replaced + * if contained in the field mask. 
+ */ + // const fieldMask = {} + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callPatchRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routineId, + routine, + }; + + // Run request + const response = await bigqueryClient.patchRoutine(request); + console.log(response); + } + + callPatchRoutine(); + // [END bigquery_v2_generated_RoutineService_PatchRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.update_routine.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.update_routine.js.baseline new file mode 100644 index 000000000..af3a54341 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/routine_service.update_routine.js.baseline @@ -0,0 +1,76 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routineId, routine) { + // [START bigquery_v2_generated_RoutineService_UpdateRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the routine to update + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the routine to update + */ + // const datasetId = 'abc123' + /** + * Required. Routine ID of the routine to update + */ + // const routineId = 'abc123' + /** + * Required. 
A routine resource which will replace the specified routine + */ + // const routine = {} + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callUpdateRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routineId, + routine, + }; + + // Run request + const response = await bigqueryClient.updateRoutine(request); + console.log(response); + } + + callUpdateRoutine(); + // [END bigquery_v2_generated_RoutineService_UpdateRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/row_access_policy_service.list_row_access_policies.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/row_access_policy_service.list_row_access_policies.js.baseline new file mode 100644 index 000000000..3536a64dc --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/row_access_policy_service.list_row_access_policies.js.baseline @@ -0,0 +1,83 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId) { + // [START bigquery_v2_generated_RowAccessPolicyService_ListRowAccessPolicies_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the row access policies to list. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of row access policies to list. + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the table to list row access policies. + */ + // const tableId = 'abc123' + /** + * Page token, returned by a previous call, to request the next page of + * results. + */ + // const pageToken = 'abc123' + /** + * The maximum number of results to return in a single response page. Leverage + * the page tokens to iterate through the entire collection. 
+ */ + // const pageSize = 1234 + + // Imports the Bigquery library + const {RowAccessPolicyServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RowAccessPolicyServiceClient(); + + async function callListRowAccessPolicies() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + }; + + // Run request + const iterable = bigqueryClient.listRowAccessPoliciesAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListRowAccessPolicies(); + // [END bigquery_v2_generated_RowAccessPolicyService_ListRowAccessPolicies_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/snippet_metadata_google.cloud.bigquery.v2.json.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/snippet_metadata_google.cloud.bigquery.v2.json.baseline new file mode 100644 index 000000000..917111d47 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/snippet_metadata_google.cloud.bigquery.v2.json.baseline @@ -0,0 +1,1647 @@ +{ + "clientLibrary": { + "name": "nodejs-bigquery", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.bigquery.v2", + "version": "v2" + } + ] + }, + "snippets": [ + { + "regionTag": "bigquery_v2_generated_DatasetService_GetDataset_async", + "title": "DatasetService getDataset Sample", + "origin": "API_DEFINITION", + "description": " Returns the dataset specified by datasetID.", + "canonical": true, + "file": "dataset_service.get_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.GetDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_view", + "type": ".google.cloud.bigquery.v2.GetDatasetRequest.DatasetView" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "GetDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.GetDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_InsertDataset_async", + "title": "DatasetService insertDataset Sample", + "origin": "API_DEFINITION", + "description": " Creates a new empty dataset.", + "canonical": true, + "file": "dataset_service.insert_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 58, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InsertDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.InsertDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset", + "type": ".google.cloud.bigquery.v2.Dataset" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "InsertDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.InsertDataset", + "service": { + "shortName": 
"DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_PatchDataset_async", + "title": "DatasetService patchDataset Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports RFC5789 patch semantics.", + "canonical": true, + "file": "dataset_service.patch_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "PatchDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.PatchDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset", + "type": ".google.cloud.bigquery.v2.Dataset" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "PatchDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.PatchDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_UpdateDataset_async", + "title": "DatasetService updateDataset Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.", + "canonical": true, + "file": "dataset_service.update_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.UpdateDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset", + "type": ".google.cloud.bigquery.v2.Dataset" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "UpdateDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.UpdateDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_DeleteDataset_async", + "title": "DatasetService deleteDataset Sample", + "origin": "API_DEFINITION", + "description": " Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. 
Immediately after deletion, you can create another dataset with the same name.", + "canonical": true, + "file": "dataset_service.delete_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.DeleteDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "delete_contents", + "type": "TYPE_BOOL" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "DeleteDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.DeleteDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_ListDatasets_async", + "title": "DatasetService listDatasets Sample", + "origin": "API_DEFINITION", + "description": " Lists all datasets in the specified project to which the user has been granted the READER dataset role.", + "canonical": true, + "file": "dataset_service.list_datasets.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 79, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListDatasets", + "fullName": "google.cloud.bigquery.v2.DatasetService.ListDatasets", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "all", + "type": "TYPE_BOOL" + }, + { + "name": "filter", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.DatasetList", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "ListDatasets", + "fullName": "google.cloud.bigquery.v2.DatasetService.ListDatasets", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_UndeleteDataset_async", + "title": "DatasetService undeleteDataset Sample", + "origin": "API_DEFINITION", + "description": " Undeletes a dataset which is within time travel window based on datasetId. 
If a time is specified, the dataset version deleted at that time is undeleted, else the last live version is undeleted.", + "canonical": true, + "file": "dataset_service.undelete_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UndeleteDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.UndeleteDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "deletion_time", + "type": ".google.protobuf.Timestamp" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "UndeleteDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.UndeleteDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_CancelJob_async", + "title": "DatasetService cancelJob Sample", + "origin": "API_DEFINITION", + "description": " Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully. Cancelled jobs may still incur costs.", + "canonical": true, + "file": "job_service.cancel_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 69, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CancelJob", + "fullName": "google.cloud.bigquery.v2.JobService.CancelJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + }, + { + "name": "location", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.JobCancelResponse", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "CancelJob", + "fullName": "google.cloud.bigquery.v2.JobService.CancelJob", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_GetJob_async", + "title": "DatasetService getJob Sample", + "origin": "API_DEFINITION", + "description": " Returns information about a specific job. Job information is available for a six month period after creation. 
Requires that you're the person who ran the job, or have the Is Owner project role.", + "canonical": true, + "file": "job_service.get_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 69, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetJob", + "fullName": "google.cloud.bigquery.v2.JobService.GetJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + }, + { + "name": "location", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.Job", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "GetJob", + "fullName": "google.cloud.bigquery.v2.JobService.GetJob", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_InsertJob_async", + "title": "DatasetService insertJob Sample", + "origin": "API_DEFINITION", + "description": " Starts a new asynchronous job. This API has two different kinds of endpoint URIs, as this method supports a variety of use cases. * The *Metadata* URI is used for most interactions, as it accepts the job configuration directly. * The *Upload* URI is ONLY for the case when you're sending both a load job configuration and a data stream together. In this case, the Upload URI accepts the job configuration and the data as two distinct multipart MIME parts.", + "canonical": true, + "file": "job_service.insert_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 56, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InsertJob", + "fullName": "google.cloud.bigquery.v2.JobService.InsertJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job", + "type": ".google.cloud.bigquery.v2.Job" + } + ], + "resultType": ".google.cloud.bigquery.v2.Job", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "InsertJob", + "fullName": "google.cloud.bigquery.v2.JobService.InsertJob", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_DeleteJob_async", + "title": "DatasetService deleteJob Sample", + "origin": "API_DEFINITION", + "description": " Requests the deletion of the metadata of a job. 
This call returns when the job's metadata is deleted.", + "canonical": true, + "file": "job_service.delete_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 67, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteJob", + "fullName": "google.cloud.bigquery.v2.JobService.DeleteJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + }, + { + "name": "location", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "DeleteJob", + "fullName": "google.cloud.bigquery.v2.JobService.DeleteJob", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_ListJobs_async", + "title": "DatasetService listJobs Sample", + "origin": "API_DEFINITION", + "description": " Lists all jobs that you started in the specified project. Job information is available for a six month period after creation. The job list is sorted in reverse chronological order, by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.", + "canonical": true, + "file": "job_service.list_jobs.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 91, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListJobs", + "fullName": "google.cloud.bigquery.v2.JobService.ListJobs", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "all_users", + "type": "TYPE_BOOL" + }, + { + "name": "max_results", + "type": ".google.protobuf.Int32Value" + }, + { + "name": "min_creation_time", + "type": "TYPE_UINT64" + }, + { + "name": "max_creation_time", + "type": ".google.protobuf.UInt64Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "projection", + "type": ".google.cloud.bigquery.v2.ListJobsRequest.Projection" + }, + { + "name": "state_filter", + "type": "TYPE_ENUM[]" + }, + { + "name": "parent_job_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.JobList", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "ListJobs", + "fullName": "google.cloud.bigquery.v2.JobService.ListJobs", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_GetQueryResults_async", + "title": "DatasetService getQueryResults Sample", + "origin": "API_DEFINITION", + "description": " RPC to get the results of a query job.", + "canonical": true, + "file": "job_service.get_query_results.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 101, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetQueryResults", + "fullName": "google.cloud.bigquery.v2.JobService.GetQueryResults", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + }, + { + "name": "start_index", + "type": ".google.protobuf.UInt64Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": 
"timeout_ms", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "location", + "type": "TYPE_STRING" + }, + { + "name": "format_options", + "type": ".google.cloud.bigquery.v2.DataFormatOptions" + } + ], + "resultType": ".google.cloud.bigquery.v2.GetQueryResultsResponse", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "GetQueryResults", + "fullName": "google.cloud.bigquery.v2.JobService.GetQueryResults", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_Query_async", + "title": "DatasetService query Sample", + "origin": "API_DEFINITION", + "description": " Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.", + "canonical": true, + "file": "job_service.query.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 57, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "Query", + "fullName": "google.cloud.bigquery.v2.JobService.Query", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "query_request", + "type": ".google.cloud.bigquery.v2.QueryRequest" + } + ], + "resultType": ".google.cloud.bigquery.v2.QueryResponse", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "Query", + "fullName": "google.cloud.bigquery.v2.JobService.Query", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ModelService_GetModel_async", + "title": "DatasetService getModel Sample", + "origin": "API_DEFINITION", + "description": " Gets the specified model resource by model ID.", + "canonical": true, + "file": "model_service.get_model.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetModel", + "fullName": "google.cloud.bigquery.v2.ModelService.GetModel", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "model_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.Model", + "client": { + "shortName": "ModelServiceClient", + "fullName": "google.cloud.bigquery.v2.ModelServiceClient" + }, + "method": { + "shortName": "GetModel", + "fullName": "google.cloud.bigquery.v2.ModelService.GetModel", + "service": { + "shortName": "ModelService", + "fullName": "google.cloud.bigquery.v2.ModelService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ModelService_ListModels_async", + "title": "DatasetService listModels Sample", + "origin": "API_DEFINITION", + "description": " Lists all models in the specified dataset. Requires the READER dataset role. 
After retrieving the list of models, you can get information about a particular model by calling the models.get method.", + "canonical": true, + "file": "model_service.list_models.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 70, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListModels", + "fullName": "google.cloud.bigquery.v2.ModelService.ListModels", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.ListModelsResponse", + "client": { + "shortName": "ModelServiceClient", + "fullName": "google.cloud.bigquery.v2.ModelServiceClient" + }, + "method": { + "shortName": "ListModels", + "fullName": "google.cloud.bigquery.v2.ModelService.ListModels", + "service": { + "shortName": "ModelService", + "fullName": "google.cloud.bigquery.v2.ModelService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ModelService_PatchModel_async", + "title": "DatasetService patchModel Sample", + "origin": "API_DEFINITION", + "description": " Patch specific fields in the specified model.", + "canonical": true, + "file": "model_service.patch_model.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 70, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "PatchModel", + "fullName": "google.cloud.bigquery.v2.ModelService.PatchModel", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "model_id", + "type": "TYPE_STRING" + }, + { + "name": "model", + "type": ".google.cloud.bigquery.v2.Model" + } + ], + "resultType": ".google.cloud.bigquery.v2.Model", + "client": { + "shortName": "ModelServiceClient", + "fullName": "google.cloud.bigquery.v2.ModelServiceClient" + }, + "method": { + "shortName": "PatchModel", + "fullName": "google.cloud.bigquery.v2.ModelService.PatchModel", + "service": { + "shortName": "ModelService", + "fullName": "google.cloud.bigquery.v2.ModelService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ModelService_DeleteModel_async", + "title": "DatasetService deleteModel Sample", + "origin": "API_DEFINITION", + "description": " Deletes the model specified by modelId from the dataset.", + "canonical": true, + "file": "model_service.delete_model.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteModel", + "fullName": "google.cloud.bigquery.v2.ModelService.DeleteModel", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "model_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "ModelServiceClient", + "fullName": "google.cloud.bigquery.v2.ModelServiceClient" + }, + "method": { + "shortName": "DeleteModel", + "fullName": "google.cloud.bigquery.v2.ModelService.DeleteModel", + "service": { + "shortName": "ModelService", + "fullName": "google.cloud.bigquery.v2.ModelService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ProjectService_GetServiceAccount_async", + "title": "DatasetService getServiceAccount Sample", + "origin": "API_DEFINITION", + 
"description": " RPC to get the service account for a project used for interactions with Google Cloud KMS", + "canonical": true, + "file": "project_service.get_service_account.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 53, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetServiceAccount", + "fullName": "google.cloud.bigquery.v2.ProjectService.GetServiceAccount", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.GetServiceAccountResponse", + "client": { + "shortName": "ProjectServiceClient", + "fullName": "google.cloud.bigquery.v2.ProjectServiceClient" + }, + "method": { + "shortName": "GetServiceAccount", + "fullName": "google.cloud.bigquery.v2.ProjectService.GetServiceAccount", + "service": { + "shortName": "ProjectService", + "fullName": "google.cloud.bigquery.v2.ProjectService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_GetRoutine_async", + "title": "DatasetService getRoutine Sample", + "origin": "API_DEFINITION", + "description": " Gets the specified routine resource by routine ID.", + "canonical": true, + "file": "routine_service.get_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.GetRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.Routine", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "GetRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.GetRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_InsertRoutine_async", + "title": "DatasetService insertRoutine Sample", + "origin": "API_DEFINITION", + "description": " Creates a new routine in the dataset.", + "canonical": true, + "file": "routine_service.insert_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InsertRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.InsertRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine", + "type": ".google.cloud.bigquery.v2.Routine" + } + ], + "resultType": ".google.cloud.bigquery.v2.Routine", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "InsertRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.InsertRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_UpdateRoutine_async", + "title": "DatasetService updateRoutine Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing routine. 
The update method replaces the entire Routine resource.", + "canonical": true, + "file": "routine_service.update_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 68, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.UpdateRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine_id", + "type": "TYPE_STRING" + }, + { + "name": "routine", + "type": ".google.cloud.bigquery.v2.Routine" + } + ], + "resultType": ".google.cloud.bigquery.v2.Routine", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "UpdateRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.UpdateRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_PatchRoutine_async", + "title": "RoutineService patchRoutine Sample", + "origin": "API_DEFINITION", + "description": " Patches information in an existing routine. The patch method does a partial update to an existing Routine resource.", + "canonical": true, + "file": "routine_service.patch_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 75, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "PatchRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.PatchRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine_id", + "type": "TYPE_STRING" + }, + { + "name": "routine", + "type": ".google.cloud.bigquery.v2.Routine" + }, + { + "name": "field_mask", + "type": ".google.protobuf.FieldMask" + } + ], + "resultType": ".google.cloud.bigquery.v2.Routine", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "PatchRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.PatchRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_DeleteRoutine_async", + "title": "RoutineService deleteRoutine Sample", + "origin": "API_DEFINITION", + "description": " Deletes the routine specified by routineId from the dataset.", + "canonical": true, + "file": "routine_service.delete_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.DeleteRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "DeleteRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.DeleteRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + {
"regionTag": "bigquery_v2_generated_RoutineService_ListRoutines_async", + "title": "DatasetService listRoutines Sample", + "origin": "API_DEFINITION", + "description": " Lists all routines in the specified dataset. Requires the READER dataset role.", + "canonical": true, + "file": "routine_service.list_routines.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 76, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListRoutines", + "fullName": "google.cloud.bigquery.v2.RoutineService.ListRoutines", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "filter", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.ListRoutinesResponse", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "ListRoutines", + "fullName": "google.cloud.bigquery.v2.RoutineService.ListRoutines", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RowAccessPolicyService_ListRowAccessPolicies_async", + "title": "DatasetService listRowAccessPolicies Sample", + "origin": "API_DEFINITION", + "description": " Lists all row access policies on the specified table.", + "canonical": true, + "file": "row_access_policy_service.list_row_access_policies.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 75, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListRowAccessPolicies", + "fullName": "google.cloud.bigquery.v2.RowAccessPolicyService.ListRowAccessPolicies", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "page_size", + "type": "TYPE_INT32" + } + ], + "resultType": ".google.cloud.bigquery.v2.ListRowAccessPoliciesResponse", + "client": { + "shortName": "RowAccessPolicyServiceClient", + "fullName": "google.cloud.bigquery.v2.RowAccessPolicyServiceClient" + }, + "method": { + "shortName": "ListRowAccessPolicies", + "fullName": "google.cloud.bigquery.v2.RowAccessPolicyService.ListRowAccessPolicies", + "service": { + "shortName": "RowAccessPolicyService", + "fullName": "google.cloud.bigquery.v2.RowAccessPolicyService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_GetTable_async", + "title": "DatasetService getTable Sample", + "origin": "API_DEFINITION", + "description": " Gets the specified table resource by table ID. 
This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.", + "canonical": true, + "file": "table_service.get_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 78, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetTable", + "fullName": "google.cloud.bigquery.v2.TableService.GetTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + }, + { + "name": "selected_fields", + "type": "TYPE_STRING" + }, + { + "name": "view", + "type": ".google.cloud.bigquery.v2.GetTableRequest.TableMetadataView" + } + ], + "resultType": ".google.cloud.bigquery.v2.Table", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "GetTable", + "fullName": "google.cloud.bigquery.v2.TableService.GetTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_InsertTable_async", + "title": "TableService insertTable Sample", + "origin": "API_DEFINITION", + "description": " Creates a new, empty table in the dataset.", + "canonical": true, + "file": "table_service.insert_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InsertTable", + "fullName": "google.cloud.bigquery.v2.TableService.InsertTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table", + "type": ".google.cloud.bigquery.v2.Table" + } + ], + "resultType": ".google.cloud.bigquery.v2.Table", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "InsertTable", + "fullName": "google.cloud.bigquery.v2.TableService.InsertTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_PatchTable_async", + "title": "TableService patchTable Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.
This method supports RFC5789 patch semantics.", + "canonical": true, + "file": "table_service.patch_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 72, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "PatchTable", + "fullName": "google.cloud.bigquery.v2.TableService.PatchTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + }, + { + "name": "table", + "type": ".google.cloud.bigquery.v2.Table" + }, + { + "name": "autodetect_schema", + "type": "TYPE_BOOL" + } + ], + "resultType": ".google.cloud.bigquery.v2.Table", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "PatchTable", + "fullName": "google.cloud.bigquery.v2.TableService.PatchTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_UpdateTable_async", + "title": "TableService updateTable Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing table. The update method replaces the entire Table resource, whereas the patch method only replaces fields that are provided in the submitted Table resource.", + "canonical": true, + "file": "table_service.update_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 72, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateTable", + "fullName": "google.cloud.bigquery.v2.TableService.UpdateTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + }, + { + "name": "table", + "type": ".google.cloud.bigquery.v2.Table" + }, + { + "name": "autodetect_schema", + "type": "TYPE_BOOL" + } + ], + "resultType": ".google.cloud.bigquery.v2.Table", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "UpdateTable", + "fullName": "google.cloud.bigquery.v2.TableService.UpdateTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_DeleteTable_async", + "title": "TableService deleteTable Sample", + "origin": "API_DEFINITION", + "description": " Deletes the table specified by tableId from the dataset.
If the table contains data, all the data will be deleted.", + "canonical": true, + "file": "table_service.delete_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteTable", + "fullName": "google.cloud.bigquery.v2.TableService.DeleteTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "DeleteTable", + "fullName": "google.cloud.bigquery.v2.TableService.DeleteTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_ListTables_async", + "title": "TableService listTables Sample", + "origin": "API_DEFINITION", + "description": " Lists all tables in the specified dataset. Requires the READER dataset role.", + "canonical": true, + "file": "table_service.list_tables.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 70, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListTables", + "fullName": "google.cloud.bigquery.v2.TableService.ListTables", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.TableList", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "ListTables", + "fullName": "google.cloud.bigquery.v2.TableService.ListTables", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + } + ] +} diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/table_service.delete_table.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.delete_table.js.baseline new file mode 100644 index 000000000..2496174f1 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.delete_table.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId) { + // [START bigquery_v2_generated_TableService_DeleteTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only.
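Taken together, the snippet metadata that closes above is a machine-readable index: each entry ties a region tag to a sample file and the client method it demonstrates. A small, self-contained sketch of consuming it follows (not part of the generated baselines; the file path and the top-level `snippets` key are assumptions inferred from the entries shown):

const fs = require('fs');

// Print which sample file and region tag cover each RPC.
// Point the path at the generated snippet metadata file.
const metadata = JSON.parse(
  fs.readFileSync('snippet_metadata_google.cloud.bigquery.v2.json', 'utf8')
);
for (const snippet of metadata.snippets) {
  const {clientMethod, file, regionTag} = snippet;
  console.log(`${clientMethod.fullName} -> ${file} [${regionTag}]`);
}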
+ * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the table to delete + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the table to delete + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the table to delete + */ + // const tableId = 'abc123' + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callDeleteTable() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + }; + + // Run request + const response = await bigqueryClient.deleteTable(request); + console.log(response); + } + + callDeleteTable(); + // [END bigquery_v2_generated_TableService_DeleteTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/table_service.get_table.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.get_table.js.baseline new file mode 100644 index 000000000..be8481470 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.get_table.js.baseline @@ -0,0 +1,86 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId) { + // [START bigquery_v2_generated_TableService_GetTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the requested table + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the requested table + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the requested table + */ + // const tableId = 'abc123' + /** + * List of table schema fields to return (comma-separated). + * If unspecified, all fields are returned. + * A fieldMask cannot be used here because the fields will automatically be + * converted from camelCase to snake_case and the conversion will fail if + * there are underscores. Since these are fields in BigQuery table schemas, + * underscores are allowed. + */ + // const selectedFields = 'abc123' + /** + * Optional. Specifies the view that determines which table information is + * returned. By default, basic table information and storage statistics + * (STORAGE_STATS) are returned. 
+ */ + // const view = {} + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callGetTable() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + }; + + // Run request + const response = await bigqueryClient.getTable(request); + console.log(response); + } + + callGetTable(); + // [END bigquery_v2_generated_TableService_GetTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/table_service.insert_table.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.insert_table.js.baseline new file mode 100644 index 000000000..2d338eb3f --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.insert_table.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, table) { + // [START bigquery_v2_generated_TableService_InsertTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the new table + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the new table + */ + // const datasetId = 'abc123' + /** + * Required. 
A tables resource to insert + */ + // const table = {} + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callInsertTable() { + // Construct request + const request = { + projectId, + datasetId, + table, + }; + + // Run request + const response = await bigqueryClient.insertTable(request); + console.log(response); + } + + callInsertTable(); + // [END bigquery_v2_generated_TableService_InsertTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/table_service.list_tables.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.list_tables.js.baseline new file mode 100644 index 000000000..de4cb4058 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.list_tables.js.baseline @@ -0,0 +1,78 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_TableService_ListTables_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the tables to list + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the tables to list + */ + // const datasetId = 'abc123' + /** + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. 
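The maxResults/pageToken pair described in this comment is ordinary page-token pagination. The generated sample below drives it with listTablesAsync(); the loop that iterator hides looks roughly like the following sketch, which assumes the usual google-gax behavior of paginated methods when autoPaginate is disabled (each call resolves to [resources, nextRequest, rawResponse]):

const {TableServiceClient} = require('bigquery').v2;

async function listTablesPageByPage(projectId, datasetId) {
  const client = new TableServiceClient();
  // maxResults is a google.protobuf.UInt32Value, hence the wrapper object.
  let request = {projectId, datasetId, maxResults: {value: 50}};
  while (request) {
    const [tables, nextRequest] = await client.listTables(request, {
      autoPaginate: false,
    });
    for (const table of tables) {
      console.log(table.id);
    }
    // nextRequest is null once the service returns no further page token.
    request = nextRequest;
  }
}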
+ */ + // const maxResults = 1234 + /** + * Page token, returned by a previous call, to request the next page of + * results + */ + // const pageToken = 'abc123' + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callListTables() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const iterable = bigqueryClient.listTablesAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListTables(); + // [END bigquery_v2_generated_TableService_ListTables_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/table_service.patch_table.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.patch_table.js.baseline new file mode 100644 index 000000000..ceafc7c5d --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.patch_table.js.baseline @@ -0,0 +1,80 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId, table) { + // [START bigquery_v2_generated_TableService_PatchTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the table to update + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the table to update + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the table to update + */ + // const tableId = 'abc123' + /** + * Required. A tables resource which will replace or patch the specified table + */ + // const table = {} + /** + * Optional. When true will autodetect schema, else will keep original schema. 
+ */ + // const autodetectSchema = true + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callPatchTable() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + table, + }; + + // Run request + const response = await bigqueryClient.patchTable(request); + console.log(response); + } + + callPatchTable(); + // [END bigquery_v2_generated_TableService_PatchTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/samples/generated/v2/table_service.update_table.js.baseline b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.update_table.js.baseline new file mode 100644 index 000000000..50a2dfc30 --- /dev/null +++ b/baselines/bigquery-v2-esm/samples/generated/v2/table_service.update_table.js.baseline @@ -0,0 +1,80 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId, table) { + // [START bigquery_v2_generated_TableService_UpdateTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the table to update + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the table to update + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the table to update + */ + // const tableId = 'abc123' + /** + * Required. A tables resource which will replace or patch the specified table + */ + // const table = {} + /** + * Optional. When true will autodetect schema, else will keep original schema. 
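The patch_table and update_table samples around this point differ only in the method called, but the semantics differ sharply: patchTable merges the submitted fields into the stored table (RFC 5789), while updateTable replaces the whole resource, dropping anything omitted. A hedged sketch of the distinction (IDs and text are placeholders; description is a google.protobuf.StringValue, hence the wrapper object):

const {TableServiceClient} = require('bigquery').v2;

async function patchVersusUpdate(projectId, datasetId, tableId) {
  const client = new TableServiceClient();

  // PATCH: send only what changes; every other field is preserved.
  await client.patchTable({
    projectId,
    datasetId,
    tableId,
    table: {description: {value: 'patched description'}},
    autodetectSchema: true, // re-detect the schema instead of keeping it
  });

  // UPDATE: read-modify-write the full resource; omitted fields are lost.
  const [table] = await client.getTable({projectId, datasetId, tableId});
  table.description = {value: 'replaced description'};
  await client.updateTable({projectId, datasetId, tableId, table});
}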
+ */ + // const autodetectSchema = true + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callUpdateTable() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + table, + }; + + // Run request + const response = await bigqueryClient.updateTable(request); + console.log(response); + } + + callUpdateTable(); + // [END bigquery_v2_generated_TableService_UpdateTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2-esm/tsconfig.esm.json.baseline b/baselines/bigquery-v2-esm/tsconfig.esm.json.baseline new file mode 100644 index 000000000..1ea52fa0e --- /dev/null +++ b/baselines/bigquery-v2-esm/tsconfig.esm.json.baseline @@ -0,0 +1,27 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "module": "es2020", + "moduleResolution": "node", + "esModuleInterop": true, + "sourceMap": false, + "allowJs": true, + "lib": [ + "es2020", + "DOM" + ] + }, + "include": [ + "esm/src/*.ts", + "esm/src/**/*.ts", + "esm/test/*.ts", + "esm/test/**/*.ts", + "esm/system-test/*.ts" + ], + "exclude": [ + "esm/src/json-helper.cjs" + ] +} diff --git a/baselines/bigquery-v2-esm/tsconfig.json.baseline b/baselines/bigquery-v2-esm/tsconfig.json.baseline new file mode 100644 index 000000000..95cf31dc8 --- /dev/null +++ b/baselines/bigquery-v2-esm/tsconfig.json.baseline @@ -0,0 +1,28 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "resolveJsonModule": true, + "moduleResolution": "node", + "allowJs": true, + "esModuleInterop": true, + "sourceMap": false, + "target": "esnext", + "module": "CommonJS", + "declaration": true, + "strict": true, + "isolatedModules": false, + "emitDeclarationOnly": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "esm/src/*.ts", + "esm/src/**/*.ts", + "esm/test/*.ts", + "esm/test/**/*.ts", + "esm/system-test/*.ts" + ] +} diff --git a/baselines/bigquery-v2-esm/webpack.config.cjs.baseline b/baselines/bigquery-v2-esm/webpack.config.cjs.baseline new file mode 100644 index 000000000..f4947d41e --- /dev/null +++ b/baselines/bigquery-v2-esm/webpack.config.cjs.baseline @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'DatasetService', + filename: './dataset-service.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; diff --git a/baselines/bigquery-v2/.eslintignore.baseline b/baselines/bigquery-v2/.eslintignore.baseline new file mode 100644 index 000000000..cfc348ec4 --- /dev/null +++ b/baselines/bigquery-v2/.eslintignore.baseline @@ -0,0 +1,7 @@ +**/node_modules +**/.coverage +build/ +docs/ +protos/ +system-test/ +samples/generated/ diff --git a/baselines/bigquery-v2/.eslintrc.json.baseline b/baselines/bigquery-v2/.eslintrc.json.baseline new file mode 100644 index 000000000..782153495 --- /dev/null +++ b/baselines/bigquery-v2/.eslintrc.json.baseline @@ -0,0 +1,3 @@ +{ + "extends": "./node_modules/gts" +} diff --git a/baselines/bigquery-v2/.gitignore.baseline b/baselines/bigquery-v2/.gitignore.baseline new file mode 100644 index 000000000..d4f03a0df --- /dev/null +++ b/baselines/bigquery-v2/.gitignore.baseline @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +/.coverage +/coverage +/.nyc_output +/docs/ +/out/ +/build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/baselines/bigquery-v2/.jsdoc.js.baseline b/baselines/bigquery-v2/.jsdoc.js.baseline new file mode 100644 index 000000000..6015da5b0 --- /dev/null +++ b/baselines/bigquery-v2/.jsdoc.js.baseline @@ -0,0 +1,55 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2024 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: 'bigquery', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/baselines/bigquery-v2/.mocharc.js.baseline b/baselines/bigquery-v2/.mocharc.js.baseline new file mode 100644 index 000000000..13b67c34e --- /dev/null +++ b/baselines/bigquery-v2/.mocharc.js.baseline @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000 +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/baselines/bigquery-v2/.prettierrc.js.baseline b/baselines/bigquery-v2/.prettierrc.js.baseline new file mode 100644 index 000000000..9a8fd6909 --- /dev/null +++ b/baselines/bigquery-v2/.prettierrc.js.baseline @@ -0,0 +1,22 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/baselines/bigquery-v2/README.md.baseline b/baselines/bigquery-v2/README.md.baseline new file mode 100644 index 000000000..39a954110 --- /dev/null +++ b/baselines/bigquery-v2/README.md.baseline @@ -0,0 +1 @@ +Bigquery: Nodejs Client diff --git a/baselines/bigquery-v2/package.json b/baselines/bigquery-v2/package.json new file mode 100644 index 000000000..22d6d1473 --- /dev/null +++ b/baselines/bigquery-v2/package.json @@ -0,0 +1,64 @@ +{ + "name": "bigquery", + "version": "0.1.0", + "description": "Bigquery client for Node.js", + "repository": "googleapis/nodejs-bigquery", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google bigquery", + "bigquery", + "dataset service", + "job service", + "model service", + "project service", + "routine service", + "row access policy service", + "table service" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . && cp -r protos build/ && minifyProtoJson", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^4.4.0" + }, + "devDependencies": { + "@types/mocha": "^10.0.7", + "@types/node": "^20.16.4", + "@types/sinon": "^10.0.20", + "c8": "^10.1.2", + "gapic-tools": "^0.4.6", + "gts": "5.3.1", + "jsdoc": "^4.0.3", + "jsdoc-fresh": "^3.0.0", + "jsdoc-region-tag": "^3.0.0", + "mocha": "^10.7.3", + "pack-n-play": "^2.0.3", + "sinon": "^18.0.0", + "typescript": "5.1.6" + }, + "engines": { + "node": ">=v14" + } +} diff --git a/baselines/bigquery-v2/package.json.baseline b/baselines/bigquery-v2/package.json.baseline new file mode 120000 index 000000000..2ff8622f1 --- /dev/null +++ b/baselines/bigquery-v2/package.json.baseline @@ -0,0 +1 @@ +package.json \ No newline at end of file diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/biglake_config.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/biglake_config.proto.baseline new file mode 100755 index 000000000..fd076037b --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/biglake_config.proto.baseline @@ -0,0 +1,62 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "BigLakeConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Configuration for BigLake managed tables. 
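The BigLakeConfiguration message defined just below is attached to a table at creation time. A sketch of what that looks like through the generated client, assuming the table proto in this baseline wires the message in as biglakeConfiguration (connection, bucket, and table names are placeholders):

const {TableServiceClient} = require('bigquery').v2;

async function createBigLakeTable(projectId, datasetId) {
  const client = new TableServiceClient();
  await client.insertTable({
    projectId,
    datasetId,
    table: {
      tableReference: {projectId, datasetId, tableId: 'managed_table'},
      biglakeConfiguration: {
        // `{project}.{location}.{connection_id}` form, per the field comment
        connectionId: `${projectId}.us.my-connection`,
        storageUri: 'gs://my-bucket/path_to_table/',
        fileFormat: 'PARQUET',
        tableFormat: 'ICEBERG',
      },
    },
  });
}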
+message BigLakeConfiguration { + // Supported file formats for BigLake tables. + enum FileFormat { + // Default Value. + FILE_FORMAT_UNSPECIFIED = 0; + + // Apache Parquet format. + PARQUET = 1; + } + + // Supported table formats for BigLake tables. + enum TableFormat { + // Default Value. + TABLE_FORMAT_UNSPECIFIED = 0; + + // Apache Iceberg format. + ICEBERG = 1; + } + + // Required. The connection specifying the credentials to be used to read and + // write to external storage, such as Cloud Storage. The connection_id can + // have the form `{project}.{location}.{connection_id}` or + // `projects/{project}/locations/{location}/connections/{connection_id}". + string connection_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The fully qualified location prefix of the external folder where + // table data is stored. The '*' wildcard character is not allowed. The URI + // should be in the format `gs://bucket/path_to_table/` + string storage_uri = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The file format the table data is stored in. + FileFormat file_format = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The table format the metadata only snapshots are stored in. + TableFormat table_format = 4 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/clustering.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/clustering.proto.baseline new file mode 100755 index 000000000..b871f41ec --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/clustering.proto.baseline @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ClusteringProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Configures table clustering. +message Clustering { + // One or more fields on which data should be clustered. Only top-level, + // non-repeated, simple-type fields are supported. The ordering of the + // clustering fields should be prioritized from most to least important + // for filtering purposes. + // + // Additional information on limitations can be found here: + // https://cloud.google.com/bigquery/docs/creating-clustered-tables#limitations + repeated string fields = 1; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/data_format_options.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/data_format_options.proto.baseline new file mode 100755 index 000000000..e2c6fb67d --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/data_format_options.proto.baseline @@ -0,0 +1,29 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "DataFormatOptionsProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Options for data format adjustments. +message DataFormatOptions { + // Optional. Output timestamp as usec int64. Default is false. + bool use_int64_timestamp = 1 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/dataset.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/dataset.proto.baseline new file mode 100755 index 000000000..62968e292 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/dataset.proto.baseline @@ -0,0 +1,625 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/dataset_reference.proto"; +import "google/cloud/bigquery/v2/encryption_config.proto"; +import "google/cloud/bigquery/v2/external_catalog_dataset_options.proto"; +import "google/cloud/bigquery/v2/external_dataset_reference.proto"; +import "google/cloud/bigquery/v2/restriction_config.proto"; +import "google/cloud/bigquery/v2/routine_reference.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "DatasetProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Dataset Service. +// +// It should not be relied on for production use cases at this time. +service DatasetService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Returns the dataset specified by datasetID. 
+ rpc GetDataset(GetDatasetRequest) returns (Dataset) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}" + }; + } + + // Creates a new empty dataset. + rpc InsertDataset(InsertDatasetRequest) returns (Dataset) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets" + body: "dataset" + }; + } + + // Updates information in an existing dataset. The update method replaces the + // entire dataset resource, whereas the patch method only replaces fields that + // are provided in the submitted dataset resource. + // This method supports RFC5789 patch semantics. + rpc PatchDataset(UpdateOrPatchDatasetRequest) returns (Dataset) { + option (google.api.http) = { + patch: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}" + body: "dataset" + }; + } + + // Updates information in an existing dataset. The update method replaces the + // entire dataset resource, whereas the patch method only replaces fields that + // are provided in the submitted dataset resource. + rpc UpdateDataset(UpdateOrPatchDatasetRequest) returns (Dataset) { + option (google.api.http) = { + put: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}" + body: "dataset" + }; + } + + // Deletes the dataset specified by the datasetId value. Before you can delete + // a dataset, you must delete all its tables, either manually or by specifying + // deleteContents. Immediately after deletion, you can create another dataset + // with the same name. + rpc DeleteDataset(DeleteDatasetRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}" + }; + } + + // Lists all datasets in the specified project to which the user has been + // granted the READER dataset role. + rpc ListDatasets(ListDatasetsRequest) returns (DatasetList) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets" + }; + } + + // Undeletes a dataset which is within time travel window based on datasetId. + // If a time is specified, the dataset version deleted at that time is + // undeleted, else the last live version is undeleted. + rpc UndeleteDataset(UndeleteDatasetRequest) returns (Dataset) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}:undelete" + body: "*" + }; + } +} + +// Grants all resources of particular types in a particular dataset read access +// to the current dataset. +// +// Similar to how individually authorized views work, updates to any resource +// granted through its dataset (including creation of new resources) requires +// read permission to referenced resources, plus write permission to the +// authorizing dataset. +message DatasetAccessEntry { + // Indicates the type of resources in a dataset that the entry applies to. + enum TargetType { + // Do not use. You must set a target type explicitly. + TARGET_TYPE_UNSPECIFIED = 0; + + // This entry applies to views in the dataset. + VIEWS = 1; + + // This entry applies to routines in the dataset. + ROUTINES = 2; + } + + // The dataset this entry applies to + DatasetReference dataset = 1; + + // Which resources in the dataset this entry applies to. Currently, only + // views are supported, but additional target types may be added in the + // future. + repeated TargetType target_types = 2; +} + +// An object that defines dataset access for an entity. 
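The RPCs above map one-to-one onto the generated DatasetServiceClient, with the same camelCase request fields the samples earlier in this diff use. A minimal lifecycle sketch under those assumptions (IDs and descriptions are placeholders); the Access message that follows defines the entries carried in a dataset's access list:

const {DatasetServiceClient} = require('bigquery').v2;

async function datasetLifecycle(projectId) {
  const client = new DatasetServiceClient();
  const datasetId = 'scratch_dataset';

  // InsertDataset: POST with the dataset resource as the body.
  await client.insertDataset({
    projectId,
    dataset: {datasetReference: {projectId, datasetId}},
  });

  // PatchDataset: RFC 5789 semantics, only the supplied fields change.
  await client.patchDataset({
    projectId,
    datasetId,
    dataset: {description: {value: 'temporary dataset'}},
  });

  // DeleteDataset: deleteContents must be set if tables still exist.
  await client.deleteDataset({projectId, datasetId, deleteContents: true});
}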
+message Access { + // An IAM role ID that should be granted to the user, group, + // or domain specified in this access entry. + // The following legacy mappings will be applied: + // + // * `OWNER`: `roles/bigquery.dataOwner` + // * `WRITER`: `roles/bigquery.dataEditor` + // * `READER`: `roles/bigquery.dataViewer` + // + // This field will accept any of the above formats, but will return only + // the legacy format. For example, if you set this field to + // "roles/bigquery.dataOwner", it will be returned back as "OWNER". + string role = 1; + + // [Pick one] An email address of a user to grant access to. For example: + // fred@example.com. Maps to IAM policy member "user:EMAIL" or + // "serviceAccount:EMAIL". + string user_by_email = 2; + + // [Pick one] An email address of a Google Group to grant access to. + // Maps to IAM policy member "group:GROUP". + string group_by_email = 3; + + // [Pick one] A domain to grant access to. Any users signed in with the domain + // specified will be granted the specified access. Example: "example.com". + // Maps to IAM policy member "domain:DOMAIN". + string domain = 4; + + // [Pick one] A special group to grant access to. Possible values include: + // + // * projectOwners: Owners of the enclosing project. + // * projectReaders: Readers of the enclosing project. + // * projectWriters: Writers of the enclosing project. + // * allAuthenticatedUsers: All authenticated BigQuery users. + // + // Maps to similarly-named IAM members. + string special_group = 5; + + // [Pick one] Some other type of member that appears in the IAM Policy but + // isn't a user, group, domain, or special group. + string iam_member = 7; + + // [Pick one] A view from a different dataset to grant access to. Queries + // executed against that view will have read access to views/tables/routines + // in this dataset. + // The role field is not required when this field is set. If that view is + // updated by any user, access to the view needs to be granted again via an + // update operation. + TableReference view = 6; + + // [Pick one] A routine from a different dataset to grant access to. Queries + // executed against that routine will have read access to + // views/tables/routines in this dataset. Only UDF is supported for now. + // The role field is not required when this field is set. If that routine is + // updated by any user, access to the routine needs to be granted again via + // an update operation. + RoutineReference routine = 8; + + // [Pick one] A grant authorizing all resources of a particular type in a + // particular dataset access to this dataset. Only views are supported for + // now. The role field is not required when this field is set. If that dataset + // is deleted and re-created, its access needs to be granted again via an + // update operation. + DatasetAccessEntry dataset = 9; +} + +// Represents a BigQuery dataset. +message Dataset { + // Indicates the billing model that will be applied to the dataset. + enum StorageBillingModel { + // Value not set. + STORAGE_BILLING_MODEL_UNSPECIFIED = 0; + + // Billing for logical bytes. + LOGICAL = 1; + + // Billing for physical bytes. + PHYSICAL = 2; + } + + // Output only. The resource type. + string kind = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A hash of the resource. + string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The fully-qualified unique name of the dataset in the format + // projectId:datasetId. 
The dataset name without the project name is given in + // the datasetId field. When creating a new dataset, leave this field blank, + // and instead specify the datasetId field. + string id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A URL that can be used to access the resource again. You can + // use this URL in Get or Update requests to the resource. + string self_link = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. A reference that identifies the dataset. + DatasetReference dataset_reference = 5 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. A descriptive name for the dataset. + google.protobuf.StringValue friendly_name = 6 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A user-friendly description of the dataset. + google.protobuf.StringValue description = 7 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The default lifetime of all tables in the dataset, in + // milliseconds. The minimum lifetime value is 3600000 milliseconds (one + // hour). To clear an existing default expiration with a PATCH request, set to + // 0. Once this property is set, all newly-created tables in the dataset will + // have an expirationTime property set to the creation time plus the value in + // this property, and changing the value will only affect new tables, not + // existing ones. When the expirationTime for a given table is reached, that + // table will be deleted automatically. + // If a table's expirationTime is modified or removed before the table + // expires, or if you provide an explicit expirationTime when creating a + // table, that value takes precedence over the default expiration time + // indicated by this property. + google.protobuf.Int64Value default_table_expiration_ms = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // This default partition expiration, expressed in milliseconds. + // + // When new time-partitioned tables are created in a dataset where this + // property is set, the table will inherit this value, propagated as the + // `TimePartitioning.expirationMs` property on the new table. If you set + // `TimePartitioning.expirationMs` explicitly when creating a table, + // the `defaultPartitionExpirationMs` of the containing dataset is ignored. + // + // When creating a partitioned table, if `defaultPartitionExpirationMs` + // is set, the `defaultTableExpirationMs` value is ignored and the table + // will not inherit a table expiration deadline. + google.protobuf.Int64Value default_partition_expiration_ms = 14; + + // The labels associated with this dataset. You can use these + // to organize and group your datasets. + // You can set this property when inserting or updating a dataset. + // See [Creating and Updating Dataset + // Labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#creating_and_updating_dataset_labels) + // for more information. + map<string, string> labels = 9; + + // Optional. An array of objects that define dataset access for one or more + // entities. You can set this property when inserting or updating a dataset in + // order to control who is allowed to access the data.
If unspecified at + // dataset creation time, BigQuery adds default dataset access for the + // following entities: access.specialGroup: projectReaders; access.role: + // READER; access.specialGroup: projectWriters; access.role: WRITER; + // access.specialGroup: projectOwners; access.role: OWNER; + // access.userByEmail: [dataset creator email]; access.role: OWNER; + // If you patch a dataset, then this field is overwritten by the patched + // dataset's access field. To add entities, you must supply the entire + // existing access array in addition to any new entities that you want to add. + repeated Access access = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The time when this dataset was created, in milliseconds since + // the epoch. + int64 creation_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The date when this dataset was last modified, in milliseconds + // since the epoch. + int64 last_modified_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The geographic location where the dataset should reside. See + // https://cloud.google.com/bigquery/docs/locations for supported + // locations. + string location = 13; + + // The default encryption key for all tables in the dataset. + // After this property is set, the encryption key of all newly-created tables + // in the dataset is set to this value unless the table creation request or + // query explicitly overrides the key. + EncryptionConfiguration default_encryption_configuration = 16; + + // Output only. Reserved for future use. + google.protobuf.BoolValue satisfies_pzs = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Reserved for future use. + google.protobuf.BoolValue satisfies_pzi = 31 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Same as `type` in `ListFormatDataset`. + // The type of the dataset, one of: + // + // * DEFAULT - only accessible by owner and authorized accounts, + // * PUBLIC - accessible by everyone, + // * LINKED - linked dataset, + // * EXTERNAL - dataset with definition in external metadata catalog. + string type = 18 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The source dataset reference when the dataset is of type LINKED. + // For all other dataset types it is not set. This field cannot be updated + // once it is set. Any attempt to update this field using Update and Patch API + // Operations will be ignored. + LinkedDatasetSource linked_dataset_source = 19 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Metadata about the LinkedDataset. Filled out when the dataset + // type is LINKED. + LinkedDatasetMetadata linked_dataset_metadata = 29 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Reference to a read-only external dataset defined in data + // catalogs outside of BigQuery. Filled out when the dataset type is EXTERNAL. + ExternalDatasetReference external_dataset_reference = 20 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Options defining open source compatible datasets living in the + // BigQuery catalog. Contains metadata of open source database, schema or + // namespace represented by the current dataset. + ExternalCatalogDatasetOptions external_catalog_dataset_options = 32 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. TRUE if the dataset and its table names are case-insensitive, + // otherwise FALSE. By default, this is FALSE, which means the dataset and its + // table names are case-sensitive. 
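The overwrite semantics described above imply a read-modify-write pattern when adding an access entry. A sketch under the same assumptions as before (getDataset/updateDataset are assumed GAPIC-style method names; the service definition itself is not shown in this part of the diff):

    const projectId = 'my-project', datasetId = 'my_dataset';
    const [dataset] = await datasetClient.getDataset({projectId, datasetId});
    // Send back the entire existing array plus the new entry; otherwise the
    // patched dataset's access field drops the entries that were omitted.
    dataset.access = [
      ...(dataset.access ?? []),
      {role: 'READER', groupByEmail: 'analysts@example.com'},
    ];
    await datasetClient.updateDataset({projectId, datasetId, dataset});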
This field does not affect routine
+  // references.
+  google.protobuf.BoolValue is_case_insensitive = 21
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Defines the default collation specification of future tables
+  // created in the dataset. If a table is created in this dataset without
+  // table-level default collation, then the table inherits the dataset default
+  // collation, which is applied to the string fields that do not have explicit
+  // collation specified. A change to this field affects only tables created
+  // afterwards, and does not alter the existing tables.
+  // The following values are supported:
+  //
+  // * 'und:ci': undetermined locale, case insensitive.
+  // * '': empty string. Default to case-sensitive behavior.
+  google.protobuf.StringValue default_collation = 22
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Defines the default rounding mode specification of new tables
+  // created within this dataset. During table creation, if this field is
+  // specified, the table within this dataset will inherit the default rounding
+  // mode of the dataset. Setting the default rounding mode on a table overrides
+  // this option. Existing tables in the dataset are unaffected.
+  // If columns are defined during that table creation,
+  // they will immediately inherit the table's default rounding mode,
+  // unless otherwise specified.
+  TableFieldSchema.RoundingMode default_rounding_mode = 26
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Defines the time travel window in hours. The value can be from 48
+  // to 168 hours (2 to 7 days). The default value is 168 hours if this is not
+  // set.
+  google.protobuf.Int64Value max_time_travel_hours = 23
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. Tags for the dataset. To provide tags as inputs, use the
+  // `resourceTags` field.
+  repeated GcpTag tags = 24
+      [deprecated = true, (google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Updates storage_billing_model for the dataset.
+  StorageBillingModel storage_billing_model = 25
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. Restriction config for all tables and dataset. If
+  // set, restrict certain accesses on the dataset and all its tables based on
+  // the config. See [Data
+  // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress)
+  // for more details.
+  RestrictionConfig restrictions = 27 [
+    (google.api.field_behavior) = OPTIONAL,
+    (google.api.field_behavior) = OUTPUT_ONLY
+  ];
+
+  // Optional. The [tags](https://cloud.google.com/bigquery/docs/tags) attached
+  // to this dataset. Tag keys are globally unique. Tag key is expected to be in
+  // the namespaced format, for example "123456789012/environment" where
+  // 123456789012 is the ID of the parent organization or project resource for
+  // this tag key. Tag value is expected to be the short name, for example
+  // "Production". See [Tag
+  // definitions](https://cloud.google.com/iam/docs/tags-access-control#definitions)
+  // for more details.
+  map<string, string> resource_tags = 30
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A global tag managed by Resource Manager.
+// https://cloud.google.com/iam/docs/tags-access-control#definitions
+message GcpTag {
+  // Required. The namespaced friendly name of the tag key, e.g.
+  // "12345/environment" where 12345 is org id.
+  string tag_key = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The friendly short name of the tag value, e.g. "production".
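To make the namespaced-key convention for resource_tags concrete, a one-line hedged sketch of the map as it would appear in a request payload (the values are illustrative only):

    // '123456789012/environment' is a namespaced tag key (parent resource ID
    // plus key short name); 'production' is the tag value's short name.
    dataset.resourceTags = {'123456789012/environment': 'production'};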
+ string tag_value = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// A dataset source type which refers to another BigQuery dataset. +message LinkedDatasetSource { + // The source dataset reference contains project numbers and not project ids. + DatasetReference source_dataset = 1; +} + +// Metadata about the Linked Dataset. +message LinkedDatasetMetadata { + // Specifies whether Linked Dataset is currently in a linked state or not. + enum LinkState { + // The default value. + // Default to the LINKED state. + LINK_STATE_UNSPECIFIED = 0; + + // Normal Linked Dataset state. Data is queryable via the Linked Dataset. + LINKED = 1; + + // Data publisher or owner has unlinked this Linked Dataset. It means you + // can no longer query or see the data in the Linked Dataset. + UNLINKED = 2; + } + + // Output only. Specifies whether Linked Dataset is currently in a linked + // state or not. + LinkState link_state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request format for getting information about a dataset. +message GetDatasetRequest { + // DatasetView specifies which dataset information is returned. + enum DatasetView { + // The default value. + // Default to the FULL view. + DATASET_VIEW_UNSPECIFIED = 0; + + // Includes metadata information for the dataset, such as location, + // etag, lastModifiedTime, etc. + METADATA = 1; + + // Includes ACL information for the dataset, which defines dataset access + // for one or more entities. + ACL = 2; + + // Includes both dataset metadata and ACL information. + FULL = 3; + } + + // Required. Project ID of the requested dataset + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the requested dataset + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Specifies the view that determines which dataset information is + // returned. By default, metadata and ACL information are returned. + DatasetView dataset_view = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Request format for inserting a dataset. +message InsertDatasetRequest { + // Required. Project ID of the new dataset + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Datasets resource to use for the new dataset + Dataset dataset = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Message for updating or patching a dataset. +message UpdateOrPatchDatasetRequest { + // Required. Project ID of the dataset being updated + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the dataset being updated + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Datasets resource which will replace or patch the specified + // dataset. + Dataset dataset = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Request format for deleting a dataset. +message DeleteDatasetRequest { + // Required. Project ID of the dataset being deleted + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of dataset being deleted + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // If True, delete all the tables in the dataset. + // If False and the dataset contains tables, the request will fail. + // Default is False + bool delete_contents = 3; +} + +message ListDatasetsRequest { + // Required. Project ID of the datasets to be listed + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // The maximum number of results to return in a single response page. 
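The DatasetView enum defined above is easiest to read alongside a request. A hedged sketch, assuming the generated client accepts enum values by their string names (standard protobuf.js behavior) and reusing the datasetClient from the earlier sketches:

    const [ds] = await datasetClient.getDataset({
      projectId: 'my-project',
      datasetId: 'my_dataset',
      datasetView: 'METADATA', // skip ACLs; 'FULL' (the default) returns both
    });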
Leverage the page tokens to iterate through the entire collection.
+  google.protobuf.UInt32Value max_results = 2;
+
+  // Page token, returned by a previous call, to request the next page of
+  // results
+  string page_token = 3;
+
+  // Whether to list all datasets, including hidden ones
+  bool all = 4;
+
+  // An expression for filtering the results of the request by label.
+  // The syntax is `labels.<name>[:<value>]`.
+  // Multiple filters can be ANDed together by connecting with a space.
+  // Example: `labels.department:receiving labels.active`.
+  // See [Filtering datasets using
+  // labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+  // for details.
+  string filter = 5;
+}
+
+// A dataset resource with only a subset of fields, to be returned in a list of
+// datasets.
+message ListFormatDataset {
+  // The resource type.
+  // This property always returns the value "bigquery#dataset"
+  string kind = 1;
+
+  // The fully-qualified, unique, opaque ID of the dataset.
+  string id = 2;
+
+  // The dataset reference.
+  // Use this property to access specific parts of the dataset's ID, such as
+  // project ID or dataset ID.
+  DatasetReference dataset_reference = 3;
+
+  // The labels associated with this dataset.
+  // You can use these to organize and group your datasets.
+  map<string, string> labels = 4;
+
+  // An alternate name for the dataset. The friendly name is purely
+  // decorative in nature.
+  google.protobuf.StringValue friendly_name = 5;
+
+  // The geographic location where the dataset resides.
+  string location = 6;
+}
+
+// Response format for a page of results when listing datasets.
+message DatasetList {
+  // Output only. The resource type.
+  // This property always returns the value "bigquery#datasetList"
+  string kind = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A hash value of the results page. You can use this property
+  // to determine if the page has changed since the last request.
+  string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // A token that can be used to request the next results page. This property
+  // is omitted on the final results page.
+  string next_page_token = 3;
+
+  // An array of the dataset resources in the project.
+  // Each resource contains basic information.
+  // For full information about a particular dataset resource, use the
+  // Datasets: get method. This property is omitted when there are no datasets
+  // in the project.
+  repeated ListFormatDataset datasets = 4;
+
+  // A list of skipped locations that were unreachable. For more information
+  // about BigQuery locations, see:
+  // https://cloud.google.com/bigquery/docs/locations. Example: "europe-west5"
+  repeated string unreachable = 5;
+}
+
+// Request format for undeleting a dataset.
+message UndeleteDatasetRequest {
+  // Required. Project ID of the dataset to be undeleted
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the dataset to be undeleted
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The exact time when the dataset was deleted. If not specified,
+  // the most recently deleted version is undeleted. Undeleting a dataset
+  // using deletion time is not supported.
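A sketch of the list request using the filter syntax documented above. The listDatasets method and the tuple-style response are assumptions based on how GAPIC surfaces unary calls; the response fields come straight from DatasetList:

    const [page] = await datasetClient.listDatasets({
      projectId: 'my-project',
      all: true, // include hidden datasets
      filter: 'labels.department:receiving labels.active',
      maxResults: {value: 50},
    });
    for (const ds of page.datasets ?? []) {
      console.log(ds.datasetReference?.datasetId);
    }
    // page.nextPageToken, when present, feeds the next request's pageToken.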
+ google.protobuf.Timestamp deletion_time = 3 + [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/dataset_reference.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/dataset_reference.proto.baseline new file mode 100755 index 000000000..03695a4c4 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/dataset_reference.proto.baseline @@ -0,0 +1,34 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "DatasetReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Identifier for a dataset. +message DatasetReference { + // Required. A unique ID for this dataset, without the project name. The ID + // must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + // The maximum length is 1,024 characters. + string dataset_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The ID of the project containing this dataset. + string project_id = 2 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/decimal_target_types.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/decimal_target_types.proto.baseline new file mode 100755 index 000000000..72266b110 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/decimal_target_types.proto.baseline @@ -0,0 +1,40 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "DecimalTargetTypesProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// The data types that could be used as a target type when converting decimal +// values. +enum DecimalTargetType { + // Invalid type. + DECIMAL_TARGET_TYPE_UNSPECIFIED = 0; + + // Decimal values could be converted to NUMERIC + // type. + NUMERIC = 1; + + // Decimal values could be converted to BIGNUMERIC + // type. + BIGNUMERIC = 2; + + // Decimal values could be converted to STRING type. 
+ STRING = 3; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/encryption_config.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/encryption_config.proto.baseline new file mode 100755 index 000000000..ac7ee1679 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/encryption_config.proto.baseline @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "EncryptionConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Configuration for Cloud KMS encryption settings. +message EncryptionConfiguration { + // Optional. Describes the Cloud KMS encryption key that will be used to + // protect destination BigQuery table. The BigQuery Service Account associated + // with your project requires access to this encryption key. + google.protobuf.StringValue kms_key_name = 1 + [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/error.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/error.proto.baseline new file mode 100755 index 000000000..9cab21c4f --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/error.proto.baseline @@ -0,0 +1,36 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Error details. +message ErrorProto { + // A short error code that summarizes the error. + string reason = 1; + + // Specifies where the error occurred, if present. + string location = 2; + + // Debugging information. This property is internal to Google and should not + // be used. + string debug_info = 3; + + // A human-readable description of the error. 
+  string message = 4;
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto.baseline
new file mode 100755
index 000000000..70d0f1f40
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto.baseline
@@ -0,0 +1,39 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "ExternalCatalogDatasetOptionsProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Options defining open source compatible datasets living in the BigQuery
+// catalog. Contains metadata of open source database, schema
+// or namespace represented by the current dataset.
+message ExternalCatalogDatasetOptions {
+  // Optional. A map of key value pairs defining the parameters and properties
+  // of the open source schema. Maximum size of 2Mib.
+  map<string, string> parameters = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The storage location URI for all tables in the dataset.
+  // Equivalent to hive metastore's database locationUri. Maximum length of
+  // 1024 characters.
+  string default_storage_location_uri = 2
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_catalog_table_options.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_catalog_table_options.proto.baseline
new file mode 100755
index 000000000..b0833d441
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_catalog_table_options.proto.baseline
@@ -0,0 +1,87 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "ExternalCatalogTableOptionsProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Metadata about open source compatible table. The fields contained in
+// these options correspond to hive metastore's table level properties.
+message ExternalCatalogTableOptions {
+  // Optional. A map of key value pairs defining the parameters and properties
+  // of the open source table. Corresponds with hive metastore table
+  // parameters. Maximum size of 4Mib.
+  map<string, string> parameters = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A storage descriptor containing information about the physical
+  // storage of this table.
+  StorageDescriptor storage_descriptor = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The connection specifying the credentials to be used to read
+  // external storage, such as Azure Blob, Cloud Storage, or S3. The connection
+  // is needed to read the open source table from BigQuery Engine. The
+  // connection_id can have the form
+  // `<project_id>.<location_id>.<connection_id>` or
+  // `projects/<project_id>/locations/<location_id>/connections/<connection_id>`.
+  string connection_id = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Contains information about how a table's data is stored and accessed by open
+// source query engines.
+message StorageDescriptor {
+  // Optional. The physical location of the table
+  // (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or
+  // `gs://spark-dataproc-data/pangea-data/*`).
+  // The maximum length is 2056 bytes.
+  string location_uri = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the fully qualified class name of the InputFormat
+  // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").
+  // The maximum length is 128 characters.
+  string input_format = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the fully qualified class name of the OutputFormat
+  // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").
+  // The maximum length is 128 characters.
+  string output_format = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Serializer and deserializer information.
+  SerDeInfo serde_info = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Serializer and deserializer information.
+message SerDeInfo {
+  // Optional. Name of the SerDe.
+  // The maximum length is 256 characters.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. Specifies a fully-qualified class name of the serialization
+  // library that is responsible for the translation of data between table
+  // representation and the underlying low-level input and output format
+  // structures. The maximum length is 256 characters.
+  string serialization_library = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Key-value pairs that define the initialization parameters for
+  // the serialization library.
+  // Maximum size 10 Kib.
+  map<string, string> parameters = 3 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_data_config.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_data_config.proto.baseline
new file mode 100755
index 000000000..610af982a
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_data_config.proto.baseline
@@ -0,0 +1,499 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/decimal_target_types.proto"; +import "google/cloud/bigquery/v2/file_set_specification_type.proto"; +import "google/cloud/bigquery/v2/hive_partitioning.proto"; +import "google/cloud/bigquery/v2/json_extension.proto"; +import "google/cloud/bigquery/v2/map_target_type.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ExternalDataConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Options for external data sources. +message AvroOptions { + // Optional. If sourceFormat is set to "AVRO", indicates whether to interpret + // logical types as the corresponding BigQuery data type (for example, + // TIMESTAMP), instead of using the raw type (for example, INTEGER). + google.protobuf.BoolValue use_avro_logical_types = 1 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Parquet Options for load and make external tables. +message ParquetOptions { + // Optional. Indicates whether to infer Parquet ENUM logical type as STRING + // instead of BYTES by default. + google.protobuf.BoolValue enum_as_string = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates whether to use schema inference specifically for + // Parquet LIST logical type. + google.protobuf.BoolValue enable_list_inference = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates how to represent a Parquet map if present. + MapTargetType map_target_type = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Information related to a CSV data source. +message CsvOptions { + // Optional. The separator character for fields in a CSV file. The separator + // is interpreted as a single byte. For files encoded in ISO-8859-1, any + // single character can be used as a separator. For files encoded in UTF-8, + // characters represented in decimal range 1-127 (U+0001-U+007F) can be used + // without any modification. UTF-8 characters encoded with multiple bytes + // (i.e. U+0080 and above) will have only the first byte used for separating + // fields. The remaining bytes will be treated as a part of the field. + // BigQuery also supports the escape sequence "\t" (U+0009) to specify a tab + // separator. The default value is comma (",", U+002C). + string field_delimiter = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The number of rows at the top of a CSV file that BigQuery will + // skip when reading the data. The default value is 0. This property is + // useful if you have header rows in the file that should be skipped. + // When autodetect is on, the behavior is the following: + // + // * skipLeadingRows unspecified - Autodetect tries to detect headers in the + // first row. If they are not detected, the row is read as data. Otherwise + // data is read starting from the second row. + // * skipLeadingRows is 0 - Instructs autodetect that there are no headers and + // data should be read starting from the first row. + // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect + // headers in row N. If headers are not detected, row N is just skipped. + // Otherwise row N is used to extract column names for the detected schema. 
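The skipLeadingRows/autodetect interplay spelled out above is a common stumbling block, so a hedged sketch of a CsvOptions payload may help. Wrapper types (Int64Value, BoolValue, StringValue) are shown as {value: ...} objects, which matches the generated interfaces; treat the shapes as illustrative:

    const csvOptions = {
      fieldDelimiter: '\t',           // the "\t" escape sequence means tab
      skipLeadingRows: {value: 1},    // row 1 is a header row
      quote: {value: '"'},
      allowQuotedNewlines: {value: true},
      encoding: 'UTF-8',
    };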
+  google.protobuf.Int64Value skip_leading_rows = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The value that is used to quote data sections in a CSV file.
+  // BigQuery converts the string to ISO-8859-1 encoding, and then uses the
+  // first byte of the encoded string to split the data in its raw, binary
+  // state.
+  // The default value is a double-quote (").
+  // If your data does not contain quoted sections,
+  // set the property value to an empty string.
+  // If your data contains quoted newline characters, you must also set the
+  // allowQuotedNewlines property to true.
+  // To include the specific quote character within a quoted value, precede it
+  // with an additional matching quote character. For example, if you want to
+  // escape the default character ' " ', use ' "" '.
+  google.protobuf.StringValue quote = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Indicates if BigQuery should allow quoted data sections that
+  // contain newline characters in a CSV file. The default value is false.
+  google.protobuf.BoolValue allow_quoted_newlines = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Indicates if BigQuery should accept rows that are missing
+  // trailing optional columns. If true, BigQuery treats missing trailing
+  // columns as null values.
+  // If false, records with missing trailing columns are treated as bad
+  // records, and if there are too many bad records, an invalid error is
+  // returned in the job result. The default value is false.
+  google.protobuf.BoolValue allow_jagged_rows = 5
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The character encoding of the data.
+  // The supported values are UTF-8, ISO-8859-1, UTF-16BE, UTF-16LE, UTF-32BE,
+  // and UTF-32LE. The default value is UTF-8.
+  // BigQuery decodes the data after the raw, binary data has been split using
+  // the values of the quote and fieldDelimiter properties.
+  string encoding = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Indicates if the embedded ASCII control characters (the first 32
+  // characters in the ASCII-table, from '\x00' to '\x1F') are preserved.
+  google.protobuf.BoolValue preserve_ascii_control_characters = 7
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies a string that represents a null value in a CSV file.
+  // For example, if you specify "\N", BigQuery interprets "\N" as a null value
+  // when querying a CSV file.
+  // The default value is the empty string. If you set this property to a
+  // custom value, BigQuery throws an error if an empty string is present for
+  // all data types except for STRING and BYTE. For STRING and BYTE columns,
+  // BigQuery interprets the empty string as an empty value.
+  google.protobuf.StringValue null_marker = 8
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Json Options for load and make external tables.
+message JsonOptions {
+  // Optional. The character encoding of the data.
+  // The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE,
+  // and UTF-32LE. The default value is UTF-8.
+  string encoding = 1 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Information related to a Bigtable column.
+message BigtableColumn {
+  // [Required] Qualifier of the column.
+  // Columns in the parent column family that has this exact qualifier are
+  // exposed as `<family field name>.<column field name>` field.
+  // If the qualifier is valid UTF-8 string, it can be specified in the
+  // qualifier_string field. Otherwise, a base-64 encoded value must be set to
+  // qualifier_encoded.
+  // The column field name is the same as the column qualifier. However, if the
+  // qualifier is not a valid BigQuery field identifier i.e. does not match
+  // [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as field_name.
+  google.protobuf.BytesValue qualifier_encoded = 1;
+
+  // Qualifier string.
+  google.protobuf.StringValue qualifier_string = 2;
+
+  // Optional. If the qualifier is not a valid BigQuery field identifier i.e.
+  // does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided
+  // as the column field name and is used as field name in queries.
+  string field_name = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The type to convert the value in cells of this column.
+  // The values are expected to be encoded using HBase Bytes.toBytes function
+  // when using the BINARY encoding value.
+  // Following BigQuery types are allowed (case-sensitive):
+  //
+  // * BYTES
+  // * STRING
+  // * INTEGER
+  // * FLOAT
+  // * BOOLEAN
+  // * JSON
+  //
+  // Default type is BYTES.
+  // 'type' can also be set at the column family level. However, the setting at
+  // this level takes precedence if 'type' is set at both levels.
+  string type = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The encoding of the values when the type is not STRING.
+  // Acceptable encoding values are:
+  // TEXT - indicates values are alphanumeric text strings.
+  // BINARY - indicates values are encoded using HBase Bytes.toBytes family of
+  // functions.
+  // 'encoding' can also be set at the column family level. However, the
+  // setting at this level takes precedence if 'encoding' is set at both
+  // levels.
+  string encoding = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this is set, only the latest version of the value in this
+  // column is exposed.
+  // 'onlyReadLatest' can also be set at the column family level. However, the
+  // setting at this level takes precedence if 'onlyReadLatest' is set at both
+  // levels.
+  google.protobuf.BoolValue only_read_latest = 6
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Information related to a Bigtable column family.
+message BigtableColumnFamily {
+  // Identifier of the column family.
+  string family_id = 1;
+
+  // Optional. The type to convert the value in cells of this column family.
+  // The values are expected to be encoded using HBase Bytes.toBytes function
+  // when using the BINARY encoding value.
+  // Following BigQuery types are allowed (case-sensitive):
+  //
+  // * BYTES
+  // * STRING
+  // * INTEGER
+  // * FLOAT
+  // * BOOLEAN
+  // * JSON
+  //
+  // Default type is BYTES.
+  // This can be overridden for a specific column by listing that column in
+  // 'columns' and specifying a type for it.
+  string type = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The encoding of the values when the type is not STRING.
+  // Acceptable encoding values are:
+  // TEXT - indicates values are alphanumeric text strings.
+  // BINARY - indicates values are encoded using HBase Bytes.toBytes family of
+  // functions.
+  // This can be overridden for a specific column by listing that column in
+  // 'columns' and specifying an encoding for it.
+  string encoding = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Lists of columns that should be exposed as individual fields as
+  // opposed to a list of (column name, value) pairs.
+  // All columns whose qualifier matches a qualifier in this list can be
+  // accessed as `<family field name>.<column field name>`.
Other columns can be accessed as a list through
+  // the `<family field name>.Column` field.
+  repeated BigtableColumn columns = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this is set, only the latest version of the values is
+  // exposed for all columns in this column family.
+  // This can be overridden for a specific column by listing that column in
+  // 'columns' and specifying a different setting
+  // for that column.
+  google.protobuf.BoolValue only_read_latest = 5
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Options specific to Google Cloud Bigtable data sources.
+message BigtableOptions {
+  // Optional. List of column families to expose in the table schema along with
+  // their types.
+  // This list restricts the column families that can be referenced in queries
+  // and specifies their value types.
+  // You can use this list to do type conversions - see the 'type' field for
+  // more details.
+  // If you leave this list empty, all column families are present in the table
+  // schema and their values are read as BYTES.
+  // During a query only the column families referenced in that query are read
+  // from Bigtable.
+  repeated BigtableColumnFamily column_families = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this field is true, then the column families that are not
+  // specified in the columnFamilies list are not exposed in the table schema.
+  // Otherwise, they are read with BYTES type values.
+  // The default value is false.
+  google.protobuf.BoolValue ignore_unspecified_column_families = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this field is true, then the rowkey column families will be
+  // read and converted to string. Otherwise they are read with BYTES type
+  // values and users need to manually cast them with CAST if necessary.
+  // The default value is false.
+  google.protobuf.BoolValue read_rowkey_as_string = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this field is true, then each column family will be read as a
+  // single JSON column. Otherwise they are read as a repeated cell structure
+  // containing timestamp/value tuples. The default value is false.
+  google.protobuf.BoolValue output_column_families_as_json = 4
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Options specific to Google Sheets data sources.
+message GoogleSheetsOptions {
+  // Optional. The number of rows at the top of a sheet that BigQuery will skip
+  // when reading the data. The default value is 0. This property is useful if
+  // you have header rows that should be skipped. When autodetect is on,
+  // the behavior is the following:
+  //
+  // * skipLeadingRows unspecified - Autodetect tries to detect headers in the
+  // first row. If they are not detected, the row is read as data. Otherwise
+  // data is read starting from the second row.
+  // * skipLeadingRows is 0 - Instructs autodetect that there are no headers
+  // and data should be read starting from the first row.
+  // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect
+  // headers in row N. If headers are not detected, row N is just skipped.
+  // Otherwise row N is used to extract column names for the detected schema.
+  google.protobuf.Int64Value skip_leading_rows = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Range of a sheet to query from. Only used when non-empty.
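To summarize the type/encoding precedence rules spelled out in the Bigtable messages above, a hedged payload sketch; the shapes come from BigtableOptions and its children, everything else is illustrative:

    const bigtableOptions = {
      columnFamilies: [{
        familyId: 'stats',
        type: 'STRING',                // family-level default for all cells
        onlyReadLatest: {value: true},
        columns: [
          // Column-level settings take precedence over the family level.
          {qualifierString: {value: 'views'}, type: 'INTEGER'},
        ],
      }],
      readRowkeyAsString: {value: true},
    };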
Typical format: sheet_name!top_left_cell_id:bottom_right_cell_id
+  // For example: sheet1!A1:B20
+  string range = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+message ExternalDataConfiguration {
+  // Supported Object Metadata Types.
+  enum ObjectMetadata {
+    // Unspecified by default.
+    OBJECT_METADATA_UNSPECIFIED = 0;
+
+    // A synonym for `SIMPLE`.
+    DIRECTORY = 1;
+
+    // Directory listing of objects.
+    SIMPLE = 2;
+  }
+
+  // MetadataCacheMode identifies if the table should use metadata caching for
+  // files from external source (e.g. Google Cloud Storage).
+  enum MetadataCacheMode {
+    // Unspecified metadata cache mode.
+    METADATA_CACHE_MODE_UNSPECIFIED = 0;
+
+    // Set this mode to trigger automatic background refresh of metadata cache
+    // from the external source. Queries will use the latest available cache
+    // version within the table's maxStaleness interval.
+    AUTOMATIC = 1;
+
+    // Set this mode to enable triggering manual refresh of the metadata cache
+    // from external source. Queries will use the latest manually triggered
+    // cache version within the table's maxStaleness interval.
+    MANUAL = 2;
+  }
+
+  // [Required] The fully-qualified URIs that point to your data in Google
+  // Cloud. For Google Cloud Storage URIs:
+  // Each URI can contain one '*' wildcard character and it must come after
+  // the 'bucket' name.
+  // Size limits related to load jobs apply to external data sources.
+  // For Google Cloud Bigtable URIs:
+  // Exactly one URI can be specified and it has to be a fully specified and
+  // valid HTTPS URL for a Google Cloud Bigtable table.
+  // For Google Cloud Datastore backups, exactly one URI can be specified.
+  // Also, the '*' wildcard character is not allowed.
+  repeated string source_uris = 1;
+
+  // Optional. Specifies how source URIs are interpreted for constructing the
+  // file set to load. By default source URIs are expanded against the
+  // underlying storage. Other options include specifying manifest files. Only
+  // applicable to object storage systems.
+  FileSetSpecType file_set_spec_type = 25
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The schema for the data.
+  // Schema is required for CSV and JSON formats if autodetect is not on.
+  // Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups,
+  // Avro, ORC and Parquet formats.
+  TableSchema schema = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // [Required] The data format.
+  // For CSV files, specify "CSV".
+  // For Google sheets, specify "GOOGLE_SHEETS".
+  // For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
+  // For Avro files, specify "AVRO".
+  // For Google Cloud Datastore backups, specify "DATASTORE_BACKUP".
+  // For Apache Iceberg tables, specify "ICEBERG".
+  // For ORC files, specify "ORC".
+  // For Parquet files, specify "PARQUET".
+  // [Beta] For Google Cloud Bigtable, specify "BIGTABLE".
+  string source_format = 3;
+
+  // Optional. The maximum number of bad records that BigQuery can ignore when
+  // reading data. If the number of bad records exceeds this value, an invalid
+  // error is returned in the job result. The default value is 0, which
+  // requires that all records are valid. This setting is ignored for Google
+  // Cloud Bigtable, Google Cloud Datastore backups, Avro, ORC and Parquet
+  // formats.
+  google.protobuf.Int32Value max_bad_records = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Try to detect schema and format options automatically.
+  // Any option specified explicitly will be honored.
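Pulling the preceding fields together, a minimal hedged sketch of an ExternalDataConfiguration payload for CSV data. Only fields defined in this message are used; how the configuration is attached to a table is out of scope here and the surrounding plumbing is assumed:

    const externalDataConfiguration = {
      sourceUris: ['gs://my-bucket/data/*.csv'], // one '*', after the bucket
      sourceFormat: 'CSV',
      autodetect: {value: true},
      maxBadRecords: {value: 10},
      csvOptions, // e.g. the CsvOptions sketch shown earlier
    };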
+  google.protobuf.BoolValue autodetect = 5;
+
+  // Optional. Indicates if BigQuery should allow extra values that are not
+  // represented in the table schema.
+  // If true, the extra values are ignored.
+  // If false, records with extra columns are treated as bad records, and if
+  // there are too many bad records, an invalid error is returned in the job
+  // result.
+  // The default value is false.
+  // The sourceFormat property determines what BigQuery treats as an extra
+  // value:
+  // CSV: Trailing columns
+  // JSON: Named values that don't match any column names
+  // Google Cloud Bigtable: This setting is ignored.
+  // Google Cloud Datastore backups: This setting is ignored.
+  // Avro: This setting is ignored.
+  // ORC: This setting is ignored.
+  // Parquet: This setting is ignored.
+  google.protobuf.BoolValue ignore_unknown_values = 6
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The compression type of the data source.
+  // Possible values include GZIP and NONE. The default value is NONE.
+  // This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore
+  // backups, Avro, ORC and Parquet
+  // formats. An empty string is an invalid value.
+  string compression = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Additional properties to set if sourceFormat is set to CSV.
+  CsvOptions csv_options = 8 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Additional properties to set if sourceFormat is set to JSON.
+  JsonOptions json_options = 26 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Additional options if sourceFormat is set to BIGTABLE.
+  BigtableOptions bigtable_options = 9
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Additional options if sourceFormat is set to GOOGLE_SHEETS.
+  GoogleSheetsOptions google_sheets_options = 10
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. When set, configures hive partitioning support. Not all storage
+  // formats support hive partitioning -- requesting hive partitioning on an
+  // unsupported format will lead to an error, as will providing an invalid
+  // specification.
+  HivePartitioningOptions hive_partitioning_options = 13
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The connection specifying the credentials to be used to read
+  // external storage, such as Azure Blob, Cloud Storage, or S3. The
+  // connection_id can have the form
+  // `{project_id}.{location_id}.{connection_id}` or
+  // `projects/{project_id}/locations/{location_id}/connections/{connection_id}`.
+  string connection_id = 14 [(google.api.field_behavior) = OPTIONAL];
+
+  // Defines the list of possible SQL data types to which the source decimal
+  // values are converted. This list and the precision and the scale parameters
+  // of the decimal field determine the target type. In the order of NUMERIC,
+  // BIGNUMERIC, and STRING, a
+  // type is picked if it is in the specified list and if it supports the
+  // precision and the scale. STRING supports all precision and scale values.
+  // If none of the listed types supports the precision and the scale, the type
+  // supporting the widest range in the specified list is picked, and if a
+  // value exceeds the supported range when reading the data, an error will be
+  // thrown.
+  //
+  // Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"].
+  // If (precision,scale) is:
+  //
+  // * (38,9) -> NUMERIC;
+  // * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits);
+  // * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits);
+  // * (76,38) -> BIGNUMERIC;
+  // * (77,38) -> BIGNUMERIC (error if value exceeds supported range).
+  //
+  // This field cannot contain duplicate types. The order of the types in this
+  // field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as
+  // ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over
+  // BIGNUMERIC.
+  //
+  // Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other
+  // file formats.
+  repeated DecimalTargetType decimal_target_types = 16;
+
+  // Optional. Additional properties to set if sourceFormat is set to AVRO.
+  AvroOptions avro_options = 17 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Load option to be used together with source_format
+  // newline-delimited JSON to indicate that a variant of JSON is being loaded.
+  // To load newline-delimited GeoJSON, specify GEOJSON (and source_format must
+  // be set to NEWLINE_DELIMITED_JSON).
+  JsonExtension json_extension = 18 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Additional properties to set if sourceFormat is set to PARQUET.
+  ParquetOptions parquet_options = 19 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. ObjectMetadata is used to create Object Tables. Object Tables
+  // contain a listing of objects (with their metadata) found at the
+  // source_uris. If ObjectMetadata is set, source_format should be omitted.
+  //
+  // Currently SIMPLE is the only supported Object Metadata type.
+  optional ObjectMetadata object_metadata = 22
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. When creating an external table, the user can provide a
+  // reference file with the table schema. This is enabled for the following
+  // formats: AVRO, PARQUET, ORC.
+  google.protobuf.StringValue reference_file_schema_uri = 23
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Metadata Cache Mode for the table. Set this to enable caching of
+  // metadata from external data source.
+  MetadataCacheMode metadata_cache_mode = 24
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_dataset_reference.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_dataset_reference.proto.baseline
new file mode 100755
index 000000000..8d3a3b4c9
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/external_dataset_reference.proto.baseline
@@ -0,0 +1,46 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "ExternalDatasetReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; +option (google.api.resource_definition) = { + type: "bigqueryconnection.googleapis.com/Connection" + pattern: "projects/{project}/locations/{location}/connections/{connection}" +}; + +// Configures the access a dataset defined in an external metadata storage. +message ExternalDatasetReference { + // Required. External source that backs this dataset. + string external_source = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The connection id that is used to access the external_source. + // + // Format: + // projects/{project_id}/locations/{location_id}/connections/{connection_id} + string connection = 3 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigqueryconnection.googleapis.com/Connection" + } + ]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/file_set_specification_type.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/file_set_specification_type.proto.baseline new file mode 100755 index 000000000..1068d20eb --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/file_set_specification_type.proto.baseline @@ -0,0 +1,34 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "FileSetSpecificationTypeProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This enum defines how to interpret source URIs for load jobs and external +// tables. +enum FileSetSpecType { + // This option expands source URIs by listing files from the object store. It + // is the default behavior if FileSetSpecType is not set. + FILE_SET_SPEC_TYPE_FILE_SYSTEM_MATCH = 0; + + // This option indicates that the provided URIs are newline-delimited manifest + // files, with one URI per line. Wildcard URIs are not supported. + FILE_SET_SPEC_TYPE_NEW_LINE_DELIMITED_MANIFEST = 1; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/hive_partitioning.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/hive_partitioning.proto.baseline new file mode 100755 index 000000000..76872bd1a --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/hive_partitioning.proto.baseline @@ -0,0 +1,86 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "HivePartitioningProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Options for configuring hive partitioning detection.
+message HivePartitioningOptions {
+  // Optional. When set, what mode of hive partitioning to use when reading
+  // data. The following modes are supported:
+  //
+  // * AUTO: automatically infer partition key name(s) and type(s).
+  //
+  // * STRINGS: automatically infer partition key name(s). All types are
+  // strings.
+  //
+  // * CUSTOM: partition key schema is encoded in the source URI prefix.
+  //
+  // Not all storage formats support hive partitioning. Requesting hive
+  // partitioning on an unsupported format will lead to an error.
+  // Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
+  string mode = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. When hive partition detection is requested, a common prefix for
+  // all source URIs is required. The prefix must end immediately before
+  // the partition key encoding begins. For example, consider files following
+  // this data layout:
+  //
+  // gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
+  //
+  // gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
+  //
+  // When hive partitioning is requested with either AUTO or STRINGS detection,
+  // the common prefix can be either of gs://bucket/path_to_table or
+  // gs://bucket/path_to_table/.
+  //
+  // CUSTOM detection requires encoding the partitioning schema immediately
+  // after the common prefix. For CUSTOM, any of
+  //
+  // * gs://bucket/path_to_table/{dt:DATE}/{country:STRING}/{id:INTEGER}
+  //
+  // * gs://bucket/path_to_table/{dt:STRING}/{country:STRING}/{id:INTEGER}
+  //
+  // * gs://bucket/path_to_table/{dt:DATE}/{country:STRING}/{id:STRING}
+  //
+  // would all be valid source URI prefixes.
+  string source_uri_prefix = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If set to true, queries over this table require a partition
+  // filter that can be used for partition elimination to be specified.
+  //
+  // Note that this field should only be true when creating a permanent
+  // external table or querying a temporary external table.
+  //
+  // Hive-partitioned loads with require_partition_filter explicitly set to
+  // true will fail.
+  google.protobuf.BoolValue require_partition_filter = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. For permanent external tables, this field is populated with
+  // the hive partition keys in the order they were inferred. The types of the
+  // partition keys can be deduced by checking the table schema (which will
+  // include the partition keys). Not every API will populate this field in the
+  // output. For example, Tables.Get will populate it, but Tables.List will not
+  // contain this field.
+ repeated string fields = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job.proto.baseline new file mode 100755 index 000000000..b15e1fb42 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job.proto.baseline @@ -0,0 +1,738 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/v2/data_format_options.proto"; +import "google/cloud/bigquery/v2/dataset_reference.proto"; +import "google/cloud/bigquery/v2/error.proto"; +import "google/cloud/bigquery/v2/job_config.proto"; +import "google/cloud/bigquery/v2/job_creation_reason.proto"; +import "google/cloud/bigquery/v2/job_reference.proto"; +import "google/cloud/bigquery/v2/job_stats.proto"; +import "google/cloud/bigquery/v2/job_status.proto"; +import "google/cloud/bigquery/v2/query_parameter.proto"; +import "google/cloud/bigquery/v2/session_info.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Job Service. +// +// It should not be relied on for production use cases at this time. +service JobService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only," + "https://www.googleapis.com/auth/devstorage.full_control," + "https://www.googleapis.com/auth/devstorage.read_only," + "https://www.googleapis.com/auth/devstorage.read_write"; + + // Requests that a job be cancelled. This call will return immediately, and + // the client will need to poll for the job status to see if the cancel + // completed successfully. Cancelled jobs may still incur costs. + rpc CancelJob(CancelJobRequest) returns (JobCancelResponse) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/jobs/{job_id=*}/cancel" + }; + } + + // Returns information about a specific job. Job information is available for + // a six month period after creation. Requires that you're the person who ran + // the job, or have the Is Owner project role. + rpc GetJob(GetJobRequest) returns (Job) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/jobs/{job_id=*}" + }; + } + + // Starts a new asynchronous job. 
+ // + // This API has two different kinds of endpoint URIs, as this method supports + // a variety of use cases. + // + // * The *Metadata* URI is used for most interactions, as it accepts the job + // configuration directly. + // * The *Upload* URI is ONLY for the case when you're sending both a load job + // configuration and a data stream together. In this case, the Upload URI + // accepts the job configuration and the data as two distinct multipart MIME + // parts. + rpc InsertJob(InsertJobRequest) returns (Job) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/jobs" + body: "job" + }; + } + + // Requests the deletion of the metadata of a job. This call returns when the + // job's metadata is deleted. + rpc DeleteJob(DeleteJobRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/bigquery/v2/projects/{project_id=*}/jobs/{job_id=*}/delete" + }; + } + + // Lists all jobs that you started in the specified project. Job information + // is available for a six month period after creation. The job list is sorted + // in reverse chronological order, by job creation time. Requires the Can View + // project role, or the Is Owner project role if you set the allUsers + // property. + rpc ListJobs(ListJobsRequest) returns (JobList) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/jobs" + }; + } + + // RPC to get the results of a query job. + rpc GetQueryResults(GetQueryResultsRequest) + returns (GetQueryResultsResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/queries/{job_id=*}" + }; + } + + // Runs a BigQuery SQL query synchronously and returns query results if the + // query completes within a specified timeout. + rpc Query(PostQueryRequest) returns (QueryResponse) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/queries" + body: "query_request" + }; + } +} + +message Job { + // Output only. The type of the resource. + string kind = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A hash of this resource. + string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Opaque ID field of the job. + string id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A URL that can be used to access the resource again. + string self_link = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Email address of the user who ran the job. + string user_email = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Describes the job configuration. + JobConfiguration configuration = 6 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Reference describing the unique-per-user name of the job. + JobReference job_reference = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Information about the job, including starting time and ending + // time of the job. + JobStatistics statistics = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The status of this job. Examine this value when polling an + // asynchronous job to see if the job is complete. + JobStatus status = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. [Full-projection-only] String representation of identity of + // requesting party. Populated for both first- and third-party identities. + // Only present for APIs that support third-party identities. + string principal_subject = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
The reason why a Job was created. + // [Preview](https://cloud.google.com/products/#product-launch-stages) + JobCreationReason job_creation_reason = 14 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Describes format of a jobs cancellation request. +message CancelJobRequest { + // Required. Project ID of the job to cancel + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the job to cancel + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The geographic location of the job. You must specify the location to run + // the job for the following scenarios: + // + // * If the location to run a job is not in the `us` or + // the `eu` multi-regional location + // * If the job's location is in a single region (for example, + // `us-central1`) + // + // For more information, see + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 3; +} + +// Describes format of a jobs cancellation response. +message JobCancelResponse { + // The resource type of the response. + string kind = 1; + + // The final state of the job. + Job job = 2; +} + +// Describes format of a jobs get request. +message GetJobRequest { + // Required. Project ID of the requested job. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the requested job. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The geographic location of the job. You must specify the location to run + // the job for the following scenarios: + // + // * If the location to run a job is not in the `us` or + // the `eu` multi-regional location + // * If the job's location is in a single region (for example, + // `us-central1`) + // + // For more information, see + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 3; +} + +// Describes format of a job insertion request. +message InsertJobRequest { + // Project ID of project that will be billed for the job. + string project_id = 1; + + // Jobs resource to insert. + Job job = 3; +} + +// Describes the format of a jobs deletion request. +message DeleteJobRequest { + // Required. Project ID of the job for which metadata is to be deleted. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the job for which metadata is to be deleted. If this is + // a parent job which has child jobs, the metadata from all child jobs will be + // deleted as well. Direct deletion of the metadata of child jobs is not + // allowed. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The geographic location of the job. Required. + // See details at: + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 3; +} + +// Describes the format of the list jobs request. +message ListJobsRequest { + // Projection is used to control what job information is returned. + enum Projection { + option allow_alias = true; + + // Does not include the job configuration + minimal = 0; + + // Does not include the job configuration + MINIMAL = 0; + + // Includes all job data + full = 1; + + // Includes all job data + FULL = 1; + } + + // StateFilter allows filtration by job execution state. + enum StateFilter { + option allow_alias = true; + + // Finished jobs + done = 0; + + // Finished jobs + DONE = 0; + + // Pending jobs + pending = 1; + + // Pending jobs + PENDING = 1; + + // Running jobs + running = 2; + + // Running jobs. 
+ RUNNING = 2; + } + + // Project ID of the jobs to list. + string project_id = 1; + + // Whether to display jobs owned by all users in the project. Default False. + bool all_users = 2; + + // The maximum number of results to return in a single response page. + // Leverage the page tokens to iterate through the entire collection. + google.protobuf.Int32Value max_results = 3; + + // Min value for job creation time, in milliseconds since the POSIX epoch. + // If set, only jobs created after or at this timestamp are returned. + uint64 min_creation_time = 4; + + // Max value for job creation time, in milliseconds since the POSIX epoch. + // If set, only jobs created before or at this timestamp are returned. + google.protobuf.UInt64Value max_creation_time = 5; + + // Page token, returned by a previous call, to request the next page of + // results. + string page_token = 6; + + // Restrict information returned to a set of selected fields + Projection projection = 7; + + // Filter for job state + repeated StateFilter state_filter = 8; + + // If set, show only child jobs of the specified parent. Otherwise, show all + // top-level jobs. + string parent_job_id = 9; +} + +// ListFormatJob is a partial projection of job information returned as part +// of a jobs.list response. +message ListFormatJob { + // Unique opaque ID of the job. + string id = 1; + + // The resource type. + string kind = 2; + + // Unique opaque ID of the job. + JobReference job_reference = 3; + + // Running state of the job. When the state is DONE, errorResult can be + // checked to determine whether the job succeeded or failed. + string state = 4; + + // A result object that will be present only if the job has failed. + ErrorProto error_result = 5; + + // Output only. Information about the job, including starting time and ending + // time of the job. + JobStatistics statistics = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Describes the job configuration. + JobConfiguration configuration = 7 [(google.api.field_behavior) = REQUIRED]; + + // [Full-projection-only] Describes the status of this job. + JobStatus status = 8; + + // [Full-projection-only] Email address of the user who ran the job. + string user_email = 9; + + // [Full-projection-only] String representation of identity of requesting + // party. Populated for both first- and third-party identities. Only present + // for APIs that support third-party identities. + string principal_subject = 10; +} + +// JobList is the response format for a jobs.list call. +message JobList { + // A hash of this page of results. + string etag = 1; + + // The resource type of the response. + string kind = 2; + + // A token to request the next page of results. + string next_page_token = 3; + + // List of jobs that were requested. + repeated ListFormatJob jobs = 4; + + // A list of skipped locations that were unreachable. For more information + // about BigQuery locations, see: + // https://cloud.google.com/bigquery/docs/locations. Example: "europe-west5" + repeated string unreachable = 5; +} + +// Request object of GetQueryResults. +message GetQueryResultsRequest { + // Required. Project ID of the query job. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the query job. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Zero-based index of the starting row. + google.protobuf.UInt64Value start_index = 3; + + // Page token, returned by a previous call, to request the next page of + // results. 
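+  //
+  // A minimal paging sketch in TypeScript (illustrative; the client variable
+  // and camelCase request fields are assumptions, not generated code):
+  //
+  //   let pageToken: string | null | undefined;
+  //   do {
+  //     const [res] = await client.getQueryResults({projectId, jobId, pageToken});
+  //     pageToken = res.pageToken;  // empty when no further pages remain
+  //   } while (pageToken);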
+ string page_token = 4; + + // Maximum number of results to read. + google.protobuf.UInt32Value max_results = 5; + + // Optional: Specifies the maximum amount of time, in milliseconds, that the + // client is willing to wait for the query to complete. By default, this limit + // is 10 seconds (10,000 milliseconds). If the query is complete, the + // jobComplete field in the response is true. If the query has not yet + // completed, jobComplete is false. + // + // You can request a longer timeout period in the timeoutMs field. However, + // the call is not guaranteed to wait for the specified timeout; it typically + // returns after around 200 seconds (200,000 milliseconds), even if the query + // is not complete. + // + // If jobComplete is false, you can continue to wait for the query to complete + // by calling the getQueryResults method until the jobComplete field in the + // getQueryResults response is true. + google.protobuf.UInt32Value timeout_ms = 6; + + // The geographic location of the job. You must specify the location to run + // the job for the following scenarios: + // + // * If the location to run a job is not in the `us` or + // the `eu` multi-regional location + // * If the job's location is in a single region (for example, + // `us-central1`) + // + // For more information, see + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 7; + + // Optional. Output format adjustments. + DataFormatOptions format_options = 8 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response object of GetQueryResults. +message GetQueryResultsResponse { + // The resource type of the response. + string kind = 1; + + // A hash of this response. + string etag = 2; + + // The schema of the results. Present only when the query completes + // successfully. + TableSchema schema = 3; + + // Reference to the BigQuery Job that was created to run the query. This field + // will be present even if the original request timed out, in which case + // GetQueryResults can be used to read the results once the query has + // completed. Since this API only returns the first page of results, + // subsequent pages can be fetched via the same mechanism (GetQueryResults). + JobReference job_reference = 4; + + // The total number of rows in the complete query result set, which can be + // more than the number of rows in this single page of results. Present only + // when the query completes successfully. + google.protobuf.UInt64Value total_rows = 5; + + // A token used for paging results. When this token is non-empty, it + // indicates additional results are available. + string page_token = 6; + + // An object with as many results as can be contained within the maximum + // permitted reply size. To get any additional rows, you can call + // GetQueryResults and specify the jobReference returned above. Present only + // when the query completes successfully. + // + // The REST-based representation of this data leverages a series of + // JSON f,v objects for indicating fields and values. + repeated google.protobuf.Struct rows = 7; + + // The total number of bytes processed for this query. + google.protobuf.Int64Value total_bytes_processed = 8; + + // Whether the query has completed or not. If rows or totalRows are present, + // this will always be true. If this is false, totalRows will not be + // available. + google.protobuf.BoolValue job_complete = 9; + + // Output only. The first errors or warnings encountered during the running + // of the job. 
+  // The final message includes the number of errors that caused the
+  // process to stop. Errors here do not necessarily mean that the job has
+  // completed or was unsuccessful. For more information about error messages,
+  // see [Error
+  // messages](https://cloud.google.com/bigquery/docs/error-messages).
+  repeated ErrorProto errors = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Whether the query result was fetched from the query cache.
+  google.protobuf.BoolValue cache_hit = 11;
+
+  // Output only. The number of rows affected by a DML statement. Present only
+  // for DML statements INSERT, UPDATE or DELETE.
+  google.protobuf.Int64Value num_dml_affected_rows = 12
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Request format for the query request.
+message PostQueryRequest {
+  // Required. Project ID of the query request.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // The query request body.
+  QueryRequest query_request = 2;
+}
+
+// Describes the format of the jobs.query request.
+message QueryRequest {
+  // Job Creation Mode provides different options on job creation.
+  enum JobCreationMode {
+    // If unspecified, JOB_CREATION_REQUIRED is the default.
+    JOB_CREATION_MODE_UNSPECIFIED = 0;
+
+    // Default. Job creation is always required.
+    JOB_CREATION_REQUIRED = 1;
+
+    // Job creation is optional. Returning immediate results is prioritized.
+    // BigQuery will automatically determine if a Job needs to be created.
+    // The conditions under which BigQuery can decide to not create a Job are
+    // subject to change. If Job creation is required, JOB_CREATION_REQUIRED
+    // mode should be used, which is the default.
+    JOB_CREATION_OPTIONAL = 2;
+  }
+
+  // The resource type of the request.
+  string kind = 2;
+
+  // Required. A query string to execute, using Google Standard SQL or legacy
+  // SQL syntax. Example: "SELECT COUNT(f1) FROM
+  // myProjectId.myDatasetId.myTableId".
+  string query = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The maximum number of rows of data to return per page of
+  // results. Setting this flag to a small value such as 1000 and then paging
+  // through results might improve reliability when the query result set is
+  // large. In addition to this limit, responses are also limited to 10 MB. By
+  // default, there is no maximum row count, and only the byte limit applies.
+  google.protobuf.UInt32Value max_results = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the default datasetId and projectId to assume for any
+  // unqualified table names in the query. If not set, all table names in the
+  // query string must be qualified in the format 'datasetId.tableId'.
+  DatasetReference default_dataset = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the maximum amount of time, in milliseconds,
+  // that the client is willing to wait for the query to complete. By default,
+  // this limit is 10 seconds (10,000 milliseconds). If the query is complete,
+  // the jobComplete field in the response is true. If the query has not yet
+  // completed, jobComplete is false.
+  //
+  // You can request a longer timeout period in the timeoutMs field. However,
+  // the call is not guaranteed to wait for the specified timeout; it typically
+  // returns after around 200 seconds (200,000 milliseconds), even if the query
+  // is not complete.
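+  //
+  // For example (illustrative numbers): with timeoutMs set to 30000, a
+  // response with jobComplete=false after 30 seconds means the query is
+  // still running, not that it failed.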
+  //
+  // If jobComplete is false, you can continue to wait for the query to complete
+  // by calling the getQueryResults method until the jobComplete field in the
+  // getQueryResults response is true.
+  google.protobuf.UInt32Value timeout_ms = 6
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If set to true, BigQuery doesn't run the job. Instead, if the
+  // query is valid, BigQuery returns statistics about the job such as how many
+  // bytes would be processed. If the query is invalid, an error returns. The
+  // default value is false.
+  bool dry_run = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Whether to look for the result in the query cache. The query
+  // cache is a best-effort cache that will be flushed whenever tables in the
+  // query are modified. The default value is true.
+  google.protobuf.BoolValue use_query_cache = 9
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Specifies whether to use BigQuery's legacy SQL dialect for this query. The
+  // default value is true. If set to false, the query will use BigQuery's
+  // GoogleSQL: https://cloud.google.com/bigquery/sql-reference/ When
+  // useLegacySql is set to false, the value of flattenResults is ignored; query
+  // will be run as if flattenResults is false.
+  google.protobuf.BoolValue use_legacy_sql = 10;
+
+  // GoogleSQL only. Set to POSITIONAL to use positional (?) query parameters
+  // or to NAMED to use named (@myparam) query parameters in this query.
+  string parameter_mode = 11;
+
+  // Query parameters for GoogleSQL queries.
+  repeated QueryParameter query_parameters = 12;
+
+  // The geographic location where the job should run. See details at
+  // https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
+  string location = 13;
+
+  // Optional. Output format adjustments.
+  DataFormatOptions format_options = 15
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Connection properties which can modify the query behavior.
+  repeated ConnectionProperty connection_properties = 16
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The labels associated with this query.
+  // Labels can be used to organize and group query jobs.
+  // Label keys and values can be no longer than 63 characters, can only contain
+  // lowercase letters, numeric characters, underscores and dashes.
+  // International characters are allowed. Label keys must start with a letter
+  // and each label in the list must have a different key.
+  map<string, string> labels = 17 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Limits the bytes billed for this query. Queries with
+  // bytes billed above this limit will fail (without incurring a charge).
+  // If unspecified, the project default is used.
+  google.protobuf.Int64Value maximum_bytes_billed = 18
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A unique user provided identifier to ensure idempotent behavior
+  // for queries. Note that this is different from the job_id. It has the
+  // following properties:
+  //
+  // 1. It is case-sensitive, limited to up to 36 ASCII characters. A UUID is
+  // recommended.
+  //
+  // 2. Read only queries can ignore this token since they are nullipotent by
+  // definition.
+  //
+  // 3. For the purposes of idempotency ensured by the request_id, a request
+  // is considered duplicate of another only if they have the same request_id
+  // and are actually duplicates.
+  // When determining whether a request is a
+  // duplicate of another request, all parameters in the request that
+  // may affect the result are considered. For example, query,
+  // connection_properties, query_parameters, use_legacy_sql are parameters
+  // that affect the result and are considered when determining whether a
+  // request is a duplicate, but properties like timeout_ms don't
+  // affect the result and are thus not considered. Dry run query
+  // requests are never considered duplicate of another request.
+  //
+  // 4. When a duplicate mutating query request is detected, it returns:
+  //     a. the results of the mutation if it completes successfully within
+  //        the timeout.
+  //     b. the running operation if it is still in progress at the end of the
+  //        timeout.
+  //
+  // 5. Its lifetime is limited to 15 minutes. In other words, if two
+  // requests are sent with the same request_id, but more than 15 minutes
+  // apart, idempotency is not guaranteed.
+  string request_id = 19 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If true, creates a new session using a randomly generated
+  // session_id. If false, runs query with an existing session_id passed in
+  // ConnectionProperty, otherwise runs query in non-session mode.
+  //
+  // The session location will be set to QueryRequest.location if it is present,
+  // otherwise it's set to the default location based on existing routing logic.
+  google.protobuf.BoolValue create_session = 20
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If not set, jobs are always required.
+  //
+  // If set, the query request will follow the behavior described in
+  // JobCreationMode.
+  // [Preview](https://cloud.google.com/products/#product-launch-stages)
+  JobCreationMode job_creation_mode = 22
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+message QueryResponse {
+  // The resource type.
+  string kind = 1;
+
+  // The schema of the results. Present only when the query completes
+  // successfully.
+  TableSchema schema = 2;
+
+  // Reference to the Job that was created to run the query. This field will be
+  // present even if the original request timed out, in which case
+  // GetQueryResults can be used to read the results once the query has
+  // completed. Since this API only returns the first page of results,
+  // subsequent pages can be fetched via the same mechanism (GetQueryResults).
+  //
+  // If job_creation_mode was set to `JOB_CREATION_OPTIONAL` and the query
+  // completes without creating a job, this field will be empty.
+  JobReference job_reference = 3;
+
+  // Optional. The reason why a Job was created.
+  //
+  // Only relevant when a job_reference is present in the response.
+  // If job_reference is not present it will always be unset.
+  // [Preview](https://cloud.google.com/products/#product-launch-stages)
+  JobCreationReason job_creation_reason = 15
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Auto-generated ID for the query.
+  // [Preview](https://cloud.google.com/products/#product-launch-stages)
+  string query_id = 14;
+
+  // The total number of rows in the complete query result set, which can be
+  // more than the number of rows in this single page of results.
+  google.protobuf.UInt64Value total_rows = 4;
+
+  // A token used for paging results. A non-empty token indicates that
+  // additional results are available. To see additional results,
+  // query the
+  // [`jobs.getQueryResults`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults)
+  // method.
For more information, see [Paging through table + // data](https://cloud.google.com/bigquery/docs/paging-results). + string page_token = 5; + + // An object with as many results as can be contained within the maximum + // permitted reply size. To get any additional rows, you can call + // GetQueryResults and specify the jobReference returned above. + repeated google.protobuf.Struct rows = 6; + + // The total number of bytes processed for this query. If this query was a dry + // run, this is the number of bytes that would be processed if the query were + // run. + google.protobuf.Int64Value total_bytes_processed = 7; + + // Whether the query has completed or not. If rows or totalRows are present, + // this will always be true. If this is false, totalRows will not be + // available. + google.protobuf.BoolValue job_complete = 8; + + // Output only. The first errors or warnings encountered during the running of + // the job. The final message includes the number of errors that caused the + // process to stop. Errors here do not necessarily mean that the job has + // completed or was unsuccessful. For more information about error messages, + // see [Error + // messages](https://cloud.google.com/bigquery/docs/error-messages). + repeated ErrorProto errors = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Whether the query result was fetched from the query cache. + google.protobuf.BoolValue cache_hit = 10; + + // Output only. The number of rows affected by a DML statement. Present only + // for DML statements INSERT, UPDATE or DELETE. + google.protobuf.Int64Value num_dml_affected_rows = 11 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Information of the session if this job is part of one. + SessionInfo session_info = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Detailed statistics for DML statements INSERT, UPDATE, DELETE, + // MERGE or TRUNCATE. + DmlStats dml_stats = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_config.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_config.proto.baseline new file mode 100755 index 000000000..9f42488c8 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_config.proto.baseline @@ -0,0 +1,814 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/bigquery/v2/clustering.proto";
+import "google/cloud/bigquery/v2/dataset_reference.proto";
+import "google/cloud/bigquery/v2/decimal_target_types.proto";
+import "google/cloud/bigquery/v2/encryption_config.proto";
+import "google/cloud/bigquery/v2/external_data_config.proto";
+import "google/cloud/bigquery/v2/file_set_specification_type.proto";
+import "google/cloud/bigquery/v2/hive_partitioning.proto";
+import "google/cloud/bigquery/v2/json_extension.proto";
+import "google/cloud/bigquery/v2/model_reference.proto";
+import "google/cloud/bigquery/v2/query_parameter.proto";
+import "google/cloud/bigquery/v2/range_partitioning.proto";
+import "google/cloud/bigquery/v2/system_variable.proto";
+import "google/cloud/bigquery/v2/table_reference.proto";
+import "google/cloud/bigquery/v2/table_schema.proto";
+import "google/cloud/bigquery/v2/time_partitioning.proto";
+import "google/cloud/bigquery/v2/udf_resource.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "JobConfigProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Properties for the destination table.
+message DestinationTableProperties {
+  // Optional. Friendly name for the destination table. If the table already
+  // exists, it should be the same as the existing friendly name.
+  google.protobuf.StringValue friendly_name = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The description for the destination table.
+  // This will only be used if the destination table is newly created.
+  // If the table already exists and a value different than the current
+  // description is provided, the job will fail.
+  google.protobuf.StringValue description = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The labels associated with this table. You can use these to
+  // organize and group your tables. This will only be used if the destination
+  // table is newly created. If the table already exists and the provided
+  // labels differ from the current labels, the job will fail.
+  map<string, string> labels = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A connection-level property to customize query behavior. Under JDBC, these
+// correspond directly to connection properties passed to the DriverManager.
+// Under ODBC, these correspond to properties in the connection string.
+//
+// Currently supported connection properties:
+//
+// * **dataset_project_id**: represents the default project for datasets that
+// are used in the query. Setting the
+// system variable `@@dataset_project_id` achieves the same behavior. For
+// more information about system variables, see:
+// https://cloud.google.com/bigquery/docs/reference/system-variables
+//
+// * **time_zone**: represents the default timezone used to run the query.
+//
+// * **session_id**: associates the query with a given session.
+//
+// * **query_label**: associates the query with a given job label. If set,
+// all subsequent queries in a script or session will have this label.
+// For the format in which you can specify a query label, see labels
+// in the JobConfiguration resource type:
+// https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfiguration
+//
+// * **service_account**: indicates the service account to use to run a
+// continuous query. If set, the query job uses the service account to access
+// Google Cloud resources. Service account access is bounded by the IAM
+// permissions that you have granted to the service account.
+//
+// Additional properties are allowed, but ignored. Specifying multiple
+// connection properties with the same key returns an error.
+message ConnectionProperty {
+  // The key of the property to set.
+  string key = 1;
+
+  // The value of the property to set.
+  string value = 2;
+}
+
+// JobConfigurationQuery configures a BigQuery query job.
+message JobConfigurationQuery {
+  // [Required] SQL query text to execute. The useLegacySql field can be used
+  // to indicate whether the query uses legacy SQL or GoogleSQL.
+  string query = 1;
+
+  // Optional. Describes the table where the query results should be stored.
+  // This property must be set for large results that exceed the maximum
+  // response size. For queries that produce anonymous (cached) results, this
+  // field will be populated by BigQuery.
+  TableReference destination_table = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. You can specify external table definitions, which operate as
+  // ephemeral tables that can be queried. These definitions are configured
+  // using a JSON map, where the string key represents the table identifier, and
+  // the value is the corresponding external data configuration object.
+  map<string, ExternalDataConfiguration> external_table_definitions = 23
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Describes user-defined function resources used in the query.
+  repeated UserDefinedFunctionResource user_defined_function_resources = 4;
+
+  // Optional. Specifies whether the job is allowed to create new tables.
+  // The following values are supported:
+  //
+  // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
+  // table.
+  // * CREATE_NEVER: The table must already exist. If it does not,
+  // a 'notFound' error is returned in the job result.
+  //
+  // The default value is CREATE_IF_NEEDED.
+  // Creation, truncation and append actions occur as one atomic update
+  // upon job completion.
+  string create_disposition = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the action that occurs if the destination table
+  // already exists. The following values are supported:
+  //
+  // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
+  // data, removes the constraints, and uses the schema from the query result.
+  // * WRITE_APPEND: If the table already exists, BigQuery appends the data to
+  // the table.
+  // * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate'
+  // error is returned in the job result.
+  //
+  // The default value is WRITE_EMPTY. Each action is atomic and only occurs if
+  // BigQuery is able to complete the job successfully. Creation, truncation and
+  // append actions occur as one atomic update upon job completion.
+  string write_disposition = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the default dataset to use for unqualified
+  // table names in the query. This setting does not alter behavior of
+  // unqualified dataset names. Setting the system variable
+  // `@@dataset_id` achieves the same behavior.
See + // https://cloud.google.com/bigquery/docs/reference/system-variables for more + // information on system variables. + DatasetReference default_dataset = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies a priority for the query. Possible values include + // INTERACTIVE and BATCH. The default value is INTERACTIVE. + string priority = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true and query uses legacy SQL dialect, allows the query + // to produce arbitrarily large result tables at a slight cost in performance. + // Requires destinationTable to be set. + // For GoogleSQL queries, this flag is ignored and large results are + // always allowed. However, you must still set destinationTable when result + // size exceeds the allowed maximum response size. + google.protobuf.BoolValue allow_large_results = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Whether to look for the result in the query cache. The query + // cache is a best-effort cache that will be flushed whenever tables in the + // query are modified. Moreover, the query cache is only available when a + // query does not have a destination table specified. The default value is + // true. + google.protobuf.BoolValue use_query_cache = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true and query uses legacy SQL dialect, flattens all nested + // and repeated fields in the query results. + // allowLargeResults must be true if this is set to false. + // For GoogleSQL queries, this flag is ignored and results are never + // flattened. + google.protobuf.BoolValue flatten_results = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Limits the bytes billed for this job. Queries that will have + // bytes billed beyond this limit will fail (without incurring a charge). + // If unspecified, this will be set to your project default. + google.protobuf.Int64Value maximum_bytes_billed = 14; + + // Optional. Specifies whether to use BigQuery's legacy SQL dialect for this + // query. The default value is true. If set to false, the query will use + // BigQuery's GoogleSQL: + // https://cloud.google.com/bigquery/sql-reference/ + // + // When useLegacySql is set to false, the value of flattenResults is ignored; + // query will be run as if flattenResults is false. + google.protobuf.BoolValue use_legacy_sql = 15 + [(google.api.field_behavior) = OPTIONAL]; + + // GoogleSQL only. Set to POSITIONAL to use positional (?) query parameters + // or to NAMED to use named (@myparam) query parameters in this query. + string parameter_mode = 16; + + // Query parameters for GoogleSQL queries. + repeated QueryParameter query_parameters = 17; + + // Output only. System variables for GoogleSQL queries. A system variable is + // output if the variable is settable and its value differs from the system + // default. + // "@@" prefix is not included in the name of the System variables. + optional SystemVariables system_variables = 35 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Allows the schema of the destination table to be updated as a side effect + // of the query job. Schema update options are supported in two cases: + // when writeDisposition is WRITE_APPEND; + // when writeDisposition is WRITE_TRUNCATE and the destination table is a + // partition of a table, specified by partition decorators. For normal tables, + // WRITE_TRUNCATE will always overwrite the schema. 
+ // One or more of the following values are specified: + // + // * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. + // * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original + // schema to nullable. + repeated string schema_update_options = 18; + + // Time-based partitioning specification for the destination table. Only one + // of timePartitioning and rangePartitioning should be specified. + TimePartitioning time_partitioning = 19; + + // Range partitioning specification for the destination table. + // Only one of timePartitioning and rangePartitioning should be specified. + RangePartitioning range_partitioning = 22; + + // Clustering specification for the destination table. + Clustering clustering = 20; + + // Custom encryption configuration (e.g., Cloud KMS keys) + EncryptionConfiguration destination_encryption_configuration = 21; + + // Options controlling the execution of scripts. + ScriptOptions script_options = 24; + + // Connection properties which can modify the query behavior. + repeated ConnectionProperty connection_properties = 33; + + // If this property is true, the job creates a new session using a randomly + // generated session_id. To continue using a created session with + // subsequent queries, pass the existing session identifier as a + // `ConnectionProperty` value. The session identifier is returned as part of + // the `SessionInfo` message within the query statistics. + // + // The new session's location will be set to `Job.JobReference.location` if it + // is present, otherwise it's set to the default location based on existing + // routing logic. + google.protobuf.BoolValue create_session = 34; + + // Optional. Whether to run the query as continuous or a regular query. + // Continuous query is currently in experimental stage and not ready for + // general usage. + google.protobuf.BoolValue continuous = 36 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Options related to script execution. +message ScriptOptions { + // KeyResultStatementKind controls how the key result is determined. + enum KeyResultStatementKind { + // Default value. + KEY_RESULT_STATEMENT_KIND_UNSPECIFIED = 0; + + // The last result determines the key result. + LAST = 1; + + // The first SELECT statement determines the key result. + FIRST_SELECT = 2; + } + + // Timeout period for each statement in a script. + google.protobuf.Int64Value statement_timeout_ms = 1; + + // Limit on the number of bytes billed per statement. Exceeding this budget + // results in an error. + google.protobuf.Int64Value statement_byte_budget = 2; + + // Determines which statement in the script represents the "key result", + // used to populate the schema and query results of the script job. + // Default is LAST. + KeyResultStatementKind key_result_statement = 4; +} + +// JobConfigurationLoad contains the configuration properties for loading data +// into a destination table. +message JobConfigurationLoad { + // Indicates the character map used for column names. + enum ColumnNameCharacterMap { + // Unspecified column name character map. + COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED = 0; + + // Support flexible column name and reject invalid column names. + STRICT = 1; + + // Support alphanumeric + underscore characters and names must start with a + // letter or underscore. Invalid column names will be normalized. + V1 = 2; + + // Support flexible column name. Invalid column names will be normalized. 
+    V2 = 3;
+  }
+
+  // [Required] The fully-qualified URIs that point to your data in Google
+  // Cloud.
+  // For Google Cloud Storage URIs:
+  //   Each URI can contain one '*' wildcard character and it must come after
+  //   the 'bucket' name. Size limits related to load jobs apply to external
+  //   data sources.
+  // For Google Cloud Bigtable URIs:
+  //   Exactly one URI can be specified and it has to be a fully specified and
+  //   valid HTTPS URL for a Google Cloud Bigtable table.
+  // For Google Cloud Datastore backups:
+  //   Exactly one URI can be specified. Also, the '*' wildcard character is not
+  //   allowed.
+  repeated string source_uris = 1;
+
+  // Optional. Specifies how source URIs are interpreted for constructing the
+  // file set to load. By default, source URIs are expanded against the
+  // underlying storage. You can also specify manifest files to control how the
+  // file set is constructed. This option is only applicable to object storage
+  // systems.
+  FileSetSpecType file_set_spec_type = 49
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The schema for the destination table. The schema can be
+  // omitted if the destination table already exists, or if you're loading data
+  // from Google Cloud Datastore.
+  TableSchema schema = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // [Required] The destination table to load the data into.
+  TableReference destination_table = 3;
+
+  // Optional. [Experimental] Properties with which to create the destination
+  // table if it is new.
+  DestinationTableProperties destination_table_properties = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies whether the job is allowed to create new tables.
+  // The following values are supported:
+  //
+  // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
+  // table.
+  // * CREATE_NEVER: The table must already exist. If it does not,
+  // a 'notFound' error is returned in the job result.
+  // The default value is CREATE_IF_NEEDED.
+  // Creation, truncation and append actions occur as one atomic update
+  // upon job completion.
+  string create_disposition = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the action that occurs if the destination table
+  // already exists. The following values are supported:
+  //
+  // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
+  // data, removes the constraints and uses the schema from the load job.
+  // * WRITE_APPEND: If the table already exists, BigQuery appends the data to
+  // the table.
+  // * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate'
+  // error is returned in the job result.
+  //
+  // The default value is WRITE_APPEND.
+  // Each action is atomic and only occurs if BigQuery is able to complete the
+  // job successfully.
+  // Creation, truncation and append actions occur as one atomic update
+  // upon job completion.
+  string write_disposition = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies a string that represents a null value in a CSV file.
+  // For example, if you specify "\N", BigQuery interprets "\N" as a null value
+  // when loading a CSV file.
+  // The default value is the empty string. If you set this property to a custom
+  // value, BigQuery throws an error if an empty string is present for all data
+  // types except for STRING and BYTE. For STRING and BYTE columns, BigQuery
+  // interprets the empty string as an empty value.
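+  //
+  // Illustrative example (not generated documentation): with nullMarker set
+  // to "\N" and a schema of INTEGER, INTEGER, STRING, the CSV row `1,\N,`
+  // loads as (1, NULL, "") since the trailing empty STRING stays empty.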
+ google.protobuf.StringValue null_marker = 7 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The separator character for fields in a CSV file. The separator + // is interpreted as a single byte. For files encoded in ISO-8859-1, any + // single character can be used as a separator. For files encoded in UTF-8, + // characters represented in decimal range 1-127 (U+0001-U+007F) can be used + // without any modification. UTF-8 characters encoded with multiple bytes + // (i.e. U+0080 and above) will have only the first byte used for separating + // fields. The remaining bytes will be treated as a part of the field. + // BigQuery also supports the escape sequence "\t" (U+0009) to specify a tab + // separator. The default value is comma (",", U+002C). + string field_delimiter = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The number of rows at the top of a CSV file that BigQuery will + // skip when loading the data. The default value is 0. This property is useful + // if you have header rows in the file that should be skipped. When autodetect + // is on, the behavior is the following: + // + // * skipLeadingRows unspecified - Autodetect tries to detect headers in the + // first row. If they are not detected, the row is read as data. Otherwise + // data is read starting from the second row. + // * skipLeadingRows is 0 - Instructs autodetect that there are no headers and + // data should be read starting from the first row. + // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect + // headers in row N. If headers are not detected, row N is just skipped. + // Otherwise row N is used to extract column names for the detected schema. + google.protobuf.Int32Value skip_leading_rows = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The character encoding of the data. + // The supported values are UTF-8, ISO-8859-1, UTF-16BE, UTF-16LE, UTF-32BE, + // and UTF-32LE. The default value is UTF-8. BigQuery decodes the data after + // the raw, binary data has been split using the values of the `quote` and + // `fieldDelimiter` properties. + // + // If you don't specify an encoding, or if you specify a UTF-8 encoding when + // the CSV file is not UTF-8 encoded, BigQuery attempts to convert the data to + // UTF-8. Generally, your data loads successfully, but it may not match + // byte-for-byte what you expect. To avoid this, specify the correct encoding + // by using the `--encoding` flag. + // + // If BigQuery can't convert a character other than the ASCII `0` character, + // BigQuery converts the character to the standard Unicode replacement + // character: �. + string encoding = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The value that is used to quote data sections in a CSV file. + // BigQuery converts the string to ISO-8859-1 encoding, and then uses the + // first byte of the encoded string to split the data in its raw, binary + // state. + // The default value is a double-quote ('"'). + // If your data does not contain quoted sections, set the property value to an + // empty string. + // If your data contains quoted newline characters, you must also set the + // allowQuotedNewlines property to true. + // To include the specific quote character within a quoted value, precede it + // with an additional matching quote character. For example, if you want to + // escape the default character ' " ', use ' "" '. + // @default " + google.protobuf.StringValue quote = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The maximum number of bad records that BigQuery can ignore when + // running the job. If the number of bad records exceeds this value, an + // invalid error is returned in the job result. + // The default value is 0, which requires that all records are valid. + // This is only supported for CSV and NEWLINE_DELIMITED_JSON file formats. + google.protobuf.Int32Value max_bad_records = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Indicates if BigQuery should allow quoted data sections that contain + // newline characters in a CSV file. The default value is false. + google.protobuf.BoolValue allow_quoted_newlines = 15; + + // Optional. The format of the data files. + // For CSV files, specify "CSV". For datastore backups, + // specify "DATASTORE_BACKUP". For newline-delimited JSON, + // specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". + // For parquet, specify "PARQUET". For orc, specify "ORC". + // The default value is CSV. + string source_format = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Accept rows that are missing trailing optional columns. + // The missing values are treated as nulls. + // If false, records with missing trailing columns are treated as bad records, + // and if there are too many bad records, an invalid error is returned in the + // job result. + // The default value is false. + // Only applicable to CSV, ignored for other formats. + google.protobuf.BoolValue allow_jagged_rows = 17 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates if BigQuery should allow extra values that are not + // represented in the table schema. + // If true, the extra values are ignored. + // If false, records with extra columns are treated as bad records, and if + // there are too many bad records, an invalid error is returned in the job + // result. The default value is false. + // The sourceFormat property determines what BigQuery treats as an extra + // value: + // CSV: Trailing columns + // JSON: Named values that don't match any column names in the table schema + // Avro, Parquet, ORC: Fields in the file schema that don't exist in the + // table schema. + google.protobuf.BoolValue ignore_unknown_values = 18 + [(google.api.field_behavior) = OPTIONAL]; + + // If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity + // properties to load into BigQuery from a Cloud Datastore backup. Property + // names are case sensitive and must be top-level properties. If no properties + // are specified, BigQuery loads all properties. If any named property isn't + // found in the Cloud Datastore backup, an invalid error is returned in the + // job result. + repeated string projection_fields = 19; + + // Optional. Indicates if we should automatically infer the options and + // schema for CSV and JSON sources. + google.protobuf.BoolValue autodetect = 20 + [(google.api.field_behavior) = OPTIONAL]; + + // Allows the schema of the destination table to be updated as a side effect + // of the load job if a schema is autodetected or supplied in the job + // configuration. + // Schema update options are supported in two cases: + // when writeDisposition is WRITE_APPEND; + // when writeDisposition is WRITE_TRUNCATE and the destination table is a + // partition of a table, specified by partition decorators. For normal tables, + // WRITE_TRUNCATE will always overwrite the schema. + // One or more of the following values are specified: + // + // * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. 
+  // * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
+  // schema to nullable.
+  repeated string schema_update_options = 21;
+
+  // Time-based partitioning specification for the destination table. Only one
+  // of timePartitioning and rangePartitioning should be specified.
+  TimePartitioning time_partitioning = 22;
+
+  // Range partitioning specification for the destination table.
+  // Only one of timePartitioning and rangePartitioning should be specified.
+  RangePartitioning range_partitioning = 26;
+
+  // Clustering specification for the destination table.
+  Clustering clustering = 23;
+
+  // Custom encryption configuration (e.g., Cloud KMS keys)
+  EncryptionConfiguration destination_encryption_configuration = 24;
+
+  // Optional. If sourceFormat is set to "AVRO", indicates whether to interpret
+  // logical types as the corresponding BigQuery data type (for example,
+  // TIMESTAMP), instead of using the raw type (for example, INTEGER).
+  google.protobuf.BoolValue use_avro_logical_types = 25
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The user can provide a reference file with the reader schema.
+  // This file is only loaded if it is part of source URIs, but is not loaded
+  // otherwise. It is enabled for the following formats: AVRO, PARQUET, ORC.
+  google.protobuf.StringValue reference_file_schema_uri = 45
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. When set, configures hive partitioning support.
+  // Not all storage formats support hive partitioning -- requesting hive
+  // partitioning on an unsupported format will lead to an error, as will
+  // providing an invalid specification.
+  HivePartitioningOptions hive_partitioning_options = 37
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Defines the list of possible SQL data types to which the source decimal
+  // values are converted. This list and the precision and the scale parameters
+  // of the decimal field determine the target type. In the order of NUMERIC,
+  // BIGNUMERIC, and STRING, a
+  // type is picked if it is in the specified list and if it supports the
+  // precision and the scale. STRING supports all precision and scale values.
+  // If none of the listed types supports the precision and the scale, the type
+  // supporting the widest range in the specified list is picked, and if a value
+  // exceeds the supported range when reading the data, an error will be thrown.
+  //
+  // Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"].
+  // If (precision,scale) is:
+  //
+  // * (38,9) -> NUMERIC;
+  // * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits);
+  // * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits);
+  // * (76,38) -> BIGNUMERIC;
+  // * (77,38) -> BIGNUMERIC (error if value exceeds supported range).
+  //
+  // This field cannot contain duplicate types. The order of the types in this
+  // field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as
+  // ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over
+  // BIGNUMERIC.
+  //
+  // Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other
+  // file formats.
+  repeated DecimalTargetType decimal_target_types = 39;
+
+  // Optional. Load option to be used together with source_format
+  // newline-delimited JSON to indicate that a variant of JSON is being loaded.
+  // To load newline-delimited GeoJSON, specify GEOJSON (and source_format must
+  // be set to NEWLINE_DELIMITED_JSON).
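+  //
+  // A minimal load configuration sketch in TypeScript (illustrative values;
+  // the object shape is an assumption, not generated code):
+  //
+  //   const loadConfig = {
+  //     sourceFormat: 'NEWLINE_DELIMITED_JSON',
+  //     jsonExtension: 'GEOJSON',
+  //     sourceUris: ['gs://mybucket/features.jsonl'],
+  //   };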
+  JsonExtension json_extension = 41 [(google.api.field_behavior) = OPTIONAL]; + +  // Optional. Additional properties to set if sourceFormat is set to PARQUET. +  ParquetOptions parquet_options = 42 [(google.api.field_behavior) = OPTIONAL]; + +  // Optional. When sourceFormat is set to "CSV", this indicates whether the +  // embedded ASCII control characters (the first 32 characters in the +  // ASCII table, from +  // '\x00' to '\x1F') are preserved. +  google.protobuf.BoolValue preserve_ascii_control_characters = 44 +      [(google.api.field_behavior) = OPTIONAL]; + +  // Optional. Connection properties which can modify the load job behavior. +  // Currently, only the 'session_id' connection property is supported, and is +  // used to resolve _SESSION appearing as the dataset id. +  repeated ConnectionProperty connection_properties = 46 +      [(google.api.field_behavior) = OPTIONAL]; + +  // Optional. If this property is true, the job creates a new session using a +  // randomly generated session_id. To continue using a created session with +  // subsequent queries, pass the existing session identifier as a +  // `ConnectionProperty` value. The session identifier is returned as part of +  // the `SessionInfo` message within the query statistics. +  // +  // The new session's location will be set to `Job.JobReference.location` if it +  // is present, otherwise it's set to the default location based on existing +  // routing logic. +  google.protobuf.BoolValue create_session = 47 +      [(google.api.field_behavior) = OPTIONAL]; + +  // Optional. Character map supported for column names in CSV/Parquet loads. +  // Defaults to STRICT and can be overridden by Project Config Service. Using +  // this option with unsupported load formats will result in an error. +  ColumnNameCharacterMap column_name_character_map = 50 +      [(google.api.field_behavior) = OPTIONAL]; + +  // Optional. [Experimental] Configures the load job to copy files directly to +  // the destination BigLake managed table, bypassing file content reading and +  // rewriting. +  // +  // Copying files only is supported when all the following are true: +  // +  // * `source_uris` are located in the same Cloud Storage location as the +  //   destination table's `storage_uri` location. +  // * `source_format` is `PARQUET`. +  // * `destination_table` is an existing BigLake managed table. The table's +  //   schema does not have flexible column names. The table's columns do not +  //   have type parameters other than precision and scale. +  // * No options other than the above are specified. +  google.protobuf.BoolValue copy_files_only = 51 +      [(google.api.field_behavior) = OPTIONAL]; +} + +// JobConfigurationTableCopy configures a job that copies data from one table +// to another. +// For more information on copying tables, see [Copy a +// table](https://cloud.google.com/bigquery/docs/managing-tables#copy-table). +message JobConfigurationTableCopy { +  // Indicates different operation types supported in table copy job. +  enum OperationType { +    // Unspecified operation type. +    OPERATION_TYPE_UNSPECIFIED = 0; + +    // The source and destination table have the same table type. +    COPY = 1; + +    // The source table type is TABLE and +    // the destination table type is SNAPSHOT. +    SNAPSHOT = 2; + +    // The source table type is SNAPSHOT and +    // the destination table type is TABLE. +    RESTORE = 3; + +    // The source and destination table have the same table type, +    // but only bill for unique data. +    CLONE = 4; +  } + +  // [Pick one] Source table to copy.
+ TableReference source_table = 1; + + // [Pick one] Source tables to copy. + repeated TableReference source_tables = 2; + + // [Required] The destination table. + TableReference destination_table = 3; + + // Optional. Specifies whether the job is allowed to create new tables. + // The following values are supported: + // + // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the + // table. + // * CREATE_NEVER: The table must already exist. If it does not, + // a 'notFound' error is returned in the job result. + // + // The default value is CREATE_IF_NEEDED. + // Creation, truncation and append actions occur as one atomic update + // upon job completion. + string create_disposition = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the action that occurs if the destination table + // already exists. The following values are supported: + // + // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the + // table data and uses the schema and table constraints from the source table. + // * WRITE_APPEND: If the table already exists, BigQuery appends the data to + // the table. + // * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' + // error is returned in the job result. + // + // The default value is WRITE_EMPTY. Each action is atomic and only occurs if + // BigQuery is able to complete the job successfully. Creation, truncation and + // append actions occur as one atomic update upon job completion. + string write_disposition = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Custom encryption configuration (e.g., Cloud KMS keys). + EncryptionConfiguration destination_encryption_configuration = 6; + + // Optional. Supported operation types in table copy job. + OperationType operation_type = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The time when the destination table expires. Expired tables will + // be deleted and their storage reclaimed. + google.protobuf.Timestamp destination_expiration_time = 9 + [(google.api.field_behavior) = OPTIONAL]; +} + +// JobConfigurationExtract configures a job that exports data from a BigQuery +// table into Google Cloud Storage. +message JobConfigurationExtract { + // Options related to model extraction. + message ModelExtractOptions { + // The 1-based ID of the trial to be exported from a hyperparameter tuning + // model. If not specified, the trial with id = + // [Model](https://cloud.google.com/bigquery/docs/reference/rest/v2/models#resource:-model).defaultTrialId + // is exported. This field is ignored for models not trained with + // hyperparameter tuning. + google.protobuf.Int64Value trial_id = 1; + } + + // Required. Source reference for the export. + oneof source { + // A reference to the table being exported. + TableReference source_table = 1; + + // A reference to the model being exported. + ModelReference source_model = 9; + } + + // [Pick one] A list of fully-qualified Google Cloud Storage URIs where the + // extracted table should be written. + repeated string destination_uris = 3; + + // Optional. Whether to print out a header row in the results. + // Default is true. Not applicable when extracting models. + google.protobuf.BoolValue print_header = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. When extracting data in CSV format, this defines the + // delimiter to use between fields in the exported data. + // Default is ','. Not applicable when extracting models. 
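Editorial aside: to make the copy-job dispositions above concrete, here is a sketch of a JobConfigurationTableCopy payload as a plain object, using the camelCase JSON names of the proto fields. The project, dataset, and table IDs are placeholders, and how this object would be handed to a generated client is not something this diff specifies.

```ts
// Hypothetical JobConfigurationTableCopy payload (JSON field names are the
// camelCase mapping of the proto fields above).
const copyConfig = {
  sourceTable: {projectId: 'my-project', datasetId: 'src_ds', tableId: 'src'},
  destinationTable: {projectId: 'my-project', datasetId: 'dst_ds', tableId: 'dst'},
  createDisposition: 'CREATE_IF_NEEDED', // default: create the table if missing
  writeDisposition: 'WRITE_EMPTY',       // default: 'duplicate' error if data exists
  operationType: 'CLONE',                // same table types, billed for unique data only
};
```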
+  string field_delimiter = 5 [(google.api.field_behavior) = OPTIONAL]; + +  // Optional. The exported file format. Possible values include CSV, +  // NEWLINE_DELIMITED_JSON, PARQUET, or AVRO for tables and ML_TF_SAVED_MODEL +  // or ML_XGBOOST_BOOSTER for models. The default value for tables is CSV. +  // Tables with nested or repeated fields cannot be exported as CSV. The +  // default value for models is ML_TF_SAVED_MODEL. +  string destination_format = 6 [(google.api.field_behavior) = OPTIONAL]; + +  // Optional. The compression type to use for exported files. Possible values +  // include DEFLATE, GZIP, NONE, SNAPPY, and ZSTD. The default value is NONE. +  // Not all compression formats are supported for all file formats. DEFLATE is +  // only supported for Avro. ZSTD is only supported for Parquet. Not applicable +  // when extracting models. +  string compression = 7 [(google.api.field_behavior) = OPTIONAL]; + +  // Whether to use logical types when extracting to AVRO format. Not applicable +  // when extracting models. +  google.protobuf.BoolValue use_avro_logical_types = 13; + +  // Optional. Model extract options only applicable when extracting models. +  ModelExtractOptions model_extract_options = 14 +      [(google.api.field_behavior) = OPTIONAL]; +} + +message JobConfiguration { +  // Output only. The type of the job. Can be QUERY, LOAD, EXTRACT, COPY or +  // UNKNOWN. +  string job_type = 8; + +  // [Pick one] Configures a query job. +  JobConfigurationQuery query = 1; + +  // [Pick one] Configures a load job. +  JobConfigurationLoad load = 2; + +  // [Pick one] Copies a table. +  JobConfigurationTableCopy copy = 3; + +  // [Pick one] Configures an extract job. +  JobConfigurationExtract extract = 4; + +  // Optional. If set, don't actually run this job. A valid query will return +  // a mostly empty response with some processing statistics, while an invalid +  // query will return the same error it would if it wasn't a dry run. Behavior +  // of non-query jobs is undefined. +  google.protobuf.BoolValue dry_run = 5 +      [(google.api.field_behavior) = OPTIONAL]; + +  // Optional. Job timeout in milliseconds. If this time limit is exceeded, +  // BigQuery will attempt to stop a longer job, but may not always succeed in +  // canceling it before the job completes. For example, a job that takes more +  // than 60 seconds to complete has a better chance of being stopped than a job +  // that takes 10 seconds to complete. +  google.protobuf.Int64Value job_timeout_ms = 6 +      [(google.api.field_behavior) = OPTIONAL]; + +  // The labels associated with this job. You can use these to organize and +  // group your jobs. +  // Label keys and values can be no longer than 63 characters, can only contain +  // lowercase letters, numeric characters, underscores and dashes. +  // International characters are allowed. Label values are optional. Label +  // keys must start with a letter and each label in the list must have a +  // different key. +  map<string, string> labels = 7; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_creation_reason.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_creation_reason.proto.baseline new file mode 100755 index 000000000..0cede032b --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_creation_reason.proto.baseline @@ -0,0 +1,60 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "JobCreationReasonProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Reason about why a Job was created from a +// [`jobs.query`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query) +// method when used with `JOB_CREATION_OPTIONAL` Job creation mode. +// +// For +// [`jobs.insert`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert) +// method calls it will always be `REQUESTED`. +// +// [Preview](https://cloud.google.com/products/#product-launch-stages) +message JobCreationReason { + // Indicates the high level reason why a job was created. + enum Code { + // Reason is not specified. + CODE_UNSPECIFIED = 0; + + // Job creation was requested. + REQUESTED = 1; + + // The query request ran beyond a system defined timeout specified by the + // [timeoutMs field in the + // QueryRequest](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#queryrequest). + // As a result it was considered a long running operation for which a job + // was created. + LONG_RUNNING = 2; + + // The results from the query cannot fit in the response. + LARGE_RESULTS = 3; + + // BigQuery has determined that the query needs to be executed as a Job. + OTHER = 4; + } + + // Output only. Specifies the high level reason why a Job was created. + Code code = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_reference.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_reference.proto.baseline new file mode 100755 index 000000000..d7f3ece6f --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_reference.proto.baseline @@ -0,0 +1,45 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// A job reference is a fully qualified identifier for referring to a job. +message JobReference { + // Required. The ID of the project containing this job. 
+ string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the job. The ID must contain only letters (a-z, A-Z), + // numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 + // characters. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The geographic location of the job. The default value is US. + // + // For more information about BigQuery locations, see: + // https://cloud.google.com/bigquery/docs/locations + google.protobuf.StringValue location = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // This field should not be used. + repeated string location_alternative = 5; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_stats.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_stats.proto.baseline new file mode 100755 index 000000000..877e00392 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_stats.proto.baseline @@ -0,0 +1,1439 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/v2/dataset_reference.proto"; +import "google/cloud/bigquery/v2/model.proto"; +import "google/cloud/bigquery/v2/query_parameter.proto"; +import "google/cloud/bigquery/v2/routine_reference.proto"; +import "google/cloud/bigquery/v2/row_access_policy_reference.proto"; +import "google/cloud/bigquery/v2/session_info.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobStatsProto"; +option java_package = "com.google.cloud.bigquery.v2"; +option (google.api.resource_definition) = { + type: "cloudkms.googleapis.com/CryptoKey" + pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}" +}; +option (google.api.resource_definition) = { + type: "storage.googleapis.com/Bucket" + pattern: "*" +}; + +// An operation within a stage. +message ExplainQueryStep { + // Machine-readable operation type. + string kind = 1; + + // Human-readable description of the step(s). + repeated string substeps = 2; +} + +// A single stage of query execution. +message ExplainQueryStage { + // Indicates the type of compute mode. + enum ComputeMode { + // ComputeMode type not specified. + COMPUTE_MODE_UNSPECIFIED = 0; + + // This stage was processed using BigQuery slots. + BIGQUERY = 1; + + // This stage was processed using BI Engine compute. + BI_ENGINE = 2; + } + + // Human-readable name for the stage. + string name = 1; + + // Unique ID for the stage within the plan. 
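Editorial aside: the JobReference.job_id constraints above (letters, digits, underscores, or dashes; at most 1,024 characters) translate directly into a client-side check. A minimal sketch, with a helper name of our own choosing:

```ts
// Validates a job ID against the documented JobReference.job_id rules:
// only [A-Za-z0-9_-], length 1 to 1,024.
function isValidJobId(jobId: string): boolean {
  return /^[A-Za-z0-9_-]{1,1024}$/.test(jobId);
}

isValidJobId('load_2024-06-01'); // true
isValidJobId('bad/job/id');      // false: '/' is not an allowed character
```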
+  google.protobuf.Int64Value id = 2; + +  // Stage start time represented as milliseconds since the epoch. +  int64 start_ms = 3; + +  // Stage end time represented as milliseconds since the epoch. +  int64 end_ms = 4; + +  // IDs for stages that are inputs to this stage. +  repeated int64 input_stages = 5; + +  // Relative amount of time the average shard spent waiting to be +  // scheduled. +  google.protobuf.DoubleValue wait_ratio_avg = 6; + +  // Milliseconds the average shard spent waiting to be scheduled. +  google.protobuf.Int64Value wait_ms_avg = 7; + +  // Relative amount of time the slowest shard spent waiting to be +  // scheduled. +  google.protobuf.DoubleValue wait_ratio_max = 8; + +  // Milliseconds the slowest shard spent waiting to be scheduled. +  google.protobuf.Int64Value wait_ms_max = 9; + +  // Relative amount of time the average shard spent reading input. +  google.protobuf.DoubleValue read_ratio_avg = 10; + +  // Milliseconds the average shard spent reading input. +  google.protobuf.Int64Value read_ms_avg = 11; + +  // Relative amount of time the slowest shard spent reading input. +  google.protobuf.DoubleValue read_ratio_max = 12; + +  // Milliseconds the slowest shard spent reading input. +  google.protobuf.Int64Value read_ms_max = 13; + +  // Relative amount of time the average shard spent on CPU-bound tasks. +  google.protobuf.DoubleValue compute_ratio_avg = 14; + +  // Milliseconds the average shard spent on CPU-bound tasks. +  google.protobuf.Int64Value compute_ms_avg = 15; + +  // Relative amount of time the slowest shard spent on CPU-bound tasks. +  google.protobuf.DoubleValue compute_ratio_max = 16; + +  // Milliseconds the slowest shard spent on CPU-bound tasks. +  google.protobuf.Int64Value compute_ms_max = 17; + +  // Relative amount of time the average shard spent on writing output. +  google.protobuf.DoubleValue write_ratio_avg = 18; + +  // Milliseconds the average shard spent on writing output. +  google.protobuf.Int64Value write_ms_avg = 19; + +  // Relative amount of time the slowest shard spent on writing output. +  google.protobuf.DoubleValue write_ratio_max = 20; + +  // Milliseconds the slowest shard spent on writing output. +  google.protobuf.Int64Value write_ms_max = 21; + +  // Total number of bytes written to shuffle. +  google.protobuf.Int64Value shuffle_output_bytes = 22; + +  // Total number of bytes written to shuffle and spilled to disk. +  google.protobuf.Int64Value shuffle_output_bytes_spilled = 23; + +  // Number of records read into the stage. +  google.protobuf.Int64Value records_read = 24; + +  // Number of records written by the stage. +  google.protobuf.Int64Value records_written = 25; + +  // Number of parallel input segments to be processed. +  google.protobuf.Int64Value parallel_inputs = 26; + +  // Number of parallel input segments completed. +  google.protobuf.Int64Value completed_parallel_inputs = 27; + +  // Current status for this stage. +  string status = 28; + +  // List of operations within the stage in dependency order (approximately +  // chronological). +  repeated ExplainQueryStep steps = 29; + +  // Slot-milliseconds used by the stage. +  google.protobuf.Int64Value slot_ms = 30; + +  // Output only. Compute mode for this stage. +  ComputeMode compute_mode = 31 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Summary of the state of query execution at a given time. +message QueryTimelineSample { +  // Milliseconds elapsed since the start of query execution. +  google.protobuf.Int64Value elapsed_ms = 1; + +  // Cumulative slot-ms consumed by the query.
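Editorial aside: because QueryTimelineSample reports cumulative elapsed_ms and total_slot_ms, the average slot usage over an interval falls out of any two consecutive samples. A sketch, assuming the Int64Value wrappers have already been unwrapped to plain numbers:

```ts
// Average slots used between two timeline samples:
// slots ~= delta(total_slot_ms) / delta(elapsed_ms), i.e. slot-ms per wall-clock ms.
interface TimelinePoint {
  elapsedMs: number;   // unwrapped elapsed_ms
  totalSlotMs: number; // unwrapped total_slot_ms
}

function averageSlots(earlier: TimelinePoint, later: TimelinePoint): number {
  const dt = later.elapsedMs - earlier.elapsedMs;
  if (dt <= 0) return 0; // samples must be distinct and in order
  return (later.totalSlotMs - earlier.totalSlotMs) / dt;
}
```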
+  google.protobuf.Int64Value total_slot_ms = 2; + +  // Total units of work remaining for the query. This number can be revised +  // (increased or decreased) while the query is running. +  google.protobuf.Int64Value pending_units = 3; + +  // Total parallel units of work completed by this query. +  google.protobuf.Int64Value completed_units = 4; + +  // Total number of active workers. This does not correspond directly to +  // slot usage. This is the largest value observed since the last sample. +  google.protobuf.Int64Value active_units = 5; + +  // Units of work that can be scheduled immediately. Providing additional slots +  // for these units of work will accelerate the query, if no other query in +  // the reservation needs additional slots. +  google.protobuf.Int64Value estimated_runnable_units = 7; +} + +// The external service cost is a portion of the total cost; these costs are not +// additive with total_bytes_billed. Moreover, this field only tracks external +// service costs that will show up as BigQuery costs (e.g. training a BigQuery +// ML job with Google Cloud CAIP or AutoML Tables services), not other costs +// which may be accrued by running the query (e.g. reading from Bigtable or +// Cloud Storage). The external service costs with a different billing SKU (e.g. +// a CAIP job is charged based on VM usage) are converted to BigQuery +// billed_bytes and slot_ms with an equivalent amount of US dollars. Services may +// not directly correlate to these metrics, but these are the equivalents for +// billing purposes. +// Output only. +message ExternalServiceCost { +  // External service name. +  string external_service = 1; + +  // External service cost in terms of BigQuery bytes processed. +  google.protobuf.Int64Value bytes_processed = 2; + +  // External service cost in terms of BigQuery bytes billed. +  google.protobuf.Int64Value bytes_billed = 3; + +  // External service cost in terms of BigQuery slot milliseconds. +  google.protobuf.Int64Value slot_ms = 4; + +  // Non-preemptible reserved slots used for an external job. +  // For example, reserved slots for a Cloud AI Platform job are the VM usages +  // converted to BigQuery slots with an equivalent amount of price. +  int64 reserved_slot_count = 5; +} + +// Statistics for the EXPORT DATA statement as part of Query Job. EXTRACT +// JOB statistics are populated in JobStatistics4. +message ExportDataStatistics { +  // Number of destination files generated in case of EXPORT DATA +  // statement only. +  google.protobuf.Int64Value file_count = 1; + +  // [Alpha] Number of destination rows generated in case of EXPORT DATA +  // statement only. +  google.protobuf.Int64Value row_count = 2; +} + +// Reason why BI Engine didn't accelerate the query (or sub-query). +message BiEngineReason { +  // Indicates the high-level reason for no/partial acceleration. +  enum Code { +    // BiEngineReason not specified. +    CODE_UNSPECIFIED = 0; + +    // No reservation available for BI Engine acceleration. +    NO_RESERVATION = 1; + +    // Not enough memory available for BI Engine acceleration. +    INSUFFICIENT_RESERVATION = 2; + +    // This particular SQL text is not supported for acceleration by BI Engine. +    UNSUPPORTED_SQL_TEXT = 4; + +    // Input too large for acceleration by BI Engine. +    INPUT_TOO_LARGE = 5; + +    // Catch-all code for all other cases for partial or disabled acceleration. +    OTHER_REASON = 6; + +    // One or more tables were not eligible for BI Engine acceleration. +    TABLE_EXCLUDED = 7; +  } + +  // Output only.
High-level BI Engine reason for partial or disabled + // acceleration + Code code = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Free form human-readable reason for partial or disabled + // acceleration. + string message = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a BI Engine specific query. +// Populated as part of JobStatistics2 +message BiEngineStatistics { + // Indicates the type of BI Engine acceleration. + enum BiEngineMode { + // BiEngineMode type not specified. + ACCELERATION_MODE_UNSPECIFIED = 0; + + // BI Engine disabled the acceleration. bi_engine_reasons + // specifies a more detailed reason. + DISABLED = 1; + + // Part of the query was accelerated using BI Engine. + // See bi_engine_reasons for why parts of the query were not + // accelerated. + PARTIAL = 2; + + // All of the query was accelerated using BI Engine. + FULL = 3; + } + + // Indicates the type of BI Engine acceleration. + enum BiEngineAccelerationMode { + // BiEngineMode type not specified. + BI_ENGINE_ACCELERATION_MODE_UNSPECIFIED = 0; + + // BI Engine acceleration was attempted but disabled. bi_engine_reasons + // specifies a more detailed reason. + BI_ENGINE_DISABLED = 1; + + // Some inputs were accelerated using BI Engine. + // See bi_engine_reasons for why parts of the query were not + // accelerated. + PARTIAL_INPUT = 2; + + // All of the query inputs were accelerated using BI Engine. + FULL_INPUT = 3; + + // All of the query was accelerated using BI Engine. + FULL_QUERY = 4; + } + + // Output only. Specifies which mode of BI Engine acceleration was performed + // (if any). + BiEngineMode bi_engine_mode = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Specifies which mode of BI Engine acceleration was performed + // (if any). + BiEngineAccelerationMode acceleration_mode = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // In case of DISABLED or PARTIAL bi_engine_mode, these contain the + // explanatory reasons as to why BI Engine could not accelerate. + // In case the full query was accelerated, this field is not populated. + repeated BiEngineReason bi_engine_reasons = 2; +} + +// Reason about why no search index was used in the search query (or +// sub-query). +message IndexUnusedReason { + // Indicates the high-level reason for the scenario when no search index was + // used. + enum Code { + // Code not specified. + CODE_UNSPECIFIED = 0; + + // Indicates the search index configuration has not been created. + INDEX_CONFIG_NOT_AVAILABLE = 1; + + // Indicates the search index creation has not been completed. + PENDING_INDEX_CREATION = 2; + + // Indicates the base table has been truncated (rows have been removed + // from table with TRUNCATE TABLE statement) since the last time the search + // index was refreshed. + BASE_TABLE_TRUNCATED = 3; + + // Indicates the search index configuration has been changed since the last + // time the search index was refreshed. + INDEX_CONFIG_MODIFIED = 4; + + // Indicates the search query accesses data at a timestamp before the last + // time the search index was refreshed. + TIME_TRAVEL_QUERY = 5; + + // Indicates the usage of search index will not contribute to any pruning + // improvement for the search function, e.g. when the search predicate is in + // a disjunction with other non-search predicates. + NO_PRUNING_POWER = 6; + + // Indicates the search index does not cover all fields in the search + // function. 
+ UNINDEXED_SEARCH_FIELDS = 7; + + // Indicates the search index does not support the given search query + // pattern. + UNSUPPORTED_SEARCH_PATTERN = 8; + + // Indicates the query has been optimized by using a materialized view. + OPTIMIZED_WITH_MATERIALIZED_VIEW = 9; + + // Indicates the query has been secured by data masking, and thus search + // indexes are not applicable. + SECURED_BY_DATA_MASKING = 11; + + // Indicates that the search index and the search function call do not + // have the same text analyzer. + MISMATCHED_TEXT_ANALYZER = 12; + + // Indicates the base table is too small (below a certain threshold). + // The index does not provide noticeable search performance gains + // when the base table is too small. + BASE_TABLE_TOO_SMALL = 13; + + // Indicates that the total size of indexed base tables in your organization + // exceeds your region's limit and the index is not used in the query. To + // index larger base tables, you can + // use + // your own reservation for index-management jobs. + BASE_TABLE_TOO_LARGE = 14; + + // Indicates that the estimated performance gain from using the search index + // is too low for the given search query. + ESTIMATED_PERFORMANCE_GAIN_TOO_LOW = 15; + + // Indicates that search indexes can not be used for search query with + // STANDARD edition. + NOT_SUPPORTED_IN_STANDARD_EDITION = 17; + + // Indicates that an option in the search function that cannot make use of + // the index has been selected. + INDEX_SUPPRESSED_BY_FUNCTION_OPTION = 18; + + // Indicates that the query was cached, and thus the search index was not + // used. + QUERY_CACHE_HIT = 19; + + // The index cannot be used in the search query because it is stale. + STALE_INDEX = 20; + + // Indicates an internal error that causes the search index to be unused. + INTERNAL_ERROR = 10; + + // Indicates that the reason search indexes cannot be used in the query is + // not covered by any of the other IndexUnusedReason options. + OTHER_REASON = 16; + } + + // Specifies the high-level reason for the scenario when no search index was + // used. + optional Code code = 1; + + // Free form human-readable reason for the scenario when no search index was + // used. + optional string message = 2; + + // Specifies the base table involved in the reason that no search index was + // used. + optional TableReference base_table = 3; + + // Specifies the name of the unused search index, if available. + optional string index_name = 4; +} + +// Statistics for a search query. +// Populated as part of JobStatistics2. +message SearchStatistics { + // Indicates the type of search index usage in the entire search query. + enum IndexUsageMode { + // Index usage mode not specified. + INDEX_USAGE_MODE_UNSPECIFIED = 0; + + // No search indexes were used in the search query. See + // [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for detailed reasons. + UNUSED = 1; + + // Part of the search query used search indexes. See [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for why other parts of the query did not use search indexes. + PARTIALLY_USED = 2; + + // The entire search query used search indexes. + FULLY_USED = 4; + } + + // Specifies the index usage mode for the query. + IndexUsageMode index_usage_mode = 1; + + // When `indexUsageMode` is `UNUSED` or `PARTIALLY_USED`, this field explains + // why indexes were not used in all or part of the search query. If + // `indexUsageMode` is `FULLY_USED`, this field is not populated. 
+ repeated IndexUnusedReason index_unused_reasons = 2; +} + +// Statistics for a vector search query. +// Populated as part of JobStatistics2. +message VectorSearchStatistics { + // Indicates the type of vector index usage in the entire vector search query. + enum IndexUsageMode { + // Index usage mode not specified. + INDEX_USAGE_MODE_UNSPECIFIED = 0; + + // No vector indexes were used in the vector search query. See + // [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for detailed reasons. + UNUSED = 1; + + // Part of the vector search query used vector indexes. See + // [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for why other parts of the query did not use vector indexes. + PARTIALLY_USED = 2; + + // The entire vector search query used vector indexes. + FULLY_USED = 4; + } + + // Specifies the index usage mode for the query. + IndexUsageMode index_usage_mode = 1; + + // When `indexUsageMode` is `UNUSED` or `PARTIALLY_USED`, this field explains + // why indexes were not used in all or part of the vector search query. If + // `indexUsageMode` is `FULLY_USED`, this field is not populated. + repeated IndexUnusedReason index_unused_reasons = 2; +} + +// Query optimization information for a QUERY job. +message QueryInfo { + // Output only. Information about query optimizations. + google.protobuf.Struct optimization_details = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a LOAD query. +message LoadQueryStatistics { + // Output only. Number of source files in a LOAD query. + google.protobuf.Int64Value input_files = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of bytes of source data in a LOAD query. + google.protobuf.Int64Value input_file_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of rows imported in a LOAD query. + // Note that while a LOAD query is in the running state, this value may + // change. + google.protobuf.Int64Value output_rows = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Size of the loaded data in bytes. Note that while a LOAD query + // is in the running state, this value may change. + google.protobuf.Int64Value output_bytes = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of bad records encountered while processing a LOAD + // query. Note that if the job has failed because of more bad records + // encountered than the maximum allowed in the load job configuration, then + // this number can be less than the total number of bad records present in the + // input data. + google.protobuf.Int64Value bad_records = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a query job. +message JobStatistics2 { + // Output only. Describes execution plan for the query. + repeated ExplainQueryStage query_plan = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The original estimate of bytes processed for the job. + google.protobuf.Int64Value estimated_bytes_processed = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes a timeline of job execution. + repeated QueryTimelineSample timeline = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total number of partitions processed from all partitioned + // tables referenced in the job. + google.protobuf.Int64Value total_partitions_processed = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
Total bytes processed for the job. + google.protobuf.Int64Value total_bytes_processed = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. For dry-run jobs, totalBytesProcessed is an estimate and this + // field specifies the accuracy of the estimate. Possible values can be: + // UNKNOWN: accuracy of the estimate is unknown. + // PRECISE: estimate is precise. + // LOWER_BOUND: estimate is lower bound of what the query would cost. + // UPPER_BOUND: estimate is upper bound of what the query would cost. + string total_bytes_processed_accuracy = 21 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If the project is configured to use on-demand pricing, + // then this field contains the total bytes billed for the job. + // If the project is configured to use flat-rate pricing, then you are + // not billed for bytes and this field is informational only. + google.protobuf.Int64Value total_bytes_billed = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Billing tier for the job. This is a BigQuery-specific concept + // which is not related to the Google Cloud notion of "free tier". The value + // here is a measure of the query's resource consumption relative to the + // amount of data scanned. For on-demand queries, the limit is 100, and all + // queries within this limit are billed at the standard on-demand rates. + // On-demand queries that exceed this limit will fail with a + // billingTierLimitExceeded error. + google.protobuf.Int32Value billing_tier = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Slot-milliseconds for the job. + google.protobuf.Int64Value total_slot_ms = 8 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Whether the query result was fetched from the query cache. + google.protobuf.BoolValue cache_hit = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced tables for the job. Queries that reference more + // than 50 tables will not have a complete list. + repeated TableReference referenced_tables = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced routines for the job. + repeated RoutineReference referenced_routines = 24 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The schema of the results. Present only for successful dry + // run of non-legacy SQL queries. + TableSchema schema = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of rows affected by a DML statement. Present + // only for DML statements INSERT, UPDATE or DELETE. + google.protobuf.Int64Value num_dml_affected_rows = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Detailed statistics for DML statements INSERT, UPDATE, DELETE, + // MERGE or TRUNCATE. + DmlStats dml_stats = 32 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. GoogleSQL only: list of undeclared query + // parameters detected during a dry run validation. + repeated QueryParameter undeclared_query_parameters = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The type of query statement, if valid. + // Possible values: + // + // * `SELECT`: + // [`SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_list) + // statement. + // * `ASSERT`: + // [`ASSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements#assert) + // statement. 
+ // * `INSERT`: + // [`INSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement) + // statement. + // * `UPDATE`: + // [`UPDATE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#update_statement) + // statement. + // * `DELETE`: + // [`DELETE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) + // statement. + // * `MERGE`: + // [`MERGE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) + // statement. + // * `CREATE_TABLE`: [`CREATE + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) + // statement, without `AS SELECT`. + // * `CREATE_TABLE_AS_SELECT`: [`CREATE TABLE AS + // SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#query_statement) + // statement. + // * `CREATE_VIEW`: [`CREATE + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_view_statement) + // statement. + // * `CREATE_MODEL`: [`CREATE + // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_model_statement) + // statement. + // * `CREATE_MATERIALIZED_VIEW`: [`CREATE MATERIALIZED + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement) + // statement. + // * `CREATE_FUNCTION`: [`CREATE + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement) + // statement. + // * `CREATE_TABLE_FUNCTION`: [`CREATE TABLE + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement) + // statement. + // * `CREATE_PROCEDURE`: [`CREATE + // PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure) + // statement. + // * `CREATE_ROW_ACCESS_POLICY`: [`CREATE ROW ACCESS + // POLICY`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement) + // statement. + // * `CREATE_SCHEMA`: [`CREATE + // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement) + // statement. + // * `CREATE_SNAPSHOT_TABLE`: [`CREATE SNAPSHOT + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement) + // statement. + // * `CREATE_SEARCH_INDEX`: [`CREATE SEARCH + // INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement) + // statement. + // * `DROP_TABLE`: [`DROP + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement) + // statement. + // * `DROP_EXTERNAL_TABLE`: [`DROP EXTERNAL + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement) + // statement. + // * `DROP_VIEW`: [`DROP + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_statement) + // statement. + // * `DROP_MODEL`: [`DROP + // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model) + // statement. 
+ // * `DROP_MATERIALIZED_VIEW`: [`DROP MATERIALIZED + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement) + // statement. + // * `DROP_FUNCTION` : [`DROP + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement) + // statement. + // * `DROP_TABLE_FUNCTION` : [`DROP TABLE + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function) + // statement. + // * `DROP_PROCEDURE`: [`DROP + // PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement) + // statement. + // * `DROP_SEARCH_INDEX`: [`DROP SEARCH + // INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index) + // statement. + // * `DROP_SCHEMA`: [`DROP + // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_statement) + // statement. + // * `DROP_SNAPSHOT_TABLE`: [`DROP SNAPSHOT + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement) + // statement. + // * `DROP_ROW_ACCESS_POLICY`: [`DROP [ALL] ROW ACCESS + // POLICY|POLICIES`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement) + // statement. + // * `ALTER_TABLE`: [`ALTER + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement) + // statement. + // * `ALTER_VIEW`: [`ALTER + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement) + // statement. + // * `ALTER_MATERIALIZED_VIEW`: [`ALTER MATERIALIZED + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement) + // statement. + // * `ALTER_SCHEMA`: [`ALTER + // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#aalter_schema_set_options_statement) + // statement. + // * `SCRIPT`: + // [`SCRIPT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language). + // * `TRUNCATE_TABLE`: [`TRUNCATE + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement) + // statement. + // * `CREATE_EXTERNAL_TABLE`: [`CREATE EXTERNAL + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement) + // statement. + // * `EXPORT_DATA`: [`EXPORT + // DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement) + // statement. + // * `EXPORT_MODEL`: [`EXPORT + // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model) + // statement. + // * `LOAD_DATA`: [`LOAD + // DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#load_data_statement) + // statement. + // * `CALL`: + // [`CALL`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call) + // statement. + string statement_type = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL operation performed, possibly + // dependent on the pre-existence of the DDL target. 
+ string ddl_operation_performed = 15 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL target table. Present only for + // CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. + TableReference ddl_target_table = 16 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The table after rename. Present only for ALTER TABLE RENAME TO + // query. + TableReference ddl_destination_table = 31 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL target row access policy. Present only for + // CREATE/DROP ROW ACCESS POLICY queries. + RowAccessPolicyReference ddl_target_row_access_policy = 26 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of row access policies affected by a DDL statement. + // Present only for DROP ALL ROW ACCESS POLICIES queries. + google.protobuf.Int64Value ddl_affected_row_access_policy_count = 27 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. [Beta] The DDL target routine. Present only for + // CREATE/DROP FUNCTION/PROCEDURE queries. + RoutineReference ddl_target_routine = 22 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL target dataset. Present only for CREATE/ALTER/DROP + // SCHEMA(dataset) queries. + DatasetReference ddl_target_dataset = 30 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of a BigQuery ML training job. + MlStatistics ml_statistics = 23 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Stats for EXPORT DATA statement. + ExportDataStatistics export_data_statistics = 25 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Job cost breakdown as bigquery internal cost and external + // service costs. + repeated ExternalServiceCost external_service_costs = 28 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. BI Engine specific Statistics. + BiEngineStatistics bi_engine_statistics = 29 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for a LOAD query. + LoadQueryStatistics load_query_statistics = 33 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced table for DCL statement. + TableReference dcl_target_table = 34 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced view for DCL statement. + TableReference dcl_target_view = 35 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced dataset for DCL statement. + DatasetReference dcl_target_dataset = 36 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Search query specific statistics. + SearchStatistics search_statistics = 37 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Vector Search query specific statistics. + VectorSearchStatistics vector_search_statistics = 44 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Performance insights. + PerformanceInsights performance_insights = 38 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Query optimization information for a QUERY job. + QueryInfo query_info = 39 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of a Spark procedure job. + SparkStatistics spark_statistics = 40 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total bytes transferred for cross-cloud queries such as Cross + // Cloud Transfer and CREATE TABLE AS SELECT (CTAS). 
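Editorial aside: several JobStatistics2 fields above are most useful on dry runs, where totalBytesProcessed is an estimate qualified by totalBytesProcessedAccuracy and schema is returned for a successful dry run of a GoogleSQL query. A sketch of the data shape a caller might inspect; the interface and helper are ours, and the assumption that Int64Value wrappers arrive as strings follows proto3 JSON int64 encoding:

```ts
// Hypothetical view over dry-run statistics, using the camelCase JSON names
// of the JobStatistics2 fields documented above.
interface QueryDryRunStats {
  totalBytesProcessed?: string; // Int64Value; a string in proto3 JSON
  totalBytesProcessedAccuracy?: 'UNKNOWN' | 'PRECISE' | 'LOWER_BOUND' | 'UPPER_BOUND';
  cacheHit?: boolean;
  schema?: {fields: Array<{name: string; type: string}>};
}

function summarizeDryRun(stats: QueryDryRunStats): string {
  const bytes = stats.totalBytesProcessed ?? 'unknown';
  const accuracy = stats.totalBytesProcessedAccuracy ?? 'UNKNOWN';
  const cols = stats.schema?.fields.map(f => f.name).join(', ') ?? 'n/a';
  return `would process ~${bytes} bytes (${accuracy}); columns: ${cols}`;
}
```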
+ google.protobuf.Int64Value transferred_bytes = 41 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of materialized views of a query job. + MaterializedViewStatistics materialized_view_statistics = 42 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of metadata cache usage in a query for BigLake + // tables. + MetadataCacheStatistics metadata_cache_statistics = 43 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a load job. +message JobStatistics3 { + // Output only. Number of source files in a load job. + google.protobuf.Int64Value input_files = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of bytes of source data in a load job. + google.protobuf.Int64Value input_file_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of rows imported in a load job. + // Note that while an import job is in the running state, this + // value may change. + google.protobuf.Int64Value output_rows = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Size of the loaded data in bytes. Note + // that while a load job is in the running state, this value may change. + google.protobuf.Int64Value output_bytes = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of bad records encountered. Note that if the job + // has failed because of more bad records encountered than the maximum + // allowed in the load job configuration, then this number can be less than + // the total number of bad records present in the input data. + google.protobuf.Int64Value bad_records = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes a timeline of job execution. + repeated QueryTimelineSample timeline = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for an extract job. +message JobStatistics4 { + // Output only. Number of files per destination URI or URI pattern + // specified in the extract configuration. These values will be in the same + // order as the URIs specified in the 'destinationUris' field. + repeated int64 destination_uri_file_counts = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of user bytes extracted into the result. This is the + // byte count as computed by BigQuery for billing purposes + // and doesn't have any relationship with the number of actual + // result bytes extracted in the desired format. + google.protobuf.Int64Value input_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes a timeline of job execution. + repeated QueryTimelineSample timeline = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a copy job. +message CopyJobStatistics { + // Output only. Number of rows copied to the destination table. + google.protobuf.Int64Value copied_rows = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of logical bytes copied to the destination table. + google.protobuf.Int64Value copied_logical_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Job statistics specific to a BigQuery ML training job. +message MlStatistics { + // Training type. + enum TrainingType { + // Unspecified training type. + TRAINING_TYPE_UNSPECIFIED = 0; + + // Single training with fixed parameter space. + SINGLE_TRAINING = 1; + + // [Hyperparameter tuning + // training](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview). 
+    HPARAM_TUNING = 2; +  } + +  // Output only. Maximum number of iterations specified as max_iterations in +  // the 'CREATE MODEL' query. The actual number of iterations may be less than +  // this number due to early stop. +  int64 max_iterations = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Results for all completed iterations. +  // Empty for [hyperparameter tuning +  // jobs](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview). +  repeated Model.TrainingRun.IterationResult iteration_results = 2; + +  // Output only. The type of the model that is being trained. +  Model.ModelType model_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Training type of the job. +  TrainingType training_type = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Trials of a [hyperparameter tuning +  // job](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) +  // sorted by trial_id. +  repeated Model.HparamTuningTrial hparam_trials = 5 +      [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Job statistics specific to the child job of a script. +message ScriptStatistics { +  // Describes how the job is evaluated. +  enum EvaluationKind { +    // Default value. +    EVALUATION_KIND_UNSPECIFIED = 0; + +    // The statement appears directly in the script. +    STATEMENT = 1; + +    // The statement evaluates an expression that appears in the script. +    EXPRESSION = 2; +  } + +  // Represents the location of the statement/expression being evaluated. +  // Line and column numbers are defined as follows: +  // +  // - Line and column numbers start with one. That is, line 1 column 1 denotes +  //   the start of the script. +  // - When inside a stored procedure, all line/column numbers are relative +  //   to the procedure body, not the script in which the procedure was defined. +  // - Start/end positions exclude leading/trailing comments and whitespace. +  //   The end position always ends with a ";", when present. +  // - Multi-byte Unicode characters are treated as just one column. +  // - If the original script (or procedure definition) contains TAB characters, +  //   a tab "snaps" the indentation forward to the nearest multiple of 8 +  //   characters, plus 1. For example, a TAB on column 1, 2, 3, 4, 5, 6, 7, or 8 +  //   will advance the next character to column 9. A TAB on column 9, 10, 11, +  //   12, 13, 14, 15, or 16 will advance the next character to column 17. +  message ScriptStackFrame { +    // Output only. One-based start line. +    int32 start_line = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + +    // Output only. One-based start column. +    int32 start_column = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + +    // Output only. One-based end line. +    int32 end_line = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + +    // Output only. One-based end column. +    int32 end_column = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + +    // Output only. Name of the active procedure, empty if in a top-level +    // script. +    string procedure_id = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + +    // Output only. Text of the current statement/expression. +    string text = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; +  } + +  // Whether this child job was a statement or expression. +  EvaluationKind evaluation_kind = 1; + +  // Stack trace showing the line/column/procedure name of each frame on the +  // stack at the point where the current evaluation happened. The leaf frame +  // is first, the primary script is last. Never empty.
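Editorial aside: since ScriptStackFrame positions are one-based and the stack_frames list below is leaf-first, rendering a readable trace is a simple map-and-join. A sketch with unwrapped fields and a formatting helper of our own naming:

```ts
// Unwrapped view of the ScriptStackFrame fields used for display.
interface ScriptStackFrame {
  startLine: number;    // one-based
  startColumn: number;  // one-based
  procedureId?: string; // empty/absent at top-level script scope
  text: string;         // current statement or expression
}

// Leaf frame first, primary script last, mirroring the proto's ordering.
function formatScriptStack(frames: ScriptStackFrame[]): string {
  return frames
    .map(f => `  at ${f.procedureId || '<script>'} (${f.startLine}:${f.startColumn}) ${f.text}`)
    .join('\n');
}
```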
+  repeated ScriptStackFrame stack_frames = 2; +} + +// Statistics for row-level security. +message RowLevelSecurityStatistics { +  // Whether any accessed data was protected by row access policies. +  bool row_level_security_applied = 1; +} + +// Statistics for data-masking. +message DataMaskingStatistics { +  // Whether any accessed data was protected by the data masking. +  bool data_masking_applied = 1; +} + +// Statistics for a single job execution. +message JobStatistics { +  // [Alpha] Information of a multi-statement transaction. +  message TransactionInfo { +    // Output only. [Alpha] Id of the transaction. +    string transaction_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +  } + +  // Output only. Creation time of this job, in milliseconds since the epoch. +  // This field will be present on all jobs. +  int64 creation_time = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Start time of this job, in milliseconds since the epoch. +  // This field will be present when the job transitions from the PENDING state +  // to either RUNNING or DONE. +  int64 start_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. End time of this job, in milliseconds since the epoch. This +  // field will be present whenever a job is in the DONE state. +  int64 end_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Total bytes processed for the job. +  google.protobuf.Int64Value total_bytes_processed = 4 +      [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. [TrustedTester] Job progress (0.0 -> 1.0) for LOAD and +  // EXTRACT jobs. +  google.protobuf.DoubleValue completion_ratio = 5 +      [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Quotas which delayed this job's start time. +  repeated string quota_deferments = 9 +      [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Statistics for a query job. +  JobStatistics2 query = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Statistics for a load job. +  JobStatistics3 load = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Statistics for an extract job. +  JobStatistics4 extract = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Statistics for a copy job. +  CopyJobStatistics copy = 21 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Slot-milliseconds for the job. +  google.protobuf.Int64Value total_slot_ms = 10 +      [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Name of the primary reservation assigned to this job. Note +  // that this could be different than reservations reported in the reservation +  // usage field if parent reservations were used to execute this job. +  string reservation_id = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Number of child jobs executed. +  int64 num_child_jobs = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. If this is a child job, specifies the job ID of the parent. +  string parent_job_id = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. If this is a child job of a script, specifies information about +  // the context of this job within the script. +  ScriptStatistics script_statistics = 14 +      [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Statistics for row-level security. Present only for query and +  // extract jobs. +  RowLevelSecurityStatistics row_level_security_statistics = 16 +      [(google.api.field_behavior) = OUTPUT_ONLY]; + +  // Output only. Statistics for data-masking.
Present only for query and + // extract jobs. + DataMaskingStatistics data_masking_statistics = 20 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. [Alpha] Information of the multi-statement transaction if this + // job is part of one. + // + // This property is only expected on a child job or a job that is in a + // session. A script parent job is not part of the transaction started in the + // script. + TransactionInfo transaction_info = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Information of the session if this job is part of one. + SessionInfo session_info = 18 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The duration in milliseconds of the execution of the final + // attempt of this job, as BigQuery may internally re-attempt to execute the + // job. + int64 final_execution_duration_ms = 22 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Name of edition corresponding to the reservation for this job + // at the time of this update. + ReservationEdition edition = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Detailed statistics for DML statements +message DmlStats { + // Output only. Number of inserted Rows. Populated by DML INSERT and MERGE + // statements + google.protobuf.Int64Value inserted_row_count = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of deleted Rows. populated by DML DELETE, MERGE and + // TRUNCATE statements. + google.protobuf.Int64Value deleted_row_count = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of updated Rows. Populated by DML UPDATE and MERGE + // statements. + google.protobuf.Int64Value updated_row_count = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Performance insights for the job. +message PerformanceInsights { + // Output only. Average execution ms of previous runs. Indicates the job ran + // slow compared to previous executions. To find previous executions, use + // INFORMATION_SCHEMA tables and filter jobs with same query hash. + int64 avg_previous_execution_ms = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Standalone query stage performance insights, for exploring + // potential improvements. + repeated StagePerformanceStandaloneInsight + stage_performance_standalone_insights = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Query stage performance insights compared to previous runs, + // for diagnosing performance regression. + repeated StagePerformanceChangeInsight stage_performance_change_insights = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Performance insights compared to the previous executions for a specific +// stage. +message StagePerformanceChangeInsight { + // Output only. The stage id that the insight mapped to. + int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Input data change insight of the query stage. + optional InputDataChange input_data_change = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Details about the input data change insight. +message InputDataChange { + // Output only. Records read difference percentage compared to a previous run. + float records_read_diff_percentage = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Standalone performance insights for a specific stage. +message StagePerformanceStandaloneInsight { + // Output only. The stage id that the insight mapped to. 
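+ // For example, an insight with stage_id = 7 is assumed to refer to the
+ // stage with id 7 in this job's query plan, so it can be correlated with
+ // the corresponding ExplainQueryStage entry (see also the ExplainQueryStep
+ // reference in HighCardinalityJoin below).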
+ int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. True if the stage has a slot contention issue.
+ optional bool slot_contention = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. True if the stage has insufficient shuffle quota.
+ optional bool insufficient_shuffle_quota = 3
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. If present, the stage had the following reasons for being
+ // disqualified from BI Engine execution.
+ repeated BiEngineReason bi_engine_reasons = 5
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. High cardinality joins in the stage.
+ repeated HighCardinalityJoin high_cardinality_joins = 6
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Partition skew in the stage.
+ optional PartitionSkew partition_skew = 7
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// High cardinality join detailed information.
+message HighCardinalityJoin {
+ // Output only. Count of left input rows.
+ int64 left_rows = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Count of right input rows.
+ int64 right_rows = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Count of the output rows.
+ int64 output_rows = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The index of the join operator in the ExplainQueryStep lists.
+ int32 step_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Partition skew detailed information.
+message PartitionSkew {
+ // Details about source stages which produce skewed data.
+ message SkewSource {
+ // Output only. Stage id of the skew source stage.
+ int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+ }
+
+ // Output only. Source stages which produce skewed data.
+ repeated SkewSource skew_sources = 1
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Statistics for a BigSpark query.
+// Populated as part of JobStatistics2
+message SparkStatistics {
+ // Spark job logs can be filtered by these fields in Cloud Logging.
+ message LoggingInfo {
+ // Output only. Resource type used for logging.
+ string resource_type = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Project ID where the Spark logs were written.
+ string project_id = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+ }
+
+ // Output only. Spark job ID if a Spark job is created successfully.
+ optional string spark_job_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Location where the Spark job is executed.
+ // A location is selected by BigQuery for jobs configured to run in a
+ // multi-region.
+ optional string spark_job_location = 2
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Endpoints returned from Dataproc.
+ // Key list:
+ // - history_server_endpoint: A link to Spark job UI.
+ map<string, string> endpoints = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Logging info is used to generate a link to Cloud Logging.
+ optional LoggingInfo logging_info = 4
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The Cloud KMS encryption key that is used to protect the
+ // resources created by the Spark job. If the Spark procedure uses the invoker
+ // security mode, the Cloud KMS encryption key is either inferred from the
+ // provided system variable,
+ // `@@spark_proc_properties.kms_key_name`, or the default key of the BigQuery
+ // job's project (if the CMEK organization policy is enforced). Otherwise, the
+ // Cloud KMS key is either inferred from the Spark connection associated with
+ // the procedure (if it is provided), or from the default key of the Spark
+ // connection's project if the CMEK organization policy is enforced.
+ //
+ // Example:
+ //
+ // * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`
+ optional string kms_key_name = 5 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = {
+ type: "cloudkms.googleapis.com/CryptoKey"
+ }
+ ];
+
+ // Output only. The Google Cloud Storage bucket that is used as the default
+ // file system by the Spark application. This field is only filled when the
+ // Spark procedure uses the invoker security mode. The `gcsStagingBucket`
+ // bucket is inferred from the `@@spark_proc_properties.staging_bucket` system
+ // variable (if it is provided). Otherwise, BigQuery creates a default staging
+ // bucket for the job and returns the bucket name in this field.
+ //
+ // Example:
+ //
+ // * `gs://[bucket_name]`
+ optional string gcs_staging_bucket = 6 [
+ (google.api.field_behavior) = OUTPUT_ONLY,
+ (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" }
+ ];
+}
+
+// Statistics of materialized views considered in a query job.
+message MaterializedViewStatistics {
+ // Materialized views considered for the query job. Only certain materialized
+ // views are used. For a detailed list, see the child message.
+ //
+ // If many materialized views are considered, then the list might be
+ // incomplete.
+ repeated MaterializedView materialized_view = 1;
+}
+
+// A materialized view considered for a query job.
+message MaterializedView {
+ // Reason why a materialized view was not chosen for a query. For more
+ // information, see [Understand why materialized views were
+ // rejected](https://cloud.google.com/bigquery/docs/materialized-views-use#understand-rejected).
+ enum RejectedReason {
+ // Default unspecified value.
+ REJECTED_REASON_UNSPECIFIED = 0;
+
+ // View has no cached data because it has not refreshed yet.
+ NO_DATA = 1;
+
+ // The estimated cost of the view is more expensive than another view or the
+ // base table.
+ //
+ // Note: The estimated cost might not match the billed cost.
+ COST = 2;
+
+ // View has no cached data because a base table is truncated.
+ BASE_TABLE_TRUNCATED = 3;
+
+ // View is invalidated because of a data change in one or more base tables.
+ // It could be any recent change if the
+ // [`max_staleness`](https://cloud.google.com/bigquery/docs/materialized-views-create#max_staleness)
+ // option is not set for the view, or otherwise any change outside of the
+ // staleness window.
+ BASE_TABLE_DATA_CHANGE = 4;
+
+ // View is invalidated because a base table's partition expiration has
+ // changed.
+ BASE_TABLE_PARTITION_EXPIRATION_CHANGE = 5;
+
+ // View is invalidated because a base table's partition has expired.
+ BASE_TABLE_EXPIRED_PARTITION = 6;
+
+ // View is invalidated because a base table has an incompatible metadata
+ // change.
+ BASE_TABLE_INCOMPATIBLE_METADATA_CHANGE = 7;
+
+ // View is invalidated because it was refreshed with a time zone other than
+ // that of the current job.
+ TIME_ZONE = 8;
+
+ // View is outside the time travel window.
+ OUT_OF_TIME_TRAVEL_WINDOW = 9;
+
+ // View is inaccessible to the user because of a fine-grained security
+ // policy on one of its base tables.
+ BASE_TABLE_FINE_GRAINED_SECURITY_POLICY = 10;
+
+ // One of the view's base tables is too stale. For example, the cached
+ // metadata of a BigLake external table needs to be updated.
+ BASE_TABLE_TOO_STALE = 11;
+ }
+
+ // The candidate materialized view.
+ optional TableReference table_reference = 1;
+
+ // Whether the materialized view is chosen for the query.
+ //
+ // A materialized view can be chosen to rewrite multiple parts of the same
+ // query. If a materialized view is chosen to rewrite any part of the query,
+ // then this field is true, even if the materialized view was not chosen to
+ // rewrite other parts.
+ optional bool chosen = 2;
+
+ // If present, specifies a best-effort estimation of the bytes saved by using
+ // the materialized view rather than its base tables.
+ optional int64 estimated_bytes_saved = 3;
+
+ // If present, specifies the reason why the materialized view was not chosen
+ // for the query.
+ optional RejectedReason rejected_reason = 4;
+}
+
+// Table level detail on the usage of metadata caching. Only set for Metadata
+// caching eligible tables referenced in the query.
+message TableMetadataCacheUsage {
+ // Reasons for not using metadata caching.
+ enum UnusedReason {
+ // Unused reasons not specified.
+ UNUSED_REASON_UNSPECIFIED = 0;
+
+ // Metadata cache was outside the table's maxStaleness.
+ EXCEEDED_MAX_STALENESS = 1;
+
+ // Metadata caching feature is not enabled. [Update BigLake tables]
+ // (/bigquery/docs/create-cloud-storage-table-biglake#update-biglake-tables)
+ // to enable the metadata caching.
+ METADATA_CACHING_NOT_ENABLED = 3;
+
+ // Other unknown reason.
+ OTHER_REASON = 2;
+ }
+
+ // Metadata caching eligible table referenced in the query.
+ optional TableReference table_reference = 1;
+
+ // Reason for not using metadata caching for the table.
+ optional UnusedReason unused_reason = 2;
+
+ // Free-form, human-readable reason why metadata caching was unused for
+ // the job.
+ optional string explanation = 3;
+
+ // Duration since last refresh as of this job for managed tables (indicates
+ // metadata cache staleness as seen by this job).
+ google.protobuf.Duration staleness = 5;
+
+ // [Table
+ // type](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.type).
+ string table_type = 6;
+}
+
+// Statistics for metadata caching in BigLake tables.
+message MetadataCacheStatistics {
+ // Set for the Metadata caching eligible tables referenced in the query.
+ repeated TableMetadataCacheUsage table_metadata_cache_usage = 1;
+}
+
+// The type of editions.
+// Different features and behaviors are provided to different editions.
+// Capacity commitments and reservations are linked to editions.
+enum ReservationEdition {
+ // Default value, which will be treated as ENTERPRISE.
+ RESERVATION_EDITION_UNSPECIFIED = 0;
+
+ // Standard edition.
+ STANDARD = 1;
+
+ // Enterprise edition.
+ ENTERPRISE = 2;
+
+ // Enterprise plus edition.
+ ENTERPRISE_PLUS = 3;
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_status.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_status.proto.baseline
new file mode 100755
index 000000000..71f0a33dc
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/job_status.proto.baseline
@@ -0,0 +1,40 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/error.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "JobStatusProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+message JobStatus {
+ // Output only. Final error result of the job. If present, indicates that the
+ // job has completed and was unsuccessful.
+ ErrorProto error_result = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The first errors encountered during the running of the job.
+ // The final message includes the number of errors that caused the process to
+ // stop. Errors here do not necessarily mean that the job has not completed or
+ // was unsuccessful.
+ repeated ErrorProto errors = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Running state of the job. Valid states include 'PENDING',
+ // 'RUNNING', and 'DONE'.
+ string state = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/json_extension.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/json_extension.proto.baseline
new file mode 100755
index 000000000..49338d746
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/json_extension.proto.baseline
@@ -0,0 +1,34 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "JsonExtensionProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Used to indicate that a JSON variant, rather than normal JSON, is being used
+// as the source_format. This should only be used in combination with the
+// JSON source format.
+enum JsonExtension {
+ // The default if the provided value is not one of those included in the
+ // enum, or if the value is not specified. The source format is parsed
+ // without any modification.
+ JSON_EXTENSION_UNSPECIFIED = 0;
+
+ // Use GeoJSON variant of JSON. See https://tools.ietf.org/html/rfc7946.
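+ // Illustrative usage (assuming the load-job configuration fields): set
+ // source_format to the JSON source format and json_extension to GEOJSON
+ // to load newline-delimited GeoJSON files.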
+ GEOJSON = 1;
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/location_metadata.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/location_metadata.proto.baseline
new file mode 100755
index 000000000..391bd5ae4
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/location_metadata.proto.baseline
@@ -0,0 +1,30 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "LocationMetadataProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// BigQuery-specific metadata about a location. This will be set on
+// google.cloud.location.Location.metadata in Cloud Location API
+// responses.
+message LocationMetadata {
+ // The legacy BigQuery location ID, e.g. "EU" for the "europe" location.
+ // This is for any API consumers that need the legacy "US" and "EU" locations.
+ string legacy_location_id = 1;
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/map_target_type.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/map_target_type.proto.baseline
new file mode 100755
index 000000000..dc66e7d7a
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/map_target_type.proto.baseline
@@ -0,0 +1,33 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "MapTargetTypeProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Indicates the map target type. Only applies to Parquet maps.
+enum MapTargetType {
+ // In this mode, the map will have the following schema:
+ // struct map_field_name { repeated struct key_value { key value } }.
+ MAP_TARGET_TYPE_UNSPECIFIED = 0;
+
+ // In this mode, the map will have the following schema:
+ // repeated struct map_field_name { key value }.
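+ // Illustrative example (hypothetical column): a Parquet column
+ // `m: map<string, int64>` would surface as
+ // `m ARRAY<STRUCT<key STRING, value INT64>>` under this mode.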
+ ARRAY_OF_STRUCT = 1; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/model.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/model.proto.baseline new file mode 100755 index 000000000..dc3311876 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/model.proto.baseline @@ -0,0 +1,2040 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/encryption_config.proto"; +import "google/cloud/bigquery/v2/model_reference.proto"; +import "google/cloud/bigquery/v2/standard_sql.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ModelProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Model Service. +// +// It should not be relied on for production use cases at this time. +service ModelService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Gets the specified model resource by model ID. + rpc GetModel(GetModelRequest) returns (Model) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models/{model_id=*}" + }; + option (google.api.method_signature) = "project_id,dataset_id,model_id"; + } + + // Lists all models in the specified dataset. Requires the READER dataset + // role. After retrieving the list of models, you can get information about a + // particular model by calling the models.get method. + rpc ListModels(ListModelsRequest) returns (ListModelsResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models" + }; + option (google.api.method_signature) = "project_id,dataset_id,max_results"; + } + + // Patch specific fields in the specified model. + rpc PatchModel(PatchModelRequest) returns (Model) { + option (google.api.http) = { + patch: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models/{model_id=*}" + body: "model" + }; + option (google.api.method_signature) = + "project_id,dataset_id,model_id,model"; + } + + // Deletes the model specified by modelId from the dataset. 
+ rpc DeleteModel(DeleteModelRequest) returns (google.protobuf.Empty) {
+ option (google.api.http) = {
+ delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models/{model_id=*}"
+ };
+ option (google.api.method_signature) = "project_id,dataset_id,model_id";
+ }
+}
+
+// Remote Model Info
+message RemoteModelInfo {
+ // Supported service type for remote model.
+ enum RemoteServiceType {
+ // Unspecified remote service type.
+ REMOTE_SERVICE_TYPE_UNSPECIFIED = 0;
+
+ // V3 Cloud AI Translation API. See more details at [Cloud Translation API]
+ // (https://cloud.google.com/translate/docs/reference/rest).
+ CLOUD_AI_TRANSLATE_V3 = 1;
+
+ // V1 Cloud AI Vision API. See more details at [Cloud Vision API]
+ // (https://cloud.google.com/vision/docs/reference/rest).
+ CLOUD_AI_VISION_V1 = 2;
+
+ // V1 Cloud AI Natural Language API. See more details at [REST Resource:
+ // documents](https://cloud.google.com/natural-language/docs/reference/rest/v1/documents).
+ CLOUD_AI_NATURAL_LANGUAGE_V1 = 3;
+
+ // V2 Speech-to-Text API. See more details at [Google Cloud Speech-to-Text
+ // V2 API](https://cloud.google.com/speech-to-text/v2/docs).
+ CLOUD_AI_SPEECH_TO_TEXT_V2 = 7;
+ }
+
+ // Remote services are services outside of BigQuery used by remote models for
+ // predictions. A remote service is backed by either an arbitrary endpoint or
+ // a selected remote service type, but not both.
+ oneof remote_service {
+ // Output only. The endpoint for remote model.
+ string endpoint = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The remote service type for remote model.
+ RemoteServiceType remote_service_type = 2
+ [(google.api.field_behavior) = OUTPUT_ONLY];
+ }
+
+ // Output only. Fully qualified name of the user-provided connection object of
+ // the remote model. Format:
+ // ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"```
+ string connection = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Max number of rows in each batch sent to the remote service.
+ // If unset, the number of rows in each batch is set dynamically.
+ int64 max_batching_rows = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The model version for LLM.
+ string remote_model_version = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The name of the speech recognizer to use for speech
+ // recognition. The expected format is
+ // `projects/{project}/locations/{location}/recognizers/{recognizer}`.
+ // Customers can specify this field at model creation. If not specified, a
+ // default recognizer `projects/{model
+ // project}/locations/global/recognizers/_` will be used. See more details at
+ // [recognizers](https://cloud.google.com/speech-to-text/v2/docs/reference/rest/v2/projects.locations.recognizers)
+ string speech_recognizer = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Information about a single transform column.
+message TransformColumn {
+ // Output only. Name of the column.
+ string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. Data type of the column after the transform.
+ StandardSqlDataType type = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+ // Output only. The SQL expression used in the column transform.
+ string transform_sql = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+message Model {
+ // Indicates the type of the Model.
+ enum ModelType {
+ // Default value.
+ MODEL_TYPE_UNSPECIFIED = 0;
+
+ // Linear regression model.
+ LINEAR_REGRESSION = 1;
+
+ // Logistic regression-based classification model.
+ LOGISTIC_REGRESSION = 2;
+
+ // K-means clustering model.
+ KMEANS = 3;
+
+ // Matrix factorization model.
+ MATRIX_FACTORIZATION = 4;
+
+ // DNN classifier model.
+ DNN_CLASSIFIER = 5;
+
+ // An imported TensorFlow model.
+ TENSORFLOW = 6;
+
+ // DNN regressor model.
+ DNN_REGRESSOR = 7;
+
+ // An imported XGBoost model.
+ XGBOOST = 8;
+
+ // Boosted tree regressor model.
+ BOOSTED_TREE_REGRESSOR = 9;
+
+ // Boosted tree classifier model.
+ BOOSTED_TREE_CLASSIFIER = 10;
+
+ // ARIMA model.
+ ARIMA = 11;
+
+ // AutoML Tables regression model.
+ AUTOML_REGRESSOR = 12;
+
+ // AutoML Tables classification model.
+ AUTOML_CLASSIFIER = 13;
+
+ // Principal Component Analysis model.
+ PCA = 14;
+
+ // Wide-and-deep classifier model.
+ DNN_LINEAR_COMBINED_CLASSIFIER = 16;
+
+ // Wide-and-deep regressor model.
+ DNN_LINEAR_COMBINED_REGRESSOR = 17;
+
+ // Autoencoder model.
+ AUTOENCODER = 18;
+
+ // New name for the ARIMA model.
+ ARIMA_PLUS = 19;
+
+ // ARIMA with external regressors.
+ ARIMA_PLUS_XREG = 23;
+
+ // Random forest regressor model.
+ RANDOM_FOREST_REGRESSOR = 24;
+
+ // Random forest classifier model.
+ RANDOM_FOREST_CLASSIFIER = 25;
+
+ // An imported TensorFlow Lite model.
+ TENSORFLOW_LITE = 26;
+
+ // An imported ONNX model.
+ ONNX = 28;
+
+ // Model to capture the columns and logic in the TRANSFORM clause along with
+ // statistics useful for ML analytic functions.
+ TRANSFORM_ONLY = 29;
+ }
+
+ // Loss metric to evaluate model training performance.
+ enum LossType {
+ // Default value.
+ LOSS_TYPE_UNSPECIFIED = 0;
+
+ // Mean squared loss, used for linear regression.
+ MEAN_SQUARED_LOSS = 1;
+
+ // Mean log loss, used for logistic regression.
+ MEAN_LOG_LOSS = 2;
+ }
+
+ // Distance metric used to compute the distance between two points.
+ enum DistanceType {
+ // Default value.
+ DISTANCE_TYPE_UNSPECIFIED = 0;
+
+ // Euclidean distance.
+ EUCLIDEAN = 1;
+
+ // Cosine distance.
+ COSINE = 2;
+ }
+
+ // Indicates the method to split input data into multiple tables.
+ enum DataSplitMethod {
+ // Default value.
+ DATA_SPLIT_METHOD_UNSPECIFIED = 0;
+
+ // Splits data randomly.
+ RANDOM = 1;
+
+ // Splits data with the user-provided tags.
+ CUSTOM = 2;
+
+ // Splits data sequentially.
+ SEQUENTIAL = 3;
+
+ // Data split will be skipped.
+ NO_SPLIT = 4;
+
+ // Splits data automatically: Uses NO_SPLIT if the data size is small.
+ // Otherwise uses RANDOM.
+ AUTO_SPLIT = 5;
+ }
+
+ // Type of supported data frequency for time series forecasting models.
+ enum DataFrequency {
+ // Default value.
+ DATA_FREQUENCY_UNSPECIFIED = 0;
+
+ // Automatically inferred from timestamps.
+ AUTO_FREQUENCY = 1;
+
+ // Yearly data.
+ YEARLY = 2;
+
+ // Quarterly data.
+ QUARTERLY = 3;
+
+ // Monthly data.
+ MONTHLY = 4;
+
+ // Weekly data.
+ WEEKLY = 5;
+
+ // Daily data.
+ DAILY = 6;
+
+ // Hourly data.
+ HOURLY = 7;
+
+ // Per-minute data.
+ PER_MINUTE = 8;
+ }
+
+ // Type of supported holiday regions for time series forecasting models.
+ enum HolidayRegion {
+ // Holiday region unspecified.
+ HOLIDAY_REGION_UNSPECIFIED = 0;
+
+ // Global.
+ GLOBAL = 1;
+
+ // North America.
+ NA = 2;
+
+ // Japan and Asia Pacific: Korea, Greater China, India, Australia, and New
+ // Zealand.
+ JAPAC = 3;
+
+ // Europe, the Middle East and Africa.
+ EMEA = 4;
+
+ // Latin America and the Caribbean.
+ LAC = 5; + + // United Arab Emirates + AE = 6; + + // Argentina + AR = 7; + + // Austria + AT = 8; + + // Australia + AU = 9; + + // Belgium + BE = 10; + + // Brazil + BR = 11; + + // Canada + CA = 12; + + // Switzerland + CH = 13; + + // Chile + CL = 14; + + // China + CN = 15; + + // Colombia + CO = 16; + + // Czechoslovakia + CS = 17; + + // Czech Republic + CZ = 18; + + // Germany + DE = 19; + + // Denmark + DK = 20; + + // Algeria + DZ = 21; + + // Ecuador + EC = 22; + + // Estonia + EE = 23; + + // Egypt + EG = 24; + + // Spain + ES = 25; + + // Finland + FI = 26; + + // France + FR = 27; + + // Great Britain (United Kingdom) + GB = 28; + + // Greece + GR = 29; + + // Hong Kong + HK = 30; + + // Hungary + HU = 31; + + // Indonesia + ID = 32; + + // Ireland + IE = 33; + + // Israel + IL = 34; + + // India + IN = 35; + + // Iran + IR = 36; + + // Italy + IT = 37; + + // Japan + JP = 38; + + // Korea (South) + KR = 39; + + // Latvia + LV = 40; + + // Morocco + MA = 41; + + // Mexico + MX = 42; + + // Malaysia + MY = 43; + + // Nigeria + NG = 44; + + // Netherlands + NL = 45; + + // Norway + NO = 46; + + // New Zealand + NZ = 47; + + // Peru + PE = 48; + + // Philippines + PH = 49; + + // Pakistan + PK = 50; + + // Poland + PL = 51; + + // Portugal + PT = 52; + + // Romania + RO = 53; + + // Serbia + RS = 54; + + // Russian Federation + RU = 55; + + // Saudi Arabia + SA = 56; + + // Sweden + SE = 57; + + // Singapore + SG = 58; + + // Slovenia + SI = 59; + + // Slovakia + SK = 60; + + // Thailand + TH = 61; + + // Turkey + TR = 62; + + // Taiwan + TW = 63; + + // Ukraine + UA = 64; + + // United States + US = 65; + + // Venezuela + VE = 66; + + // Viet Nam + VN = 67; + + // South Africa + ZA = 68; + } + + // Enums for seasonal period. + message SeasonalPeriod { + // Seasonal period type. + enum SeasonalPeriodType { + // Unspecified seasonal period. + SEASONAL_PERIOD_TYPE_UNSPECIFIED = 0; + + // No seasonality + NO_SEASONALITY = 1; + + // Daily period, 24 hours. + DAILY = 2; + + // Weekly period, 7 days. + WEEKLY = 3; + + // Monthly period, 30 days or irregular. + MONTHLY = 4; + + // Quarterly period, 90 days or irregular. + QUARTERLY = 5; + + // Yearly period, 365 days or irregular. + YEARLY = 6; + } + } + + // Enums for color space, used for processing images in Object Table. + // See more details at + // https://www.tensorflow.org/io/tutorials/colorspace. + enum ColorSpace { + // Unspecified color space + COLOR_SPACE_UNSPECIFIED = 0; + + // RGB + RGB = 1; + + // HSV + HSV = 2; + + // YIQ + YIQ = 3; + + // YUV + YUV = 4; + + // GRAYSCALE + GRAYSCALE = 5; + } + + // Enums for kmeans model type. + message KmeansEnums { + // Indicates the method used to initialize the centroids for KMeans + // clustering algorithm. + enum KmeansInitializationMethod { + // Unspecified initialization method. + KMEANS_INITIALIZATION_METHOD_UNSPECIFIED = 0; + + // Initializes the centroids randomly. + RANDOM = 1; + + // Initializes the centroids using data specified in + // kmeans_initialization_column. + CUSTOM = 2; + + // Initializes with kmeans++. + KMEANS_PLUS_PLUS = 3; + } + } + + // Enums for XGBoost model type. + message BoostedTreeOptionEnums { + // Booster types supported. Refer to booster parameter in XGBoost. + enum BoosterType { + // Unspecified booster type. + BOOSTER_TYPE_UNSPECIFIED = 0; + + // Gbtree booster. + GBTREE = 1; + + // Dart booster. + DART = 2; + } + + // Type of normalization algorithm for boosted tree models using dart + // booster. Refer to normalize_type in XGBoost. 
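+ // (The TREE and FOREST values below are assumed to correspond to
+ // XGBoost's normalize_type values "tree" and "forest".)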
+ enum DartNormalizeType {
+ // Unspecified dart normalize type.
+ DART_NORMALIZE_TYPE_UNSPECIFIED = 0;
+
+ // New trees have the same weight as each of the dropped trees.
+ TREE = 1;
+
+ // New trees have the same weight as the sum of the dropped trees.
+ FOREST = 2;
+ }
+
+ // Tree construction algorithm used in boosted tree models.
+ // Refer to tree_method in XGBoost.
+ enum TreeMethod {
+ // Unspecified tree method.
+ TREE_METHOD_UNSPECIFIED = 0;
+
+ // Use heuristic to choose the fastest method.
+ AUTO = 1;
+
+ // Exact greedy algorithm.
+ EXACT = 2;
+
+ // Approximate greedy algorithm using quantile sketch and gradient
+ // histogram.
+ APPROX = 3;
+
+ // Fast histogram optimized approximate greedy algorithm.
+ HIST = 4;
+ }
+ }
+
+ // Enums for hyperparameter tuning.
+ message HparamTuningEnums {
+ // Available evaluation metrics used as hyperparameter tuning objectives.
+ enum HparamTuningObjective {
+ // Unspecified evaluation metric.
+ HPARAM_TUNING_OBJECTIVE_UNSPECIFIED = 0;
+
+ // Mean absolute error.
+ // mean_absolute_error = AVG(ABS(label - predicted))
+ MEAN_ABSOLUTE_ERROR = 1;
+
+ // Mean squared error.
+ // mean_squared_error = AVG(POW(label - predicted, 2))
+ MEAN_SQUARED_ERROR = 2;
+
+ // Mean squared log error.
+ // mean_squared_log_error = AVG(POW(LN(1 + label) - LN(1 + predicted), 2))
+ MEAN_SQUARED_LOG_ERROR = 3;
+
+ // Median absolute error.
+ // median_absolute_error = APPROX_QUANTILES(absolute_error, 2)[OFFSET(1)]
+ MEDIAN_ABSOLUTE_ERROR = 4;
+
+ // R^2 score. This corresponds to r2_score in ML.EVALUATE.
+ // r_squared = 1 - SUM(squared_error)/(COUNT(label)*VAR_POP(label))
+ R_SQUARED = 5;
+
+ // Explained variance.
+ // explained_variance = 1 - VAR_POP(label_error)/VAR_POP(label)
+ EXPLAINED_VARIANCE = 6;
+
+ // Precision is the fraction of actual positive predictions that had
+ // positive actual labels. For multiclass this is a macro-averaged metric
+ // treating each class as a binary classifier.
+ PRECISION = 7;
+
+ // Recall is the fraction of actual positive labels that were given a
+ // positive prediction. For multiclass this is a macro-averaged metric.
+ RECALL = 8;
+
+ // Accuracy is the fraction of predictions given the correct label. For
+ // multiclass this is a globally micro-averaged metric.
+ ACCURACY = 9;
+
+ // The F1 score is an average of recall and precision. For multiclass this
+ // is a macro-averaged metric.
+ F1_SCORE = 10;
+
+ // Logarithmic Loss. For multiclass this is a macro-averaged metric.
+ LOG_LOSS = 11;
+
+ // Area Under an ROC Curve. For multiclass this is a macro-averaged
+ // metric.
+ ROC_AUC = 12;
+
+ // Davies-Bouldin Index.
+ DAVIES_BOULDIN_INDEX = 13;
+
+ // Mean Average Precision.
+ MEAN_AVERAGE_PRECISION = 14;
+
+ // Normalized Discounted Cumulative Gain.
+ NORMALIZED_DISCOUNTED_CUMULATIVE_GAIN = 15;
+
+ // Average Rank.
+ AVERAGE_RANK = 16;
+ }
+ }
+
+ // Indicates the learning rate optimization strategy to use.
+ enum LearnRateStrategy {
+ // Default value.
+ LEARN_RATE_STRATEGY_UNSPECIFIED = 0;
+
+ // Use line search to determine learning rate.
+ LINE_SEARCH = 1;
+
+ // Use a constant learning rate.
+ CONSTANT = 2;
+ }
+
+ // Indicates the optimization strategy used for training.
+ enum OptimizationStrategy {
+ // Default value.
+ OPTIMIZATION_STRATEGY_UNSPECIFIED = 0;
+
+ // Uses an iterative batch gradient descent algorithm.
+ BATCH_GRADIENT_DESCENT = 1;
+
+ // Uses a normal equation to solve the linear regression problem.
+ NORMAL_EQUATION = 2;
+ }
+
+ // Indicates the training algorithm to use for matrix factorization models.
+ enum FeedbackType {
+ // Default value.
+ FEEDBACK_TYPE_UNSPECIFIED = 0;
+
+ // Use weighted-ALS for implicit feedback problems.
+ IMPLICIT = 1;
+
+ // Use nonweighted-ALS for explicit feedback problems.
+ EXPLICIT = 2;
+ }
+
+ // Evaluation metrics for regression and explicit feedback type matrix
+ // factorization models.
+ message RegressionMetrics {
+ // Mean absolute error.
+ google.protobuf.DoubleValue mean_absolute_error = 1;
+
+ // Mean squared error.
+ google.protobuf.DoubleValue mean_squared_error = 2;
+
+ // Mean squared log error.
+ google.protobuf.DoubleValue mean_squared_log_error = 3;
+
+ // Median absolute error.
+ google.protobuf.DoubleValue median_absolute_error = 4;
+
+ // R^2 score. This corresponds to r2_score in ML.EVALUATE.
+ google.protobuf.DoubleValue r_squared = 5;
+ }
+
+ // Aggregate metrics for classification/classifier models. For multi-class
+ // models, the metrics are either macro-averaged or micro-averaged. When
+ // macro-averaged, the metrics are calculated for each label and then an
+ // unweighted average is taken of those values. When micro-averaged, the
+ // metric is calculated globally by counting the total number of correctly
+ // predicted rows.
+ message AggregateClassificationMetrics {
+ // Precision is the fraction of actual positive predictions that had
+ // positive actual labels. For multiclass this is a macro-averaged
+ // metric treating each class as a binary classifier.
+ google.protobuf.DoubleValue precision = 1;
+
+ // Recall is the fraction of actual positive labels that were given a
+ // positive prediction. For multiclass this is a macro-averaged metric.
+ google.protobuf.DoubleValue recall = 2;
+
+ // Accuracy is the fraction of predictions given the correct label. For
+ // multiclass this is a micro-averaged metric.
+ google.protobuf.DoubleValue accuracy = 3;
+
+ // Threshold at which the metrics are computed. For binary
+ // classification models this is the positive class threshold.
+ // For multi-class classification models this is the confidence
+ // threshold.
+ google.protobuf.DoubleValue threshold = 4;
+
+ // The F1 score is an average of recall and precision. For multiclass
+ // this is a macro-averaged metric.
+ google.protobuf.DoubleValue f1_score = 5;
+
+ // Logarithmic Loss. For multiclass this is a macro-averaged metric.
+ google.protobuf.DoubleValue log_loss = 6;
+
+ // Area Under an ROC Curve. For multiclass this is a macro-averaged
+ // metric.
+ google.protobuf.DoubleValue roc_auc = 7;
+ }
+
+ // Evaluation metrics for binary classification/classifier models.
+ message BinaryClassificationMetrics {
+ // Confusion matrix for binary classification models.
+ message BinaryConfusionMatrix {
+ // Threshold value used when computing each of the following metrics.
+ google.protobuf.DoubleValue positive_class_threshold = 1;
+
+ // Number of true samples predicted as true.
+ google.protobuf.Int64Value true_positives = 2;
+
+ // Number of false samples predicted as true.
+ google.protobuf.Int64Value false_positives = 3;
+
+ // Number of true samples predicted as false.
+ google.protobuf.Int64Value true_negatives = 4;
+
+ // Number of false samples predicted as false.
+ google.protobuf.Int64Value false_negatives = 5;
+
+ // The fraction of actual positive predictions that had positive actual
+ // labels.
+ google.protobuf.DoubleValue precision = 6;
+
+ // The fraction of actual positive labels that were given a positive
+ // prediction.
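+ // In terms of the counts above:
+ // recall = true_positives / (true_positives + false_negatives)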
+ google.protobuf.DoubleValue recall = 7;
+
+ // The equally weighted average of recall and precision.
+ google.protobuf.DoubleValue f1_score = 8;
+
+ // The fraction of predictions given the correct label.
+ google.protobuf.DoubleValue accuracy = 9;
+ }
+
+ // Aggregate classification metrics.
+ AggregateClassificationMetrics aggregate_classification_metrics = 1;
+
+ // Binary confusion matrix at multiple thresholds.
+ repeated BinaryConfusionMatrix binary_confusion_matrix_list = 2;
+
+ // Label representing the positive class.
+ string positive_label = 3;
+
+ // Label representing the negative class.
+ string negative_label = 4;
+ }
+
+ // Evaluation metrics for multi-class classification/classifier models.
+ message MultiClassClassificationMetrics {
+ // Confusion matrix for multi-class classification models.
+ message ConfusionMatrix {
+ // A single entry in the confusion matrix.
+ message Entry {
+ // The predicted label. For confidence_threshold > 0, we will
+ // also add an entry indicating the number of items under the
+ // confidence threshold.
+ string predicted_label = 1;
+
+ // Number of items being predicted as this label.
+ google.protobuf.Int64Value item_count = 2;
+ }
+
+ // A single row in the confusion matrix.
+ message Row {
+ // The original label of this row.
+ string actual_label = 1;
+
+ // Info describing predicted label distribution.
+ repeated Entry entries = 2;
+ }
+
+ // Confidence threshold used when computing the entries of the
+ // confusion matrix.
+ google.protobuf.DoubleValue confidence_threshold = 1;
+
+ // One row per actual label.
+ repeated Row rows = 2;
+ }
+
+ // Aggregate classification metrics.
+ AggregateClassificationMetrics aggregate_classification_metrics = 1;
+
+ // Confusion matrix at different thresholds.
+ repeated ConfusionMatrix confusion_matrix_list = 2;
+ }
+
+ // Evaluation metrics for clustering models.
+ message ClusteringMetrics {
+ // Message containing the information about one cluster.
+ message Cluster {
+ // Representative value of a single feature within the cluster.
+ message FeatureValue {
+ // Representative value of a categorical feature.
+ message CategoricalValue {
+ // Represents the count of a single category within the cluster.
+ message CategoryCount {
+ // The name of category.
+ string category = 1;
+
+ // The count of training samples matching the category within the
+ // cluster.
+ google.protobuf.Int64Value count = 2;
+ }
+
+ // Counts of all categories for the categorical feature. If there are
+ // more than ten categories, we return the top ten (by count) and one
+ // more CategoryCount with category "_OTHER_" whose count is the
+ // aggregate count of the remaining categories.
+ repeated CategoryCount category_counts = 1;
+ }
+
+ // The feature column name.
+ string feature_column = 1;
+
+ // Value.
+ oneof value {
+ // The numerical feature value. This is the centroid value for this
+ // feature.
+ google.protobuf.DoubleValue numerical_value = 2;
+
+ // The categorical feature value.
+ CategoricalValue categorical_value = 3;
+ }
+ }
+
+ // Centroid id.
+ int64 centroid_id = 1;
+
+ // Values of highly variant features for this cluster.
+ repeated FeatureValue feature_values = 2;
+
+ // Count of training data rows that were assigned to this cluster.
+ google.protobuf.Int64Value count = 3;
+ }
+
+ // Davies-Bouldin index.
+ google.protobuf.DoubleValue davies_bouldin_index = 1;
+
+ // Mean of squared distances from each sample to its cluster centroid.
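+ // Following the formula style used elsewhere in this file (illustrative
+ // names): mean_squared_distance = AVG(POW(distance(sample, centroid), 2))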
+ google.protobuf.DoubleValue mean_squared_distance = 2;
+
+ // Information for all clusters.
+ repeated Cluster clusters = 3;
+ }
+
+ // Evaluation metrics used by weighted-ALS models specified by
+ // feedback_type=implicit.
+ message RankingMetrics {
+ // Calculates a precision per user for all the items by ranking them and
+ // then averages all the precisions across all the users.
+ google.protobuf.DoubleValue mean_average_precision = 1;
+
+ // Similar to the mean squared error computed in regression and explicit
+ // recommendation models except instead of computing the rating directly,
+ // the output from evaluate is computed against a preference which is 1 or 0
+ // depending on whether or not the rating exists.
+ google.protobuf.DoubleValue mean_squared_error = 2;
+
+ // A metric to determine the goodness of a ranking calculated from the
+ // predicted confidence by comparing it to an ideal rank measured by the
+ // original ratings.
+ google.protobuf.DoubleValue normalized_discounted_cumulative_gain = 3;
+
+ // Determines the goodness of a ranking by computing the percentile rank
+ // from the predicted confidence and dividing it by the original rank.
+ google.protobuf.DoubleValue average_rank = 4;
+ }
+
+ // Model evaluation metrics for ARIMA forecasting models.
+ message ArimaForecastingMetrics {
+ // Model evaluation metrics for a single ARIMA forecasting model.
+ message ArimaSingleModelForecastingMetrics {
+ // Non-seasonal order.
+ ArimaOrder non_seasonal_order = 1;
+
+ // Arima fitting metrics.
+ ArimaFittingMetrics arima_fitting_metrics = 2;
+
+ // Whether the ARIMA model is fitted with drift or not. It is always
+ // false when d is not 1.
+ google.protobuf.BoolValue has_drift = 3;
+
+ // The time_series_id value for this time series. It will be one of
+ // the unique values from the time_series_id_column specified during
+ // ARIMA model training. Only present when time_series_id_column
+ // training option was used.
+ string time_series_id = 4;
+
+ // The tuple of time_series_ids identifying this time series. It will
+ // be one of the unique tuples of values present in the
+ // time_series_id_columns specified during ARIMA model training. Only
+ // present when time_series_id_columns training option was used, and
+ // the order of values here is the same as the order of
+ // time_series_id_columns.
+ repeated string time_series_ids = 9;
+
+ // Seasonal periods. Repeated because multiple periods are supported
+ // for one time series.
+ repeated SeasonalPeriod.SeasonalPeriodType seasonal_periods = 5;
+
+ // If true, holiday_effect is a part of time series decomposition result.
+ google.protobuf.BoolValue has_holiday_effect = 6;
+
+ // If true, spikes_and_dips is a part of time series decomposition result.
+ google.protobuf.BoolValue has_spikes_and_dips = 7;
+
+ // If true, step_changes is a part of time series decomposition result.
+ google.protobuf.BoolValue has_step_changes = 8;
+ }
+
+ // Repeated as there can be many metric sets (one for each model) in
+ // auto-arima and the large-scale case.
+ repeated ArimaSingleModelForecastingMetrics
+ arima_single_model_forecasting_metrics = 6;
+ }
+
+ // Model evaluation metrics for dimensionality reduction models.
+ message DimensionalityReductionMetrics {
+ // Total percentage of variance explained by the selected principal
+ // components.
+ google.protobuf.DoubleValue total_explained_variance_ratio = 1;
+ }
+
+ // Evaluation metrics of a model. These are either computed on all training
+ // data or just the eval data based on whether eval data was used during
+ // training. These are not present for imported models.
+ message EvaluationMetrics {
+ // Metrics.
+ oneof metrics {
+ // Populated for regression models and explicit feedback type matrix
+ // factorization models.
+ RegressionMetrics regression_metrics = 1;
+
+ // Populated for binary classification/classifier models.
+ BinaryClassificationMetrics binary_classification_metrics = 2;
+
+ // Populated for multi-class classification/classifier models.
+ MultiClassClassificationMetrics multi_class_classification_metrics = 3;
+
+ // Populated for clustering models.
+ ClusteringMetrics clustering_metrics = 4;
+
+ // Populated for implicit feedback type matrix factorization models.
+ RankingMetrics ranking_metrics = 5;
+
+ // Populated for ARIMA models.
+ ArimaForecastingMetrics arima_forecasting_metrics = 6;
+
+ // Evaluation metrics when the model is a dimensionality reduction model,
+ // which currently includes PCA.
+ DimensionalityReductionMetrics dimensionality_reduction_metrics = 7;
+ }
+ }
+
+ // Data split result. This contains references to the training and evaluation
+ // data tables that were used to train the model.
+ message DataSplitResult {
+ // Table reference of the training data after split.
+ TableReference training_table = 1;
+
+ // Table reference of the evaluation data after split.
+ TableReference evaluation_table = 2;
+
+ // Table reference of the test data after split.
+ TableReference test_table = 3;
+ }
+
+ // Arima order, can be used for both non-seasonal and seasonal parts.
+ message ArimaOrder {
+ // Order of the autoregressive part.
+ google.protobuf.Int64Value p = 1;
+
+ // Order of the differencing part.
+ google.protobuf.Int64Value d = 2;
+
+ // Order of the moving-average part.
+ google.protobuf.Int64Value q = 3;
+ }
+
+ // ARIMA model fitting metrics.
+ message ArimaFittingMetrics {
+ // Log-likelihood.
+ google.protobuf.DoubleValue log_likelihood = 1;
+
+ // AIC.
+ google.protobuf.DoubleValue aic = 2;
+
+ // Variance.
+ google.protobuf.DoubleValue variance = 3;
+ }
+
+ // Global explanations containing the top most important features
+ // after training.
+ message GlobalExplanation {
+ // Explanation for a single feature.
+ message Explanation {
+ // The full feature name. For non-numerical features, will be formatted
+ // like `<column_name>.<encoded_category_name>`. Overall size of feature
+ // name will always be truncated to first 120 characters.
+ string feature_name = 1;
+
+ // Attribution of feature.
+ google.protobuf.DoubleValue attribution = 2;
+ }
+
+ // A list of the top global explanations. Sorted by absolute value of
+ // attribution in descending order.
+ repeated Explanation explanations = 1;
+
+ // Class label for this set of global explanations. Will be empty/null for
+ // binary logistic and linear regression models. Sorted alphabetically in
+ // descending order.
+ string class_label = 2;
+ }
+
+ // Encoding methods for categorical features.
+ message CategoryEncodingMethod {
+ // Supported encoding methods for categorical features.
+ enum EncodingMethod {
+ // Unspecified encoding method.
+ ENCODING_METHOD_UNSPECIFIED = 0;
+
+ // Applies one-hot encoding.
+ ONE_HOT_ENCODING = 1;
+
+ // Applies label encoding.
+ LABEL_ENCODING = 2;
+
+ // Applies dummy encoding.
+ DUMMY_ENCODING = 3;
+ }
+ }
+
+ // PCA solver options.
+ message PcaSolverOptionEnums {
+ // Enums for supported PCA solvers.
+ enum PcaSolver {
+ // Default value.
+ UNSPECIFIED = 0;
+
+ // Full eigen-decomposition.
+ FULL = 1;
+
+ // Randomized SVD.
+ RANDOMIZED = 2;
+
+ // Auto.
+ AUTO = 3;
+ }
+ }
+
+ // Model registry options.
+ message ModelRegistryOptionEnums {
+ // Enums for supported model registries.
+ enum ModelRegistry {
+ // Default value.
+ MODEL_REGISTRY_UNSPECIFIED = 0;
+
+ // Vertex AI.
+ VERTEX_AI = 1;
+ }
+ }
+
+ // Information about a single training query run for the model.
+ message TrainingRun {
+ // Options used in model training.
+ message TrainingOptions {
+ // The maximum number of iterations in training. Used only for iterative
+ // training algorithms.
+ int64 max_iterations = 1;
+
+ // Type of loss function used during training run.
+ LossType loss_type = 2;
+
+ // Learning rate in training. Used only for iterative training algorithms.
+ double learn_rate = 3;
+
+ // L1 regularization coefficient.
+ google.protobuf.DoubleValue l1_regularization = 4;
+
+ // L2 regularization coefficient.
+ google.protobuf.DoubleValue l2_regularization = 5;
+
+ // When early_stop is true, stops training when accuracy improvement is
+ // less than 'min_relative_progress'. Used only for iterative training
+ // algorithms.
+ google.protobuf.DoubleValue min_relative_progress = 6;
+
+ // Whether to train a model from the last checkpoint.
+ google.protobuf.BoolValue warm_start = 7;
+
+ // Whether to stop early when the loss doesn't improve significantly
+ // any more (compared to min_relative_progress). Used only for iterative
+ // training algorithms.
+ google.protobuf.BoolValue early_stop = 8;
+
+ // Name of input label columns in training data.
+ repeated string input_label_columns = 9;
+
+ // The data split type for training and evaluation, e.g. RANDOM.
+ DataSplitMethod data_split_method = 10;
+
+ // The fraction of evaluation data over the whole input data. The rest
+ // of data will be used as training data. The format should be double.
+ // Accurate to two decimal places.
+ // Default value is 0.2.
+ double data_split_eval_fraction = 11;
+
+ // The column to split data with. This column won't be used as a
+ // feature.
+ // 1. When data_split_method is CUSTOM, the corresponding column should
+ // be boolean. The rows with true value tag are eval data, and the false
+ // are training data.
+ // 2. When data_split_method is SEQUENTIAL, the first
+ // DATA_SPLIT_EVAL_FRACTION rows (from smallest to largest) in the
+ // corresponding column are used as training data, and the rest are eval
+ // data. It respects the order in Orderable data types:
+ // https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
+ string data_split_column = 12;
+
+ // The strategy to determine learn rate for the current iteration.
+ LearnRateStrategy learn_rate_strategy = 13;
+
+ // Specifies the initial learning rate for the line search learn rate
+ // strategy.
+ double initial_learn_rate = 16;
+
+ // Weights associated with each label class, for rebalancing the
+ // training data. Only applicable for classification models.
+ map<string, double> label_class_weights = 17;
+
+ // User column specified for matrix factorization models.
+ string user_column = 18;
+
+ // Item column specified for matrix factorization models.
+ string item_column = 19;
+
+ // Distance type for clustering models.
+ DistanceType distance_type = 20;
+
+ // Number of clusters for clustering models.
+ int64 num_clusters = 21;
+
+ // Google Cloud Storage URI from which the model was imported. Only
+ // applicable for imported models.
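+ // Illustrative example: `gs://[bucket_name]/[path_to_saved_model]`.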
+ string model_uri = 22;
+
+ // Optimization strategy for training linear regression models.
+ OptimizationStrategy optimization_strategy = 23;
+
+ // Hidden units for dnn models.
+ repeated int64 hidden_units = 24;
+
+ // Batch size for dnn models.
+ int64 batch_size = 25;
+
+ // Dropout probability for dnn models.
+ google.protobuf.DoubleValue dropout = 26;
+
+ // Maximum depth of a tree for boosted tree models.
+ int64 max_tree_depth = 27;
+
+ // Subsample fraction of the training data to grow trees to prevent
+ // overfitting for boosted tree models.
+ double subsample = 28;
+
+ // Minimum split loss for boosted tree models.
+ google.protobuf.DoubleValue min_split_loss = 29;
+
+ // Booster type for boosted tree models.
+ BoostedTreeOptionEnums.BoosterType booster_type = 60;
+
+ // Number of parallel trees constructed during each iteration for boosted
+ // tree models.
+ google.protobuf.Int64Value num_parallel_tree = 61;
+
+ // Type of normalization algorithm for boosted tree models using
+ // dart booster.
+ BoostedTreeOptionEnums.DartNormalizeType dart_normalize_type = 62;
+
+ // Tree construction algorithm for boosted tree models.
+ BoostedTreeOptionEnums.TreeMethod tree_method = 63;
+
+ // Minimum sum of instance weight needed in a child for boosted tree
+ // models.
+ google.protobuf.Int64Value min_tree_child_weight = 64;
+
+ // Subsample ratio of columns when constructing each tree for boosted tree
+ // models.
+ google.protobuf.DoubleValue colsample_bytree = 65;
+
+ // Subsample ratio of columns for each level for boosted tree models.
+ google.protobuf.DoubleValue colsample_bylevel = 66;
+
+ // Subsample ratio of columns for each node (split) for boosted tree
+ // models.
+ google.protobuf.DoubleValue colsample_bynode = 67;
+
+ // Num factors specified for matrix factorization models.
+ int64 num_factors = 30;
+
+ // Feedback type that specifies which algorithm to run for matrix
+ // factorization.
+ FeedbackType feedback_type = 31;
+
+ // Hyperparameter for matrix factorization when implicit feedback type is
+ // specified.
+ google.protobuf.DoubleValue wals_alpha = 32;
+
+ // The method used to initialize the centroids for kmeans algorithm.
+ KmeansEnums.KmeansInitializationMethod kmeans_initialization_method = 33;
+
+ // The column used to provide the initial centroids for kmeans algorithm
+ // when kmeans_initialization_method is CUSTOM.
+ string kmeans_initialization_column = 34;
+
+ // Column to be designated as time series timestamp for ARIMA model.
+ string time_series_timestamp_column = 35;
+
+ // Column to be designated as time series data for ARIMA model.
+ string time_series_data_column = 36;
+
+ // Whether to enable auto ARIMA or not.
+ google.protobuf.BoolValue auto_arima = 37;
+
+ // A specification of the non-seasonal part of the ARIMA model: the three
+ // components (p, d, q) are the AR order, the degree of differencing, and
+ // the MA order.
+ ArimaOrder non_seasonal_order = 38;
+
+ // The data frequency of a time series.
+ DataFrequency data_frequency = 39;
+
+ // Whether or not p-value test should be computed for this model. Only
+ // available for linear and logistic regression models.
+ google.protobuf.BoolValue calculate_p_values = 40;
+
+ // Include drift when fitting an ARIMA model.
+ google.protobuf.BoolValue include_drift = 41;
+
+ // The geographical region based on which the holidays are considered in
+ // time series modeling. If a valid value is specified, then holiday
+ // effects modeling is enabled.
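+ // For example, setting this to US enables modeling of United States
+ // holidays (see the HolidayRegion enum above).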
+      HolidayRegion holiday_region = 42;
+
+      // A list of geographical regions that are used for time series modeling.
+      repeated HolidayRegion holiday_regions = 71;
+
+      // The time series id column that was used during ARIMA model training.
+      string time_series_id_column = 43;
+
+      // The time series id columns that were used during ARIMA model training.
+      repeated string time_series_id_columns = 51;
+
+      // The number of periods ahead that need to be forecasted.
+      int64 horizon = 44;
+
+      // The max value of the sum of non-seasonal p and q.
+      int64 auto_arima_max_order = 46;
+
+      // The min value of the sum of non-seasonal p and q.
+      int64 auto_arima_min_order = 83;
+
+      // Number of trials to run this hyperparameter tuning job.
+      int64 num_trials = 47;
+
+      // Maximum number of trials to run in parallel.
+      int64 max_parallel_trials = 48;
+
+      // The target evaluation metrics to optimize the hyperparameters for.
+      repeated HparamTuningEnums.HparamTuningObjective
+          hparam_tuning_objectives = 54;
+
+      // If true, decompose the time series and save the results.
+      google.protobuf.BoolValue decompose_time_series = 50;
+
+      // If true, clean spikes and dips in the input time series.
+      google.protobuf.BoolValue clean_spikes_and_dips = 52;
+
+      // If true, detect step changes and make data adjustment in the input time
+      // series.
+      google.protobuf.BoolValue adjust_step_changes = 53;
+
+      // If true, enable global explanation during training.
+      google.protobuf.BoolValue enable_global_explain = 55;
+
+      // Number of paths for the sampled Shapley explain method.
+      int64 sampled_shapley_num_paths = 56;
+
+      // Number of integral steps for the integrated gradients explain method.
+      int64 integrated_gradients_num_steps = 57;
+
+      // Categorical feature encoding method.
+      CategoryEncodingMethod.EncodingMethod category_encoding_method = 58;
+
+      // Based on the selected TF version, the corresponding docker image is
+      // used to train external models.
+      string tf_version = 70;
+
+      // Enums for color space, used for processing images in Object Table.
+      // See more details at
+      // https://www.tensorflow.org/io/tutorials/colorspace.
+      ColorSpace color_space = 72;
+
+      // Name of the instance weight column for training data.
+      // This column isn't used as a feature.
+      string instance_weight_column = 73;
+
+      // Smoothing window size for the trend component. When a positive value is
+      // specified, a center moving average smoothing is applied on the history
+      // trend. When the smoothing window is out of the boundary at the
+      // beginning or the end of the trend, the first element or the last
+      // element is padded to fill the smoothing window before the average is
+      // applied.
+      int64 trend_smoothing_window_size = 74;
+
+      // The fraction of the interpolated length of the time series that's used
+      // to model the time series trend component. All of the time points of the
+      // time series are used to model the non-trend component. This training
+      // option accelerates modeling training without sacrificing much
+      // forecasting accuracy. You can use this option with
+      // `minTimeSeriesLength` but not with `maxTimeSeriesLength`.
+      double time_series_length_fraction = 75;
+
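Several of the time-series options above only make sense together (the timestamp and data columns, the forecast horizon, the auto-ARIMA order bounds). A hedged sketch of how such options might appear as proto3 JSON (camelCase field names); every value is invented, and DAILY is assumed to be a valid DataFrequency value.

```ts
// Illustrative ARIMA training options in proto3 JSON form. ArimaOrder holds
// the (p, d, q) triple described for non_seasonal_order above.
const arimaTrainingOptions = {
  timeSeriesTimestampColumn: 'ts',   // column holding the series timestamps
  timeSeriesDataColumn: 'sales',     // column holding the values to forecast
  horizon: 30,                       // periods ahead to forecast
  autoArima: true,                   // let training search (p, d, q)
  autoArimaMaxOrder: 5,              // max of non-seasonal p + q
  dataFrequency: 'DAILY',            // assumed enum value, for illustration
  decomposeTimeSeries: true,         // persist decomposition results
};
```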
+      // The minimum number of time points in a time series that are used in
+      // modeling the trend component of the time series. If you use this
+      // option, you must also set the `timeSeriesLengthFraction` option. This
+      // training option ensures that enough time points are available when you
+      // use `timeSeriesLengthFraction` in trend modeling. This is particularly
+      // important when forecasting multiple time series in a single query using
+      // `timeSeriesIdColumn`. If the total number of time points is less than
+      // the `minTimeSeriesLength` value, then the query uses all available time
+      // points.
+      int64 min_time_series_length = 76;
+
+      // The maximum number of time points in a time series that can be used in
+      // modeling the trend component of the time series. Don't use this option
+      // with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options.
+      int64 max_time_series_length = 77;
+
+      // User-selected XGBoost versions for training of XGBoost models.
+      string xgboost_version = 78;
+
+      // Whether to use approximate feature contribution method in XGBoost model
+      // explanation for global explain.
+      google.protobuf.BoolValue approx_global_feature_contrib = 84;
+
+      // Whether the model should include intercept during model training.
+      google.protobuf.BoolValue fit_intercept = 85;
+
+      // Number of principal components to keep in the PCA model. Must be <= the
+      // number of features.
+      int64 num_principal_components = 86;
+
+      // The minimum ratio of cumulative explained variance that needs to be
+      // given by the PCA model.
+      double pca_explained_variance_ratio = 87;
+
+      // If true, scale the feature values by dividing the feature standard
+      // deviation. Currently only applies to PCA.
+      google.protobuf.BoolValue scale_features = 88;
+
+      // The solver for PCA.
+      PcaSolverOptionEnums.PcaSolver pca_solver = 89;
+
+      // Whether to calculate class weights automatically based on the
+      // popularity of each label.
+      google.protobuf.BoolValue auto_class_weights = 90;
+
+      // Activation function of the neural nets.
+      string activation_fn = 91;
+
+      // Optimizer used for training the neural nets.
+      string optimizer = 92;
+
+      // Budget in hours for AutoML training.
+      double budget_hours = 93;
+
+      // Whether to standardize numerical features. Defaults to true.
+      google.protobuf.BoolValue standardize_features = 94;
+
+      // L1 regularization coefficient to activations.
+      double l1_reg_activation = 95;
+
+      // The model registry.
+      ModelRegistryOptionEnums.ModelRegistry model_registry = 96;
+
+      // The version aliases to apply in Vertex AI model registry. Always
+      // overwritten if the version aliases exist in an existing model.
+      repeated string vertex_ai_model_version_aliases = 97;
+    }
+
+    // Information about a single iteration of the training run.
+    message IterationResult {
+      // Information about a single cluster for clustering model.
+      message ClusterInfo {
+        // Centroid id.
+        int64 centroid_id = 1;
+
+        // Cluster radius, the average distance from centroid
+        // to each point assigned to the cluster.
+        google.protobuf.DoubleValue cluster_radius = 2;
+
+        // Cluster size, the total number of points assigned to the cluster.
+        google.protobuf.Int64Value cluster_size = 3;
+      }
+
+      // (Auto-)arima fitting result. Wrap everything in ArimaResult for easier
+      // refactoring if we want to use model-specific iteration results.
+      message ArimaResult {
+        // Arima coefficients.
+        message ArimaCoefficients {
+          // Auto-regressive coefficients, an array of double.
+          repeated double auto_regressive_coefficients = 1;
+
+          // Moving-average coefficients, an array of double.
+          repeated double moving_average_coefficients = 2;
+
+          // Intercept coefficient, just a double not an array.
+          google.protobuf.DoubleValue intercept_coefficient = 3;
+        }
+
+        // Arima model information.
+        message ArimaModelInfo {
+          // Non-seasonal order.
+          ArimaOrder non_seasonal_order = 1;
+
+          // Arima coefficients.
+          ArimaCoefficients arima_coefficients = 2;
+
+          // Arima fitting metrics.
+          ArimaFittingMetrics arima_fitting_metrics = 3;
+
+          // Whether the Arima model was fitted with drift or not. It is always
+          // false when d is not 1.
+          google.protobuf.BoolValue has_drift = 4;
+
+          // The time_series_id value for this time series. It will be one of
+          // the unique values from the time_series_id_column specified during
+          // ARIMA model training. Only present when the time_series_id_column
+          // training option was used.
+          string time_series_id = 5;
+
+          // The tuple of time_series_ids identifying this time series. It will
+          // be one of the unique tuples of values present in the
+          // time_series_id_columns specified during ARIMA model training. Only
+          // present when the time_series_id_columns training option was used,
+          // and the order of values here is the same as the order of
+          // time_series_id_columns.
+          repeated string time_series_ids = 10;
+
+          // Seasonal periods. Repeated because multiple periods are supported
+          // for one time series.
+          repeated SeasonalPeriod.SeasonalPeriodType seasonal_periods = 6;
+
+          // If true, holiday_effect is a part of time series decomposition
+          // result.
+          google.protobuf.BoolValue has_holiday_effect = 7;
+
+          // If true, spikes_and_dips is a part of time series decomposition
+          // result.
+          google.protobuf.BoolValue has_spikes_and_dips = 8;
+
+          // If true, step_changes is a part of time series decomposition
+          // result.
+          google.protobuf.BoolValue has_step_changes = 9;
+        }
+
+        // This message is repeated because there are multiple arima models
+        // fitted in auto-arima. For non-auto-arima model, its size is one.
+        repeated ArimaModelInfo arima_model_info = 1;
+
+        // Seasonal periods. Repeated because multiple periods are supported for
+        // one time series.
+        repeated SeasonalPeriod.SeasonalPeriodType seasonal_periods = 2;
+      }
+
+      // Principal component infos, used only for eigen decomposition based
+      // models, e.g., PCA. Ordered by explained_variance in the descending
+      // order.
+      message PrincipalComponentInfo {
+        // Id of the principal component.
+        google.protobuf.Int64Value principal_component_id = 1;
+
+        // Explained variance by this principal component, which is simply the
+        // eigenvalue.
+        google.protobuf.DoubleValue explained_variance = 2;
+
+        // Explained_variance over the total explained variance.
+        google.protobuf.DoubleValue explained_variance_ratio = 3;
+
+        // The explained_variance is pre-ordered in the descending order to
+        // compute the cumulative explained variance ratio.
+        google.protobuf.DoubleValue cumulative_explained_variance_ratio = 4;
+      }
+
+      // Index of the iteration, 0 based.
+      google.protobuf.Int32Value index = 1;
+
+      // Time taken to run the iteration in milliseconds.
+      google.protobuf.Int64Value duration_ms = 4;
+
+      // Loss computed on the training data at the end of iteration.
+      google.protobuf.DoubleValue training_loss = 5;
+
+      // Loss computed on the eval data at the end of iteration.
+      google.protobuf.DoubleValue eval_loss = 6;
+
+      // Learn rate used for this iteration.
+      double learn_rate = 7;
+
+      // Information about top clusters for clustering models.
+      repeated ClusterInfo cluster_infos = 8;
+
+      // Arima result.
+      ArimaResult arima_result = 9;
+
+      // The information of the principal components.
+      repeated PrincipalComponentInfo principal_component_infos = 10;
+    }
+
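Because results.size() is bounded by max_iterations (see the TrainingRun fields just below), consumers usually walk the iteration list to inspect convergence. A hedged sketch over the proto3 JSON shape; the local interface mirrors only the fields used here, and note that int64 wrapper values are serialized as strings in JSON.

```ts
// Summarize IterationResult entries (proto3 JSON form). Wrapper types such
// as DoubleValue surface as plain numbers or null; Int64Value as strings.
interface IterationResultJson {
  index?: number | null;
  durationMs?: string | null;
  trainingLoss?: number | null;
  evalLoss?: number | null;
}

function summarizeIterations(results: IterationResultJson[]): string[] {
  return results.map(r =>
    `iter ${r.index ?? '?'}: train=${r.trainingLoss ?? 'n/a'}, ` +
    `eval=${r.evalLoss ?? 'n/a'}, ${r.durationMs ?? '?'} ms`);
}
```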
+    // Output only. Options that were used for this training run, including
+    // user-specified and default options.
+    TrainingOptions training_options = 1
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The start time of this training run.
+    google.protobuf.Timestamp start_time = 8
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Output of each iteration run, results.size() <=
+    // max_iterations.
+    repeated IterationResult results = 6
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The evaluation metrics over training/eval data that were
+    // computed at the end of training.
+    EvaluationMetrics evaluation_metrics = 7
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Data split result of the training run. Only set when the
+    // input data is actually split.
+    DataSplitResult data_split_result = 9
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Global explanation contains the explanation of top features
+    // on the model level. Applies to both regression and classification models.
+    GlobalExplanation model_level_global_explanation = 11
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Global explanation contains the explanation of top features
+    // on the class level. Applies to classification models only.
+    repeated GlobalExplanation class_level_global_explanations = 12
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // The model id in the [Vertex AI Model
+    // Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction)
+    // for this training run.
+    string vertex_ai_model_id = 14;
+
+    // Output only. The model version in the [Vertex AI Model
+    // Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction)
+    // for this training run.
+    string vertex_ai_model_version = 15
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // Search space for a double hyperparameter.
+  message DoubleHparamSearchSpace {
+    // Range of a double hyperparameter.
+    message DoubleRange {
+      // Min value of the double parameter.
+      google.protobuf.DoubleValue min = 1;
+
+      // Max value of the double parameter.
+      google.protobuf.DoubleValue max = 2;
+    }
+
+    // Discrete candidates of a double hyperparameter.
+    message DoubleCandidates {
+      // Candidates for the double parameter in increasing order.
+      repeated google.protobuf.DoubleValue candidates = 1;
+    }
+
+    // Search space.
+    oneof search_space {
+      // Range of the double hyperparameter.
+      DoubleRange range = 1;
+
+      // Candidates of the double hyperparameter.
+      DoubleCandidates candidates = 2;
+    }
+  }
+
+  // Search space for an int hyperparameter.
+  message IntHparamSearchSpace {
+    // Range of an int hyperparameter.
+    message IntRange {
+      // Min value of the int parameter.
+      google.protobuf.Int64Value min = 1;
+
+      // Max value of the int parameter.
+      google.protobuf.Int64Value max = 2;
+    }
+
+    // Discrete candidates of an int hyperparameter.
+    message IntCandidates {
+      // Candidates for the int parameter in increasing order.
+      repeated google.protobuf.Int64Value candidates = 1;
+    }
+
+    // Search space.
+    oneof search_space {
+      // Range of the int hyperparameter.
+      IntRange range = 1;
+
+      // Candidates of the int hyperparameter.
+      IntCandidates candidates = 2;
+    }
+  }
+
+  // Search space for string and enum.
+  message StringHparamSearchSpace {
+    // Candidates for the string or enum parameter in lower case.
+    repeated string candidates = 1;
+  }
+
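Each search space is a oneof, so exactly one of range or candidates may be set. A hedged TypeScript rendering of the two DoubleHparamSearchSpace variants in proto3 JSON form:

```ts
// The oneof makes the two shapes mutually exclusive: set either `range`
// or `candidates`, never both.
type DoubleHparamSearchSpaceJson =
  | {range: {min?: number; max?: number}}
  | {candidates: {candidates: number[]}};

const learnRateSpace: DoubleHparamSearchSpaceJson = {
  range: {min: 0.001, max: 0.1},             // continuous interval
};
const dropoutSpace: DoubleHparamSearchSpaceJson = {
  candidates: {candidates: [0.1, 0.2, 0.5]}, // discrete, in increasing order
};
```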
+  // Search space for int array.
+  message IntArrayHparamSearchSpace {
+    // An array of int.
+    message IntArray {
+      // Elements in the int array.
+      repeated int64 elements = 1;
+    }
+
+    // Candidates for the int array parameter.
+    repeated IntArray candidates = 1;
+  }
+
+  // Hyperparameter search spaces.
+  // These should be a subset of training_options.
+  message HparamSearchSpaces {
+    // Learning rate of training jobs.
+    DoubleHparamSearchSpace learn_rate = 2;
+
+    // L1 regularization coefficient.
+    DoubleHparamSearchSpace l1_reg = 3;
+
+    // L2 regularization coefficient.
+    DoubleHparamSearchSpace l2_reg = 4;
+
+    // Number of clusters for k-means.
+    IntHparamSearchSpace num_clusters = 26;
+
+    // Number of latent factors to train on.
+    IntHparamSearchSpace num_factors = 31;
+
+    // Hidden units for neural network models.
+    IntArrayHparamSearchSpace hidden_units = 34;
+
+    // Mini batch sample size.
+    IntHparamSearchSpace batch_size = 37;
+
+    // Dropout probability for dnn model training and boosted tree models
+    // using dart booster.
+    DoubleHparamSearchSpace dropout = 38;
+
+    // Maximum depth of a tree for boosted tree models.
+    IntHparamSearchSpace max_tree_depth = 41;
+
+    // Subsample the training data to grow tree to prevent overfitting for
+    // boosted tree models.
+    DoubleHparamSearchSpace subsample = 42;
+
+    // Minimum split loss for boosted tree models.
+    DoubleHparamSearchSpace min_split_loss = 43;
+
+    // Hyperparameter for matrix factorization when implicit feedback type is
+    // specified.
+    DoubleHparamSearchSpace wals_alpha = 49;
+
+    // Booster type for boosted tree models.
+    StringHparamSearchSpace booster_type = 56;
+
+    // Number of parallel trees for boosted tree models.
+    IntHparamSearchSpace num_parallel_tree = 57;
+
+    // Dart normalization type for boosted tree models.
+    StringHparamSearchSpace dart_normalize_type = 58;
+
+    // Tree construction algorithm for boosted tree models.
+    StringHparamSearchSpace tree_method = 59;
+
+    // Minimum sum of instance weight needed in a child for boosted tree models.
+    IntHparamSearchSpace min_tree_child_weight = 60;
+
+    // Subsample ratio of columns when constructing each tree for boosted tree
+    // models.
+    DoubleHparamSearchSpace colsample_bytree = 61;
+
+    // Subsample ratio of columns for each level for boosted tree models.
+    DoubleHparamSearchSpace colsample_bylevel = 62;
+
+    // Subsample ratio of columns for each node (split) for boosted tree models.
+    DoubleHparamSearchSpace colsample_bynode = 63;
+
+    // Activation functions of neural network models.
+    StringHparamSearchSpace activation_fn = 67;
+
+    // Optimizer of TF models.
+    StringHparamSearchSpace optimizer = 68;
+  }
+
+  // Training info of a trial in [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models.
+  message HparamTuningTrial {
+    // Current status of the trial.
+    enum TrialStatus {
+      // Default value.
+      TRIAL_STATUS_UNSPECIFIED = 0;
+
+      // Scheduled but not started.
+      NOT_STARTED = 1;
+
+      // Running state.
+      RUNNING = 2;
+
+      // The trial succeeded.
+      SUCCEEDED = 3;
+
+      // The trial failed.
+      FAILED = 4;
+
+      // The trial is infeasible due to the invalid params.
+      INFEASIBLE = 5;
+
+      // Trial stopped early because it's not promising.
+      STOPPED_EARLY = 6;
+    }
+
+    // 1-based index of the trial.
+    int64 trial_id = 1;
+
+    // Starting time of the trial.
+    int64 start_time_ms = 2;
+
+    // Ending time of the trial.
+    int64 end_time_ms = 3;
+
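Of the TrialStatus values above, four are terminal. A small hedged helper, using the enum names as string literals the way proto3 JSON renders them:

```ts
type TrialStatus =
  | 'TRIAL_STATUS_UNSPECIFIED' | 'NOT_STARTED' | 'RUNNING'
  | 'SUCCEEDED' | 'FAILED' | 'INFEASIBLE' | 'STOPPED_EARLY';

// FAILED and INFEASIBLE trials also carry error_message (see below);
// STOPPED_EARLY means the trial was abandoned as unpromising.
function isTerminal(status: TrialStatus): boolean {
  return ['SUCCEEDED', 'FAILED', 'INFEASIBLE', 'STOPPED_EARLY']
      .includes(status);
}
```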
+    // The hyperparameters selected for this trial.
+    TrainingRun.TrainingOptions hparams = 4;
+
+    // Evaluation metrics of this trial calculated on the test data.
+    // Empty in Job API.
+    EvaluationMetrics evaluation_metrics = 5;
+
+    // The status of the trial.
+    TrialStatus status = 6;
+
+    // Error message for FAILED and INFEASIBLE trial.
+    string error_message = 7;
+
+    // Loss computed on the training data at the end of trial.
+    google.protobuf.DoubleValue training_loss = 8;
+
+    // Loss computed on the eval data at the end of trial.
+    google.protobuf.DoubleValue eval_loss = 9;
+
+    // Hyperparameter tuning evaluation metrics of this trial calculated on the
+    // eval data. Unlike evaluation_metrics, only the fields corresponding to
+    // the hparam_tuning_objectives are set.
+    EvaluationMetrics hparam_tuning_evaluation_metrics = 10;
+  }
+
+  // Output only. A hash of this resource.
+  string etag = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. Unique identifier for this model.
+  ModelReference model_reference = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The time when this model was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time when this model was last modified, in milliseconds
+  // since the epoch.
+  int64 last_modified_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. A user-friendly description of this model.
+  string description = 12 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A descriptive name for this model.
+  string friendly_name = 14 [(google.api.field_behavior) = OPTIONAL];
+
+  // The labels associated with this model. You can use these to organize
+  // and group your models. Label keys and values can be no longer
+  // than 63 characters, can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  // Label values are optional. Label keys must start with a letter and each
+  // label in the list must have a different key.
+  map<string, string> labels = 15;
+
+  // Optional. The time when this model expires, in milliseconds since the
+  // epoch. If not present, the model will persist indefinitely. Expired models
+  // will be deleted and their storage reclaimed. The defaultTableExpirationMs
+  // property of the encapsulating dataset can be used to set a default
+  // expirationTime on newly created models.
+  int64 expiration_time = 16 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The geographic location where the model resides. This value
+  // is inherited from the dataset.
+  string location = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Custom encryption configuration (e.g., Cloud KMS keys). This shows the
+  // encryption configuration of the model data while stored in BigQuery
+  // storage. This field can be used with PatchModel to update the encryption
+  // key for an already encrypted model.
+  EncryptionConfiguration encryption_configuration = 17;
+
+  // Output only. Type of the model resource.
+  ModelType model_type = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Information for all training runs in increasing order of start_time.
+  repeated TrainingRun training_runs = 9;
+
+  // Output only. Input feature columns for the model inference. If the model is
+  // trained with TRANSFORM clause, these are the input of the TRANSFORM clause.
+  repeated StandardSqlField feature_columns = 10
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
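The label rules above (63-character limit, lowercase letters, digits, underscores and dashes, key starts with a letter, keys unique) are mechanical enough to pre-check client-side. A hedged sketch, deliberately simplified to ASCII even though the comment also allows international characters:

```ts
// Pre-validate one model label against the documented constraints
// (ASCII-only simplification; the API additionally allows international
// characters).
function validateLabel(key: string, value: string): string[] {
  const problems: string[] = [];
  if (key.length === 0 || key.length > 63) problems.push('key must be 1-63 chars');
  if (value.length > 63) problems.push('value must be <= 63 chars');
  if (!/^[a-z]/.test(key)) problems.push('key must start with a letter');
  if (!/^[a-z0-9_-]*$/.test(key)) problems.push('key has invalid characters');
  if (!/^[a-z0-9_-]*$/.test(value)) problems.push('value has invalid characters');
  return problems;
}
```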
+  // Output only. Label columns that were used to train this model.
+  // The output of the model will have a "predicted_" prefix to these columns.
+  repeated StandardSqlField label_columns = 11
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. This field will be populated if a TRANSFORM clause was used to
+  // train a model. TRANSFORM clause (if used) takes feature_columns as input
+  // and outputs transform_columns. transform_columns are then used to train the
+  // model.
+  repeated TransformColumn transform_columns = 26
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. All hyperparameter search spaces in this model.
+  HparamSearchSpaces hparam_search_spaces = 18
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The default trial_id to use in TVFs when the trial_id is not
+  // passed in. For single-objective [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, this is the best trial ID. For multi-objective [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, this is the smallest trial ID among all Pareto optimal trials.
+  int64 default_trial_id = 21 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Trials of a [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // model sorted by trial_id.
+  repeated HparamTuningTrial hparam_trials = 20
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. For single-objective [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, it only contains the best trial. For multi-objective
+  // [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, it contains all Pareto optimal trials sorted by trial_id.
+  repeated int64 optimal_trial_ids = 22
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Remote model info.
+  RemoteModelInfo remote_model_info = 25
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Request format for getting information about a BigQuery ML model.
+message GetModelRequest {
+  // Required. Project ID of the requested model.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the requested model.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Model ID of the requested model.
+  string model_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for patching information about a BigQuery ML model.
+message PatchModelRequest {
+  // Required. Project ID of the model to patch.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the model to patch.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Model ID of the model to patch.
+  string model_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Patched model.
+  // Follows RFC5789 patch semantics. Missing fields are not updated.
+  // To clear a field, explicitly set to default value.
+  Model model = 4 [(google.api.field_behavior) = REQUIRED];
+}
+
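For a sense of how these request messages surface in the generated Node.js library, here is a hedged sketch following the usual GAPIC conventions. The package name '@google-cloud/bigquery-v2' and the class name ModelServiceClient are assumptions based on typical generator output, not a confirmed published surface:

```ts
// Hypothetical usage of the generated client for the model RPCs defined
// here. Request keys are the camelCase forms of the proto fields.
import {ModelServiceClient} from '@google-cloud/bigquery-v2'; // assumed name

async function touchModel(): Promise<void> {
  const client = new ModelServiceClient();
  const ref = {projectId: 'my-project', datasetId: 'my_ds', modelId: 'my_model'};

  const [model] = await client.getModel(ref);       // GetModelRequest
  const [patched] = await client.patchModel({
    ...ref,
    // RFC5789 semantics: fields missing from `model` are left untouched.
    model: {description: 'nightly retrain'},
  });
  console.log(model.etag, patched.description);
  await client.deleteModel(ref);                    // DeleteModelRequest
}
```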
+// Request format for deleting BigQuery ML models.
+message DeleteModelRequest {
+  // Required. Project ID of the model to delete.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the model to delete.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Model ID of the model to delete.
+  string model_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for listing BigQuery ML models.
+message ListModelsRequest {
+  // Required. Project ID of the models to list.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the models to list.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // The maximum number of results to return in a single response page.
+  // Leverage the page tokens to iterate through the entire collection.
+  google.protobuf.UInt32Value max_results = 3;
+
+  // Page token, returned by a previous call to request the next page of
+  // results.
+  string page_token = 4;
+}
+
+// Response format for a single page when listing BigQuery ML models.
+message ListModelsResponse {
+  // Models in the requested dataset. Only the following fields are populated:
+  // model_reference, model_type, creation_time, last_modified_time and
+  // labels.
+  repeated Model models = 1;
+
+  // A token to request the next page of results.
+  string next_page_token = 2;
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/model_reference.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/model_reference.proto.baseline
new file mode 100755
index 000000000..9f190254e
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/model_reference.proto.baseline
@@ -0,0 +1,37 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "ModelReferenceProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Id path of a model.
+message ModelReference {
+  // Required. The ID of the project containing this model.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The ID of the dataset containing this model.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The ID of the model. The ID must contain only
+  // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum
+  // length is 1,024 characters.
+ string model_id = 3 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/partitioning_definition.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/partitioning_definition.proto.baseline new file mode 100755 index 000000000..f331cb56a --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/partitioning_definition.proto.baseline @@ -0,0 +1,49 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "PartitioningDefinitionProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// The partitioning information, which includes managed table, external table +// and metastore partitioned table partition information. +message PartitioningDefinition { + // Optional. Details about each partitioning column. This field is output only + // for all partitioning types other than metastore partitioned tables. + // BigQuery native tables only support 1 partitioning column. Other table + // types may support 0, 1 or more partitioning columns. + // For metastore partitioned tables, the order must match the definition order + // in the Hive Metastore, where it must match the physical layout of the + // table. For example, + // + // CREATE TABLE a_table(id BIGINT, name STRING) + // PARTITIONED BY (city STRING, state STRING). + // + // In this case the values must be ['city', 'state'] in that order. + repeated PartitionedColumn partitioned_column = 1 + [(google.api.field_behavior) = OPTIONAL]; +} + +// The partitioning column information. +message PartitionedColumn { + // Required. The name of the partition column. + optional string field = 1 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/privacy_policy.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/privacy_policy.proto.baseline new file mode 100755 index 000000000..35f40a0a0 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/privacy_policy.proto.baseline @@ -0,0 +1,169 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "PrivacyPolicyProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Represents privacy policy associated with "aggregation threshold" method. +message AggregationThresholdPolicy { + // Optional. The threshold for the "aggregation threshold" policy. + optional int64 threshold = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The privacy unit column(s) associated with this policy. + // For now, only one column per data source object (table, view) is allowed as + // a privacy unit column. + // Representing as a repeated field in metadata for extensibility to + // multiple columns in future. + // Duplicates and Repeated struct fields are not allowed. + // For nested fields, use dot notation ("outer.inner") + repeated string privacy_unit_columns = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Represents privacy policy associated with "differential privacy" method. +message DifferentialPrivacyPolicy { + // Optional. The maximum epsilon value that a query can consume. If the + // subscriber specifies epsilon as a parameter in a SELECT query, it must be + // less than or equal to this value. The epsilon parameter controls the amount + // of noise that is added to the groups — a higher epsilon means less noise. + optional double max_epsilon_per_query = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The delta value that is used per query. Delta represents the + // probability that any row will fail to be epsilon differentially private. + // Indicates the risk associated with exposing aggregate rows in the result of + // a query. + optional double delta_per_query = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The maximum groups contributed value that is used per query. + // Represents the maximum number of groups to which each protected entity can + // contribute. Changing this value does not improve or worsen privacy. The + // best value for accuracy and utility depends on the query and data. + optional int64 max_groups_contributed = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The privacy unit column associated with this policy. Differential + // privacy policies can only have one privacy unit column per data source + // object (table, view). + optional string privacy_unit_column = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The total epsilon budget for all queries against the + // privacy-protected view. Each subscriber query against this view charges the + // amount of epsilon they request in their query. If there is sufficient + // budget, then the subscriber query attempts to complete. It might still fail + // due to other reasons, in which case the charge is refunded. If there is + // insufficient budget the query is rejected. There might be multiple charge + // attempts if a single query references multiple views. In this case there + // must be sufficient budget for all charges or the query is rejected and + // charges are refunded in best effort. The budget does not have a refresh + // policy and can only be updated via ALTER VIEW or circumvented by creating a + // new view that can be queried with a fresh budget. + optional double epsilon_budget = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The total delta budget for all queries against the
+  // privacy-protected view. Each subscriber query against this view charges the
+  // amount of delta that is pre-defined by the contributor through the privacy
+  // policy delta_per_query field. If there is sufficient budget, then the
+  // subscriber query attempts to complete. It might still fail due to other
+  // reasons, in which case the charge is refunded. If there is insufficient
+  // budget the query is rejected. There might be multiple charge attempts if a
+  // single query references multiple views. In this case there must be
+  // sufficient budget for all charges or the query is rejected and charges are
+  // refunded in best effort. The budget does not have a refresh policy and can
+  // only be updated via ALTER VIEW or circumvented by creating a new view that
+  // can be queried with a fresh budget.
+  optional double delta_budget = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The epsilon budget remaining. If budget is exhausted, no more
+  // queries are allowed. Note that the budget for queries that are in progress
+  // is deducted before the query executes. If the query fails or is cancelled
+  // then the budget is refunded. In this case the amount of budget remaining
+  // can increase.
+  optional double epsilon_budget_remaining = 7
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The delta budget remaining. If budget is exhausted, no more
+  // queries are allowed. Note that the budget for queries that are in progress
+  // is deducted before the query executes. If the query fails or is cancelled
+  // then the budget is refunded. In this case the amount of budget remaining
+  // can increase.
+  optional double delta_budget_remaining = 8
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Represents privacy policy associated with "join restrictions". Join
+// restriction gives data providers the ability to enforce joins on the
+// 'join_allowed_columns' when data is queried from a privacy protected view.
+message JoinRestrictionPolicy {
+  // Enum for Join Restrictions policy.
+  enum JoinCondition {
+    // A join is neither required nor restricted on any column. Default value.
+    JOIN_CONDITION_UNSPECIFIED = 0;
+
+    // A join is required on at least one of the specified columns.
+    JOIN_ANY = 1;
+
+    // A join is required on all specified columns.
+    JOIN_ALL = 2;
+
+    // A join is not required, but if present it is only permitted on
+    // 'join_allowed_columns'.
+    JOIN_NOT_REQUIRED = 3;
+
+    // Joins are blocked for all queries.
+    JOIN_BLOCKED = 4;
+  }
+
+  // Optional. Specifies if a join is required or not on queries for the view.
+  // Default is JOIN_CONDITION_UNSPECIFIED.
+  optional JoinCondition join_condition = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The only columns that joins are allowed on.
+  // This field must be specified for join_conditions JOIN_ANY and JOIN_ALL
+  // and it cannot be set for JOIN_BLOCKED.
+  repeated string join_allowed_columns = 2
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
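A hedged sketch of a JOIN_ANY restriction as it would look in proto3 JSON; the column names are invented:

```ts
// JOIN_ANY: every query joining the protected view must join on at least
// one of the listed columns; JOIN_BLOCKED would forbid setting this list.
const joinRestrictionPolicy = {
  joinCondition: 'JOIN_ANY',
  joinAllowedColumns: ['customer_id', 'region'], // invented column names
};
```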
+// Represents privacy policy that contains the privacy requirements specified by
+// the data owner. Currently, this is only supported on views.
+message PrivacyPolicy {
+  // Privacy policy associated with this requirement specification. Only one of
+  // the privacy methods is allowed per data source object.
+  oneof privacy_policy {
+    // Optional. Policy used for aggregation thresholds.
+    AggregationThresholdPolicy aggregation_threshold_policy = 2
+        [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Policy used for differential privacy.
+    DifferentialPrivacyPolicy differential_privacy_policy = 3
+        [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Optional. Join restriction policy is outside of the privacy_policy oneof,
+  // since this policy can be set along with other policies. This policy gives
+  // data providers the ability to enforce joins on the 'join_allowed_columns'
+  // when data is queried from a privacy protected view.
+  optional JoinRestrictionPolicy join_restriction_policy = 1
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/project.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/project.proto.baseline
new file mode 100755
index 000000000..f04d3962e
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/project.proto.baseline
@@ -0,0 +1,61 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "ProjectProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// This is an experimental RPC service definition for the BigQuery
+// Project Service.
+//
+// It should not be relied on for production use cases at this time.
+service ProjectService {
+  option (google.api.default_host) = "bigquery.googleapis.com";
+  option (google.api.oauth_scopes) =
+      "https://www.googleapis.com/auth/bigquery,"
+      "https://www.googleapis.com/auth/cloud-platform,"
+      "https://www.googleapis.com/auth/cloud-platform.read-only";
+
+  // RPC to get the service account for a project used for interactions with
+  // Google Cloud KMS.
+  rpc GetServiceAccount(GetServiceAccountRequest)
+      returns (GetServiceAccountResponse) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/serviceAccount"
+    };
+  }
+}
+
+// Request object of GetServiceAccount.
+message GetServiceAccountRequest {
+  // Required. ID of the project.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
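The REST binding above is concrete enough to call directly. A hedged sketch using Node's global fetch; authentication is reduced to a bearer-token placeholder:

```ts
// Calls the documented GET /bigquery/v2/projects/{project_id}/serviceAccount
// mapping. `accessToken` is a placeholder for a real OAuth2 token.
async function getServiceAccount(projectId: string, accessToken: string) {
  const url =
    `https://bigquery.googleapis.com/bigquery/v2/projects/${projectId}/serviceAccount`;
  const res = await fetch(url, {
    headers: {Authorization: `Bearer ${accessToken}`},
  });
  if (!res.ok) throw new Error(`GetServiceAccount failed: ${res.status}`);
  return (await res.json()) as {kind: string; email: string};
}
```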
+// Response object of GetServiceAccount.
+message GetServiceAccountResponse {
+  // The resource type of the response.
+  string kind = 1;
+
+  // The service account email address.
+  string email = 2;
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/query_parameter.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/query_parameter.proto.baseline
new file mode 100755
index 000000000..e65a95b80
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/query_parameter.proto.baseline
@@ -0,0 +1,101 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "QueryParameterProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// The type of a struct parameter.
+message QueryParameterStructType {
+  // Optional. The name of this field.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. The type of this field.
+  QueryParameterType type = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Human-oriented description of the field.
+  string description = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The type of a query parameter.
+message QueryParameterType {
+  // Required. The top level type of this field.
+  string type = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The type of the array's elements, if this is an array.
+  QueryParameterType array_type = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The types of the fields of this struct, in order, if this is a
+  // struct.
+  repeated QueryParameterStructType struct_types = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The element type of the range, if this is a range.
+  QueryParameterType range_element_type = 4
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Represents the value of a range.
+message RangeValue {
+  // Optional. The start value of the range. A missing value represents an
+  // unbounded start.
+  QueryParameterValue start = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The end value of the range. A missing value represents an
+  // unbounded end.
+  QueryParameterValue end = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The value of a query parameter.
+message QueryParameterValue {
+  // Optional. The value of this parameter, if a simple scalar type.
+  google.protobuf.StringValue value = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The array values, if this is an array type.
+  repeated QueryParameterValue array_values = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // The struct field values.
+  map<string, QueryParameterValue> struct_values = 3;
+
+  // Optional. The range value, if this is a range type.
+  RangeValue range_value = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // This field should not be used.
+  repeated google.protobuf.Value alt_struct_values = 5;
+}
+
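QueryParameterType and QueryParameterValue mirror each other recursively: arrays and structs nest further type/value pairs. A hedged sketch of a named STRUCT parameter in proto3 JSON form, with invented field names; note that scalar values travel as strings:

```ts
// A named struct query parameter: the type tree on one side, the value
// tree on the other. Field names and values are invented.
const structParam = {
  name: 'user',
  parameterType: {
    type: 'STRUCT',
    structTypes: [
      {name: 'id', type: {type: 'INT64'}},
      {name: 'email', type: {type: 'STRING'}},
    ],
  },
  parameterValue: {
    structValues: {
      id: {value: '42'},              // scalars are serialized as strings
      email: {value: 'a@b.example'},
    },
  },
};
```

+// A parameter given to a query.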
+message QueryParameter { + // Optional. If unset, this is a positional parameter. Otherwise, should be + // unique within a query. + string name = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. The type of this parameter. + QueryParameterType parameter_type = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Required. The value of this parameter. + QueryParameterValue parameter_value = 3 + [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/range_partitioning.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/range_partitioning.proto.baseline new file mode 100755 index 000000000..1cfded0c9 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/range_partitioning.proto.baseline @@ -0,0 +1,47 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "RangePartitioningProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message RangePartitioning { + // Defines the ranges for range partitioning. + message Range { + // Required. The start of range partitioning, inclusive. This field is an + // INT64 value represented as a string. + string start = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The end of range partitioning, exclusive. This field is an + // INT64 value represented as a string. + string end = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The width of each interval. This field is an INT64 value + // represented as a string. + string interval = 3 [(google.api.field_behavior) = REQUIRED]; + } + + // Required. The name of the column to partition the table on. It must be a + // top-level, INT64 column whose mode is NULLABLE or REQUIRED. + string field = 1 [(google.api.field_behavior) = REQUIRED]; + + // Defines the ranges for range partitioning. + Range range = 2; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/restriction_config.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/restriction_config.proto.baseline new file mode 100755 index 000000000..5d9422db3 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/restriction_config.proto.baseline @@ -0,0 +1,40 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "RestrictionConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message RestrictionConfig { + // RestrictionType specifies the type of dataset/table restriction. + enum RestrictionType { + // Should never be used. + RESTRICTION_TYPE_UNSPECIFIED = 0; + + // Restrict data egress. See [Data + // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress) + // for more details. + RESTRICTED_DATA_EGRESS = 1; + } + + // Output only. Specifies the type of dataset/table restriction. + RestrictionType type = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/routine.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/routine.proto.baseline new file mode 100755 index 000000000..352b74524 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/routine.proto.baseline @@ -0,0 +1,540 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/routine_reference.proto"; +import "google/cloud/bigquery/v2/standard_sql.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "RoutineProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Routine Service. +// +// It should not be relied on for production use cases at this time. +service RoutineService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Gets the specified routine resource by routine ID. + rpc GetRoutine(GetRoutineRequest) returns (Routine) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines/{routine_id=*}" + }; + } + + // Creates a new routine in the dataset. + rpc InsertRoutine(InsertRoutineRequest) returns (Routine) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines" + body: "routine" + }; + } + + // Updates information in an existing routine. The update method replaces the + // entire Routine resource. 
+  rpc UpdateRoutine(UpdateRoutineRequest) returns (Routine) {
+    option (google.api.http) = {
+      put: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines/{routine_id=*}"
+      body: "routine"
+    };
+  }
+
+  // Patches information in an existing routine. The patch method does a partial
+  // update to an existing Routine resource.
+  rpc PatchRoutine(PatchRoutineRequest) returns (Routine) {}
+
+  // Deletes the routine specified by routineId from the dataset.
+  rpc DeleteRoutine(DeleteRoutineRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines/{routine_id=*}"
+    };
+  }
+
+  // Lists all routines in the specified dataset. Requires the READER dataset
+  // role.
+  rpc ListRoutines(ListRoutinesRequest) returns (ListRoutinesResponse) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines"
+    };
+  }
+}
+
+// A user-defined function or a stored procedure.
+message Routine {
+  // The fine-grained type of the routine.
+  enum RoutineType {
+    // Default value.
+    ROUTINE_TYPE_UNSPECIFIED = 0;
+
+    // Non-built-in persistent scalar function.
+    SCALAR_FUNCTION = 1;
+
+    // Stored procedure.
+    PROCEDURE = 2;
+
+    // Non-built-in persistent TVF.
+    TABLE_VALUED_FUNCTION = 3;
+
+    // Non-built-in persistent aggregate function.
+    AGGREGATE_FUNCTION = 4;
+  }
+
+  // The language of the routine.
+  enum Language {
+    // Default value.
+    LANGUAGE_UNSPECIFIED = 0;
+
+    // SQL language.
+    SQL = 1;
+
+    // JavaScript language.
+    JAVASCRIPT = 2;
+
+    // Python language.
+    PYTHON = 3;
+
+    // Java language.
+    JAVA = 4;
+
+    // Scala language.
+    SCALA = 5;
+  }
+
+  // Input/output argument of a function or a stored procedure.
+  message Argument {
+    // Represents the kind of a given argument.
+    enum ArgumentKind {
+      // Default value.
+      ARGUMENT_KIND_UNSPECIFIED = 0;
+
+      // The argument is a variable with fully specified type, which can be a
+      // struct or an array, but not a table.
+      FIXED_TYPE = 1;
+
+      // The argument is any type, including struct or array, but not a table.
+      // To be added: FIXED_TABLE, ANY_TABLE
+      ANY_TYPE = 2;
+    }
+
+    // The input/output mode of the argument.
+    enum Mode {
+      // Default value.
+      MODE_UNSPECIFIED = 0;
+
+      // The argument is input-only.
+      IN = 1;
+
+      // The argument is output-only.
+      OUT = 2;
+
+      // The argument is both an input and an output.
+      INOUT = 3;
+    }
+
+    // Optional. The name of this argument. Can be absent for function return
+    // argument.
+    string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Defaults to FIXED_TYPE.
+    ArgumentKind argument_kind = 2 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Specifies whether the argument is input or output.
+    // Can be set for procedures only.
+    Mode mode = 3;
+
+    // Required unless argument_kind = ANY_TYPE.
+    StandardSqlDataType data_type = 4;
+
+    // Optional. Whether the argument is an aggregate function parameter.
+    // Must be unset for routine types other than AGGREGATE_FUNCTION.
+    // For AGGREGATE_FUNCTION, if set to false, it is equivalent to adding "NOT
+    // AGGREGATE" clause in DDL; otherwise, it is equivalent to omitting "NOT
+    // AGGREGATE" clause in DDL.
+    google.protobuf.BoolValue is_aggregate = 6
+        [(google.api.field_behavior) = OPTIONAL];
+  }
+
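To make RoutineType, Language and Argument concrete, here is a hedged sketch of a SQL scalar UDF as a Routine resource in proto3 JSON form, suitable in spirit for the insert RPC above. return_type and definition_body are documented further below; all identifiers are invented:

```ts
// A minimal SQL scalar function as a Routine resource. FIXED_TYPE is the
// default argument kind, so it is omitted.
const addOneRoutine = {
  routineReference: {
    projectId: 'my-project',
    datasetId: 'my_ds',
    routineId: 'add_one',
  },
  routineType: 'SCALAR_FUNCTION',
  language: 'SQL',
  arguments: [{name: 'x', dataType: {typeKind: 'INT64'}}],
  returnType: {typeKind: 'INT64'},
  definitionBody: 'x + 1', // the expression inside the AS clause
};
```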
+  // JavaScript UDF determinism levels.
+  //
+  // If all JavaScript UDFs are DETERMINISTIC, the query result is potentially
+  // cacheable (see below). If any JavaScript UDF is NOT_DETERMINISTIC, the
+  // query result is not cacheable.
+  //
+  // Even if a JavaScript UDF is deterministic, many other factors can prevent
+  // usage of cached query results. Example factors include, but are not
+  // limited to: DDL/DML, non-deterministic SQL function calls, update of
+  // referenced tables/views/UDFs or imported JavaScript libraries.
+  //
+  // SQL UDFs cannot have determinism specified. Their determinism is
+  // automatically determined.
+  enum DeterminismLevel {
+    // The determinism of the UDF is unspecified.
+    DETERMINISM_LEVEL_UNSPECIFIED = 0;
+
+    // The UDF is deterministic, meaning that 2 function calls with the same
+    // inputs always produce the same result, even across 2 query runs.
+    DETERMINISTIC = 1;
+
+    // The UDF is not deterministic.
+    NOT_DETERMINISTIC = 2;
+  }
+
+  // Security mode.
+  enum SecurityMode {
+    // The security mode of the routine is unspecified.
+    SECURITY_MODE_UNSPECIFIED = 0;
+
+    // The routine is to be executed with the privileges of the user who
+    // defines it.
+    DEFINER = 1;
+
+    // The routine is to be executed with the privileges of the user who
+    // invokes it.
+    INVOKER = 2;
+  }
+
+  // Options for a remote user-defined function.
+  message RemoteFunctionOptions {
+    // Endpoint of the user-provided remote service, e.g.
+    // ```https://us-east1-my_gcf_project.cloudfunctions.net/remote_add```
+    string endpoint = 1;
+
+    // Fully qualified name of the user-provided connection object which holds
+    // the authentication information to send requests to the remote service.
+    // Format:
+    // ```"projects/{projectId}/locations/{locationId}/connections/{connectionId}"```
+    string connection = 2;
+
+    // User-defined context as a set of key/value pairs, which will be sent as
+    // function invocation context together with batched arguments in the
+    // requests to the remote service. The total number of bytes of keys and
+    // values must be less than 8KB.
+    map<string, string> user_defined_context = 3;
+
+    // Max number of rows in each batch sent to the remote service.
+    // If absent or if 0, BigQuery dynamically decides the number of rows in a
+    // batch.
+    int64 max_batching_rows = 4;
+  }
+
+  // Data governance type values. Only supports `DATA_MASKING`.
+  enum DataGovernanceType {
+    // The data governance type is unspecified.
+    DATA_GOVERNANCE_TYPE_UNSPECIFIED = 0;
+
+    // The data governance type is data masking.
+    DATA_MASKING = 1;
+  }
+
+  // Output only. A hash of this resource.
+  string etag = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. Reference describing the ID of this routine.
+  RoutineReference routine_reference = 2
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The type of routine.
+  RoutineType routine_type = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The time when this routine was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time when this routine was last modified, in milliseconds
+  // since the epoch.
+  int64 last_modified_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Defaults to "SQL" if remote_function_options field is absent, not
+  // set otherwise.
+  Language language = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional.
+  repeated Argument arguments = 7;
+
+  // Optional if language = "SQL"; required otherwise.
+  // Cannot be set if routine_type = "TABLE_VALUED_FUNCTION".
+ // + // If absent, the return type is inferred from definition_body at query time + // in each query that references this routine. If present, then the evaluated + // result will be cast to the specified returned type at query time. + // + // For example, for the functions created with the following statements: + // + // * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);` + // + // * `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));` + // + // * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));` + // + // The return_type is `{type_kind: "FLOAT64"}` for `Add` and `Decrement`, and + // is absent for `Increment` (inferred as FLOAT64 at query time). + // + // Suppose the function `Add` is replaced by + // `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);` + // + // Then the inferred return type of `Increment` is automatically changed to + // INT64 at query time, while the return type of `Decrement` remains FLOAT64. + StandardSqlDataType return_type = 10; + + // Optional. Can be set only if routine_type = "TABLE_VALUED_FUNCTION". + // + // If absent, the return table type is inferred from definition_body at query + // time in each query that references this routine. If present, then the + // columns in the evaluated table result will be cast to match the column + // types specified in return table type, at query time. + StandardSqlTableType return_table_type = 13 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If language = "JAVASCRIPT", this field stores the path of the + // imported JAVASCRIPT libraries. + repeated string imported_libraries = 8; + + // Required. The body of the routine. + // + // For functions, this is the expression in the AS clause. + // + // If language=SQL, it is the substring inside (but excluding) the + // parentheses. For example, for the function created with the following + // statement: + // + // `CREATE FUNCTION JoinLines(x string, y string) as (concat(x, "\n", y))` + // + // The definition_body is `concat(x, "\n", y)` (\n is not replaced with + // linebreak). + // + // If language=JAVASCRIPT, it is the evaluated string in the AS clause. + // For example, for the function created with the following statement: + // + // `CREATE FUNCTION f() RETURNS STRING LANGUAGE js AS 'return "\n";\n'` + // + // The definition_body is + // + // `return "\n";\n` + // + // Note that both \n are replaced with linebreaks. + string definition_body = 9; + + // Optional. The description of the routine, if defined. + string description = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The determinism level of the JavaScript UDF, if defined. + DeterminismLevel determinism_level = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The security mode of the routine, if defined. If not defined, the + // security mode is automatically determined from the routine's configuration. + SecurityMode security_mode = 18 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Use this option to catch many common errors. Error checking is + // not exhaustive, and successfully creating a procedure doesn't guarantee + // that the procedure will successfully execute at runtime. If `strictMode` is + // set to `TRUE`, the procedure body is further checked for errors such as + // non-existent tables or columns. The `CREATE PROCEDURE` statement fails if + // the body fails any of these checks. + // + // If `strictMode` is set to `FALSE`, the procedure body is checked only for + // syntax. 
For procedures that invoke themselves recursively, specify
+  // `strictMode=FALSE` to avoid non-existent procedure errors during
+  // validation.
+  //
+  // Default value is `TRUE`.
+  google.protobuf.BoolValue strict_mode = 14
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Remote function specific options.
+  RemoteFunctionOptions remote_function_options = 15
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Spark specific options.
+  SparkOptions spark_options = 16 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If set to `DATA_MASKING`, the function is validated and made
+  // available as a masking function. For more information, see [Create custom
+  // masking
+  // routines](https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask).
+  DataGovernanceType data_governance_type = 17
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Options for a user-defined Spark routine.
+message SparkOptions {
+  // Fully qualified name of the user-provided Spark connection object. Format:
+  // ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"```
+  string connection = 1;
+
+  // Runtime version. If not specified, the default runtime version is used.
+  string runtime_version = 2;
+
+  // Custom container image for the runtime environment.
+  string container_image = 3;
+
+  // Configuration properties as a set of key/value pairs, which will be passed
+  // on to the Spark application. For more information, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html) and the
+  // [procedure option
+  // list](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#procedure_option_list).
+  map<string, string> properties = 4;
+
+  // The main file/jar URI of the Spark application. Exactly one of the
+  // definition_body field and the main_file_uri field must be set for Python.
+  // Exactly one of the main_class and main_file_uri fields
+  // should be set for the Java/Scala language type.
+  string main_file_uri = 5;
+
+  // Python files to be placed on the PYTHONPATH for the PySpark application.
+  // Supported file types: `.py`, `.egg`, and `.zip`. For more information
+  // about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string py_file_uris = 6;
+
+  // JARs to include on the driver and executor CLASSPATH.
+  // For more information about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string jar_uris = 7;
+
+  // Files to be placed in the working directory of each executor.
+  // For more information about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string file_uris = 8;
+
+  // Archive files to be extracted into the working directory of each executor.
+  // For more information about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string archive_uris = 9;
+
+  // The fully qualified name of a class in jar_uris, for example,
+  // com.example.wordcount. Exactly one of the main_class and main_file_uri
+  // fields should be set for the Java/Scala language type.
+  string main_class = 10;
+}
+
+// Describes the format for getting information about a routine.
+message GetRoutineRequest {
+  // Required. Project ID of the requested routine
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required.
Dataset ID of the requested routine + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the requested routine + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for inserting a routine. +message InsertRoutineRequest { + // Required. Project ID of the new routine + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the new routine + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. A routine resource to insert + Routine routine = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for updating a routine. +message UpdateRoutineRequest { + // Required. Project ID of the routine to update + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routine to update + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the routine to update + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. A routine resource which will replace the specified routine + Routine routine = 4 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for the partial update (patch) of a routine. +message PatchRoutineRequest { + // Required. Project ID of the routine to update + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routine to update + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the routine to update + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. A routine resource which will be used to partially + // update the specified routine + Routine routine = 4 [(google.api.field_behavior) = REQUIRED]; + + // Only the Routine fields in the field mask are updated + // by the given routine. Repeated routine fields will be fully replaced + // if contained in the field mask. + google.protobuf.FieldMask field_mask = 5; +} + +// Describes the format for deleting a routine. +message DeleteRoutineRequest { + // Required. Project ID of the routine to delete + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routine to delete + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the routine to delete + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for listing routines. +message ListRoutinesRequest { + // Required. Project ID of the routines to list + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routines to list + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The maximum number of results to return in a single response page. + // Leverage the page tokens to iterate through the entire collection. + google.protobuf.UInt32Value max_results = 3; + + // Page token, returned by a previous call, to request the next page of + // results + string page_token = 4; + + // If set, then only the Routines matching this filter are returned. + // The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + // is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. + string filter = 6; +} + +// Describes the format of a single result page when listing routines. +message ListRoutinesResponse { + // Routines in the requested dataset. 
Unless read_mask is set in the request, + // only the following fields are populated: + // etag, project_id, dataset_id, routine_id, routine_type, creation_time, + // last_modified_time, language, and remote_function_options. + repeated Routine routines = 1; + + // A token to request the next page of results. + string next_page_token = 2; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/routine_reference.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/routine_reference.proto.baseline new file mode 100755 index 000000000..65ab1ae8d --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/routine_reference.proto.baseline @@ -0,0 +1,37 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "RoutineReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Id path of a routine. +message RoutineReference { + // Required. The ID of the project containing this routine. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the dataset containing this routine. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the routine. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 256 characters. + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/row_access_policy.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/row_access_policy.proto.baseline new file mode 100755 index 000000000..c6eb2e9b7 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/row_access_policy.proto.baseline @@ -0,0 +1,108 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/row_access_policy_reference.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "RowAccessPolicyProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Service for interacting with row access policies. +service RowAccessPolicyService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Lists all row access policies on the specified table. + rpc ListRowAccessPolicies(ListRowAccessPoliciesRequest) + returns (ListRowAccessPoliciesResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}/rowAccessPolicies" + }; + } +} + +// Request message for the ListRowAccessPolicies method. +message ListRowAccessPoliciesRequest { + // Required. Project ID of the row access policies to list. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of row access policies to list. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Table ID of the table to list row access policies. + string table_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Page token, returned by a previous call, to request the next page of + // results. + string page_token = 4; + + // The maximum number of results to return in a single response page. Leverage + // the page tokens to iterate through the entire collection. + int32 page_size = 5; +} + +// Response message for the ListRowAccessPolicies method. +message ListRowAccessPoliciesResponse { + // Row access policies on the requested table. + repeated RowAccessPolicy row_access_policies = 1; + + // A token to request the next page of results. + string next_page_token = 2; +} + +// Represents access on a subset of rows on the specified table, defined by its +// filter predicate. Access to the subset of rows is controlled by its IAM +// policy. +message RowAccessPolicy { + // Output only. A hash of this resource. + string etag = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Reference describing the ID of this row access policy. + RowAccessPolicyReference row_access_policy_reference = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Required. A SQL boolean expression that represents the rows defined by this + // row access policy, similar to the boolean expression in a WHERE clause of a + // SELECT query on a table. + // References to other tables, routines, and temporary functions are not + // supported. + // + // Examples: region="EU" + // date_field = CAST('2019-9-27' as DATE) + // nullable_field is not NULL + // numeric_field BETWEEN 1.0 AND 5.0 + string filter_predicate = 3 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The time when this row access policy was created, in + // milliseconds since the epoch. + google.protobuf.Timestamp creation_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when this row access policy was last modified, in + // milliseconds since the epoch. 
+ google.protobuf.Timestamp last_modified_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/row_access_policy_reference.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/row_access_policy_reference.proto.baseline new file mode 100755 index 000000000..28028dab0 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/row_access_policy_reference.proto.baseline @@ -0,0 +1,41 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "RowAccessPolicyReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Id path of a row access policy. +message RowAccessPolicyReference { + // Required. The ID of the project containing this row access policy. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the dataset containing this row access policy. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the table containing this row access policy. + string table_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the row access policy. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 256 characters. + string policy_id = 4 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/session_info.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/session_info.proto.baseline new file mode 100755 index 000000000..333ab3b7e --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/session_info.proto.baseline @@ -0,0 +1,30 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "SessionInfoProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// [Preview] Information related to sessions. +message SessionInfo { + // Output only. The id of the session. 
string session_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/standard_sql.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/standard_sql.proto.baseline
new file mode 100755
index 000000000..0f63b2d5f
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/standard_sql.proto.baseline
@@ -0,0 +1,166 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "StandardSqlProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// The data type of a variable such as a function argument.
+// Examples include:
+//
+// * INT64: `{"typeKind": "INT64"}`
+//
+// * ARRAY<STRING>:
+//
+//     {
+//       "typeKind": "ARRAY",
+//       "arrayElementType": {"typeKind": "STRING"}
+//     }
+//
+// * STRUCT<x STRING, y ARRAY<DATE>>:
+//
+//     {
+//       "typeKind": "STRUCT",
+//       "structType":
+//       {
+//         "fields":
+//         [
+//           {
+//             "name": "x",
+//             "type": {"typeKind": "STRING"}
+//           },
+//           {
+//             "name": "y",
+//             "type":
+//             {
+//               "typeKind": "ARRAY",
+//               "arrayElementType": {"typeKind": "DATE"}
+//             }
+//           }
+//         ]
+//       }
+//     }
+//
+// * RANGE<DATE>:
+//
+//     {
+//       "typeKind": "RANGE",
+//       "rangeElementType": {"typeKind": "DATE"}
+//     }
+message StandardSqlDataType {
+  // The kind of the datatype.
+  enum TypeKind {
+    // Invalid type.
+    TYPE_KIND_UNSPECIFIED = 0;
+
+    // Encoded as a string in decimal format.
+    INT64 = 2;
+
+    // Encoded as a boolean "false" or "true".
+    BOOL = 5;
+
+    // Encoded as a number, or string "NaN", "Infinity" or "-Infinity".
+    FLOAT64 = 7;
+
+    // Encoded as a string value.
+    STRING = 8;
+
+    // Encoded as a base64 string per RFC 4648, section 4.
+    BYTES = 9;
+
+    // Encoded as an RFC 3339 timestamp with mandatory "Z" time zone string:
+    // 1985-04-12T23:20:50.52Z
+    TIMESTAMP = 19;
+
+    // Encoded as RFC 3339 full-date format string: 1985-04-12
+    DATE = 10;
+
+    // Encoded as RFC 3339 partial-time format string: 23:20:50.52
+    TIME = 20;
+
+    // Encoded as RFC 3339 full-date "T" partial-time: 1985-04-12T23:20:50.52
+    DATETIME = 21;
+
+    // Encoded as fully qualified 3 part: 0-5 15 2:30:45.6
+    INTERVAL = 26;
+
+    // Encoded as WKT
+    GEOGRAPHY = 22;
+
+    // Encoded as a decimal string.
+    NUMERIC = 23;
+
+    // Encoded as a decimal string.
+    BIGNUMERIC = 24;
+
+    // Encoded as a string.
+    JSON = 25;
+
+    // Encoded as a list with types matching Type.array_type.
+    ARRAY = 16;
+
+    // Encoded as a list with fields of type Type.struct_type[i]. List is used
+    // because a JSON object cannot have duplicate field names.
+    STRUCT = 17;
+
+    // Encoded as a pair with types matching range_element_type. Pairs must
+    // begin with "[", end with ")", and be separated by ", ".
+    RANGE = 29;
+  }
+
+  // Required. The top level type of this field.
+  // Can be any GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY<STRING>").
+  TypeKind type_kind = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // For complex types, the sub type information.
+  oneof sub_type {
+    // The type of the array's elements, if type_kind = "ARRAY".
+    StandardSqlDataType array_element_type = 2;
+
+    // The fields of this struct, in order, if type_kind = "STRUCT".
+    StandardSqlStructType struct_type = 3;
+
+    // The type of the range's elements, if type_kind = "RANGE".
+    StandardSqlDataType range_element_type = 4;
+  }
+}
+
+// A field or a column.
+message StandardSqlField {
+  // Optional. The name of this field. Can be absent for struct fields.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The type of this parameter. Absent if not explicitly
+  // specified (e.g., CREATE FUNCTION statement can omit the return type;
+  // in this case the output parameter does not have this "type" field).
+  StandardSqlDataType type = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The representation of a SQL STRUCT type.
+message StandardSqlStructType {
+  // Fields within the struct.
+  repeated StandardSqlField fields = 1;
+}
+
+// A table type.
+message StandardSqlTableType {
+  // The columns in this table type.
+  repeated StandardSqlField columns = 1;
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/system_variable.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/system_variable.proto.baseline
new file mode 100755
index 000000000..4437f0f4e
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/system_variable.proto.baseline
@@ -0,0 +1,36 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/standard_sql.proto";
+import "google/protobuf/struct.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "SystemVariableProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// System variables given to a query.
+message SystemVariables {
+  // Output only. Data type for each system variable.
+  map<string, StandardSqlDataType> types = 1
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Value for each system variable.
+  google.protobuf.Struct values = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table.proto.baseline
new file mode 100755
index 000000000..9f93ae5df
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table.proto.baseline
@@ -0,0 +1,730 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/biglake_config.proto"; +import "google/cloud/bigquery/v2/clustering.proto"; +import "google/cloud/bigquery/v2/encryption_config.proto"; +import "google/cloud/bigquery/v2/error.proto"; +import "google/cloud/bigquery/v2/external_catalog_table_options.proto"; +import "google/cloud/bigquery/v2/external_data_config.proto"; +import "google/cloud/bigquery/v2/partitioning_definition.proto"; +import "google/cloud/bigquery/v2/privacy_policy.proto"; +import "google/cloud/bigquery/v2/range_partitioning.proto"; +import "google/cloud/bigquery/v2/restriction_config.proto"; +import "google/cloud/bigquery/v2/table_constraints.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/cloud/bigquery/v2/time_partitioning.proto"; +import "google/cloud/bigquery/v2/udf_resource.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "TableProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Table Service. +// +// It should not be relied on for production use cases at this time. +service TableService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Gets the specified table resource by table ID. + // This method does not return the data in the table, it only returns the + // table resource, which describes the structure of this table. + rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}" + }; + } + + // Creates a new, empty table in the dataset. + rpc InsertTable(InsertTableRequest) returns (Table) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables" + body: "table" + }; + } + + // Updates information in an existing table. The update method replaces the + // entire table resource, whereas the patch method only replaces fields that + // are provided in the submitted table resource. + // This method supports RFC5789 patch semantics. + rpc PatchTable(UpdateOrPatchTableRequest) returns (Table) { + option (google.api.http) = { + patch: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}" + body: "table" + }; + } + + // Updates information in an existing table. The update method replaces the + // entire Table resource, whereas the patch method only replaces fields that + // are provided in the submitted Table resource. 
+  rpc UpdateTable(UpdateOrPatchTableRequest) returns (Table) {
+    option (google.api.http) = {
+      put: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}"
+      body: "table"
+    };
+  }
+
+  // Deletes the table specified by tableId from the dataset.
+  // If the table contains data, all the data will be deleted.
+  rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}"
+    };
+  }
+
+  // Lists all tables in the specified dataset. Requires the READER dataset
+  // role.
+  rpc ListTables(ListTablesRequest) returns (TableList) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables"
+    };
+  }
+}
+
+// Replication info of a table created using `AS REPLICA` DDL like:
+// `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`
+message TableReplicationInfo {
+  // Replication status of the table created using `AS REPLICA` like:
+  // `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`
+  enum ReplicationStatus {
+    // Default value.
+    REPLICATION_STATUS_UNSPECIFIED = 0;
+
+    // Replication is Active with no errors.
+    ACTIVE = 1;
+
+    // Source object is deleted.
+    SOURCE_DELETED = 2;
+
+    // Source revoked replication permissions.
+    PERMISSION_DENIED = 3;
+
+    // Source configuration doesn’t allow replication.
+    UNSUPPORTED_CONFIGURATION = 4;
+  }
+
+  // Required. Source table reference that is replicated.
+  TableReference source_table = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Specifies the interval at which the source table is polled for
+  // updates. If not specified, the default replication interval is applied.
+  int64 replication_interval_ms = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. If the source is a materialized view, this field
+  // signifies the last refresh time of the source.
+  int64 replicated_source_last_refresh_time = 3 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.field_behavior) = OPTIONAL
+  ];
+
+  // Optional. Output only. Replication status of the configured replication.
+  ReplicationStatus replication_status = 4 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.field_behavior) = OPTIONAL
+  ];
+
+  // Optional. Output only. Replication error that permanently stopped table
+  // replication.
+  ErrorProto replication_error = 5 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.field_behavior) = OPTIONAL
+  ];
+}
+
+// Describes the definition of a logical view.
+message ViewDefinition {
+  // Required. A query that BigQuery executes when the view is referenced.
+  string query = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Describes user-defined function resources used in the query.
+  repeated UserDefinedFunctionResource user_defined_function_resources = 2;
+
+  // Specifies whether to use BigQuery's legacy SQL for this view.
+  // The default value is true. If set to false, the view will use
+  // BigQuery's GoogleSQL:
+  // https://cloud.google.com/bigquery/sql-reference/
+  //
+  // Queries and views that reference this view must use the same flag value.
+  // A wrapper is used here because the default value is True.
+  google.protobuf.BoolValue use_legacy_sql = 3;
+
+  // True if the column names are explicitly specified, for example by using
+  // the 'CREATE VIEW v(c1, c2) AS ...' syntax.
+  // Can only be set for GoogleSQL views.
+  bool use_explicit_column_names = 4;
+
+  // Optional. Specifies the privacy policy for the view.
+  PrivacyPolicy privacy_policy = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Foreign view representations.
+  repeated ForeignViewDefinition foreign_definitions = 6
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A view can be represented in multiple ways. Each representation has its own
+// dialect. This message stores the metadata required for these representations.
+message ForeignViewDefinition {
+  // Required. The query that defines the view.
+  string query = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Represents the dialect of the query.
+  string dialect = 7 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Definition and configuration of a materialized view.
+message MaterializedViewDefinition {
+  // Required. A query whose results are persisted.
+  string query = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The time when this materialized view was last refreshed, in
+  // milliseconds since the epoch.
+  int64 last_refresh_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Enable automatic refresh of the materialized view when the base
+  // table is updated. The default value is "true".
+  google.protobuf.BoolValue enable_refresh = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The maximum frequency at which this materialized view will be
+  // refreshed. The default value is "1800000" (30 minutes).
+  google.protobuf.UInt64Value refresh_interval_ms = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. This option declares the intention to construct a materialized
+  // view that isn't refreshed incrementally.
+  google.protobuf.BoolValue allow_non_incremental_definition = 6
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Status of a materialized view.
+// The last refresh timestamp status is omitted here, but is present in the
+// MaterializedViewDefinition message.
+message MaterializedViewStatus {
+  // Output only. Refresh watermark of materialized view. The base tables' data
+  // were collected into the materialized view cache until this time.
+  google.protobuf.Timestamp refresh_watermark = 1
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Error result of the last automatic refresh. If present,
+  // indicates that the last automatic refresh was unsuccessful.
+  ErrorProto last_refresh_status = 2
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Information about base table and snapshot time of the snapshot.
+message SnapshotDefinition {
+  // Required. Reference describing the ID of the table that was snapshot.
+  TableReference base_table_reference = 1
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The time at which the base table was snapshot. This value is
+  // reported in the JSON response using RFC3339 format.
+  google.protobuf.Timestamp snapshot_time = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+// Information about base table and clone time of a table clone.
+message CloneDefinition {
+  // Required. Reference describing the ID of the table that was cloned.
+  TableReference base_table_reference = 1
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The time at which the base table was cloned. This value is
+  // reported in the JSON response using RFC3339 format.
+  google.protobuf.Timestamp clone_time = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+message Streamingbuffer {
+  // Output only. A lower-bound estimate of the number of bytes currently in
+  // the streaming buffer.
+  uint64 estimated_bytes = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A lower-bound estimate of the number of rows currently in the
+  // streaming buffer.
+  uint64 estimated_rows = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Contains the timestamp of the oldest entry in the streaming
+  // buffer, in milliseconds since the epoch, if the streaming buffer is
+  // available.
+  fixed64 oldest_entry_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+message Table {
+  // The type of resource ID.
+  string kind = 1;
+
+  // Output only. A hash of this resource.
+  string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. An opaque ID uniquely identifying the table.
+  string id = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A URL that can be used to access this resource again.
+  string self_link = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. Reference describing the ID of this table.
+  TableReference table_reference = 5 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. A descriptive name for this table.
+  google.protobuf.StringValue friendly_name = 6
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A user-friendly description of this table.
+  google.protobuf.StringValue description = 7
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // The labels associated with this table. You can use these to organize and
+  // group your tables. Label keys and values can be no longer than 63
+  // characters, can only contain lowercase letters, numeric characters,
+  // underscores and dashes. International characters are allowed. Label values
+  // are optional. Label keys must start with a letter and each label in the
+  // list must have a different key.
+  map<string, string> labels = 8;
+
+  // Optional. Describes the schema of this table.
+  TableSchema schema = 9 [(google.api.field_behavior) = OPTIONAL];
+
+  // If specified, configures time-based partitioning for this table.
+  TimePartitioning time_partitioning = 10;
+
+  // If specified, configures range partitioning for this table.
+  RangePartitioning range_partitioning = 27;
+
+  // Clustering specification for the table. Must be specified with time-based
+  // partitioning; data in the table will first be partitioned and subsequently
+  // clustered.
+  Clustering clustering = 23;
+
+  // Optional. If set to true, queries over this table require
+  // a partition filter that can be used for partition elimination to be
+  // specified.
+  google.protobuf.BoolValue require_partition_filter = 28
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The partition information for all table formats, including
+  // managed partitioned tables, hive partitioned tables, iceberg partitioned,
+  // and metastore partitioned tables. This field is only populated for
+  // metastore partitioned tables. For other table formats, this is an output
+  // only field.
+  optional PartitioningDefinition partition_definition = 51
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The size of this table in logical bytes, excluding any data in
+  // the streaming buffer.
+  google.protobuf.Int64Value num_bytes = 11
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The physical size of this table in bytes. This includes
+  // storage used for time travel.
+ google.protobuf.Int64Value num_physical_bytes = 26 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of logical bytes in the table that are considered + // "long-term storage". + google.protobuf.Int64Value num_long_term_bytes = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of rows of data in this table, excluding any data + // in the streaming buffer. + google.protobuf.UInt64Value num_rows = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when this table was created, in milliseconds since + // the epoch. + int64 creation_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The time when this table expires, in milliseconds since the + // epoch. If not present, the table will persist indefinitely. Expired tables + // will be deleted and their storage reclaimed. The defaultTableExpirationMs + // property of the encapsulating dataset can be used to set a default + // expirationTime on newly created tables. + google.protobuf.Int64Value expiration_time = 15 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The time when this table was last modified, in milliseconds + // since the epoch. + fixed64 last_modified_time = 16 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes the table type. The following values are supported: + // + // * `TABLE`: A normal BigQuery table. + // * `VIEW`: A virtual table defined by a SQL query. + // * `EXTERNAL`: A table that references data stored in an external storage + // system, such as Google Cloud Storage. + // * `MATERIALIZED_VIEW`: A precomputed view defined by a SQL query. + // * `SNAPSHOT`: An immutable BigQuery table that preserves the contents of a + // base table at a particular time. See additional information on + // [table + // snapshots](https://cloud.google.com/bigquery/docs/table-snapshots-intro). + // + // The default value is `TABLE`. + string type = 17 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The view definition. + ViewDefinition view = 18 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The materialized view definition. + MaterializedViewDefinition materialized_view = 25 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The materialized view status. + MaterializedViewStatus materialized_view_status = 42 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Describes the data format, location, and other properties of + // a table stored outside of BigQuery. By defining these properties, the data + // source can then be queried as if it were a standard BigQuery table. + ExternalDataConfiguration external_data_configuration = 19 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the configuration of a BigLake managed table. + BigLakeConfiguration biglake_configuration = 45 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The geographic location where the table resides. This value + // is inherited from the dataset. + string location = 20 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Contains information regarding this table's streaming buffer, + // if one is present. This field will be absent if the table is not being + // streamed to or if there is no data in the streaming buffer. + Streamingbuffer streaming_buffer = 21 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Custom encryption configuration (e.g., Cloud KMS keys). 
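+  // As an illustrative (non-normative) example, a customer-managed key is
+  // referenced by its Cloud KMS resource name, e.g.
+  // `projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key`.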
+ EncryptionConfiguration encryption_configuration = 22; + + // Output only. Contains information about the snapshot. This value is set via + // snapshot creation. + SnapshotDefinition snapshot_definition = 29 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Defines the default collation specification of new STRING fields + // in the table. During table creation or update, if a STRING field is added + // to this table without explicit collation specified, then the table inherits + // the table default collation. A change to this field affects only fields + // added afterwards, and does not alter the existing fields. + // The following values are supported: + // + // * 'und:ci': undetermined locale, case insensitive. + // * '': empty string. Default to case-sensitive behavior. + google.protobuf.StringValue default_collation = 30 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Defines the default rounding mode specification of new decimal + // fields (NUMERIC OR BIGNUMERIC) in the table. During table creation or + // update, if a decimal field is added to this table without an explicit + // rounding mode specified, then the field inherits the table default + // rounding mode. Changing this field doesn't affect existing fields. + TableFieldSchema.RoundingMode default_rounding_mode = 44 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Contains information about the clone. This value is set via + // the clone operation. + CloneDefinition clone_definition = 31 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of physical bytes used by time travel storage (deleted + // or changed data). This data is not kept in real time, and might be delayed + // by a few seconds to a few minutes. + google.protobuf.Int64Value num_time_travel_physical_bytes = 33 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total number of logical bytes in the table or materialized + // view. + google.protobuf.Int64Value num_total_logical_bytes = 34 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of logical bytes that are less than 90 days old. + google.protobuf.Int64Value num_active_logical_bytes = 35 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of logical bytes that are more than 90 days old. + google.protobuf.Int64Value num_long_term_logical_bytes = 36 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of physical bytes used by current live data storage. + // This data is not kept in real time, and might be delayed by a few seconds + // to a few minutes. + google.protobuf.Int64Value num_current_physical_bytes = 53 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The physical size of this table in bytes. This also includes + // storage used for time travel. This data is not kept in real time, and might + // be delayed by a few seconds to a few minutes. + google.protobuf.Int64Value num_total_physical_bytes = 37 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of physical bytes less than 90 days old. This data is + // not kept in real time, and might be delayed by a few seconds to a few + // minutes. + google.protobuf.Int64Value num_active_physical_bytes = 38 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of physical bytes more than 90 days old. + // This data is not kept in real time, and might be delayed by a few seconds + // to a few minutes. 
+  google.protobuf.Int64Value num_long_term_physical_bytes = 39
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The number of partitions present in the table or materialized
+  // view. This data is not kept in real time, and might be delayed by a few
+  // seconds to a few minutes.
+  google.protobuf.Int64Value num_partitions = 40
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The maximum staleness of data that could be returned when the
+  // table (or stale MV) is queried. Staleness encoded as a string encoding
+  // of the SQL IntervalValue type.
+  string max_staleness = 41 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. Restriction config for table. If set, restrict
+  // certain accesses on the table based on the config. See [Data
+  // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress)
+  // for more details.
+  RestrictionConfig restrictions = 46 [
+    (google.api.field_behavior) = OPTIONAL,
+    (google.api.field_behavior) = OUTPUT_ONLY
+  ];
+
+  // Optional. The table's primary key and foreign key information.
+  TableConstraints table_constraints = 47
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The [tags](https://cloud.google.com/bigquery/docs/tags) attached
+  // to this table. Tag keys are globally unique. Tag key is expected to be in
+  // the namespaced format, for example "123456789012/environment" where
+  // 123456789012 is the ID of the parent organization or project resource for
+  // this tag key. Tag value is expected to be the short name, for example
+  // "Production". See [Tag
+  // definitions](https://cloud.google.com/iam/docs/tags-access-control#definitions)
+  // for more details.
+  map<string, string> resource_tags = 48
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Table replication info for table created `AS REPLICA` DDL like:
+  // `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`
+  TableReplicationInfo table_replication_info = 49
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. Table references of all replicas currently active on
+  // the table.
+  repeated TableReference replicas = 50 [
+    (google.api.field_behavior) = OPTIONAL,
+    (google.api.field_behavior) = OUTPUT_ONLY
+  ];
+
+  // Optional. Options defining open source compatible table.
+  ExternalCatalogTableOptions external_catalog_table_options = 54
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for getting table metadata.
+message GetTableRequest {
+  // TableMetadataView specifies which table information is returned.
+  enum TableMetadataView {
+    // The default value.
+    // Default to the STORAGE_STATS view.
+    TABLE_METADATA_VIEW_UNSPECIFIED = 0;
+
+    // Includes basic table information including schema and
+    // partitioning specification. This view does not include storage statistics
+    // such as numRows or numBytes. This view is significantly more efficient
+    // and should be used to support high query rates.
+    BASIC = 1;
+
+    // Includes all information in the BASIC view as well as storage statistics
+    // (numBytes, numLongTermBytes, numRows and lastModifiedTime).
+    STORAGE_STATS = 2;
+
+    // Includes all table information, including storage statistics.
+    // It returns same information as STORAGE_STATS view, but may contain
+    // additional information in the future.
+    FULL = 3;
+  }
+
+  // Required. Project ID of the requested table
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the requested table
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Table ID of the requested table
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // List of table schema fields to return (comma-separated).
+  // If unspecified, all fields are returned.
+  // A fieldMask cannot be used here because the fields will automatically be
+  // converted from camelCase to snake_case and the conversion will fail if
+  // there are underscores. Since these are fields in BigQuery table schemas,
+  // underscores are allowed.
+  string selected_fields = 4;
+
+  // Optional. Specifies the view that determines which table information is
+  // returned. By default, basic table information and storage statistics
+  // (STORAGE_STATS) are returned.
+  TableMetadataView view = 5 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for inserting table metadata.
+message InsertTableRequest {
+  // Required. Project ID of the new table
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the new table
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. A tables resource to insert
+  Table table = 4 [(google.api.field_behavior) = REQUIRED];
+}
+
+message UpdateOrPatchTableRequest {
+  // Required. Project ID of the table to update
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the table to update
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Table ID of the table to update
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. A tables resource which will replace or patch the specified table
+  Table table = 4 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. When true, autodetect the schema; otherwise, keep the original
+  // schema.
+  bool autodetect_schema = 5 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for deleting a table.
+message DeleteTableRequest {
+  // Required. Project ID of the table to delete
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the table to delete
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Table ID of the table to delete
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for enumerating tables.
+message ListTablesRequest {
+  // Required. Project ID of the tables to list
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the tables to list
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // The maximum number of results to return in a single response page.
+  // Leverage the page tokens to iterate through the entire collection.
+  google.protobuf.UInt32Value max_results = 3;
+
+  // Page token, returned by a previous call, to request the next page of
+  // results
+  string page_token = 4;
+}
+
+// Information about a logical view.
+message ListFormatView {
+  // True if view is defined in legacy SQL dialect,
+  // false if in GoogleSQL.
+  google.protobuf.BoolValue use_legacy_sql = 1;
+
+  // Specifies the privacy policy for the view.
+  PrivacyPolicy privacy_policy = 2;
+}
+
+message ListFormatTable {
+  // The resource type.
+  string kind = 1;
+
+  // An opaque ID of the table.
+  string id = 2;
+
+  // A reference uniquely identifying the table.
+  TableReference table_reference = 3;
+
+  // The user-friendly name for this table.
+  google.protobuf.StringValue friendly_name = 4;
+
+  // The type of table.
+  string type = 5;
+
+  // The time-based partitioning for this table.
+  TimePartitioning time_partitioning = 6;
+
+  // The range partitioning for this table.
+  RangePartitioning range_partitioning = 12;
+
+  // Clustering specification for this table, if configured.
+  Clustering clustering = 11;
+
+  // The labels associated with this table. You can use these to organize
+  // and group your tables.
+  map<string, string> labels = 7;
+
+  // Additional details for a view.
+  ListFormatView view = 8;
+
+  // Output only. The time when this table was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The time when this table expires, in milliseconds since the
+  // epoch. If not present, the table will persist indefinitely. Expired tables
+  // will be deleted and their storage reclaimed.
+  int64 expiration_time = 10;
+
+  // Optional. If set to true, queries including this table must specify a
+  // partition filter. This filter is used for partition elimination.
+  google.protobuf.BoolValue require_partition_filter = 14
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Partial projection of the metadata for a given table in a list response.
+message TableList {
+  // The type of list.
+  string kind = 1;
+
+  // A hash of this page of results.
+  string etag = 2;
+
+  // A token to request the next page of results.
+  string next_page_token = 3;
+
+  // Tables in the requested dataset.
+  repeated ListFormatTable tables = 4;
+
+  // The total number of tables in the dataset.
+  google.protobuf.Int32Value total_items = 5;
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table_constraints.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table_constraints.proto.baseline
new file mode 100755
index 000000000..13edc6bb9
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table_constraints.proto.baseline
@@ -0,0 +1,66 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/table_reference.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "TableConstraintsProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Represents the primary key constraint on a table's columns.
+message PrimaryKey {
+  // Required. The columns that compose the primary key constraint.
+  repeated string columns = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The pair of the foreign key column and primary key column.
+message ColumnReference {
+  // Required. The column that composes the foreign key.
+  string referencing_column = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The column in the primary key that is referenced by the
+  // referencing_column.
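+  // As an illustrative example with hypothetical tables, if
+  // orders.customer_id references customers.id, then referencing_column is
+  // "customer_id" and referenced_column is "id".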
+ string referenced_column = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Represents a foreign key constraint on a table's columns. +message ForeignKey { + // Optional. Set only if the foreign key constraint is named. + string name = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. The table that holds the primary key and is referenced by this + // foreign key. + TableReference referenced_table = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The columns that compose the foreign key. + repeated ColumnReference column_references = 3 + [(google.api.field_behavior) = REQUIRED]; +} + +// The TableConstraints defines the primary key and foreign key. +message TableConstraints { + // Optional. Represents a primary key constraint on a table's columns. + // Present only if the table has a primary key. + // The primary key is not enforced. + PrimaryKey primary_key = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Present only if the table has a foreign key. + // The foreign key is not enforced. + repeated ForeignKey foreign_keys = 2 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table_reference.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table_reference.proto.baseline new file mode 100755 index 000000000..e6e9a1b35 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table_reference.proto.baseline @@ -0,0 +1,40 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "TableReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message TableReference { + // Required. The ID of the project containing this table. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the dataset containing this table. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the table. The ID can contain Unicode characters in + // category L (letter), M (mark), N (number), Pc (connector, including + // underscore), Pd (dash), and Zs (space). For more information, see [General + // Category](https://wikipedia.org/wiki/Unicode_character_property#General_Category). + // The maximum length is 1,024 characters. Certain operations allow suffixing + // of the table ID with a partition decorator, such as + // `sample_table$20190123`. 
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table_schema.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table_schema.proto.baseline
new file mode 100755
index 000000000..8a56f8e87
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/table_schema.proto.baseline
@@ -0,0 +1,233 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "TableSchemaProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Schema of a table
+message TableSchema {
+  // Describes the fields in a table.
+  repeated TableFieldSchema fields = 1;
+
+  // Optional. Specifies metadata of the foreign data type definition in field
+  // schema
+  // ([TableFieldSchema.foreign_type_definition][google.cloud.bigquery.v2.TableFieldSchema.foreign_type_definition]).
+  ForeignTypeInfo foreign_type_info = 3
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Metadata about the foreign data type definition, such as the system
+// in which the type is defined.
+message ForeignTypeInfo {
+  // External systems, such as query engines or table formats, that have their
+  // own data types.
+  enum TypeSystem {
+    // TypeSystem not specified.
+    TYPE_SYSTEM_UNSPECIFIED = 0;
+
+    // Represents Hive data types.
+    HIVE = 1;
+  }
+
+  // Required. Specifies the system which defines the foreign data type.
+  TypeSystem type_system = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Data policy option proto; it currently supports name only and will support
+// precedence later.
+message DataPolicyOption {
+  // Data policy resource name in the form of
+  // projects/project_id/locations/location_id/dataPolicies/data_policy_id.
+  optional string name = 1;
+}
+
+// A field in TableSchema
+message TableFieldSchema {
+  message PolicyTagList {
+    // A list of policy tag resource names. For example,
+    // "projects/1/locations/eu/taxonomies/2/policyTags/3". At most 1 policy tag
+    // is currently allowed.
+    repeated string names = 1;
+  }
+
+  // Rounding mode options that can be used when storing NUMERIC
+  // or BIGNUMERIC values.
+  enum RoundingMode {
+    // Unspecified will default to using ROUND_HALF_AWAY_FROM_ZERO.
+    ROUNDING_MODE_UNSPECIFIED = 0;
+
+    // ROUND_HALF_AWAY_FROM_ZERO rounds half values away from zero
+    // when applying precision and scale upon writing of NUMERIC and BIGNUMERIC
+    // values.
+    // For Scale: 0
+    // 1.1, 1.2, 1.3, 1.4 => 1
+    // 1.5, 1.6, 1.7, 1.8, 1.9 => 2
+    ROUND_HALF_AWAY_FROM_ZERO = 1;
+
+    // ROUND_HALF_EVEN rounds half values to the nearest even value
+    // when applying precision and scale upon writing of NUMERIC and BIGNUMERIC
+    // values.
+    // For Scale: 0
+    // 1.1, 1.2, 1.3, 1.4 => 1
+    // 1.5 => 2
+    // 1.6, 1.7, 1.8, 1.9 => 2
+    // 2.5 => 2
+    ROUND_HALF_EVEN = 2;
+  }
+
+  // Represents the type of a field element.
+  message FieldElementType {
+    // Required. The type of a field element. For more information, see
+    // [TableFieldSchema.type][google.cloud.bigquery.v2.TableFieldSchema.type].
+    string type = 1 [(google.api.field_behavior) = REQUIRED];
+  }
+
+  // Required. The field name. The name must contain only letters (a-z, A-Z),
+  // numbers (0-9), or underscores (_), and must start with a letter or
+  // underscore. The maximum length is 300 characters.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The field data type. Possible values include:
+  //
+  // * STRING
+  // * BYTES
+  // * INTEGER (or INT64)
+  // * FLOAT (or FLOAT64)
+  // * BOOLEAN (or BOOL)
+  // * TIMESTAMP
+  // * DATE
+  // * TIME
+  // * DATETIME
+  // * GEOGRAPHY
+  // * NUMERIC
+  // * BIGNUMERIC
+  // * JSON
+  // * RECORD (or STRUCT)
+  // * RANGE
+  //
+  // Use of RECORD/STRUCT indicates that the field contains a nested schema.
+  string type = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The field mode. Possible values include NULLABLE, REQUIRED and
+  // REPEATED. The default value is NULLABLE.
+  string mode = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Describes the nested schema fields if the type property is set
+  // to RECORD.
+  repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The field description. The maximum length is 1,024 characters.
+  google.protobuf.StringValue description = 6
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The policy tags attached to this field, used for field-level
+  // access control. If not set, defaults to empty policy_tags.
+  PolicyTagList policy_tags = 9 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Data policy options, will replace the data_policies.
+  repeated DataPolicyOption data_policies = 21
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Maximum length of values of this field for STRINGS or BYTES.
+  //
+  // If max_length is not specified, no maximum length constraint is imposed
+  // on this field.
+  //
+  // If type = "STRING", then max_length represents the maximum UTF-8
+  // length of strings in this field.
+  //
+  // If type = "BYTES", then max_length represents the maximum number of
+  // bytes in this field.
+  //
+  // It is invalid to set this field if type ≠ "STRING" and ≠ "BYTES".
+  int64 max_length = 10 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Precision (maximum number of total digits in base 10) and scale
+  // (maximum number of digits in the fractional part in base 10) constraints
+  // for values of this field for NUMERIC or BIGNUMERIC.
+  //
+  // It is invalid to set precision or scale if type ≠ "NUMERIC" and ≠
+  // "BIGNUMERIC".
+  //
+  // If precision and scale are not specified, no value range constraint is
+  // imposed on this field insofar as values are permitted by the type.
+  //
+  // Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+  //
+  // * Precision (P) and scale (S) are specified:
+  //   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+  // * Precision (P) is specified but not scale (and thus scale is
+  //   interpreted to be equal to zero):
+  //   [-10^P + 1, 10^P - 1].
+  //
+  // Acceptable values for precision and scale if both are specified:
+  //
+  // * If type = "NUMERIC":
+  //   1 ≤ precision - scale ≤ 29 and 0 ≤ scale ≤ 9.
+ // * If type = "BIGNUMERIC": + // 1 ≤ precision - scale ≤ 38 and 0 ≤ scale ≤ 38. + // + // Acceptable values for precision if only precision is specified but not + // scale (and thus scale is interpreted to be equal to zero): + // + // * If type = "NUMERIC": 1 ≤ precision ≤ 29. + // * If type = "BIGNUMERIC": 1 ≤ precision ≤ 38. + // + // If scale is specified but not precision, then it is invalid. + int64 precision = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. See documentation for precision. + int64 scale = 12 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the rounding mode to be used when storing values of + // NUMERIC and BIGNUMERIC type. + RoundingMode rounding_mode = 15 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Field collation can be set only when the type of field is STRING. + // The following values are supported: + // + // * 'und:ci': undetermined locale, case insensitive. + // * '': empty string. Default to case-sensitive behavior. + google.protobuf.StringValue collation = 13 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A SQL expression to specify the [default value] + // (https://cloud.google.com/bigquery/docs/default-values) for this field. + google.protobuf.StringValue default_value_expression = 14 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The subtype of the RANGE, if the type of this field is RANGE. If + // the type is RANGE, this field is required. Values for the field element + // type can be the following: + // + // * DATE + // * DATETIME + // * TIMESTAMP + FieldElementType range_element_type = 18 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Definition of the foreign data type. + // Only valid for top-level schema fields (not nested fields). + // If the type is FOREIGN, this field is required. + string foreign_type_definition = 23 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/time_partitioning.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/time_partitioning.proto.baseline new file mode 100755 index 000000000..440a20983 --- /dev/null +++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/time_partitioning.proto.baseline @@ -0,0 +1,44 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "TimePartitioningProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message TimePartitioning { + // Required. The supported types are DAY, HOUR, MONTH, and YEAR, which will + // generate one partition per day, hour, month, and year, respectively. + string type = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. 
+  // A wrapper is used here because 0 is an invalid value.
+  google.protobuf.Int64Value expiration_ms = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If not set, the table is partitioned by pseudo
+  // column '_PARTITIONTIME'; if set, the table is partitioned by this field.
+  // The field must be a top-level TIMESTAMP or DATE field. Its mode must be
+  // NULLABLE or REQUIRED.
+  // A wrapper is used here because an empty string is an invalid value.
+  google.protobuf.StringValue field = 3
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/udf_resource.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/udf_resource.proto.baseline
new file mode 100755
index 000000000..d630bf9c7
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/bigquery/v2/udf_resource.proto.baseline
@@ -0,0 +1,42 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "UdfProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+//
+// This is used for defining User Defined Function (UDF) resources only when
+// using legacy SQL. Users of GoogleSQL should leverage either DDL (e.g.
+// CREATE [TEMPORARY] FUNCTION ... ) or the Routines API to define UDF
+// resources.
+//
+// For additional information on migrating, see:
+// https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions
+message UserDefinedFunctionResource {
+  // [Pick one] A code resource to load from a Google Cloud Storage URI
+  // (gs://bucket/path).
+  google.protobuf.StringValue resource_uri = 1;
+
+  // [Pick one] An inline resource that contains code for a user-defined
+  // function (UDF). Providing an inline code resource is equivalent to
+  // providing a URI for a file containing the same code.
+  google.protobuf.StringValue inline_code = 2;
+}
diff --git a/baselines/bigquery-v2/protos/google/cloud/common_resources.proto.baseline b/baselines/bigquery-v2/protos/google/cloud/common_resources.proto.baseline
new file mode 100755
index 000000000..a2f46cea3
--- /dev/null
+++ b/baselines/bigquery-v2/protos/google/cloud/common_resources.proto.baseline
@@ -0,0 +1,52 @@
+// Copyright 2020 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// This file contains stub messages for common resources in GCP. +// It is not intended to be directly generated, and is instead used by +// other tooling to be able to match common resource patterns. +syntax = "proto3"; + +package google.cloud; + +import "google/api/resource.proto"; + + +option (google.api.resource_definition) = { + type: "cloudresourcemanager.googleapis.com/Project" + pattern: "projects/{project}" +}; + + +option (google.api.resource_definition) = { + type: "cloudresourcemanager.googleapis.com/Organization" + pattern: "organizations/{organization}" +}; + + +option (google.api.resource_definition) = { + type: "cloudresourcemanager.googleapis.com/Folder" + pattern: "folders/{folder}" +}; + + +option (google.api.resource_definition) = { + type: "cloudbilling.googleapis.com/BillingAccount" + pattern: "billingAccounts/{billing_account}" +}; + +option (google.api.resource_definition) = { + type: "locations.googleapis.com/Location" + pattern: "projects/{project}/locations/{location}" +}; + diff --git a/baselines/bigquery-v2/samples/generated/v2/dataset_service.delete_dataset.js.baseline b/baselines/bigquery-v2/samples/generated/v2/dataset_service.delete_dataset.js.baseline new file mode 100644 index 000000000..e54a23f56 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/dataset_service.delete_dataset.js.baseline @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_DatasetService_DeleteDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the dataset being deleted + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of dataset being deleted + */ + // const datasetId = 'abc123' + /** + * If True, delete all the tables in the dataset. + * If False and the dataset contains tables, the request will fail. 
+ * Default is False + */ + // const deleteContents = true + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callDeleteDataset() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const response = await bigqueryClient.deleteDataset(request); + console.log(response); + } + + callDeleteDataset(); + // [END bigquery_v2_generated_DatasetService_DeleteDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/dataset_service.get_dataset.js.baseline b/baselines/bigquery-v2/samples/generated/v2/dataset_service.get_dataset.js.baseline new file mode 100644 index 000000000..e722d561d --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/dataset_service.get_dataset.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_DatasetService_GetDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the requested dataset + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the requested dataset + */ + // const datasetId = 'abc123' + /** + * Optional. Specifies the view that determines which dataset information is + * returned. By default, metadata and ACL information are returned. 
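+   *  (Illustrative note, assumed rather than generated: datasetView takes a
+   *  DatasetView enum value; plausible values include METADATA, ACL, and
+   *  FULL — check the generated types for the authoritative list.)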
+ */ + // const datasetView = {} + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callGetDataset() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const response = await bigqueryClient.getDataset(request); + console.log(response); + } + + callGetDataset(); + // [END bigquery_v2_generated_DatasetService_GetDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/dataset_service.insert_dataset.js.baseline b/baselines/bigquery-v2/samples/generated/v2/dataset_service.insert_dataset.js.baseline new file mode 100644 index 000000000..a87a9ae12 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/dataset_service.insert_dataset.js.baseline @@ -0,0 +1,66 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, dataset) { + // [START bigquery_v2_generated_DatasetService_InsertDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the new dataset + */ + // const projectId = 'abc123' + /** + * Required. 
Datasets resource to use for the new dataset
+   */
+  // const dataset = {}
+
+  // Imports the Bigquery library
+  const {DatasetServiceClient} = require('bigquery').v2;
+
+  // Instantiates a client
+  const bigqueryClient = new DatasetServiceClient();
+
+  async function callInsertDataset() {
+    // Construct request
+    const request = {
+      projectId,
+      dataset,
+    };
+
+    // Run request
+    const response = await bigqueryClient.insertDataset(request);
+    console.log(response);
+  }
+
+  callInsertDataset();
+  // [END bigquery_v2_generated_DatasetService_InsertDataset_async]
+}
+
+process.on('unhandledRejection', err => {
+  console.error(err.message);
+  process.exitCode = 1;
+});
+main(...process.argv.slice(2));
diff --git a/baselines/bigquery-v2/samples/generated/v2/dataset_service.list_datasets.js.baseline b/baselines/bigquery-v2/samples/generated/v2/dataset_service.list_datasets.js.baseline
new file mode 100644
index 000000000..cf5b38f50
--- /dev/null
+++ b/baselines/bigquery-v2/samples/generated/v2/dataset_service.list_datasets.js.baseline
@@ -0,0 +1,87 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+
+
+'use strict';
+
+function main(projectId) {
+  // [START bigquery_v2_generated_DatasetService_ListDatasets_async]
+  /**
+   * This snippet has been automatically generated and should be regarded as a code template only.
+   * It will require modifications to work.
+   * It may require correct/in-range values for request initialization.
+   * TODO(developer): Uncomment these variables before running the sample.
+   */
+  /**
+   *  Required. Project ID of the datasets to be listed
+   */
+  // const projectId = 'abc123'
+  /**
+   *  The maximum number of results to return in a single response page.
+   *  Leverage the page tokens to iterate through the entire collection.
+   */
+  // const maxResults = 1234
+  /**
+   *  Page token, returned by a previous call, to request the next page of
+   *  results
+   */
+  // const pageToken = 'abc123'
+  /**
+   *  Whether to list all datasets, including hidden ones
+   */
+  // const all = true
+  /**
+   *  An expression for filtering the results of the request by label.
+   *  The syntax is `labels.<name>:<value>`.
+   *  Multiple filters can be ANDed together by connecting with a space.
+   *  Example: `labels.department:receiving labels.active`.
+   *  See Filtering datasets using
+   *  labels (https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+   *  for details.
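+   *  (Illustrative example, not part of the proto docs: a filter such as
+   *  `labels.department:shipping labels.env` matches datasets whose
+   *  `department` label equals `shipping` and that also carry an `env`
+   *  label with any value.)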
+ */ + // const filter = 'abc123' + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callListDatasets() { + // Construct request + const request = { + projectId, + }; + + // Run request + const iterable = bigqueryClient.listDatasetsAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListDatasets(); + // [END bigquery_v2_generated_DatasetService_ListDatasets_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/dataset_service.patch_dataset.js.baseline b/baselines/bigquery-v2/samples/generated/v2/dataset_service.patch_dataset.js.baseline new file mode 100644 index 000000000..65948dcc5 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/dataset_service.patch_dataset.js.baseline @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, dataset) { + // [START bigquery_v2_generated_DatasetService_PatchDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the dataset being updated + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the dataset being updated + */ + // const datasetId = 'abc123' + /** + * Required. Datasets resource which will replace or patch the specified + * dataset. 
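+   *  (Sketch of a plausible shape, with field names assumed from the Dataset
+   *  resource rather than taken from this sample:
+   *  { datasetReference: {projectId: 'abc123', datasetId: 'abc123'},
+   *    friendlyName: 'My dataset' }.)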
+ */ + // const dataset = {} + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callPatchDataset() { + // Construct request + const request = { + projectId, + datasetId, + dataset, + }; + + // Run request + const response = await bigqueryClient.patchDataset(request); + console.log(response); + } + + callPatchDataset(); + // [END bigquery_v2_generated_DatasetService_PatchDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/dataset_service.undelete_dataset.js.baseline b/baselines/bigquery-v2/samples/generated/v2/dataset_service.undelete_dataset.js.baseline new file mode 100644 index 000000000..de5b4b62c --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/dataset_service.undelete_dataset.js.baseline @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_DatasetService_UndeleteDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the dataset to be undeleted + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of dataset being deleted + */ + // const datasetId = 'abc123' + /** + * Optional. The exact time when the dataset was deleted. If not specified, + * the most recently deleted version is undeleted. Undeleting a dataset + * using deletion time is not supported. 
+ */ + // const deletionTime = {} + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callUndeleteDataset() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const response = await bigqueryClient.undeleteDataset(request); + console.log(response); + } + + callUndeleteDataset(); + // [END bigquery_v2_generated_DatasetService_UndeleteDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/dataset_service.update_dataset.js.baseline b/baselines/bigquery-v2/samples/generated/v2/dataset_service.update_dataset.js.baseline new file mode 100644 index 000000000..f047b5fea --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/dataset_service.update_dataset.js.baseline @@ -0,0 +1,72 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, dataset) { + // [START bigquery_v2_generated_DatasetService_UpdateDataset_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the dataset being updated + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the dataset being updated + */ + // const datasetId = 'abc123' + /** + * Required. Datasets resource which will replace or patch the specified + * dataset. 
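+   *  (Note: mirroring the BigQuery REST semantics, `update` replaces the
+   *  entire dataset resource, while `patch` only replaces the fields that
+   *  are provided in the submitted dataset resource.)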
+ */ + // const dataset = {} + + // Imports the Bigquery library + const {DatasetServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new DatasetServiceClient(); + + async function callUpdateDataset() { + // Construct request + const request = { + projectId, + datasetId, + dataset, + }; + + // Run request + const response = await bigqueryClient.updateDataset(request); + console.log(response); + } + + callUpdateDataset(); + // [END bigquery_v2_generated_DatasetService_UpdateDataset_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/job_service.cancel_job.js.baseline b/baselines/bigquery-v2/samples/generated/v2/job_service.cancel_job.js.baseline new file mode 100644 index 000000000..7a36ce1cf --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/job_service.cancel_job.js.baseline @@ -0,0 +1,77 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, jobId) { + // [START bigquery_v2_generated_JobService_CancelJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the job to cancel + */ + // const projectId = 'abc123' + /** + * Required. Job ID of the job to cancel + */ + // const jobId = 'abc123' + /** + * The geographic location of the job. You must specify the location to run + * the job for the following scenarios: + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. 
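+   *  (Illustrative values: a multi-region such as 'US' or 'EU', or a single
+   *  region such as 'us-central1'.)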
+ */ + // const location = 'abc123' + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callCancelJob() { + // Construct request + const request = { + projectId, + jobId, + }; + + // Run request + const response = await bigqueryClient.cancelJob(request); + console.log(response); + } + + callCancelJob(); + // [END bigquery_v2_generated_JobService_CancelJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/job_service.delete_job.js.baseline b/baselines/bigquery-v2/samples/generated/v2/job_service.delete_job.js.baseline new file mode 100644 index 000000000..5693afaa9 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/job_service.delete_job.js.baseline @@ -0,0 +1,75 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, jobId) { + // [START bigquery_v2_generated_JobService_DeleteJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the job for which metadata is to be deleted. + */ + // const projectId = 'abc123' + /** + * Required. Job ID of the job for which metadata is to be deleted. If this is + * a parent job which has child jobs, the metadata from all child jobs will be + * deleted as well. Direct deletion of the metadata of child jobs is not + * allowed. + */ + // const jobId = 'abc123' + /** + * The geographic location of the job. Required. + * See details at: + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. 
+ */ + // const location = 'abc123' + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callDeleteJob() { + // Construct request + const request = { + projectId, + jobId, + }; + + // Run request + const response = await bigqueryClient.deleteJob(request); + console.log(response); + } + + callDeleteJob(); + // [END bigquery_v2_generated_JobService_DeleteJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/job_service.get_job.js.baseline b/baselines/bigquery-v2/samples/generated/v2/job_service.get_job.js.baseline new file mode 100644 index 000000000..1d5b9f2bf --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/job_service.get_job.js.baseline @@ -0,0 +1,77 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, jobId) { + // [START bigquery_v2_generated_JobService_GetJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the requested job. + */ + // const projectId = 'abc123' + /** + * Required. Job ID of the requested job. + */ + // const jobId = 'abc123' + /** + * The geographic location of the job. You must specify the location to run + * the job for the following scenarios: + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. 
+ */ + // const location = 'abc123' + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callGetJob() { + // Construct request + const request = { + projectId, + jobId, + }; + + // Run request + const response = await bigqueryClient.getJob(request); + console.log(response); + } + + callGetJob(); + // [END bigquery_v2_generated_JobService_GetJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/job_service.get_query_results.js.baseline b/baselines/bigquery-v2/samples/generated/v2/job_service.get_query_results.js.baseline new file mode 100644 index 000000000..cd708d171 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/job_service.get_query_results.js.baseline @@ -0,0 +1,109 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, jobId) { + // [START bigquery_v2_generated_JobService_GetQueryResults_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the query job. + */ + // const projectId = 'abc123' + /** + * Required. Job ID of the query job. + */ + // const jobId = 'abc123' + /** + * Zero-based index of the starting row. + */ + // const startIndex = {} + /** + * Page token, returned by a previous call, to request the next page of + * results. + */ + // const pageToken = 'abc123' + /** + * Maximum number of results to read. + */ + // const maxResults = {} + /** + * Optional: Specifies the maximum amount of time, in milliseconds, that the + * client is willing to wait for the query to complete. By default, this limit + * is 10 seconds (10,000 milliseconds). If the query is complete, the + * jobComplete field in the response is true. If the query has not yet + * completed, jobComplete is false. + * You can request a longer timeout period in the timeoutMs field. However, + * the call is not guaranteed to wait for the specified timeout; it typically + * returns after around 200 seconds (200,000 milliseconds), even if the query + * is not complete. + * If jobComplete is false, you can continue to wait for the query to complete + * by calling the getQueryResults method until the jobComplete field in the + * getQueryResults response is true. + */ + // const timeoutMs = {} + /** + * The geographic location of the job. 
You must specify the location to run + * the job for the following scenarios: + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + */ + // const location = 'abc123' + /** + * Optional. Output format adjustments. + */ + // const formatOptions = {} + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callGetQueryResults() { + // Construct request + const request = { + projectId, + jobId, + }; + + // Run request + const response = await bigqueryClient.getQueryResults(request); + console.log(response); + } + + callGetQueryResults(); + // [END bigquery_v2_generated_JobService_GetQueryResults_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/job_service.insert_job.js.baseline b/baselines/bigquery-v2/samples/generated/v2/job_service.insert_job.js.baseline new file mode 100644 index 000000000..544a889ae --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/job_service.insert_job.js.baseline @@ -0,0 +1,64 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main() { + // [START bigquery_v2_generated_JobService_InsertJob_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Project ID of project that will be billed for the job. + */ + // const projectId = 'abc123' + /** + * Jobs resource to insert. 
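+   *  (Sketch of a minimal query job, with field names assumed from the Job
+   *  resource rather than taken from this sample:
+   *  { configuration: { query: { query: 'SELECT 1', useLegacySql: false } } }.)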
+ */ + // const job = {} + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callInsertJob() { + // Construct request + const request = { + }; + + // Run request + const response = await bigqueryClient.insertJob(request); + console.log(response); + } + + callInsertJob(); + // [END bigquery_v2_generated_JobService_InsertJob_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/job_service.list_jobs.js.baseline b/baselines/bigquery-v2/samples/generated/v2/job_service.list_jobs.js.baseline new file mode 100644 index 000000000..ed07adc02 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/job_service.list_jobs.js.baseline @@ -0,0 +1,99 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main() { + // [START bigquery_v2_generated_JobService_ListJobs_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Project ID of the jobs to list. + */ + // const projectId = 'abc123' + /** + * Whether to display jobs owned by all users in the project. Default False. + */ + // const allUsers = true + /** + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + */ + // const maxResults = 1234 + /** + * Min value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created after or at this timestamp are returned. + */ + // const minCreationTime = 1234 + /** + * Max value for job creation time, in milliseconds since the POSIX epoch. + * If set, only jobs created before or at this timestamp are returned. + */ + // const maxCreationTime = {} + /** + * Page token, returned by a previous call, to request the next page of + * results. + */ + // const pageToken = 'abc123' + /** + * Restrict information returned to a set of selected fields + */ + // const projection = {} + /** + * Filter for job state + */ + // const stateFilter = [1,2,3,4] + /** + * If set, show only child jobs of the specified parent. Otherwise, show all + * top-level jobs. 
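+   *  (Context: scripts and multi-statement queries run as a parent job with
+   *  child jobs; setting parentJobId lists only the children of that parent
+   *  job.)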
+ */ + // const parentJobId = 'abc123' + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callListJobs() { + // Construct request + const request = { + }; + + // Run request + const iterable = bigqueryClient.listJobsAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListJobs(); + // [END bigquery_v2_generated_JobService_ListJobs_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/job_service.query.js.baseline b/baselines/bigquery-v2/samples/generated/v2/job_service.query.js.baseline new file mode 100644 index 000000000..24c7baf73 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/job_service.query.js.baseline @@ -0,0 +1,65 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId) { + // [START bigquery_v2_generated_JobService_Query_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the query request. + */ + // const projectId = 'abc123' + /** + * The query request body. + */ + // const queryRequest = {} + + // Imports the Bigquery library + const {JobServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new JobServiceClient(); + + async function callQuery() { + // Construct request + const request = { + projectId, + }; + + // Run request + const response = await bigqueryClient.query(request); + console.log(response); + } + + callQuery(); + // [END bigquery_v2_generated_JobService_Query_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/model_service.delete_model.js.baseline b/baselines/bigquery-v2/samples/generated/v2/model_service.delete_model.js.baseline new file mode 100644 index 000000000..d857afc39 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/model_service.delete_model.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, modelId) { + // [START bigquery_v2_generated_ModelService_DeleteModel_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the model to delete. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the model to delete. + */ + // const datasetId = 'abc123' + /** + * Required. Model ID of the model to delete. + */ + // const modelId = 'abc123' + + // Imports the Bigquery library + const {ModelServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ModelServiceClient(); + + async function callDeleteModel() { + // Construct request + const request = { + projectId, + datasetId, + modelId, + }; + + // Run request + const response = await bigqueryClient.deleteModel(request); + console.log(response); + } + + callDeleteModel(); + // [END bigquery_v2_generated_ModelService_DeleteModel_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/model_service.get_model.js.baseline b/baselines/bigquery-v2/samples/generated/v2/model_service.get_model.js.baseline new file mode 100644 index 000000000..ab2dae8cc --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/model_service.get_model.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, modelId) { + // [START bigquery_v2_generated_ModelService_GetModel_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. 
+ */ + /** + * Required. Project ID of the requested model. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the requested model. + */ + // const datasetId = 'abc123' + /** + * Required. Model ID of the requested model. + */ + // const modelId = 'abc123' + + // Imports the Bigquery library + const {ModelServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ModelServiceClient(); + + async function callGetModel() { + // Construct request + const request = { + projectId, + datasetId, + modelId, + }; + + // Run request + const response = await bigqueryClient.getModel(request); + console.log(response); + } + + callGetModel(); + // [END bigquery_v2_generated_ModelService_GetModel_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/model_service.list_models.js.baseline b/baselines/bigquery-v2/samples/generated/v2/model_service.list_models.js.baseline new file mode 100644 index 000000000..8bc0d3228 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/model_service.list_models.js.baseline @@ -0,0 +1,78 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_ModelService_ListModels_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the models to list. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the models to list. + */ + // const datasetId = 'abc123' + /** + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. 
+ */ + // const maxResults = 1234 + /** + * Page token, returned by a previous call to request the next page of + * results + */ + // const pageToken = 'abc123' + + // Imports the Bigquery library + const {ModelServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ModelServiceClient(); + + async function callListModels() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const iterable = bigqueryClient.listModelsAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListModels(); + // [END bigquery_v2_generated_ModelService_ListModels_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/model_service.patch_model.js.baseline b/baselines/bigquery-v2/samples/generated/v2/model_service.patch_model.js.baseline new file mode 100644 index 000000000..b0c0b4e54 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/model_service.patch_model.js.baseline @@ -0,0 +1,78 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, modelId, model) { + // [START bigquery_v2_generated_ModelService_PatchModel_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the model to patch. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the model to patch. + */ + // const datasetId = 'abc123' + /** + * Required. Model ID of the model to patch. + */ + // const modelId = 'abc123' + /** + * Required. Patched model. + * Follows RFC5789 patch semantics. Missing fields are not updated. + * To clear a field, explicitly set to default value. 
+ */ + // const model = {} + + // Imports the Bigquery library + const {ModelServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ModelServiceClient(); + + async function callPatchModel() { + // Construct request + const request = { + projectId, + datasetId, + modelId, + model, + }; + + // Run request + const response = await bigqueryClient.patchModel(request); + console.log(response); + } + + callPatchModel(); + // [END bigquery_v2_generated_ModelService_PatchModel_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/project_service.get_service_account.js.baseline b/baselines/bigquery-v2/samples/generated/v2/project_service.get_service_account.js.baseline new file mode 100644 index 000000000..3502dccac --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/project_service.get_service_account.js.baseline @@ -0,0 +1,61 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId) { + // [START bigquery_v2_generated_ProjectService_GetServiceAccount_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. ID of the project. + */ + // const projectId = 'abc123' + + // Imports the Bigquery library + const {ProjectServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new ProjectServiceClient(); + + async function callGetServiceAccount() { + // Construct request + const request = { + projectId, + }; + + // Run request + const response = await bigqueryClient.getServiceAccount(request); + console.log(response); + } + + callGetServiceAccount(); + // [END bigquery_v2_generated_ProjectService_GetServiceAccount_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/routine_service.delete_routine.js.baseline b/baselines/bigquery-v2/samples/generated/v2/routine_service.delete_routine.js.baseline new file mode 100644 index 000000000..c5fcd7f41 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/routine_service.delete_routine.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routineId) { + // [START bigquery_v2_generated_RoutineService_DeleteRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the routine to delete + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the routine to delete + */ + // const datasetId = 'abc123' + /** + * Required. Routine ID of the routine to delete + */ + // const routineId = 'abc123' + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callDeleteRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routineId, + }; + + // Run request + const response = await bigqueryClient.deleteRoutine(request); + console.log(response); + } + + callDeleteRoutine(); + // [END bigquery_v2_generated_RoutineService_DeleteRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/routine_service.get_routine.js.baseline b/baselines/bigquery-v2/samples/generated/v2/routine_service.get_routine.js.baseline new file mode 100644 index 000000000..af1f00d7f --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/routine_service.get_routine.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routineId) { + // [START bigquery_v2_generated_RoutineService_GetRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. 
+ * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the requested routine + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the requested routine + */ + // const datasetId = 'abc123' + /** + * Required. Routine ID of the requested routine + */ + // const routineId = 'abc123' + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callGetRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routineId, + }; + + // Run request + const response = await bigqueryClient.getRoutine(request); + console.log(response); + } + + callGetRoutine(); + // [END bigquery_v2_generated_RoutineService_GetRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/routine_service.insert_routine.js.baseline b/baselines/bigquery-v2/samples/generated/v2/routine_service.insert_routine.js.baseline new file mode 100644 index 000000000..a6006cb1f --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/routine_service.insert_routine.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routine) { + // [START bigquery_v2_generated_RoutineService_InsertRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the new routine + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the new routine + */ + // const datasetId = 'abc123' + /** + * Required. 
A routine resource to insert + */ + // const routine = {} + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callInsertRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routine, + }; + + // Run request + const response = await bigqueryClient.insertRoutine(request); + console.log(response); + } + + callInsertRoutine(); + // [END bigquery_v2_generated_RoutineService_InsertRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/routine_service.list_routines.js.baseline b/baselines/bigquery-v2/samples/generated/v2/routine_service.list_routines.js.baseline new file mode 100644 index 000000000..100f58e90 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/routine_service.list_routines.js.baseline @@ -0,0 +1,84 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_RoutineService_ListRoutines_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the routines to list + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the routines to list + */ + // const datasetId = 'abc123' + /** + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + */ + // const maxResults = 1234 + /** + * Page token, returned by a previous call, to request the next page of + * results + */ + // const pageToken = 'abc123' + /** + * If set, then only the Routines matching this filter are returned. + * The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + * is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. 
+ */ + // const filter = 'abc123' + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callListRoutines() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const iterable = bigqueryClient.listRoutinesAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListRoutines(); + // [END bigquery_v2_generated_RoutineService_ListRoutines_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/routine_service.patch_routine.js.baseline b/baselines/bigquery-v2/samples/generated/v2/routine_service.patch_routine.js.baseline new file mode 100644 index 000000000..693c45faf --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/routine_service.patch_routine.js.baseline @@ -0,0 +1,83 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routineId, routine) { + // [START bigquery_v2_generated_RoutineService_PatchRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the routine to update + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the routine to update + */ + // const datasetId = 'abc123' + /** + * Required. Routine ID of the routine to update + */ + // const routineId = 'abc123' + /** + * Required. A routine resource which will be used to partially + * update the specified routine + */ + // const routine = {} + /** + * Only the Routine fields in the field mask are updated + * by the given routine. Repeated routine fields will be fully replaced + * if contained in the field mask. 
+ */ + // const fieldMask = {} + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callPatchRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routineId, + routine, + }; + + // Run request + const response = await bigqueryClient.patchRoutine(request); + console.log(response); + } + + callPatchRoutine(); + // [END bigquery_v2_generated_RoutineService_PatchRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/routine_service.update_routine.js.baseline b/baselines/bigquery-v2/samples/generated/v2/routine_service.update_routine.js.baseline new file mode 100644 index 000000000..af3a54341 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/routine_service.update_routine.js.baseline @@ -0,0 +1,76 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, routineId, routine) { + // [START bigquery_v2_generated_RoutineService_UpdateRoutine_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the routine to update + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the routine to update + */ + // const datasetId = 'abc123' + /** + * Required. Routine ID of the routine to update + */ + // const routineId = 'abc123' + /** + * Required. 
A routine resource which will replace the specified routine + */ + // const routine = {} + + // Imports the Bigquery library + const {RoutineServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RoutineServiceClient(); + + async function callUpdateRoutine() { + // Construct request + const request = { + projectId, + datasetId, + routineId, + routine, + }; + + // Run request + const response = await bigqueryClient.updateRoutine(request); + console.log(response); + } + + callUpdateRoutine(); + // [END bigquery_v2_generated_RoutineService_UpdateRoutine_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/row_access_policy_service.list_row_access_policies.js.baseline b/baselines/bigquery-v2/samples/generated/v2/row_access_policy_service.list_row_access_policies.js.baseline new file mode 100644 index 000000000..3536a64dc --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/row_access_policy_service.list_row_access_policies.js.baseline @@ -0,0 +1,83 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId) { + // [START bigquery_v2_generated_RowAccessPolicyService_ListRowAccessPolicies_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the row access policies to list. + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of row access policies to list. + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the table to list row access policies. + */ + // const tableId = 'abc123' + /** + * Page token, returned by a previous call, to request the next page of + * results. + */ + // const pageToken = 'abc123' + /** + * The maximum number of results to return in a single response page. Leverage + * the page tokens to iterate through the entire collection. 
+ */ + // const pageSize = 1234 + + // Imports the Bigquery library + const {RowAccessPolicyServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new RowAccessPolicyServiceClient(); + + async function callListRowAccessPolicies() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + }; + + // Run request + const iterable = bigqueryClient.listRowAccessPoliciesAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListRowAccessPolicies(); + // [END bigquery_v2_generated_RowAccessPolicyService_ListRowAccessPolicies_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/snippet_metadata_google.cloud.bigquery.v2.json.baseline b/baselines/bigquery-v2/samples/generated/v2/snippet_metadata_google.cloud.bigquery.v2.json.baseline new file mode 100644 index 000000000..917111d47 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/snippet_metadata_google.cloud.bigquery.v2.json.baseline @@ -0,0 +1,1647 @@ +{ + "clientLibrary": { + "name": "nodejs-bigquery", + "version": "0.1.0", + "language": "TYPESCRIPT", + "apis": [ + { + "id": "google.cloud.bigquery.v2", + "version": "v2" + } + ] + }, + "snippets": [ + { + "regionTag": "bigquery_v2_generated_DatasetService_GetDataset_async", + "title": "DatasetService getDataset Sample", + "origin": "API_DEFINITION", + "description": " Returns the dataset specified by datasetID.", + "canonical": true, + "file": "dataset_service.get_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.GetDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_view", + "type": ".google.cloud.bigquery.v2.GetDatasetRequest.DatasetView" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "GetDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.GetDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_InsertDataset_async", + "title": "DatasetService insertDataset Sample", + "origin": "API_DEFINITION", + "description": " Creates a new empty dataset.", + "canonical": true, + "file": "dataset_service.insert_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 58, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InsertDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.InsertDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset", + "type": ".google.cloud.bigquery.v2.Dataset" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "InsertDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.InsertDataset", + "service": { + "shortName": 
"DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_PatchDataset_async", + "title": "DatasetService patchDataset Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports RFC5789 patch semantics.", + "canonical": true, + "file": "dataset_service.patch_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "PatchDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.PatchDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset", + "type": ".google.cloud.bigquery.v2.Dataset" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "PatchDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.PatchDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_UpdateDataset_async", + "title": "DatasetService updateDataset Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.", + "canonical": true, + "file": "dataset_service.update_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.UpdateDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset", + "type": ".google.cloud.bigquery.v2.Dataset" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "UpdateDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.UpdateDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_DeleteDataset_async", + "title": "DatasetService deleteDataset Sample", + "origin": "API_DEFINITION", + "description": " Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. 
Immediately after deletion, you can create another dataset with the same name.", + "canonical": true, + "file": "dataset_service.delete_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.DeleteDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "delete_contents", + "type": "TYPE_BOOL" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "DeleteDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.DeleteDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_ListDatasets_async", + "title": "DatasetService listDatasets Sample", + "origin": "API_DEFINITION", + "description": " Lists all datasets in the specified project to which the user has been granted the READER dataset role.", + "canonical": true, + "file": "dataset_service.list_datasets.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 79, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListDatasets", + "fullName": "google.cloud.bigquery.v2.DatasetService.ListDatasets", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "all", + "type": "TYPE_BOOL" + }, + { + "name": "filter", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.DatasetList", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "ListDatasets", + "fullName": "google.cloud.bigquery.v2.DatasetService.ListDatasets", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_DatasetService_UndeleteDataset_async", + "title": "DatasetService undeleteDataset Sample", + "origin": "API_DEFINITION", + "description": " Undeletes a dataset which is within time travel window based on datasetId. 
If a time is specified, the dataset version deleted at that time is undeleted, else the last live version is undeleted.", + "canonical": true, + "file": "dataset_service.undelete_dataset.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 64, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UndeleteDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.UndeleteDataset", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "deletion_time", + "type": ".google.protobuf.Timestamp" + } + ], + "resultType": ".google.cloud.bigquery.v2.Dataset", + "client": { + "shortName": "DatasetServiceClient", + "fullName": "google.cloud.bigquery.v2.DatasetServiceClient" + }, + "method": { + "shortName": "UndeleteDataset", + "fullName": "google.cloud.bigquery.v2.DatasetService.UndeleteDataset", + "service": { + "shortName": "DatasetService", + "fullName": "google.cloud.bigquery.v2.DatasetService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_CancelJob_async", + "title": "DatasetService cancelJob Sample", + "origin": "API_DEFINITION", + "description": " Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully. Cancelled jobs may still incur costs.", + "canonical": true, + "file": "job_service.cancel_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 69, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "CancelJob", + "fullName": "google.cloud.bigquery.v2.JobService.CancelJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + }, + { + "name": "location", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.JobCancelResponse", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "CancelJob", + "fullName": "google.cloud.bigquery.v2.JobService.CancelJob", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_GetJob_async", + "title": "DatasetService getJob Sample", + "origin": "API_DEFINITION", + "description": " Returns information about a specific job. Job information is available for a six month period after creation. 
Requires that you're the person who ran the job, or have the Is Owner project role.", + "canonical": true, + "file": "job_service.get_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 69, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetJob", + "fullName": "google.cloud.bigquery.v2.JobService.GetJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + }, + { + "name": "location", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.Job", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "GetJob", + "fullName": "google.cloud.bigquery.v2.JobService.GetJob", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_InsertJob_async", + "title": "DatasetService insertJob Sample", + "origin": "API_DEFINITION", + "description": " Starts a new asynchronous job. This API has two different kinds of endpoint URIs, as this method supports a variety of use cases. * The *Metadata* URI is used for most interactions, as it accepts the job configuration directly. * The *Upload* URI is ONLY for the case when you're sending both a load job configuration and a data stream together. In this case, the Upload URI accepts the job configuration and the data as two distinct multipart MIME parts.", + "canonical": true, + "file": "job_service.insert_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 56, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InsertJob", + "fullName": "google.cloud.bigquery.v2.JobService.InsertJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job", + "type": ".google.cloud.bigquery.v2.Job" + } + ], + "resultType": ".google.cloud.bigquery.v2.Job", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "InsertJob", + "fullName": "google.cloud.bigquery.v2.JobService.InsertJob", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_DeleteJob_async", + "title": "DatasetService deleteJob Sample", + "origin": "API_DEFINITION", + "description": " Requests the deletion of the metadata of a job. 
This call returns when the job's metadata is deleted.", + "canonical": true, + "file": "job_service.delete_job.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 67, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteJob", + "fullName": "google.cloud.bigquery.v2.JobService.DeleteJob", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + }, + { + "name": "location", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "DeleteJob", + "fullName": "google.cloud.bigquery.v2.JobService.DeleteJob", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_ListJobs_async", + "title": "DatasetService listJobs Sample", + "origin": "API_DEFINITION", + "description": " Lists all jobs that you started in the specified project. Job information is available for a six month period after creation. The job list is sorted in reverse chronological order, by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.", + "canonical": true, + "file": "job_service.list_jobs.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 91, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListJobs", + "fullName": "google.cloud.bigquery.v2.JobService.ListJobs", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "all_users", + "type": "TYPE_BOOL" + }, + { + "name": "max_results", + "type": ".google.protobuf.Int32Value" + }, + { + "name": "min_creation_time", + "type": "TYPE_UINT64" + }, + { + "name": "max_creation_time", + "type": ".google.protobuf.UInt64Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "projection", + "type": ".google.cloud.bigquery.v2.ListJobsRequest.Projection" + }, + { + "name": "state_filter", + "type": "TYPE_ENUM[]" + }, + { + "name": "parent_job_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.JobList", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "ListJobs", + "fullName": "google.cloud.bigquery.v2.JobService.ListJobs", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_GetQueryResults_async", + "title": "DatasetService getQueryResults Sample", + "origin": "API_DEFINITION", + "description": " RPC to get the results of a query job.", + "canonical": true, + "file": "job_service.get_query_results.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 101, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetQueryResults", + "fullName": "google.cloud.bigquery.v2.JobService.GetQueryResults", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "job_id", + "type": "TYPE_STRING" + }, + { + "name": "start_index", + "type": ".google.protobuf.UInt64Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": 
"timeout_ms", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "location", + "type": "TYPE_STRING" + }, + { + "name": "format_options", + "type": ".google.cloud.bigquery.v2.DataFormatOptions" + } + ], + "resultType": ".google.cloud.bigquery.v2.GetQueryResultsResponse", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "GetQueryResults", + "fullName": "google.cloud.bigquery.v2.JobService.GetQueryResults", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_JobService_Query_async", + "title": "DatasetService query Sample", + "origin": "API_DEFINITION", + "description": " Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.", + "canonical": true, + "file": "job_service.query.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 57, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "Query", + "fullName": "google.cloud.bigquery.v2.JobService.Query", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "query_request", + "type": ".google.cloud.bigquery.v2.QueryRequest" + } + ], + "resultType": ".google.cloud.bigquery.v2.QueryResponse", + "client": { + "shortName": "JobServiceClient", + "fullName": "google.cloud.bigquery.v2.JobServiceClient" + }, + "method": { + "shortName": "Query", + "fullName": "google.cloud.bigquery.v2.JobService.Query", + "service": { + "shortName": "JobService", + "fullName": "google.cloud.bigquery.v2.JobService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ModelService_GetModel_async", + "title": "DatasetService getModel Sample", + "origin": "API_DEFINITION", + "description": " Gets the specified model resource by model ID.", + "canonical": true, + "file": "model_service.get_model.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetModel", + "fullName": "google.cloud.bigquery.v2.ModelService.GetModel", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "model_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.Model", + "client": { + "shortName": "ModelServiceClient", + "fullName": "google.cloud.bigquery.v2.ModelServiceClient" + }, + "method": { + "shortName": "GetModel", + "fullName": "google.cloud.bigquery.v2.ModelService.GetModel", + "service": { + "shortName": "ModelService", + "fullName": "google.cloud.bigquery.v2.ModelService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ModelService_ListModels_async", + "title": "DatasetService listModels Sample", + "origin": "API_DEFINITION", + "description": " Lists all models in the specified dataset. Requires the READER dataset role. 
After retrieving the list of models, you can get information about a particular model by calling the models.get method.", + "canonical": true, + "file": "model_service.list_models.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 70, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListModels", + "fullName": "google.cloud.bigquery.v2.ModelService.ListModels", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.ListModelsResponse", + "client": { + "shortName": "ModelServiceClient", + "fullName": "google.cloud.bigquery.v2.ModelServiceClient" + }, + "method": { + "shortName": "ListModels", + "fullName": "google.cloud.bigquery.v2.ModelService.ListModels", + "service": { + "shortName": "ModelService", + "fullName": "google.cloud.bigquery.v2.ModelService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ModelService_PatchModel_async", + "title": "DatasetService patchModel Sample", + "origin": "API_DEFINITION", + "description": " Patch specific fields in the specified model.", + "canonical": true, + "file": "model_service.patch_model.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 70, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "PatchModel", + "fullName": "google.cloud.bigquery.v2.ModelService.PatchModel", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "model_id", + "type": "TYPE_STRING" + }, + { + "name": "model", + "type": ".google.cloud.bigquery.v2.Model" + } + ], + "resultType": ".google.cloud.bigquery.v2.Model", + "client": { + "shortName": "ModelServiceClient", + "fullName": "google.cloud.bigquery.v2.ModelServiceClient" + }, + "method": { + "shortName": "PatchModel", + "fullName": "google.cloud.bigquery.v2.ModelService.PatchModel", + "service": { + "shortName": "ModelService", + "fullName": "google.cloud.bigquery.v2.ModelService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ModelService_DeleteModel_async", + "title": "DatasetService deleteModel Sample", + "origin": "API_DEFINITION", + "description": " Deletes the model specified by modelId from the dataset.", + "canonical": true, + "file": "model_service.delete_model.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteModel", + "fullName": "google.cloud.bigquery.v2.ModelService.DeleteModel", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "model_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "ModelServiceClient", + "fullName": "google.cloud.bigquery.v2.ModelServiceClient" + }, + "method": { + "shortName": "DeleteModel", + "fullName": "google.cloud.bigquery.v2.ModelService.DeleteModel", + "service": { + "shortName": "ModelService", + "fullName": "google.cloud.bigquery.v2.ModelService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_ProjectService_GetServiceAccount_async", + "title": "DatasetService getServiceAccount Sample", + "origin": "API_DEFINITION", + 
"description": " RPC to get the service account for a project used for interactions with Google Cloud KMS", + "canonical": true, + "file": "project_service.get_service_account.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 53, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetServiceAccount", + "fullName": "google.cloud.bigquery.v2.ProjectService.GetServiceAccount", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.GetServiceAccountResponse", + "client": { + "shortName": "ProjectServiceClient", + "fullName": "google.cloud.bigquery.v2.ProjectServiceClient" + }, + "method": { + "shortName": "GetServiceAccount", + "fullName": "google.cloud.bigquery.v2.ProjectService.GetServiceAccount", + "service": { + "shortName": "ProjectService", + "fullName": "google.cloud.bigquery.v2.ProjectService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_GetRoutine_async", + "title": "DatasetService getRoutine Sample", + "origin": "API_DEFINITION", + "description": " Gets the specified routine resource by routine ID.", + "canonical": true, + "file": "routine_service.get_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.GetRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.Routine", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "GetRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.GetRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_InsertRoutine_async", + "title": "DatasetService insertRoutine Sample", + "origin": "API_DEFINITION", + "description": " Creates a new routine in the dataset.", + "canonical": true, + "file": "routine_service.insert_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InsertRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.InsertRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine", + "type": ".google.cloud.bigquery.v2.Routine" + } + ], + "resultType": ".google.cloud.bigquery.v2.Routine", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "InsertRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.InsertRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_UpdateRoutine_async", + "title": "DatasetService updateRoutine Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing routine. 
The update method replaces the entire Routine resource.", + "canonical": true, + "file": "routine_service.update_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 68, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.UpdateRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine_id", + "type": "TYPE_STRING" + }, + { + "name": "routine", + "type": ".google.cloud.bigquery.v2.Routine" + } + ], + "resultType": ".google.cloud.bigquery.v2.Routine", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "UpdateRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.UpdateRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_PatchRoutine_async", + "title": "RoutineService patchRoutine Sample", + "origin": "API_DEFINITION", + "description": " Patches information in an existing routine. The patch method does a partial update to an existing Routine resource.", + "canonical": true, + "file": "routine_service.patch_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 75, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "PatchRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.PatchRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine_id", + "type": "TYPE_STRING" + }, + { + "name": "routine", + "type": ".google.cloud.bigquery.v2.Routine" + }, + { + "name": "field_mask", + "type": ".google.protobuf.FieldMask" + } + ], + "resultType": ".google.cloud.bigquery.v2.Routine", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "PatchRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.PatchRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RoutineService_DeleteRoutine_async", + "title": "RoutineService deleteRoutine Sample", + "origin": "API_DEFINITION", + "description": " Deletes the routine specified by routineId from the dataset.", + "canonical": true, + "file": "routine_service.delete_routine.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.DeleteRoutine", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "routine_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "DeleteRoutine", + "fullName": "google.cloud.bigquery.v2.RoutineService.DeleteRoutine", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { +
"regionTag": "bigquery_v2_generated_RoutineService_ListRoutines_async", + "title": "DatasetService listRoutines Sample", + "origin": "API_DEFINITION", + "description": " Lists all routines in the specified dataset. Requires the READER dataset role.", + "canonical": true, + "file": "routine_service.list_routines.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 76, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListRoutines", + "fullName": "google.cloud.bigquery.v2.RoutineService.ListRoutines", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "filter", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.ListRoutinesResponse", + "client": { + "shortName": "RoutineServiceClient", + "fullName": "google.cloud.bigquery.v2.RoutineServiceClient" + }, + "method": { + "shortName": "ListRoutines", + "fullName": "google.cloud.bigquery.v2.RoutineService.ListRoutines", + "service": { + "shortName": "RoutineService", + "fullName": "google.cloud.bigquery.v2.RoutineService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_RowAccessPolicyService_ListRowAccessPolicies_async", + "title": "DatasetService listRowAccessPolicies Sample", + "origin": "API_DEFINITION", + "description": " Lists all row access policies on the specified table.", + "canonical": true, + "file": "row_access_policy_service.list_row_access_policies.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 75, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListRowAccessPolicies", + "fullName": "google.cloud.bigquery.v2.RowAccessPolicyService.ListRowAccessPolicies", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + }, + { + "name": "page_size", + "type": "TYPE_INT32" + } + ], + "resultType": ".google.cloud.bigquery.v2.ListRowAccessPoliciesResponse", + "client": { + "shortName": "RowAccessPolicyServiceClient", + "fullName": "google.cloud.bigquery.v2.RowAccessPolicyServiceClient" + }, + "method": { + "shortName": "ListRowAccessPolicies", + "fullName": "google.cloud.bigquery.v2.RowAccessPolicyService.ListRowAccessPolicies", + "service": { + "shortName": "RowAccessPolicyService", + "fullName": "google.cloud.bigquery.v2.RowAccessPolicyService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_GetTable_async", + "title": "DatasetService getTable Sample", + "origin": "API_DEFINITION", + "description": " Gets the specified table resource by table ID. 
This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.", + "canonical": true, + "file": "table_service.get_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 78, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "GetTable", + "fullName": "google.cloud.bigquery.v2.TableService.GetTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + }, + { + "name": "selected_fields", + "type": "TYPE_STRING" + }, + { + "name": "view", + "type": ".google.cloud.bigquery.v2.GetTableRequest.TableMetadataView" + } + ], + "resultType": ".google.cloud.bigquery.v2.Table", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "GetTable", + "fullName": "google.cloud.bigquery.v2.TableService.GetTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_InsertTable_async", + "title": "TableService insertTable Sample", + "origin": "API_DEFINITION", + "description": " Creates a new, empty table in the dataset.", + "canonical": true, + "file": "table_service.insert_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "InsertTable", + "fullName": "google.cloud.bigquery.v2.TableService.InsertTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table", + "type": ".google.cloud.bigquery.v2.Table" + } + ], + "resultType": ".google.cloud.bigquery.v2.Table", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "InsertTable", + "fullName": "google.cloud.bigquery.v2.TableService.InsertTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_PatchTable_async", + "title": "TableService patchTable Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.
This method supports RFC5789 patch semantics.", + "canonical": true, + "file": "table_service.patch_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 72, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "PatchTable", + "fullName": "google.cloud.bigquery.v2.TableService.PatchTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + }, + { + "name": "table", + "type": ".google.cloud.bigquery.v2.Table" + }, + { + "name": "autodetect_schema", + "type": "TYPE_BOOL" + } + ], + "resultType": ".google.cloud.bigquery.v2.Table", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "PatchTable", + "fullName": "google.cloud.bigquery.v2.TableService.PatchTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_UpdateTable_async", + "title": "TableService updateTable Sample", + "origin": "API_DEFINITION", + "description": " Updates information in an existing table. The update method replaces the entire Table resource, whereas the patch method only replaces fields that are provided in the submitted Table resource.", + "canonical": true, + "file": "table_service.update_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 72, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "UpdateTable", + "fullName": "google.cloud.bigquery.v2.TableService.UpdateTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + }, + { + "name": "table", + "type": ".google.cloud.bigquery.v2.Table" + }, + { + "name": "autodetect_schema", + "type": "TYPE_BOOL" + } + ], + "resultType": ".google.cloud.bigquery.v2.Table", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "UpdateTable", + "fullName": "google.cloud.bigquery.v2.TableService.UpdateTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_DeleteTable_async", + "title": "TableService deleteTable Sample", + "origin": "API_DEFINITION", + "description": " Deletes the table specified by tableId from the dataset.
If the table contains data, all the data will be deleted.", + "canonical": true, + "file": "table_service.delete_table.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 63, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "DeleteTable", + "fullName": "google.cloud.bigquery.v2.TableService.DeleteTable", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "table_id", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.protobuf.Empty", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "DeleteTable", + "fullName": "google.cloud.bigquery.v2.TableService.DeleteTable", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + }, + { + "regionTag": "bigquery_v2_generated_TableService_ListTables_async", + "title": "TableService listTables Sample", + "origin": "API_DEFINITION", + "description": " Lists all tables in the specified dataset. Requires the READER dataset role.", + "canonical": true, + "file": "table_service.list_tables.js", + "language": "JAVASCRIPT", + "segments": [ + { + "start": 25, + "end": 70, + "type": "FULL" + } + ], + "clientMethod": { + "shortName": "ListTables", + "fullName": "google.cloud.bigquery.v2.TableService.ListTables", + "async": true, + "parameters": [ + { + "name": "project_id", + "type": "TYPE_STRING" + }, + { + "name": "dataset_id", + "type": "TYPE_STRING" + }, + { + "name": "max_results", + "type": ".google.protobuf.UInt32Value" + }, + { + "name": "page_token", + "type": "TYPE_STRING" + } + ], + "resultType": ".google.cloud.bigquery.v2.TableList", + "client": { + "shortName": "TableServiceClient", + "fullName": "google.cloud.bigquery.v2.TableServiceClient" + }, + "method": { + "shortName": "ListTables", + "fullName": "google.cloud.bigquery.v2.TableService.ListTables", + "service": { + "shortName": "TableService", + "fullName": "google.cloud.bigquery.v2.TableService" + } + } + } + } + ] +} diff --git a/baselines/bigquery-v2/samples/generated/v2/table_service.delete_table.js.baseline b/baselines/bigquery-v2/samples/generated/v2/table_service.delete_table.js.baseline new file mode 100644 index 000000000..2496174f1 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/table_service.delete_table.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId) { + // [START bigquery_v2_generated_TableService_DeleteTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only.
+ * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the table to delete + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the table to delete + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the table to delete + */ + // const tableId = 'abc123' + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callDeleteTable() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + }; + + // Run request + const response = await bigqueryClient.deleteTable(request); + console.log(response); + } + + callDeleteTable(); + // [END bigquery_v2_generated_TableService_DeleteTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/table_service.get_table.js.baseline b/baselines/bigquery-v2/samples/generated/v2/table_service.get_table.js.baseline new file mode 100644 index 000000000..be8481470 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/table_service.get_table.js.baseline @@ -0,0 +1,86 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId) { + // [START bigquery_v2_generated_TableService_GetTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the requested table + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the requested table + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the requested table + */ + // const tableId = 'abc123' + /** + * List of table schema fields to return (comma-separated). + * If unspecified, all fields are returned. + * A fieldMask cannot be used here because the fields will automatically be + * converted from camelCase to snake_case and the conversion will fail if + * there are underscores. Since these are fields in BigQuery table schemas, + * underscores are allowed. + */ + // const selectedFields = 'abc123' + /** + * Optional. Specifies the view that determines which table information is + * returned. By default, basic table information and storage statistics + * (STORAGE_STATS) are returned. 
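+ * As a hedged aside (value names assumed from this API's TableMetadataView enum, not verified here): 'BASIC' would return table metadata only, 'STORAGE_STATS' would add storage statistics, and 'FULL' would return everything, e.g. view = 'FULL'.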
+ */ + // const view = {} + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callGetTable() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + }; + + // Run request + const response = await bigqueryClient.getTable(request); + console.log(response); + } + + callGetTable(); + // [END bigquery_v2_generated_TableService_GetTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/table_service.insert_table.js.baseline b/baselines/bigquery-v2/samples/generated/v2/table_service.insert_table.js.baseline new file mode 100644 index 000000000..2d338eb3f --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/table_service.insert_table.js.baseline @@ -0,0 +1,71 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, table) { + // [START bigquery_v2_generated_TableService_InsertTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the new table + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the new table + */ + // const datasetId = 'abc123' + /** + * Required. 
A tables resource to insert + */ + // const table = {} + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callInsertTable() { + // Construct request + const request = { + projectId, + datasetId, + table, + }; + + // Run request + const response = await bigqueryClient.insertTable(request); + console.log(response); + } + + callInsertTable(); + // [END bigquery_v2_generated_TableService_InsertTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/table_service.list_tables.js.baseline b/baselines/bigquery-v2/samples/generated/v2/table_service.list_tables.js.baseline new file mode 100644 index 000000000..de4cb4058 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/table_service.list_tables.js.baseline @@ -0,0 +1,78 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId) { + // [START bigquery_v2_generated_TableService_ListTables_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the tables to list + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the tables to list + */ + // const datasetId = 'abc123' + /** + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. 
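+ * Note: the for-await loop in this sample uses listTablesAsync, which follows page tokens for you, so pageToken is mainly needed when paginating manually.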
+ */ + // const maxResults = 1234 + /** + * Page token, returned by a previous call, to request the next page of + * results + */ + // const pageToken = 'abc123' + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callListTables() { + // Construct request + const request = { + projectId, + datasetId, + }; + + // Run request + const iterable = bigqueryClient.listTablesAsync(request); + for await (const response of iterable) { + console.log(response); + } + } + + callListTables(); + // [END bigquery_v2_generated_TableService_ListTables_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/table_service.patch_table.js.baseline b/baselines/bigquery-v2/samples/generated/v2/table_service.patch_table.js.baseline new file mode 100644 index 000000000..ceafc7c5d --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/table_service.patch_table.js.baseline @@ -0,0 +1,80 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId, table) { + // [START bigquery_v2_generated_TableService_PatchTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the table to update + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the table to update + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the table to update + */ + // const tableId = 'abc123' + /** + * Required. A tables resource which will replace or patch the specified table + */ + // const table = {} + /** + * Optional. When true will autodetect schema, else will keep original schema. 
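+ * Per the RFC 5789 patch semantics described for this method, a sketch such as table = {description: 'updated'} would change only that field and leave the rest of the table resource intact (field name assumed from the Table resource).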
+ */ + // const autodetectSchema = true + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callPatchTable() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + table, + }; + + // Run request + const response = await bigqueryClient.patchTable(request); + console.log(response); + } + + callPatchTable(); + // [END bigquery_v2_generated_TableService_PatchTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/samples/generated/v2/table_service.update_table.js.baseline b/baselines/bigquery-v2/samples/generated/v2/table_service.update_table.js.baseline new file mode 100644 index 000000000..50a2dfc30 --- /dev/null +++ b/baselines/bigquery-v2/samples/generated/v2/table_service.update_table.js.baseline @@ -0,0 +1,80 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + + + +'use strict'; + +function main(projectId, datasetId, tableId, table) { + // [START bigquery_v2_generated_TableService_UpdateTable_async] + /** + * This snippet has been automatically generated and should be regarded as a code template only. + * It will require modifications to work. + * It may require correct/in-range values for request initialization. + * TODO(developer): Uncomment these variables before running the sample. + */ + /** + * Required. Project ID of the table to update + */ + // const projectId = 'abc123' + /** + * Required. Dataset ID of the table to update + */ + // const datasetId = 'abc123' + /** + * Required. Table ID of the table to update + */ + // const tableId = 'abc123' + /** + * Required. A tables resource which will replace or patch the specified table + */ + // const table = {} + /** + * Optional. When true will autodetect schema, else will keep original schema. 
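+ * Unlike patchTable, updateTable replaces the entire Table resource, so a common (assumed) pattern is to getTable first, modify the returned resource, and send it back here.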
+ */ + // const autodetectSchema = true + + // Imports the Bigquery library + const {TableServiceClient} = require('bigquery').v2; + + // Instantiates a client + const bigqueryClient = new TableServiceClient(); + + async function callUpdateTable() { + // Construct request + const request = { + projectId, + datasetId, + tableId, + table, + }; + + // Run request + const response = await bigqueryClient.updateTable(request); + console.log(response); + } + + callUpdateTable(); + // [END bigquery_v2_generated_TableService_UpdateTable_async] +} + +process.on('unhandledRejection', err => { + console.error(err.message); + process.exitCode = 1; +}); +main(...process.argv.slice(2)); diff --git a/baselines/bigquery-v2/src/index.ts.baseline b/baselines/bigquery-v2/src/index.ts.baseline new file mode 100644 index 000000000..65ada5fef --- /dev/null +++ b/baselines/bigquery-v2/src/index.ts.baseline @@ -0,0 +1,37 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as v2 from './v2'; +const DatasetServiceClient = v2.DatasetServiceClient; +type DatasetServiceClient = v2.DatasetServiceClient; +const JobServiceClient = v2.JobServiceClient; +type JobServiceClient = v2.JobServiceClient; +const ModelServiceClient = v2.ModelServiceClient; +type ModelServiceClient = v2.ModelServiceClient; +const ProjectServiceClient = v2.ProjectServiceClient; +type ProjectServiceClient = v2.ProjectServiceClient; +const RoutineServiceClient = v2.RoutineServiceClient; +type RoutineServiceClient = v2.RoutineServiceClient; +const RowAccessPolicyServiceClient = v2.RowAccessPolicyServiceClient; +type RowAccessPolicyServiceClient = v2.RowAccessPolicyServiceClient; +const TableServiceClient = v2.TableServiceClient; +type TableServiceClient = v2.TableServiceClient; +export {v2, DatasetServiceClient, JobServiceClient, ModelServiceClient, ProjectServiceClient, RoutineServiceClient, RowAccessPolicyServiceClient, TableServiceClient}; +export default {v2, DatasetServiceClient, JobServiceClient, ModelServiceClient, ProjectServiceClient, RoutineServiceClient, RowAccessPolicyServiceClient, TableServiceClient}; +import * as protos from '../protos/protos'; +export {protos} diff --git a/baselines/bigquery-v2/src/v2/dataset_service_client.ts.baseline b/baselines/bigquery-v2/src/v2/dataset_service_client.ts.baseline new file mode 100644 index 000000000..89e907459 --- /dev/null +++ b/baselines/bigquery-v2/src/v2/dataset_service_client.ts.baseline @@ -0,0 +1,1021 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); + +/** + * Client JSON configuration object, loaded from + * `src/v2/dataset_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './dataset_service_client_config.json'; +const version = require('../../../package.json').version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Dataset Service. + * + * It should not be relied on for production use cases at this time. + * @class + * @memberof v2 + */ +export class DatasetServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + datasetServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of DatasetServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID.
If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean} [options.fallback] - Use HTTP/1.1 REST mode. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new DatasetServiceClient({fallback: true}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof DatasetServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = this._servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === this._servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. 
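+ // An illustrative (not authoritative) result of the assembly below looks + // like 'gax/4.x gapic/0.x gl-node/18.x grpc/1.x', with each token taken + // from the running environment at construction time.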
+ const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process === 'object' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listDatasets: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'datasets') + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.DatasetService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.datasetServiceStub) { + return this.datasetServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.DatasetService. + this.datasetServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.DatasetService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.DatasetService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
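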
+ const datasetServiceStubMethods = + ['getDataset', 'insertDataset', 'patchDataset', 'updateDataset', 'deleteDataset', 'listDatasets', 'undeleteDataset']; + for (const methodName of datasetServiceStubMethods) { + const callPromise = this.datasetServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.datasetServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + get apiEndpoint() { + return this._servicePath; + } + + get universeDomain() { + return this._universeDomain; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/bigquery', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only' + ]; + } + + getProjectId(): Promise<string>; + getProjectId(callback: Callback<string, undefined, undefined>): void; + /** + * Return the project ID used by this class. + * @returns {Promise} A promise that resolves to string containing the project ID. + */ + getProjectId(callback?: Callback<string, undefined, undefined>): + Promise<string>|void { + if (callback) { + this.auth.getProjectId(callback); + return; + } + return this.auth.getProjectId(); + } + + // ------------------- + // -- Service calls -- + // ------------------- +/** + * Returns the dataset specified by datasetID. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the requested dataset + * @param {string} request.datasetId + * Required. Dataset ID of the requested dataset + * @param {google.cloud.bigquery.v2.GetDatasetRequest.DatasetView} [request.datasetView] + * Optional. Specifies the view that determines which dataset information is + * returned. By default, metadata and ACL information are returned. + * @param {object} [options] + * Call options.
See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/dataset_service.get_dataset.js + * region_tag:bigquery_v2_generated_DatasetService_GetDataset_async + */ + getDataset( + request?: protos.google.cloud.bigquery.v2.IGetDatasetRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IGetDatasetRequest|undefined, {}|undefined + ]>; + getDataset( + request: protos.google.cloud.bigquery.v2.IGetDatasetRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IGetDatasetRequest|null|undefined, + {}|null|undefined>): void; + getDataset( + request: protos.google.cloud.bigquery.v2.IGetDatasetRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IGetDatasetRequest|null|undefined, + {}|null|undefined>): void; + getDataset( + request?: protos.google.cloud.bigquery.v2.IGetDatasetRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IGetDatasetRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IGetDatasetRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IGetDatasetRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getDataset(request, options, callback); + } +/** + * Creates a new empty dataset. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the new dataset + * @param {google.cloud.bigquery.v2.Dataset} request.dataset + * Required. Datasets resource to use for the new dataset + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
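+ * A minimal usage sketch, not generator output (the package import and IDs follow the generated samples and are assumptions): + * ``` + * const {DatasetServiceClient} = require('bigquery').v2; + * const client = new DatasetServiceClient(); + * const [dataset] = await client.insertDataset({ + *   projectId: 'my-project', + *   dataset: {datasetReference: {projectId: 'my-project', datasetId: 'my_dataset'}}, + * }); + * ```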
+ * @example include:samples/generated/v2/dataset_service.insert_dataset.js + * region_tag:bigquery_v2_generated_DatasetService_InsertDataset_async + */ + insertDataset( + request?: protos.google.cloud.bigquery.v2.IInsertDatasetRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|undefined, {}|undefined + ]>; + insertDataset( + request: protos.google.cloud.bigquery.v2.IInsertDatasetRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|null|undefined, + {}|null|undefined>): void; + insertDataset( + request: protos.google.cloud.bigquery.v2.IInsertDatasetRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|null|undefined, + {}|null|undefined>): void; + insertDataset( + request?: protos.google.cloud.bigquery.v2.IInsertDatasetRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IInsertDatasetRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + this.initialize(); + return this.innerApiCalls.insertDataset(request, options, callback); + } +/** + * Updates information in an existing dataset. The update method replaces the + * entire dataset resource, whereas the patch method only replaces fields that + * are provided in the submitted dataset resource. + * This method supports RFC5789 patch semantics. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the dataset being updated + * @param {string} request.datasetId + * Required. Dataset ID of the dataset being updated + * @param {google.cloud.bigquery.v2.Dataset} request.dataset + * Required. Datasets resource which will replace or patch the specified + * dataset. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
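+ * A hedged sketch of a partial update (the description field is assumed from the Dataset resource; other fields stay untouched, per the patch semantics above): + * ``` + * const [patched] = await client.patchDataset({ + *   projectId: 'my-project', + *   datasetId: 'my_dataset', + *   dataset: {description: 'updated description only'}, + * }); + * ```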
+ * @example include:samples/generated/v2/dataset_service.patch_dataset.js + * region_tag:bigquery_v2_generated_DatasetService_PatchDataset_async + */ + patchDataset( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|undefined, {}|undefined + ]>; + patchDataset( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): void; + patchDataset( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): void; + patchDataset( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.patchDataset(request, options, callback); + } +/** + * Updates information in an existing dataset. The update method replaces the + * entire dataset resource, whereas the patch method only replaces fields that + * are provided in the submitted dataset resource. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the dataset being updated + * @param {string} request.datasetId + * Required. Dataset ID of the dataset being updated + * @param {google.cloud.bigquery.v2.Dataset} request.dataset + * Required. Datasets resource which will replace or patch the specified + * dataset. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
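+ * Because update replaces the whole resource, a read-modify-write sketch (flow assumed, not generator output) looks like: + * ``` + * const [current] = await client.getDataset({projectId: 'my-project', datasetId: 'my_dataset'}); + * current.description = 'new description'; + * const [updated] = await client.updateDataset({projectId: 'my-project', datasetId: 'my_dataset', dataset: current}); + * ```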
+ * @example include:samples/generated/v2/dataset_service.update_dataset.js + * region_tag:bigquery_v2_generated_DatasetService_UpdateDataset_async + */ + updateDataset( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|undefined, {}|undefined + ]>; + updateDataset( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): void; + updateDataset( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): void; + updateDataset( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IDataset, + protos.google.cloud.bigquery.v2.IUpdateOrPatchDatasetRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.updateDataset(request, options, callback); + } +/** + * Deletes the dataset specified by the datasetId value. Before you can delete + * a dataset, you must delete all its tables, either manually or by specifying + * deleteContents. Immediately after deletion, you can create another dataset + * with the same name. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the dataset being deleted + * @param {string} request.datasetId + * Required. Dataset ID of dataset being deleted + * @param {boolean} request.deleteContents + * If True, delete all the tables in the dataset. + * If False and the dataset contains tables, the request will fail. + * Default is False + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
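+ * Sketch: per the deleteContents note above, a non-empty dataset can be deleted in one call (IDs illustrative): + * ``` + * await client.deleteDataset({projectId: 'my-project', datasetId: 'my_dataset', deleteContents: true}); + * ```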
+ * @example include:samples/generated/v2/dataset_service.delete_dataset.js
+ * region_tag:bigquery_v2_generated_DatasetService_DeleteDataset_async
+ */
+  deleteDataset(
+      request?: protos.google.cloud.bigquery.v2.IDeleteDatasetRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.protobuf.IEmpty,
+        protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|undefined, {}|undefined
+      ]>;
+  deleteDataset(
+      request: protos.google.cloud.bigquery.v2.IDeleteDatasetRequest,
+      options: CallOptions,
+      callback: Callback<
+          protos.google.protobuf.IEmpty,
+          protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|null|undefined,
+          {}|null|undefined>): void;
+  deleteDataset(
+      request: protos.google.cloud.bigquery.v2.IDeleteDatasetRequest,
+      callback: Callback<
+          protos.google.protobuf.IEmpty,
+          protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|null|undefined,
+          {}|null|undefined>): void;
+  deleteDataset(
+      request?: protos.google.cloud.bigquery.v2.IDeleteDatasetRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          protos.google.protobuf.IEmpty,
+          protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          protos.google.protobuf.IEmpty,
+          protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        protos.google.protobuf.IEmpty,
+        protos.google.cloud.bigquery.v2.IDeleteDatasetRequest|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    this.initialize();
+    return this.innerApiCalls.deleteDataset(request, options, callback);
+  }
+/**
+ * Undeletes a dataset that is within the time travel window, based on
+ * datasetId. If a time is specified, the dataset version deleted at that time
+ * is undeleted, else the last live version is undeleted.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the dataset to be undeleted
+ * @param {string} request.datasetId
+ *   Required. Dataset ID of the dataset being undeleted
+ * @param {google.protobuf.Timestamp} [request.deletionTime]
+ *   Optional. The exact time when the dataset was deleted. If not specified,
+ *   the most recently deleted version is undeleted. Undeleting a dataset
+ *   using deletion time is not supported.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Dataset|Dataset}.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ *   for more details and examples.
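+ *
+ * A minimal usage sketch (hypothetical IDs; `client` is assumed to be an
+ * initialized DatasetServiceClient; omitting `deletionTime` restores the most
+ * recently deleted version):
+ *
+ * ```
+ * const [dataset] = await client.undeleteDataset({
+ *   projectId: 'my-project',
+ *   datasetId: 'my_dataset',
+ * });
+ * ```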
+ * @example include:samples/generated/v2/dataset_service.undelete_dataset.js
+ * region_tag:bigquery_v2_generated_DatasetService_UndeleteDataset_async
+ */
+  undeleteDataset(
+      request?: protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IDataset,
+        protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|undefined, {}|undefined
+      ]>;
+  undeleteDataset(
+      request: protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest,
+      options: CallOptions,
+      callback: Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|null|undefined,
+          {}|null|undefined>): void;
+  undeleteDataset(
+      request: protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest,
+      callback: Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|null|undefined,
+          {}|null|undefined>): void;
+  undeleteDataset(
+      request?: protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          protos.google.cloud.bigquery.v2.IDataset,
+          protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IDataset,
+        protos.google.cloud.bigquery.v2.IUndeleteDatasetRequest|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    this.initialize();
+    return this.innerApiCalls.undeleteDataset(request, options, callback);
+  }
+
+ /**
+ * Lists all datasets in the specified project to which the user has been
+ * granted the READER dataset role.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the datasets to be listed
+ * @param {google.protobuf.UInt32Value| number } request.maxResults
+ *   The maximum number of results to return in a single response page.
+ *   Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ *   Page token, returned by a previous call, to request the next page of
+ *   results
+ * @param {boolean} request.all
+ *   Whether to list all datasets, including hidden ones
+ * @param {string} request.filter
+ *   An expression for filtering the results of the request by label.
+ *   The syntax is `labels.<name>[:<value>]`.
+ *   Multiple filters can be ANDed together by connecting with a space.
+ *   Example: `labels.department:receiving labels.active`.
+ *   See [Filtering datasets using
+ *   labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+ *   for details.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.ListFormatDataset|ListFormatDataset}.
+ *   The client library will perform auto-pagination by default: it will call the API as many
+ *   times as needed and will merge results from all the pages into this array.
+ *   Note that it can affect your quota.
+ *   We recommend using `listDatasetsAsync()`
+ *   method described below for async iteration which you can stop as needed.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ */
+  listDatasets(
+      request?: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IListFormatDataset[],
+        protos.google.cloud.bigquery.v2.IListDatasetsRequest|null,
+        protos.google.cloud.bigquery.v2.IDatasetList
+      ]>;
+  listDatasets(
+      request: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      options: CallOptions,
+      callback: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+          protos.google.cloud.bigquery.v2.IDatasetList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatDataset>): void;
+  listDatasets(
+      request: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      callback: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+          protos.google.cloud.bigquery.v2.IDatasetList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatDataset>): void;
+  listDatasets(
+      request?: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      optionsOrCallback?: CallOptions|PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+          protos.google.cloud.bigquery.v2.IDatasetList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatDataset>,
+      callback?: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+          protos.google.cloud.bigquery.v2.IDatasetList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatDataset>):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IListFormatDataset[],
+        protos.google.cloud.bigquery.v2.IListDatasetsRequest|null,
+        protos.google.cloud.bigquery.v2.IDatasetList
+      ]>|void {
+    request = request || {};
+    // Converts number to Uint32 or Int32 value for non-compliant APIs.
+    if (request.maxResults && typeof request.maxResults === "number") {
+      const maxResultsObject = {"value": request.maxResults};
+      request.maxResults = maxResultsObject;
+    }
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+    });
+    this.initialize();
+    return this.innerApiCalls.listDatasets(request, options, callback);
+  }
+
+/**
+ * Equivalent to `listDatasets`, but returns a NodeJS Stream object.
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the datasets to be listed
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ *   The maximum number of results to return in a single response page.
+ *   Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ *   Page token, returned by a previous call, to request the next page of
+ *   results
+ * @param {boolean} request.all
+ *   Whether to list all datasets, including hidden ones
+ * @param {string} request.filter
+ *   An expression for filtering the results of the request by label.
+ *   The syntax is `labels.<name>[:<value>]`.
+ *   Multiple filters can be ANDed together by connecting with a space.
+ *   Example: `labels.department:receiving labels.active`.
+ *   See [Filtering datasets using
+ *   labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+ *   for details.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ *   An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.ListFormatDataset|ListFormatDataset} on 'data' event.
+ *   The client library will perform auto-pagination by default: it will call the API as many
+ *   times as needed. Note that it can affect your quota.
+ *   We recommend using `listDatasetsAsync()`
+ *   method described below for async iteration which you can stop as needed.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ */
+  listDatasetsStream(
+      request?: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      options?: CallOptions):
+    Transform{
+    request = request || {};
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+    });
+    const defaultCallSettings = this._defaults['listDatasets'];
+    const callSettings = defaultCallSettings.merge(options);
+    this.initialize();
+    return this.descriptors.page.listDatasets.createStream(
+      this.innerApiCalls.listDatasets as GaxCall,
+      request,
+      callSettings
+    );
+  }
+
+/**
+ * Equivalent to `listDatasets`, but returns an iterable object.
+ *
+ * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand.
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the datasets to be listed
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ *   The maximum number of results to return in a single response page.
+ *   Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ *   Page token, returned by a previous call, to request the next page of
+ *   results
+ * @param {boolean} request.all
+ *   Whether to list all datasets, including hidden ones
+ * @param {string} request.filter
+ *   An expression for filtering the results of the request by label.
+ *   The syntax is `labels.<name>[:<value>]`.
+ *   Multiple filters can be ANDed together by connecting with a space.
+ *   Example: `labels.department:receiving labels.active`.
+ *   See [Filtering datasets using
+ *   labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels)
+ *   for details.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Object}
+ *   An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }.
+ *   When you iterate the returned iterable, each element will be an object representing
+ *   {@link protos.google.cloud.bigquery.v2.ListFormatDataset|ListFormatDataset}. The API will be called under the hood as needed, once per the page,
+ *   so you can stop the iteration when you don't need more results.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ * @example include:samples/generated/v2/dataset_service.list_datasets.js
+ * region_tag:bigquery_v2_generated_DatasetService_ListDatasets_async
+ */
+  listDatasetsAsync(
+      request?: protos.google.cloud.bigquery.v2.IListDatasetsRequest,
+      options?: CallOptions):
+    AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatDataset>{
+    request = request || {};
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+    });
+    const defaultCallSettings = this._defaults['listDatasets'];
+    const callSettings = defaultCallSettings.merge(options);
+    this.initialize();
+    return this.descriptors.page.listDatasets.asyncIterate(
+      this.innerApiCalls['listDatasets'] as GaxCall,
+      request as {},
+      callSettings
+    ) as AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatDataset>;
+  }
+
+  /**
+   * Terminate the gRPC channel and close the client.
+   *
+   * The client will no longer be usable and all future behavior is undefined.
+   * @returns {Promise} A promise that resolves when the client is closed.
+   */
+  close(): Promise<void> {
+    if (this.datasetServiceStub && !this._terminated) {
+      return this.datasetServiceStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/baselines/bigquery-v2/src/v2/dataset_service_client_config.json.baseline b/baselines/bigquery-v2/src/v2/dataset_service_client_config.json.baseline
new file mode 100644
index 000000000..74c64e606
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/dataset_service_client_config.json.baseline
@@ -0,0 +1,54 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.v2.DatasetService": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "GetDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "InsertDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "PatchDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "UpdateDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "DeleteDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "ListDatasets": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "UndeleteDataset": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git
a/baselines/bigquery-v2/src/v2/dataset_service_proto_list.json.baseline b/baselines/bigquery-v2/src/v2/dataset_service_proto_list.json.baseline new file mode 100644 index 000000000..4878b9be1 --- /dev/null +++ b/baselines/bigquery-v2/src/v2/dataset_service_proto_list.json.baseline @@ -0,0 +1,46 @@ +[ + "../../protos/google/cloud/bigquery/v2/biglake_config.proto", + "../../protos/google/cloud/bigquery/v2/clustering.proto", + "../../protos/google/cloud/bigquery/v2/data_format_options.proto", + "../../protos/google/cloud/bigquery/v2/dataset.proto", + "../../protos/google/cloud/bigquery/v2/dataset_reference.proto", + "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto", + "../../protos/google/cloud/bigquery/v2/encryption_config.proto", + "../../protos/google/cloud/bigquery/v2/error.proto", + "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto", + "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto", + "../../protos/google/cloud/bigquery/v2/external_data_config.proto", + "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto", + "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto", + "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/job.proto", + "../../protos/google/cloud/bigquery/v2/job_config.proto", + "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto", + "../../protos/google/cloud/bigquery/v2/job_reference.proto", + "../../protos/google/cloud/bigquery/v2/job_stats.proto", + "../../protos/google/cloud/bigquery/v2/job_status.proto", + "../../protos/google/cloud/bigquery/v2/json_extension.proto", + "../../protos/google/cloud/bigquery/v2/location_metadata.proto", + "../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2/src/v2/index.ts.baseline b/baselines/bigquery-v2/src/v2/index.ts.baseline new file mode 100644 index 000000000..79130ad65 --- /dev/null +++ b/baselines/bigquery-v2/src/v2/index.ts.baseline @@ -0,0 +1,25 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except 
in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +export {DatasetServiceClient} from './dataset_service_client'; +export {JobServiceClient} from './job_service_client'; +export {ModelServiceClient} from './model_service_client'; +export {ProjectServiceClient} from './project_service_client'; +export {RoutineServiceClient} from './routine_service_client'; +export {RowAccessPolicyServiceClient} from './row_access_policy_service_client'; +export {TableServiceClient} from './table_service_client'; diff --git a/baselines/bigquery-v2/src/v2/job_service_client.ts.baseline b/baselines/bigquery-v2/src/v2/job_service_client.ts.baseline new file mode 100644 index 000000000..c5c71fef5 --- /dev/null +++ b/baselines/bigquery-v2/src/v2/job_service_client.ts.baseline @@ -0,0 +1,1094 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); + +/** + * Client JSON configuration object, loaded from + * `src/v2/job_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './job_service_client_config.json'; +const version = require('../../../package.json').version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Job Service. + * + * It should not be relied on for production use cases at this time. 
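+ *
+ * A minimal construction sketch (the import path is hypothetical and depends
+ * on how the generated package is published; credentials resolve through
+ * Application Default Credentials):
+ *
+ * ```
+ * const {JobServiceClient} = require('bigquery').v2;  // hypothetical package name
+ * const client = new JobServiceClient();
+ * ```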
+ * @class + * @memberof v2 + */ +export class JobServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + jobServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of JobServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean} [options.fallback] - Use HTTP/1.1 REST mode. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new JobServiceClient({fallback: true}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. 
+ const staticMembers = this.constructor as typeof JobServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = this._servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === this._servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process === 'object' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listJobs: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'jobs') + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.JobService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.jobServiceStub) { + return this.jobServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.JobService. + this.jobServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.JobService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.JobService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const jobServiceStubMethods = + ['cancelJob', 'getJob', 'insertJob', 'deleteJob', 'listJobs', 'getQueryResults', 'query']; + for (const methodName of jobServiceStubMethods) { + const callPromise = this.jobServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.jobServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. 
+ */
+  static get apiEndpoint() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  get apiEndpoint() {
+    return this._servicePath;
+  }
+
+  get universeDomain() {
+    return this._universeDomain;
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only',
+      'https://www.googleapis.com/auth/devstorage.full_control',
+      'https://www.googleapis.com/auth/devstorage.read_only',
+      'https://www.googleapis.com/auth/devstorage.read_write'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * Requests that a job be cancelled. This call will return immediately, and
+ * the client will need to poll for the job status to see if the cancel
+ * completed successfully. Cancelled jobs may still incur costs.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the job to cancel
+ * @param {string} request.jobId
+ *   Required. Job ID of the job to cancel
+ * @param {string} request.location
+ *   The geographic location of the job. You must specify the location to run
+ *   the job for the following scenarios:
+ *
+ *   * If the location to run a job is not in the `us` or
+ *     the `eu` multi-regional location
+ *   * If the job's location is in a single region (for example,
+ *     `us-central1`)
+ *
+ *   For more information, see
+ *   https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.JobCancelResponse|JobCancelResponse}.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ *   for more details and examples.
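+ *
+ * A minimal usage sketch (hypothetical IDs; `client` is assumed to be an
+ * initialized JobServiceClient; the cancel response wraps the affected job):
+ *
+ * ```
+ * const [response] = await client.cancelJob({
+ *   projectId: 'my-project',
+ *   jobId: 'my-job',
+ * });
+ * console.log(response.job?.status?.state);
+ * ```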
+ * @example include:samples/generated/v2/job_service.cancel_job.js + * region_tag:bigquery_v2_generated_JobService_CancelJob_async + */ + cancelJob( + request?: protos.google.cloud.bigquery.v2.ICancelJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|undefined, {}|undefined + ]>; + cancelJob( + request: protos.google.cloud.bigquery.v2.ICancelJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|null|undefined, + {}|null|undefined>): void; + cancelJob( + request: protos.google.cloud.bigquery.v2.ICancelJobRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|null|undefined, + {}|null|undefined>): void; + cancelJob( + request?: protos.google.cloud.bigquery.v2.ICancelJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IJobCancelResponse, + protos.google.cloud.bigquery.v2.ICancelJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.cancelJob(request, options, callback); + } +/** + * Returns information about a specific job. Job information is available for + * a six month period after creation. Requires that you're the person who ran + * the job, or have the Is Owner project role. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the requested job. + * @param {string} request.jobId + * Required. Job ID of the requested job. + * @param {string} request.location + * The geographic location of the job. You must specify the location to run + * the job for the following scenarios: + * + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Job|Job}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
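+ *
+ * A minimal usage sketch (hypothetical IDs; `client` is assumed to be an
+ * initialized JobServiceClient):
+ *
+ * ```
+ * const [job] = await client.getJob({projectId: 'my-project', jobId: 'my-job'});
+ * console.log(job.status?.state);
+ * ```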
+ * @example include:samples/generated/v2/job_service.get_job.js + * region_tag:bigquery_v2_generated_JobService_GetJob_async + */ + getJob( + request?: protos.google.cloud.bigquery.v2.IGetJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|undefined, {}|undefined + ]>; + getJob( + request: protos.google.cloud.bigquery.v2.IGetJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|null|undefined, + {}|null|undefined>): void; + getJob( + request: protos.google.cloud.bigquery.v2.IGetJobRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|null|undefined, + {}|null|undefined>): void; + getJob( + request?: protos.google.cloud.bigquery.v2.IGetJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IGetJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getJob(request, options, callback); + } +/** + * Starts a new asynchronous job. + * + * This API has two different kinds of endpoint URIs, as this method supports + * a variety of use cases. + * + * * The *Metadata* URI is used for most interactions, as it accepts the job + * configuration directly. + * * The *Upload* URI is ONLY for the case when you're sending both a load job + * configuration and a data stream together. In this case, the Upload URI + * accepts the job configuration and the data as two distinct multipart MIME + * parts. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Project ID of project that will be billed for the job. + * @param {google.cloud.bigquery.v2.Job} request.job + * Jobs resource to insert. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Job|Job}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
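+ *
+ * A minimal usage sketch (hypothetical IDs; `client` is assumed to be an
+ * initialized JobServiceClient; the query configuration is illustrative only):
+ *
+ * ```
+ * const [job] = await client.insertJob({
+ *   projectId: 'my-project',
+ *   job: {configuration: {query: {query: 'SELECT 1'}}},
+ * });
+ * ```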
+ * @example include:samples/generated/v2/job_service.insert_job.js + * region_tag:bigquery_v2_generated_JobService_InsertJob_async + */ + insertJob( + request?: protos.google.cloud.bigquery.v2.IInsertJobRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|undefined, {}|undefined + ]>; + insertJob( + request: protos.google.cloud.bigquery.v2.IInsertJobRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|null|undefined, + {}|null|undefined>): void; + insertJob( + request: protos.google.cloud.bigquery.v2.IInsertJobRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|null|undefined, + {}|null|undefined>): void; + insertJob( + request?: protos.google.cloud.bigquery.v2.IInsertJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IJob, + protos.google.cloud.bigquery.v2.IInsertJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + this.initialize(); + return this.innerApiCalls.insertJob(request, options, callback); + } +/** + * Requests the deletion of the metadata of a job. This call returns when the + * job's metadata is deleted. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the job for which metadata is to be deleted. + * @param {string} request.jobId + * Required. Job ID of the job for which metadata is to be deleted. If this is + * a parent job which has child jobs, the metadata from all child jobs will be + * deleted as well. Direct deletion of the metadata of child jobs is not + * allowed. + * @param {string} request.location + * The geographic location of the job. Required. + * See details at: + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
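+ *
+ * A minimal usage sketch (hypothetical IDs and location; `client` is assumed
+ * to be an initialized JobServiceClient):
+ *
+ * ```
+ * await client.deleteJob({
+ *   projectId: 'my-project',
+ *   jobId: 'my-job',
+ *   location: 'US',
+ * });
+ * ```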
+ * @example include:samples/generated/v2/job_service.delete_job.js + * region_tag:bigquery_v2_generated_JobService_DeleteJob_async + */ + deleteJob( + request?: protos.google.cloud.bigquery.v2.IDeleteJobRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|undefined, {}|undefined + ]>; + deleteJob( + request: protos.google.cloud.bigquery.v2.IDeleteJobRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|null|undefined, + {}|null|undefined>): void; + deleteJob( + request: protos.google.cloud.bigquery.v2.IDeleteJobRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|null|undefined, + {}|null|undefined>): void; + deleteJob( + request?: protos.google.cloud.bigquery.v2.IDeleteJobRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteJobRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteJob(request, options, callback); + } +/** + * RPC to get the results of a query job. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the query job. + * @param {string} request.jobId + * Required. Job ID of the query job. + * @param {google.protobuf.UInt64Value} request.startIndex + * Zero-based index of the starting row. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results. + * @param {google.protobuf.UInt32Value} request.maxResults + * Maximum number of results to read. + * @param {google.protobuf.UInt32Value} request.timeoutMs + * Optional: Specifies the maximum amount of time, in milliseconds, that the + * client is willing to wait for the query to complete. By default, this limit + * is 10 seconds (10,000 milliseconds). If the query is complete, the + * jobComplete field in the response is true. If the query has not yet + * completed, jobComplete is false. + * + * You can request a longer timeout period in the timeoutMs field. However, + * the call is not guaranteed to wait for the specified timeout; it typically + * returns after around 200 seconds (200,000 milliseconds), even if the query + * is not complete. + * + * If jobComplete is false, you can continue to wait for the query to complete + * by calling the getQueryResults method until the jobComplete field in the + * getQueryResults response is true. + * @param {string} request.location + * The geographic location of the job. 
You must specify the location to run + * the job for the following scenarios: + * + * * If the location to run a job is not in the `us` or + * the `eu` multi-regional location + * * If the job's location is in a single region (for example, + * `us-central1`) + * + * For more information, see + * https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + * @param {google.cloud.bigquery.v2.DataFormatOptions} [request.formatOptions] + * Optional. Output format adjustments. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.GetQueryResultsResponse|GetQueryResultsResponse}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/job_service.get_query_results.js + * region_tag:bigquery_v2_generated_JobService_GetQueryResults_async + */ + getQueryResults( + request?: protos.google.cloud.bigquery.v2.IGetQueryResultsRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|undefined, {}|undefined + ]>; + getQueryResults( + request: protos.google.cloud.bigquery.v2.IGetQueryResultsRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|null|undefined, + {}|null|undefined>): void; + getQueryResults( + request: protos.google.cloud.bigquery.v2.IGetQueryResultsRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|null|undefined, + {}|null|undefined>): void; + getQueryResults( + request?: protos.google.cloud.bigquery.v2.IGetQueryResultsRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IGetQueryResultsResponse, + protos.google.cloud.bigquery.v2.IGetQueryResultsRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'job_id': request.jobId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getQueryResults(request, options, callback); + } +/** + * Runs a BigQuery SQL query synchronously and returns query results if the + * query completes within a specified timeout. + * + * @param {Object} request + * The request object that will be sent. 
+ * @param {string} request.projectId + * Required. Project ID of the query request. + * @param {google.cloud.bigquery.v2.QueryRequest} request.queryRequest + * The query request body. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.QueryResponse|QueryResponse}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/job_service.query.js + * region_tag:bigquery_v2_generated_JobService_Query_async + */ + query( + request?: protos.google.cloud.bigquery.v2.IPostQueryRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|undefined, {}|undefined + ]>; + query( + request: protos.google.cloud.bigquery.v2.IPostQueryRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|null|undefined, + {}|null|undefined>): void; + query( + request: protos.google.cloud.bigquery.v2.IPostQueryRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|null|undefined, + {}|null|undefined>): void; + query( + request?: protos.google.cloud.bigquery.v2.IPostQueryRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IQueryResponse, + protos.google.cloud.bigquery.v2.IPostQueryRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + }); + this.initialize(); + return this.innerApiCalls.query(request, options, callback); + } + + /** + * Lists all jobs that you started in the specified project. Job information + * is available for a six month period after creation. The job list is sorted + * in reverse chronological order, by job creation time. Requires the Can View + * project role, or the Is Owner project role if you set the allUsers + * property. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Project ID of the jobs to list. + * @param {boolean} request.allUsers + * Whether to display jobs owned by all users in the project. Default False. + * @param {google.protobuf.Int32Value| number } request.maxResults + * The maximum number of results to return in a single response page. 
+ * Leverage the page tokens to iterate through the entire collection.
+ * @param {number} request.minCreationTime
+ * Min value for job creation time, in milliseconds since the POSIX epoch.
+ * If set, only jobs created after or at this timestamp are returned.
+ * @param {google.protobuf.UInt64Value} request.maxCreationTime
+ * Max value for job creation time, in milliseconds since the POSIX epoch.
+ * If set, only jobs created before or at this timestamp are returned.
+ * @param {string} request.pageToken
+ * Page token, returned by a previous call, to request the next page of
+ * results.
+ * @param {google.cloud.bigquery.v2.ListJobsRequest.Projection} request.projection
+ * Restrict information returned to a set of selected fields.
+ * @param {number[]} request.stateFilter
+ * Filter for job state.
+ * @param {string} request.parentJobId
+ * If set, show only child jobs of the specified parent. Otherwise, show all
+ * top-level jobs.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.ListFormatJob|ListFormatJob}.
+ * The client library will perform auto-pagination by default: it will call the API as many
+ * times as needed and will merge results from all the pages into this array.
+ * Note that it can affect your quota.
+ * We recommend using the `listJobsAsync()`
+ * method described below for async iteration, which you can stop as needed.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ */
+ listJobs(
+ request?: protos.google.cloud.bigquery.v2.IListJobsRequest,
+ options?: CallOptions):
+ Promise<[
+ protos.google.cloud.bigquery.v2.IListFormatJob[],
+ protos.google.cloud.bigquery.v2.IListJobsRequest|null,
+ protos.google.cloud.bigquery.v2.IJobList
+ ]>;
+ listJobs(
+ request: protos.google.cloud.bigquery.v2.IListJobsRequest,
+ options: CallOptions,
+ callback: PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListJobsRequest,
+ protos.google.cloud.bigquery.v2.IJobList|null|undefined,
+ protos.google.cloud.bigquery.v2.IListFormatJob>): void;
+ listJobs(
+ request: protos.google.cloud.bigquery.v2.IListJobsRequest,
+ callback: PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListJobsRequest,
+ protos.google.cloud.bigquery.v2.IJobList|null|undefined,
+ protos.google.cloud.bigquery.v2.IListFormatJob>): void;
+ listJobs(
+ request?: protos.google.cloud.bigquery.v2.IListJobsRequest,
+ optionsOrCallback?: CallOptions|PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListJobsRequest,
+ protos.google.cloud.bigquery.v2.IJobList|null|undefined,
+ protos.google.cloud.bigquery.v2.IListFormatJob>,
+ callback?: PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListJobsRequest,
+ protos.google.cloud.bigquery.v2.IJobList|null|undefined,
+ protos.google.cloud.bigquery.v2.IListFormatJob>):
+ Promise<[
+ protos.google.cloud.bigquery.v2.IListFormatJob[],
+ protos.google.cloud.bigquery.v2.IListJobsRequest|null,
+ protos.google.cloud.bigquery.v2.IJobList
+ ]>|void {
+ request = request || {};
+ // Converts number to Uint32 or Int32 value for non-compliant APIs.
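+ // For example, a caller passing the plain number {maxResults: 100}
+ // has it rewritten here to the wrapper shape {maxResults: {value: 100}}
+ // expected by the google.protobuf.Int32Value field on ListJobsRequest
+ // (the value 100 is illustrative, not generator output).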
+ if (request.maxResults && typeof request.maxResults === 'number') {
+ const maxResultsObject = {value: request.maxResults};
+ request.maxResults = maxResultsObject;
+ }
+ let options: CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
+ options = {};
+ }
+ else {
+ options = optionsOrCallback as CallOptions;
+ }
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ });
+ this.initialize();
+ return this.innerApiCalls.listJobs(request, options, callback);
+ }
+
+/**
+ * Equivalent to `listJobs`, but returns a NodeJS Stream object.
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Project ID of the jobs to list.
+ * @param {boolean} request.allUsers
+ * Whether to display jobs owned by all users in the project. Default False.
+ * @param {google.protobuf.Int32Value} request.maxResults
+ * The maximum number of results to return in a single response page.
+ * Leverage the page tokens to iterate through the entire collection.
+ * @param {number} request.minCreationTime
+ * Min value for job creation time, in milliseconds since the POSIX epoch.
+ * If set, only jobs created after or at this timestamp are returned.
+ * @param {google.protobuf.UInt64Value} request.maxCreationTime
+ * Max value for job creation time, in milliseconds since the POSIX epoch.
+ * If set, only jobs created before or at this timestamp are returned.
+ * @param {string} request.pageToken
+ * Page token, returned by a previous call, to request the next page of
+ * results.
+ * @param {google.cloud.bigquery.v2.ListJobsRequest.Projection} request.projection
+ * Restrict information returned to a set of selected fields.
+ * @param {number[]} request.stateFilter
+ * Filter for job state.
+ * @param {string} request.parentJobId
+ * If set, show only child jobs of the specified parent. Otherwise, show all
+ * top-level jobs.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ * An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.ListFormatJob|ListFormatJob} on 'data' event.
+ * The client library will perform auto-pagination by default: it will call the API as many
+ * times as needed. Note that it can affect your quota.
+ * We recommend using the `listJobsAsync()`
+ * method described below for async iteration, which you can stop as needed.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ */
+ listJobsStream(
+ request?: protos.google.cloud.bigquery.v2.IListJobsRequest,
+ options?: CallOptions):
+ Transform {
+ request = request || {};
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ });
+ const defaultCallSettings = this._defaults['listJobs'];
+ const callSettings = defaultCallSettings.merge(options);
+ this.initialize();
+ return this.descriptors.page.listJobs.createStream(
+ this.innerApiCalls.listJobs as GaxCall,
+ request,
+ callSettings
+ );
+ }
+
+/**
+ * Equivalent to `listJobs`, but returns an iterable object.
+ *
+ * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand.
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Project ID of the jobs to list.
+ * @param {boolean} request.allUsers
+ * Whether to display jobs owned by all users in the project. Default False.
+ * @param {google.protobuf.Int32Value} request.maxResults
+ * The maximum number of results to return in a single response page.
+ * Leverage the page tokens to iterate through the entire collection.
+ * @param {number} request.minCreationTime
+ * Min value for job creation time, in milliseconds since the POSIX epoch.
+ * If set, only jobs created after or at this timestamp are returned.
+ * @param {google.protobuf.UInt64Value} request.maxCreationTime
+ * Max value for job creation time, in milliseconds since the POSIX epoch.
+ * If set, only jobs created before or at this timestamp are returned.
+ * @param {string} request.pageToken
+ * Page token, returned by a previous call, to request the next page of
+ * results.
+ * @param {google.cloud.bigquery.v2.ListJobsRequest.Projection} request.projection
+ * Restrict information returned to a set of selected fields.
+ * @param {number[]} request.stateFilter
+ * Filter for job state.
+ * @param {string} request.parentJobId
+ * If set, show only child jobs of the specified parent. Otherwise, show all
+ * top-level jobs.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Object}
+ * An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }.
+ * When you iterate the returned iterable, each element will be an object representing
+ * {@link protos.google.cloud.bigquery.v2.ListFormatJob|ListFormatJob}. The API will be called under the hood as needed, once per page,
+ * so you can stop the iteration when you don't need more results.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ * @example include:samples/generated/v2/job_service.list_jobs.js
+ * region_tag:bigquery_v2_generated_JobService_ListJobs_async
+ */
+ listJobsAsync(
+ request?: protos.google.cloud.bigquery.v2.IListJobsRequest,
+ options?: CallOptions):
+ AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatJob> {
+ request = request || {};
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ });
+ const defaultCallSettings = this._defaults['listJobs'];
+ const callSettings = defaultCallSettings.merge(options);
+ this.initialize();
+ return this.descriptors.page.listJobs.asyncIterate(
+ this.innerApiCalls['listJobs'] as GaxCall,
+ request as {},
+ callSettings
+ ) as AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatJob>;
+ }
+
+ /**
+ * Terminate the gRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ * @returns {Promise} A promise that resolves when the client is closed.
+ */
+ close(): Promise<void> {
+ if (this.jobServiceStub && !this._terminated) {
+ return this.jobServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/baselines/bigquery-v2/src/v2/job_service_client_config.json.baseline b/baselines/bigquery-v2/src/v2/job_service_client_config.json.baseline
new file mode 100644
index 000000000..7b1d7cc93
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/job_service_client_config.json.baseline
@@ -0,0 +1,54 @@
+{
+ "interfaces": {
+ "google.cloud.bigquery.v2.JobService": {
+ "retry_codes": {
+ "non_idempotent": [],
+ "idempotent": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ]
+ },
+ "retry_params": {
+ "default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ }
+ },
+ "methods": {
+ "CancelJob": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "GetJob": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "InsertJob": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "DeleteJob": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "ListJobs": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "GetQueryResults": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "Query": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ }
+ }
+ }
+ }
+}
diff --git a/baselines/bigquery-v2/src/v2/job_service_proto_list.json.baseline b/baselines/bigquery-v2/src/v2/job_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/job_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+ "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+ "../../protos/google/cloud/bigquery/v2/clustering.proto",
+ "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+ "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+ "../../protos/google/cloud/bigquery/v2/error.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+ "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+ "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+ "../../protos/google/cloud/bigquery/v2/job.proto",
+ "../../protos/google/cloud/bigquery/v2/job_config.proto",
+ "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto",
+ "../../protos/google/cloud/bigquery/v2/job_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/job_stats.proto",
+ "../../protos/google/cloud/bigquery/v2/job_status.proto",
+ "../../protos/google/cloud/bigquery/v2/json_extension.proto",
+
"../../protos/google/cloud/bigquery/v2/location_metadata.proto", + "../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2/src/v2/model_service_client.ts.baseline b/baselines/bigquery-v2/src/v2/model_service_client.ts.baseline new file mode 100644 index 000000000..dded7a50a --- /dev/null +++ b/baselines/bigquery-v2/src/v2/model_service_client.ts.baseline @@ -0,0 +1,769 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); + +/** + * Client JSON configuration object, loaded from + * `src/v2/model_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './model_service_client_config.json'; +const version = require('../../../package.json').version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Model Service. + * + * It should not be relied on for production use cases at this time. 
+ * @class
+ * @memberof v2
+ */
+export class ModelServiceClient {
+ private _terminated = false;
+ private _opts: ClientOptions;
+ private _providedCustomServicePath: boolean;
+ private _gaxModule: typeof gax | typeof gax.fallback;
+ private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient;
+ private _protos: {};
+ private _defaults: {[method: string]: gax.CallSettings};
+ private _universeDomain: string;
+ private _servicePath: string;
+ auth: gax.GoogleAuth;
+ descriptors: Descriptors = {
+ page: {},
+ stream: {},
+ longrunning: {},
+ batching: {},
+ };
+ warn: (code: string, message: string, warnType?: string) => void;
+ innerApiCalls: {[name: string]: Function};
+ modelServiceStub?: Promise<{[name: string]: Function}>;
+
+ /**
+ * Construct an instance of ModelServiceClient.
+ *
+ * @param {object} [options] - The configuration object.
+ * The options accepted by the constructor are described in detail
+ * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance).
+ * The common options are:
+ * @param {object} [options.credentials] - Credentials object.
+ * @param {string} [options.credentials.client_email]
+ * @param {string} [options.credentials.private_key]
+ * @param {string} [options.email] - Account email address. Required when
+ * using a .pem or .p12 keyFilename.
+ * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+ * .p12 key downloaded from the Google Developers Console. If you provide
+ * a path to a JSON file, the projectId option below is not necessary.
+ * NOTE: .pem and .p12 require you to specify options.email as well.
+ * @param {number} [options.port] - The port on which to connect to
+ * the remote host.
+ * @param {string} [options.projectId] - The project ID from the Google
+ * Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+ * the environment variable GCLOUD_PROJECT for your project ID. If your
+ * app is running in an environment which supports
+ * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+ * your project ID will be detected automatically.
+ * @param {string} [options.apiEndpoint] - The domain name of the
+ * API remote host.
+ * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override.
+ * Follows the structure of {@link gapicConfig}.
+ * @param {boolean} [options.fallback] - Use HTTP/1.1 REST mode.
+ * For more information, please check the
+ * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}.
+ * @param {gax} [gaxInstance] - Loaded instance of `google-gax`. Useful if you
+ * need to avoid loading the default gRPC version and want to use the fallback
+ * HTTP implementation. Load only the fallback version and pass it to the constructor:
+ * ```
+ * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC
+ * const client = new ModelServiceClient({fallback: true}, gax);
+ * ```
+ */
+ constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) {
+ // Ensure that options include all the required fields.
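+ // The universe domain is resolved below in precedence order: the explicit
+ // universeDomain/universe_domain option, then the
+ // GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable, then the default
+ // 'googleapis.com'; the service path becomes 'bigquery.' + that domain.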
+ const staticMembers = this.constructor as typeof ModelServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = this._servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === this._servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process === 'object' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listModels: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'models') + }; + + // Put together the default options sent with requests. 
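+ // The clientHeader tokens built above are joined into the single
+ // 'x-goog-api-client' metadata value sent on every request, e.g.
+ // 'gax/4.3.0 gapic/0.1.0 gl-node/18.19.0 grpc/1.10.0'
+ // (version numbers illustrative only).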
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.ModelService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.modelServiceStub) { + return this.modelServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.ModelService. + this.modelServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.ModelService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.ModelService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const modelServiceStubMethods = + ['getModel', 'listModels', 'patchModel', 'deleteModel']; + for (const methodName of modelServiceStubMethods) { + const callPromise = this.modelServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.modelServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service. 
+ * @returns {string} The DNS address for this service.
+ */
+ get apiEndpoint() {
+ return this._servicePath;
+ }
+
+ get universeDomain() {
+ return this._universeDomain;
+ }
+
+ /**
+ * The port for this API service.
+ * @returns {number} The default port for this service.
+ */
+ static get port() {
+ return 443;
+ }
+
+ /**
+ * The scopes needed to make gRPC calls for every method defined
+ * in this service.
+ * @returns {string[]} List of default scopes.
+ */
+ static get scopes() {
+ return [
+ 'https://www.googleapis.com/auth/bigquery',
+ 'https://www.googleapis.com/auth/cloud-platform',
+ 'https://www.googleapis.com/auth/cloud-platform.read-only'
+ ];
+ }
+
+ getProjectId(): Promise<string>;
+ getProjectId(callback: Callback<string, undefined, undefined>): void;
+ /**
+ * Return the project ID used by this class.
+ * @returns {Promise} A promise that resolves to a string containing the project ID.
+ */
+ getProjectId(callback?: Callback<string, undefined, undefined>):
+ Promise<string>|void {
+ if (callback) {
+ this.auth.getProjectId(callback);
+ return;
+ }
+ return this.auth.getProjectId();
+ }
+
+ // -------------------
+ // -- Service calls --
+ // -------------------
+/**
+ * Gets the specified model resource by model ID.
+ *
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Required. Project ID of the requested model.
+ * @param {string} request.datasetId
+ * Required. Dataset ID of the requested model.
+ * @param {string} request.modelId
+ * Required. Model ID of the requested model.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Model|Model}.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ * for more details and examples.
+ * @example include:samples/generated/v2/model_service.get_model.js + * region_tag:bigquery_v2_generated_ModelService_GetModel_async + */ + getModel( + request?: protos.google.cloud.bigquery.v2.IGetModelRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|undefined, {}|undefined + ]>; + getModel( + request: protos.google.cloud.bigquery.v2.IGetModelRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|null|undefined, + {}|null|undefined>): void; + getModel( + request: protos.google.cloud.bigquery.v2.IGetModelRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|null|undefined, + {}|null|undefined>): void; + getModel( + request?: protos.google.cloud.bigquery.v2.IGetModelRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IGetModelRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'model_id': request.modelId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getModel(request, options, callback); + } +/** + * Patch specific fields in the specified model. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the model to patch. + * @param {string} request.datasetId + * Required. Dataset ID of the model to patch. + * @param {string} request.modelId + * Required. Model ID of the model to patch. + * @param {google.cloud.bigquery.v2.Model} request.model + * Required. Patched model. + * Follows RFC5789 patch semantics. Missing fields are not updated. + * To clear a field, explicitly set to default value. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Model|Model}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
+ * @example include:samples/generated/v2/model_service.patch_model.js + * region_tag:bigquery_v2_generated_ModelService_PatchModel_async + */ + patchModel( + request?: protos.google.cloud.bigquery.v2.IPatchModelRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|undefined, {}|undefined + ]>; + patchModel( + request: protos.google.cloud.bigquery.v2.IPatchModelRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|null|undefined, + {}|null|undefined>): void; + patchModel( + request: protos.google.cloud.bigquery.v2.IPatchModelRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|null|undefined, + {}|null|undefined>): void; + patchModel( + request?: protos.google.cloud.bigquery.v2.IPatchModelRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IModel, + protos.google.cloud.bigquery.v2.IPatchModelRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'model_id': request.modelId ?? '', + }); + this.initialize(); + return this.innerApiCalls.patchModel(request, options, callback); + } +/** + * Deletes the model specified by modelId from the dataset. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the model to delete. + * @param {string} request.datasetId + * Required. Dataset ID of the model to delete. + * @param {string} request.modelId + * Required. Model ID of the model to delete. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
+ * @example include:samples/generated/v2/model_service.delete_model.js + * region_tag:bigquery_v2_generated_ModelService_DeleteModel_async + */ + deleteModel( + request?: protos.google.cloud.bigquery.v2.IDeleteModelRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|undefined, {}|undefined + ]>; + deleteModel( + request: protos.google.cloud.bigquery.v2.IDeleteModelRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|null|undefined, + {}|null|undefined>): void; + deleteModel( + request: protos.google.cloud.bigquery.v2.IDeleteModelRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|null|undefined, + {}|null|undefined>): void; + deleteModel( + request?: protos.google.cloud.bigquery.v2.IDeleteModelRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteModelRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'model_id': request.modelId ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteModel(request, options, callback); + } + + /** + * Lists all models in the specified dataset. Requires the READER dataset + * role. After retrieving the list of models, you can get information about a + * particular model by calling the models.get method. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the models to list. + * @param {string} request.datasetId + * Required. Dataset ID of the models to list. + * @param {google.protobuf.UInt32Value| number } request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {string} request.pageToken + * Page token, returned by a previous call to request the next page of + * results + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.Model|Model}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. 
+ * We recommend using the `listModelsAsync()`
+ * method described below for async iteration, which you can stop as needed.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ */
+ listModels(
+ request?: protos.google.cloud.bigquery.v2.IListModelsRequest,
+ options?: CallOptions):
+ Promise<[
+ protos.google.cloud.bigquery.v2.IModel[],
+ protos.google.cloud.bigquery.v2.IListModelsRequest|null,
+ protos.google.cloud.bigquery.v2.IListModelsResponse
+ ]>;
+ listModels(
+ request: protos.google.cloud.bigquery.v2.IListModelsRequest,
+ options: CallOptions,
+ callback: PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListModelsRequest,
+ protos.google.cloud.bigquery.v2.IListModelsResponse|null|undefined,
+ protos.google.cloud.bigquery.v2.IModel>): void;
+ listModels(
+ request: protos.google.cloud.bigquery.v2.IListModelsRequest,
+ callback: PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListModelsRequest,
+ protos.google.cloud.bigquery.v2.IListModelsResponse|null|undefined,
+ protos.google.cloud.bigquery.v2.IModel>): void;
+ listModels(
+ request?: protos.google.cloud.bigquery.v2.IListModelsRequest,
+ optionsOrCallback?: CallOptions|PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListModelsRequest,
+ protos.google.cloud.bigquery.v2.IListModelsResponse|null|undefined,
+ protos.google.cloud.bigquery.v2.IModel>,
+ callback?: PaginationCallback<
+ protos.google.cloud.bigquery.v2.IListModelsRequest,
+ protos.google.cloud.bigquery.v2.IListModelsResponse|null|undefined,
+ protos.google.cloud.bigquery.v2.IModel>):
+ Promise<[
+ protos.google.cloud.bigquery.v2.IModel[],
+ protos.google.cloud.bigquery.v2.IListModelsRequest|null,
+ protos.google.cloud.bigquery.v2.IListModelsResponse
+ ]>|void {
+ request = request || {};
+ // Converts number to Uint32 or Int32 value for non-compliant APIs.
+ if (request.maxResults && typeof request.maxResults === 'number') {
+ const maxResultsObject = {value: request.maxResults};
+ request.maxResults = maxResultsObject;
+ }
+ let options: CallOptions;
+ if (typeof optionsOrCallback === 'function' && callback === undefined) {
+ callback = optionsOrCallback;
+ options = {};
+ }
+ else {
+ options = optionsOrCallback as CallOptions;
+ }
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ?? '',
+ });
+ this.initialize();
+ return this.innerApiCalls.listModels(request, options, callback);
+ }
+
+/**
+ * Equivalent to `listModels`, but returns a NodeJS Stream object.
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Required. Project ID of the models to list.
+ * @param {string} request.datasetId
+ * Required. Dataset ID of the models to list.
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ * The maximum number of results to return in a single response page.
+ * Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ * Page token, returned by a previous call to request the next page of
+ * results.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Stream}
+ * An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.Model|Model} on 'data' event.
+ * The client library will perform auto-pagination by default: it will call the API as many
+ * times as needed. Note that it can affect your quota.
+ * We recommend using the `listModelsAsync()`
+ * method described below for async iteration, which you can stop as needed.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ */
+ listModelsStream(
+ request?: protos.google.cloud.bigquery.v2.IListModelsRequest,
+ options?: CallOptions):
+ Transform {
+ request = request || {};
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ?? '',
+ });
+ const defaultCallSettings = this._defaults['listModels'];
+ const callSettings = defaultCallSettings.merge(options);
+ this.initialize();
+ return this.descriptors.page.listModels.createStream(
+ this.innerApiCalls.listModels as GaxCall,
+ request,
+ callSettings
+ );
+ }
+
+/**
+ * Equivalent to `listModels`, but returns an iterable object.
+ *
+ * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand.
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Required. Project ID of the models to list.
+ * @param {string} request.datasetId
+ * Required. Dataset ID of the models to list.
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ * The maximum number of results to return in a single response page.
+ * Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ * Page token, returned by a previous call to request the next page of
+ * results.
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Object}
+ * An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }.
+ * When you iterate the returned iterable, each element will be an object representing
+ * {@link protos.google.cloud.bigquery.v2.Model|Model}. The API will be called under the hood as needed, once per page,
+ * so you can stop the iteration when you don't need more results.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ * @example include:samples/generated/v2/model_service.list_models.js
+ * region_tag:bigquery_v2_generated_ModelService_ListModels_async
+ */
+ listModelsAsync(
+ request?: protos.google.cloud.bigquery.v2.IListModelsRequest,
+ options?: CallOptions):
+ AsyncIterable<protos.google.cloud.bigquery.v2.IModel> {
+ request = request || {};
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ?? '',
+ });
+ const defaultCallSettings = this._defaults['listModels'];
+ const callSettings = defaultCallSettings.merge(options);
+ this.initialize();
+ return this.descriptors.page.listModels.asyncIterate(
+ this.innerApiCalls['listModels'] as GaxCall,
+ request as {},
+ callSettings
+ ) as AsyncIterable<protos.google.cloud.bigquery.v2.IModel>;
+ }
+
+ /**
+ * Terminate the gRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ * @returns {Promise} A promise that resolves when the client is closed.
+ */
+ close(): Promise<void> {
+ if (this.modelServiceStub && !this._terminated) {
+ return this.modelServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/baselines/bigquery-v2/src/v2/model_service_client_config.json.baseline b/baselines/bigquery-v2/src/v2/model_service_client_config.json.baseline
new file mode 100644
index 000000000..76a66c9cb
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/model_service_client_config.json.baseline
@@ -0,0 +1,42 @@
+{
+ "interfaces": {
+ "google.cloud.bigquery.v2.ModelService": {
+ "retry_codes": {
+ "non_idempotent": [],
+ "idempotent": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ]
+ },
+ "retry_params": {
+ "default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ }
+ },
+ "methods": {
+ "GetModel": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "ListModels": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "PatchModel": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "DeleteModel": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ }
+ }
+ }
+ }
+}
diff --git a/baselines/bigquery-v2/src/v2/model_service_proto_list.json.baseline b/baselines/bigquery-v2/src/v2/model_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/model_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+ "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+ "../../protos/google/cloud/bigquery/v2/clustering.proto",
+ "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+ "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+ "../../protos/google/cloud/bigquery/v2/error.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+ "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+ "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+ "../../protos/google/cloud/bigquery/v2/job.proto",
+ "../../protos/google/cloud/bigquery/v2/job_config.proto",
+ "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto",
+ "../../protos/google/cloud/bigquery/v2/job_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/job_stats.proto",
+
"../../protos/google/cloud/bigquery/v2/job_status.proto", + "../../protos/google/cloud/bigquery/v2/json_extension.proto", + "../../protos/google/cloud/bigquery/v2/location_metadata.proto", + "../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2/src/v2/project_service_client.ts.baseline b/baselines/bigquery-v2/src/v2/project_service_client.ts.baseline new file mode 100644 index 000000000..848fabb96 --- /dev/null +++ b/baselines/bigquery-v2/src/v2/project_service_client.ts.baseline @@ -0,0 +1,407 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions} from 'google-gax'; + +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); + +/** + * Client JSON configuration object, loaded from + * `src/v2/project_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './project_service_client_config.json'; +const version = require('../../../package.json').version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Project Service. + * + * It should not be relied on for production use cases at this time. 
+ * @class
+ * @memberof v2
+ */
+export class ProjectServiceClient {
+ private _terminated = false;
+ private _opts: ClientOptions;
+ private _providedCustomServicePath: boolean;
+ private _gaxModule: typeof gax | typeof gax.fallback;
+ private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient;
+ private _protos: {};
+ private _defaults: {[method: string]: gax.CallSettings};
+ private _universeDomain: string;
+ private _servicePath: string;
+ auth: gax.GoogleAuth;
+ descriptors: Descriptors = {
+ page: {},
+ stream: {},
+ longrunning: {},
+ batching: {},
+ };
+ warn: (code: string, message: string, warnType?: string) => void;
+ innerApiCalls: {[name: string]: Function};
+ projectServiceStub?: Promise<{[name: string]: Function}>;
+
+ /**
+ * Construct an instance of ProjectServiceClient.
+ *
+ * @param {object} [options] - The configuration object.
+ * The options accepted by the constructor are described in detail
+ * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance).
+ * The common options are:
+ * @param {object} [options.credentials] - Credentials object.
+ * @param {string} [options.credentials.client_email]
+ * @param {string} [options.credentials.private_key]
+ * @param {string} [options.email] - Account email address. Required when
+ * using a .pem or .p12 keyFilename.
+ * @param {string} [options.keyFilename] - Full path to a .json, .pem, or
+ * .p12 key downloaded from the Google Developers Console. If you provide
+ * a path to a JSON file, the projectId option below is not necessary.
+ * NOTE: .pem and .p12 require you to specify options.email as well.
+ * @param {number} [options.port] - The port on which to connect to
+ * the remote host.
+ * @param {string} [options.projectId] - The project ID from the Google
+ * Developer's Console, e.g. 'grape-spaceship-123'. We will also check
+ * the environment variable GCLOUD_PROJECT for your project ID. If your
+ * app is running in an environment which supports
+ * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials},
+ * your project ID will be detected automatically.
+ * @param {string} [options.apiEndpoint] - The domain name of the
+ * API remote host.
+ * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override.
+ * Follows the structure of {@link gapicConfig}.
+ * @param {boolean} [options.fallback] - Use HTTP/1.1 REST mode.
+ * For more information, please check the
+ * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}.
+ * @param {gax} [gaxInstance] - Loaded instance of `google-gax`. Useful if you
+ * need to avoid loading the default gRPC version and want to use the fallback
+ * HTTP implementation. Load only the fallback version and pass it to the constructor:
+ * ```
+ * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC
+ * const client = new ProjectServiceClient({fallback: true}, gax);
+ * ```
+ */
+ constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) {
+ // Ensure that options include all the required fields.
+ const staticMembers = this.constructor as typeof ProjectServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = this._servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === this._servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process === 'object' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.ProjectService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. 
+ this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.projectServiceStub) { + return this.projectServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.ProjectService. + this.projectServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.ProjectService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.ProjectService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const projectServiceStubMethods = + ['getServiceAccount']; + for (const methodName of projectServiceStubMethods) { + const callPromise = this.projectServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.projectServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get apiEndpoint() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service. + * @returns {string} The DNS address for this service. + */ + get apiEndpoint() { + return this._servicePath; + } + + get universeDomain() { + return this._universeDomain; + } + + /** + * The port for this API service. + * @returns {number} The default port for this service. + */ + static get port() { + return 443; + } + + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + * @returns {string[]} List of default scopes. 
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * RPC to get the service account for a project used for interactions with
+ * Google Cloud KMS
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. ID of the project.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.GetServiceAccountResponse|GetServiceAccountResponse}.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ *   for more details and examples.
+ * @example include:samples/generated/v2/project_service.get_service_account.js
+ * region_tag:bigquery_v2_generated_ProjectService_GetServiceAccount_async
+ */
+  getServiceAccount(
+      request?: protos.google.cloud.bigquery.v2.IGetServiceAccountRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IGetServiceAccountResponse,
+        protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|undefined, {}|undefined
+      ]>;
+  getServiceAccount(
+      request: protos.google.cloud.bigquery.v2.IGetServiceAccountRequest,
+      options: CallOptions,
+      callback: Callback<
+          protos.google.cloud.bigquery.v2.IGetServiceAccountResponse,
+          protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|null|undefined,
+          {}|null|undefined>): void;
+  getServiceAccount(
+      request: protos.google.cloud.bigquery.v2.IGetServiceAccountRequest,
+      callback: Callback<
+          protos.google.cloud.bigquery.v2.IGetServiceAccountResponse,
+          protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|null|undefined,
+          {}|null|undefined>): void;
+  getServiceAccount(
+      request?: protos.google.cloud.bigquery.v2.IGetServiceAccountRequest,
+      optionsOrCallback?: CallOptions|Callback<
+          protos.google.cloud.bigquery.v2.IGetServiceAccountResponse,
+          protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|null|undefined,
+          {}|null|undefined>,
+      callback?: Callback<
+          protos.google.cloud.bigquery.v2.IGetServiceAccountResponse,
+          protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|null|undefined,
+          {}|null|undefined>):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IGetServiceAccountResponse,
+        protos.google.cloud.bigquery.v2.IGetServiceAccountRequest|undefined, {}|undefined
+      ]>|void {
+    request = request || {};
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+    });
+    this.initialize();
+    return this.innerApiCalls.getServiceAccount(request, options, callback);
+  }
+
+
+  /**
+   * Terminate the gRPC channel and close the client.
+   *
+   * The client will no longer be usable and all future behavior is undefined.
+   * @returns {Promise} A promise that resolves when the client is closed.
+   */
+  close(): Promise<void> {
+    if (this.projectServiceStub && !this._terminated) {
+      return this.projectServiceStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/baselines/bigquery-v2/src/v2/project_service_client_config.json.baseline b/baselines/bigquery-v2/src/v2/project_service_client_config.json.baseline
new file mode 100644
index 000000000..043d875e7
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/project_service_client_config.json.baseline
@@ -0,0 +1,30 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.v2.ProjectService": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "GetServiceAccount": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/baselines/bigquery-v2/src/v2/project_service_proto_list.json.baseline b/baselines/bigquery-v2/src/v2/project_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/project_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+  "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+  "../../protos/google/cloud/bigquery/v2/clustering.proto",
+  "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+  "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+  "../../protos/google/cloud/bigquery/v2/error.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+  "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+  "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+  "../../protos/google/cloud/bigquery/v2/job.proto",
+  "../../protos/google/cloud/bigquery/v2/job_config.proto",
+  "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto",
+  "../../protos/google/cloud/bigquery/v2/job_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/job_stats.proto",
+  "../../protos/google/cloud/bigquery/v2/job_status.proto",
+  "../../protos/google/cloud/bigquery/v2/json_extension.proto",
+  "../../protos/google/cloud/bigquery/v2/location_metadata.proto",
+  "../../protos/google/cloud/bigquery/v2/map_target_type.proto",
+  "../../protos/google/cloud/bigquery/v2/model.proto",
+  "../../protos/google/cloud/bigquery/v2/model_reference.proto",
"../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2/src/v2/routine_service_client.ts.baseline b/baselines/bigquery-v2/src/v2/routine_service_client.ts.baseline new file mode 100644 index 000000000..0dcc36159 --- /dev/null +++ b/baselines/bigquery-v2/src/v2/routine_service_client.ts.baseline @@ -0,0 +1,931 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); + +/** + * Client JSON configuration object, loaded from + * `src/v2/routine_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './routine_service_client_config.json'; +const version = require('../../../package.json').version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Routine Service. + * + * It should not be relied on for production use cases at this time. 
+ * @class + * @memberof v2 + */ +export class RoutineServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + routineServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of RoutineServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean} [options.fallback] - Use HTTP/1.1 REST mode. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new RoutineServiceClient({fallback: true}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. 
+ const staticMembers = this.constructor as typeof RoutineServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = this._servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === this._servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process === 'object' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listRoutines: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'routines') + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.RoutineService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.routineServiceStub) { + return this.routineServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.RoutineService. + this.routineServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.RoutineService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.RoutineService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const routineServiceStubMethods = + ['getRoutine', 'insertRoutine', 'updateRoutine', 'patchRoutine', 'deleteRoutine', 'listRoutines']; + for (const methodName of routineServiceStubMethods) { + const callPromise = this.routineServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.routineServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. 
+   */
+  static get apiEndpoint() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  get apiEndpoint() {
+    return this._servicePath;
+  }
+
+  get universeDomain() {
+    return this._universeDomain;
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * Gets the specified routine resource by routine ID.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the requested routine
+ * @param {string} request.datasetId
+ *   Required. Dataset ID of the requested routine
+ * @param {string} request.routineId
+ *   Required. Routine ID of the requested routine
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine}.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ *   for more details and examples.
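+ *
+ *   As a quick illustrative sketch (the IDs below are placeholders, not
+ *   values defined by this library):
+ *   ```
+ *   const [routine] = await client.getRoutine({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     routineId: 'my_routine',
+ *   });
+ *   console.log(routine.routineType);
+ *   ```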
+ * @example include:samples/generated/v2/routine_service.get_routine.js + * region_tag:bigquery_v2_generated_RoutineService_GetRoutine_async + */ + getRoutine( + request?: protos.google.cloud.bigquery.v2.IGetRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|undefined, {}|undefined + ]>; + getRoutine( + request: protos.google.cloud.bigquery.v2.IGetRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|null|undefined, + {}|null|undefined>): void; + getRoutine( + request: protos.google.cloud.bigquery.v2.IGetRoutineRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|null|undefined, + {}|null|undefined>): void; + getRoutine( + request?: protos.google.cloud.bigquery.v2.IGetRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IGetRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'routine_id': request.routineId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getRoutine(request, options, callback); + } +/** + * Creates a new routine in the dataset. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the new routine + * @param {string} request.datasetId + * Required. Dataset ID of the new routine + * @param {google.cloud.bigquery.v2.Routine} request.routine + * Required. A routine resource to insert + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
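+ *
+ *   A minimal request might look like the following sketch; the routine
+ *   shown (a SQL scalar function) and all IDs are illustrative assumptions:
+ *   ```
+ *   const [created] = await client.insertRoutine({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     routine: {
+ *       routineReference: {
+ *         projectId: 'my-project',
+ *         datasetId: 'my_dataset',
+ *         routineId: 'add_one',
+ *       },
+ *       routineType: 'SCALAR_FUNCTION',
+ *       language: 'SQL',
+ *       arguments: [{name: 'x', dataType: {typeKind: 'INT64'}}],
+ *       definitionBody: 'x + 1',
+ *     },
+ *   });
+ *   ```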
+ * @example include:samples/generated/v2/routine_service.insert_routine.js + * region_tag:bigquery_v2_generated_RoutineService_InsertRoutine_async + */ + insertRoutine( + request?: protos.google.cloud.bigquery.v2.IInsertRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|undefined, {}|undefined + ]>; + insertRoutine( + request: protos.google.cloud.bigquery.v2.IInsertRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|null|undefined, + {}|null|undefined>): void; + insertRoutine( + request: protos.google.cloud.bigquery.v2.IInsertRoutineRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|null|undefined, + {}|null|undefined>): void; + insertRoutine( + request?: protos.google.cloud.bigquery.v2.IInsertRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IInsertRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.insertRoutine(request, options, callback); + } +/** + * Updates information in an existing routine. The update method replaces the + * entire Routine resource. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routine to update + * @param {string} request.datasetId + * Required. Dataset ID of the routine to update + * @param {string} request.routineId + * Required. Routine ID of the routine to update + * @param {google.cloud.bigquery.v2.Routine} request.routine + * Required. A routine resource which will replace the specified routine + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
+ * @example include:samples/generated/v2/routine_service.update_routine.js + * region_tag:bigquery_v2_generated_RoutineService_UpdateRoutine_async + */ + updateRoutine( + request?: protos.google.cloud.bigquery.v2.IUpdateRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|undefined, {}|undefined + ]>; + updateRoutine( + request: protos.google.cloud.bigquery.v2.IUpdateRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|null|undefined, + {}|null|undefined>): void; + updateRoutine( + request: protos.google.cloud.bigquery.v2.IUpdateRoutineRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|null|undefined, + {}|null|undefined>): void; + updateRoutine( + request?: protos.google.cloud.bigquery.v2.IUpdateRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IUpdateRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'routine_id': request.routineId ?? '', + }); + this.initialize(); + return this.innerApiCalls.updateRoutine(request, options, callback); + } +/** + * Patches information in an existing routine. The patch method does a partial + * update to an existing Routine resource. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routine to update + * @param {string} request.datasetId + * Required. Dataset ID of the routine to update + * @param {string} request.routineId + * Required. Routine ID of the routine to update + * @param {google.cloud.bigquery.v2.Routine} request.routine + * Required. A routine resource which will be used to partially + * update the specified routine + * @param {google.protobuf.FieldMask} request.fieldMask + * Only the Routine fields in the field mask are updated + * by the given routine. Repeated routine fields will be fully replaced + * if contained in the field mask. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine}. 
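+ *
+ *   For instance, to replace only the routine body while leaving other
+ *   fields untouched (a sketch; the IDs and mask path are illustrative):
+ *   ```
+ *   const [patched] = await client.patchRoutine({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *     routineId: 'add_one',
+ *     routine: {definitionBody: 'x + 2'},
+ *     fieldMask: {paths: ['definition_body']},
+ *   });
+ *   ```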
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. + * @example include:samples/generated/v2/routine_service.patch_routine.js + * region_tag:bigquery_v2_generated_RoutineService_PatchRoutine_async + */ + patchRoutine( + request?: protos.google.cloud.bigquery.v2.IPatchRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|undefined, {}|undefined + ]>; + patchRoutine( + request: protos.google.cloud.bigquery.v2.IPatchRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|null|undefined, + {}|null|undefined>): void; + patchRoutine( + request: protos.google.cloud.bigquery.v2.IPatchRoutineRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|null|undefined, + {}|null|undefined>): void; + patchRoutine( + request?: protos.google.cloud.bigquery.v2.IPatchRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine, + protos.google.cloud.bigquery.v2.IPatchRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + this.initialize(); + return this.innerApiCalls.patchRoutine(request, options, callback); + } +/** + * Deletes the routine specified by routineId from the dataset. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routine to delete + * @param {string} request.datasetId + * Required. Dataset ID of the routine to delete + * @param {string} request.routineId + * Required. Routine ID of the routine to delete + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
+ * @example include:samples/generated/v2/routine_service.delete_routine.js + * region_tag:bigquery_v2_generated_RoutineService_DeleteRoutine_async + */ + deleteRoutine( + request?: protos.google.cloud.bigquery.v2.IDeleteRoutineRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|undefined, {}|undefined + ]>; + deleteRoutine( + request: protos.google.cloud.bigquery.v2.IDeleteRoutineRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|null|undefined, + {}|null|undefined>): void; + deleteRoutine( + request: protos.google.cloud.bigquery.v2.IDeleteRoutineRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|null|undefined, + {}|null|undefined>): void; + deleteRoutine( + request?: protos.google.cloud.bigquery.v2.IDeleteRoutineRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteRoutineRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'routine_id': request.routineId ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteRoutine(request, options, callback); + } + + /** + * Lists all routines in the specified dataset. Requires the READER dataset + * role. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routines to list + * @param {string} request.datasetId + * Required. Dataset ID of the routines to list + * @param {google.protobuf.UInt32Value| number } request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results + * @param {string} request.filter + * If set, then only the Routines matching this filter are returned. + * The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + * is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.Routine|Routine}. 
+ * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listRoutinesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + */ + listRoutines( + request?: protos.google.cloud.bigquery.v2.IListRoutinesRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine[], + protos.google.cloud.bigquery.v2.IListRoutinesRequest|null, + protos.google.cloud.bigquery.v2.IListRoutinesResponse + ]>; + listRoutines( + request: protos.google.cloud.bigquery.v2.IListRoutinesRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListRoutinesRequest, + protos.google.cloud.bigquery.v2.IListRoutinesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRoutine>): void; + listRoutines( + request: protos.google.cloud.bigquery.v2.IListRoutinesRequest, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListRoutinesRequest, + protos.google.cloud.bigquery.v2.IListRoutinesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRoutine>): void; + listRoutines( + request?: protos.google.cloud.bigquery.v2.IListRoutinesRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.bigquery.v2.IListRoutinesRequest, + protos.google.cloud.bigquery.v2.IListRoutinesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRoutine>, + callback?: PaginationCallback< + protos.google.cloud.bigquery.v2.IListRoutinesRequest, + protos.google.cloud.bigquery.v2.IListRoutinesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRoutine>): + Promise<[ + protos.google.cloud.bigquery.v2.IRoutine[], + protos.google.cloud.bigquery.v2.IListRoutinesRequest|null, + protos.google.cloud.bigquery.v2.IListRoutinesResponse + ]>|void { + request = request || {}; + // Converts number to Unit32 or Int32 value for non-compliant APIs. + if(request.maxResults && typeof request.maxResults === "number"){ + const maxResultsObject = {"value": request.maxResults} + request.maxResults = maxResultsObject + } + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.listRoutines(request, options, callback); + } + +/** + * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routines to list + * @param {string} request.datasetId + * Required. Dataset ID of the routines to list + * @param {google.protobuf.UInt32Value} request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. 
+ * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results + * @param {string} request.filter + * If set, then only the Routines matching this filter are returned. + * The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + * is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.Routine|Routine} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. + * We recommend using `listRoutinesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + */ + listRoutinesStream( + request?: protos.google.cloud.bigquery.v2.IListRoutinesRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + const defaultCallSettings = this._defaults['listRoutines']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listRoutines.createStream( + this.innerApiCalls.listRoutines as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listRoutines`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the routines to list + * @param {string} request.datasetId + * Required. Dataset ID of the routines to list + * @param {google.protobuf.UInt32Value} request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results + * @param {string} request.filter + * If set, then only the Routines matching this filter are returned. + * The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + * is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Object} + * An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }. + * When you iterate the returned iterable, each element will be an object representing + * {@link protos.google.cloud.bigquery.v2.Routine|Routine}. The API will be called under the hood as needed, once per the page, + * so you can stop the iteration when you don't need more results. 
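+ *
+ *   For example (a sketch; the IDs are placeholders):
+ *   ```
+ *   const iterable = client.listRoutinesAsync({
+ *     projectId: 'my-project',
+ *     datasetId: 'my_dataset',
+ *   });
+ *   for await (const routine of iterable) {
+ *     console.log(routine.routineReference?.routineId);
+ *   }
+ *   ```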
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ * @example include:samples/generated/v2/routine_service.list_routines.js
+ * region_tag:bigquery_v2_generated_RoutineService_ListRoutines_async
+ */
+  listRoutinesAsync(
+      request?: protos.google.cloud.bigquery.v2.IListRoutinesRequest,
+      options?: CallOptions):
+    AsyncIterable<protos.google.cloud.bigquery.v2.IRoutine>{
+    request = request || {};
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    const defaultCallSettings = this._defaults['listRoutines'];
+    const callSettings = defaultCallSettings.merge(options);
+    this.initialize();
+    return this.descriptors.page.listRoutines.asyncIterate(
+      this.innerApiCalls['listRoutines'] as GaxCall,
+      request as {},
+      callSettings
+    ) as AsyncIterable<protos.google.cloud.bigquery.v2.IRoutine>;
+  }
+
+  /**
+   * Terminate the gRPC channel and close the client.
+   *
+   * The client will no longer be usable and all future behavior is undefined.
+   * @returns {Promise} A promise that resolves when the client is closed.
+   */
+  close(): Promise<void> {
+    if (this.routineServiceStub && !this._terminated) {
+      return this.routineServiceStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/baselines/bigquery-v2/src/v2/routine_service_client_config.json.baseline b/baselines/bigquery-v2/src/v2/routine_service_client_config.json.baseline
new file mode 100644
index 000000000..7d0cea2ba
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/routine_service_client_config.json.baseline
@@ -0,0 +1,50 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.v2.RoutineService": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "GetRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "InsertRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "UpdateRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "PatchRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "DeleteRoutine": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        },
+        "ListRoutines": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/baselines/bigquery-v2/src/v2/routine_service_proto_list.json.baseline b/baselines/bigquery-v2/src/v2/routine_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/routine_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+  "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+  "../../protos/google/cloud/bigquery/v2/clustering.proto",
+  "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset.proto",
"../../protos/google/cloud/bigquery/v2/dataset_reference.proto", + "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto", + "../../protos/google/cloud/bigquery/v2/encryption_config.proto", + "../../protos/google/cloud/bigquery/v2/error.proto", + "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto", + "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto", + "../../protos/google/cloud/bigquery/v2/external_data_config.proto", + "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto", + "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto", + "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/job.proto", + "../../protos/google/cloud/bigquery/v2/job_config.proto", + "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto", + "../../protos/google/cloud/bigquery/v2/job_reference.proto", + "../../protos/google/cloud/bigquery/v2/job_stats.proto", + "../../protos/google/cloud/bigquery/v2/job_status.proto", + "../../protos/google/cloud/bigquery/v2/json_extension.proto", + "../../protos/google/cloud/bigquery/v2/location_metadata.proto", + "../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2/src/v2/row_access_policy_service_client.ts.baseline b/baselines/bigquery-v2/src/v2/row_access_policy_service_client.ts.baseline new file mode 100644 index 000000000..24c1caadd --- /dev/null +++ b/baselines/bigquery-v2/src/v2/row_access_policy_service_client.ts.baseline @@ -0,0 +1,536 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); + +/** + * Client JSON configuration object, loaded from + * `src/v2/row_access_policy_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './row_access_policy_service_client_config.json'; +const version = require('../../../package.json').version; + +/** + * Service for interacting with row access policies. + * @class + * @memberof v2 + */ +export class RowAccessPolicyServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + rowAccessPolicyServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of RowAccessPolicyServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean} [options.fallback] - Use HTTP/1.1 REST mode. 
+ * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new RowAccessPolicyServiceClient({fallback: true}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. + const staticMembers = this.constructor as typeof RowAccessPolicyServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = this._servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === this._servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. 
+ const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process === 'object' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listRowAccessPolicies: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'rowAccessPolicies') + }; + + // Put together the default options sent with requests. + this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.RowAccessPolicyService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.rowAccessPolicyServiceStub) { + return this.rowAccessPolicyServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.RowAccessPolicyService. + this.rowAccessPolicyServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.RowAccessPolicyService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.RowAccessPolicyService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. 
const rowAccessPolicyServiceStubMethods =
+        ['listRowAccessPolicies'];
+    for (const methodName of rowAccessPolicyServiceStubMethods) {
+      const callPromise = this.rowAccessPolicyServiceStub.then(
+        stub => (...args: Array<{}>) => {
+          if (this._terminated) {
+            return Promise.reject('The client has already been closed.');
+          }
+          const func = stub[methodName];
+          return func.apply(stub, args);
+        },
+        (err: Error|null|undefined) => () => {
+          throw err;
+        });
+
+      const descriptor =
+        this.descriptors.page[methodName] ||
+        undefined;
+      const apiCall = this._gaxModule.createApiCall(
+        callPromise,
+        this._defaults[methodName],
+        descriptor,
+        this._opts.fallback
+      );
+
+      this.innerApiCalls[methodName] = apiCall;
+    }
+
+    return this.rowAccessPolicyServiceStub;
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @deprecated Use the apiEndpoint method of the client instance.
+   * @returns {string} The DNS address for this service.
+   */
+  static get servicePath() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service - same as servicePath.
+   * @deprecated Use the apiEndpoint method of the client instance.
+   * @returns {string} The DNS address for this service.
+   */
+  static get apiEndpoint() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  get apiEndpoint() {
+    return this._servicePath;
+  }
+
+  get universeDomain() {
+    return this._universeDomain;
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+
+  /**
+   * Lists all row access policies on the specified table.
+   *
+   * @param {Object} request
+   *   The request object that will be sent.
+   * @param {string} request.projectId
+   *   Required. Project ID of the row access policies to list.
+   * @param {string} request.datasetId
+   *   Required. Dataset ID of row access policies to list.
+   * @param {string} request.tableId
+   *   Required. Table ID of the table to list row access policies.
+   * @param {string} request.pageToken
+   *   Page token, returned by a previous call, to request the next page of
+   *   results.
+   * @param {number} request.pageSize
+   *   The maximum number of results to return in a single response page.
Leverage + * the page tokens to iterate through the entire collection. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.RowAccessPolicy|RowAccessPolicy}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listRowAccessPoliciesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + */ + listRowAccessPolicies( + request?: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.IRowAccessPolicy[], + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest|null, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse + ]>; + listRowAccessPolicies( + request: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + options: CallOptions, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRowAccessPolicy>): void; + listRowAccessPolicies( + request: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + callback: PaginationCallback< + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRowAccessPolicy>): void; + listRowAccessPolicies( + request?: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + optionsOrCallback?: CallOptions|PaginationCallback< + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRowAccessPolicy>, + callback?: PaginationCallback< + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse|null|undefined, + protos.google.cloud.bigquery.v2.IRowAccessPolicy>): + Promise<[ + protos.google.cloud.bigquery.v2.IRowAccessPolicy[], + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest|null, + protos.google.cloud.bigquery.v2.IListRowAccessPoliciesResponse + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? 
'', + }); + this.initialize(); + return this.innerApiCalls.listRowAccessPolicies(request, options, callback); + } + +/** + * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the row access policies to list. + * @param {string} request.datasetId + * Required. Dataset ID of row access policies to list. + * @param {string} request.tableId + * Required. Table ID of the table to list row access policies. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results. + * @param {number} request.pageSize + * The maximum number of results to return in a single response page. Leverage + * the page tokens to iterate through the entire collection. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Stream} + * An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.RowAccessPolicy|RowAccessPolicy} on 'data' event. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed. Note that it can affect your quota. + * We recommend using `listRowAccessPoliciesAsync()` + * method described below for async iteration which you can stop as needed. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation } + * for more details and examples. + */ + listRowAccessPoliciesStream( + request?: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest, + options?: CallOptions): + Transform{ + request = request || {}; + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? '', + }); + const defaultCallSettings = this._defaults['listRowAccessPolicies']; + const callSettings = defaultCallSettings.merge(options); + this.initialize(); + return this.descriptors.page.listRowAccessPolicies.createStream( + this.innerApiCalls.listRowAccessPolicies as GaxCall, + request, + callSettings + ); + } + +/** + * Equivalent to `listRowAccessPolicies`, but returns an iterable object. + * + * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand. + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the row access policies to list. + * @param {string} request.datasetId + * Required. Dataset ID of row access policies to list. + * @param {string} request.tableId + * Required. Table ID of the table to list row access policies. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results. + * @param {number} request.pageSize + * The maximum number of results to return in a single response page. Leverage + * the page tokens to iterate through the entire collection. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. 
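+ *
+ *   A minimal inline sketch of async iteration, assuming placeholder IDs
+ *   (none of the values below come from this library; run inside an async function):
+ *   ```
+ *   const client = new RowAccessPolicyServiceClient();
+ *   for await (const policy of client.listRowAccessPoliciesAsync({
+ *     projectId: 'my-project',   // placeholder
+ *     datasetId: 'my_dataset',   // placeholder
+ *     tableId: 'my_table',       // placeholder
+ *   })) {
+ *     console.log(policy);       // one RowAccessPolicy per iteration
+ *   }
+ *   ```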
+ * @returns {Object}
+ *   An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }.
+ *   When you iterate the returned iterable, each element will be an object representing
+ *   {@link protos.google.cloud.bigquery.v2.RowAccessPolicy|RowAccessPolicy}. The API will be called under the hood as needed, once per the page,
+ *   so you can stop the iteration when you don't need more results.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ * @example include:samples/generated/v2/row_access_policy_service.list_row_access_policies.js
+ * region_tag:bigquery_v2_generated_RowAccessPolicyService_ListRowAccessPolicies_async
+ */
+  listRowAccessPoliciesAsync(
+      request?: protos.google.cloud.bigquery.v2.IListRowAccessPoliciesRequest,
+      options?: CallOptions):
+    AsyncIterable<protos.google.cloud.bigquery.v2.IRowAccessPolicy>{
+    request = request || {};
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+      'table_id': request.tableId ?? '',
+    });
+    const defaultCallSettings = this._defaults['listRowAccessPolicies'];
+    const callSettings = defaultCallSettings.merge(options);
+    this.initialize();
+    return this.descriptors.page.listRowAccessPolicies.asyncIterate(
+      this.innerApiCalls['listRowAccessPolicies'] as GaxCall,
+      request as {},
+      callSettings
+    ) as AsyncIterable<protos.google.cloud.bigquery.v2.IRowAccessPolicy>;
+  }
+
+  /**
+   * Terminate the gRPC channel and close the client.
+   *
+   * The client will no longer be usable and all future behavior is undefined.
+   * @returns {Promise} A promise that resolves when the client is closed.
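+   *
+   *   A short lifecycle sketch (initialize() is optional and otherwise runs
+   *   lazily; close() should be the final call on the instance):
+   *   ```
+   *   const client = new RowAccessPolicyServiceClient();
+   *   await client.initialize(); // optional eager initialization
+   *   // ...issue RPCs such as listRowAccessPolicies()...
+   *   await client.close();      // the client is unusable afterwards
+   *   ```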
+ */
+  close(): Promise<void> {
+    if (this.rowAccessPolicyServiceStub && !this._terminated) {
+      return this.rowAccessPolicyServiceStub.then(stub => {
+        this._terminated = true;
+        stub.close();
+      });
+    }
+    return Promise.resolve();
+  }
+}
diff --git a/baselines/bigquery-v2/src/v2/row_access_policy_service_client_config.json.baseline b/baselines/bigquery-v2/src/v2/row_access_policy_service_client_config.json.baseline
new file mode 100644
index 000000000..6f88ac20f
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/row_access_policy_service_client_config.json.baseline
@@ -0,0 +1,30 @@
+{
+  "interfaces": {
+    "google.cloud.bigquery.v2.RowAccessPolicyService": {
+      "retry_codes": {
+        "non_idempotent": [],
+        "idempotent": [
+          "DEADLINE_EXCEEDED",
+          "UNAVAILABLE"
+        ]
+      },
+      "retry_params": {
+        "default": {
+          "initial_retry_delay_millis": 100,
+          "retry_delay_multiplier": 1.3,
+          "max_retry_delay_millis": 60000,
+          "initial_rpc_timeout_millis": 60000,
+          "rpc_timeout_multiplier": 1,
+          "max_rpc_timeout_millis": 60000,
+          "total_timeout_millis": 600000
+        }
+      },
+      "methods": {
+        "ListRowAccessPolicies": {
+          "retry_codes_name": "non_idempotent",
+          "retry_params_name": "default"
+        }
+      }
+    }
+  }
+}
diff --git a/baselines/bigquery-v2/src/v2/row_access_policy_service_proto_list.json.baseline b/baselines/bigquery-v2/src/v2/row_access_policy_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/row_access_policy_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+  "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+  "../../protos/google/cloud/bigquery/v2/clustering.proto",
+  "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset.proto",
+  "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+  "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+  "../../protos/google/cloud/bigquery/v2/error.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+  "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+  "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+  "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+  "../../protos/google/cloud/bigquery/v2/job.proto",
+  "../../protos/google/cloud/bigquery/v2/job_config.proto",
+  "../../protos/google/cloud/bigquery/v2/job_creation_reason.proto",
+  "../../protos/google/cloud/bigquery/v2/job_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/job_stats.proto",
+  "../../protos/google/cloud/bigquery/v2/job_status.proto",
+  "../../protos/google/cloud/bigquery/v2/json_extension.proto",
+  "../../protos/google/cloud/bigquery/v2/location_metadata.proto",
+  "../../protos/google/cloud/bigquery/v2/map_target_type.proto",
+  "../../protos/google/cloud/bigquery/v2/model.proto",
+  "../../protos/google/cloud/bigquery/v2/model_reference.proto",
+  "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto",
+  "../../protos/google/cloud/bigquery/v2/privacy_policy.proto",
+  "../../protos/google/cloud/bigquery/v2/project.proto",
+  "../../protos/google/cloud/bigquery/v2/query_parameter.proto",
+  "../../protos/google/cloud/bigquery/v2/range_partitioning.proto",
+
"../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2/src/v2/table_service_client.ts.baseline b/baselines/bigquery-v2/src/v2/table_service_client.ts.baseline new file mode 100644 index 000000000..39ae2fd46 --- /dev/null +++ b/baselines/bigquery-v2/src/v2/table_service_client.ts.baseline @@ -0,0 +1,942 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +/* global window */ +import type * as gax from 'google-gax'; +import type {Callback, CallOptions, Descriptors, ClientOptions, PaginationCallback, GaxCall} from 'google-gax'; +import {Transform} from 'stream'; +import * as protos from '../../protos/protos'; +import jsonProtos = require('../../protos/protos.json'); + +/** + * Client JSON configuration object, loaded from + * `src/v2/table_service_client_config.json`. + * This file defines retry strategy and timeouts for all API methods in this library. + */ +import * as gapicConfig from './table_service_client_config.json'; +const version = require('../../../package.json').version; + +/** + * This is an experimental RPC service definition for the BigQuery + * Table Service. + * + * It should not be relied on for production use cases at this time. 
+ * @class + * @memberof v2 + */ +export class TableServiceClient { + private _terminated = false; + private _opts: ClientOptions; + private _providedCustomServicePath: boolean; + private _gaxModule: typeof gax | typeof gax.fallback; + private _gaxGrpc: gax.GrpcClient | gax.fallback.GrpcClient; + private _protos: {}; + private _defaults: {[method: string]: gax.CallSettings}; + private _universeDomain: string; + private _servicePath: string; + auth: gax.GoogleAuth; + descriptors: Descriptors = { + page: {}, + stream: {}, + longrunning: {}, + batching: {}, + }; + warn: (code: string, message: string, warnType?: string) => void; + innerApiCalls: {[name: string]: Function}; + tableServiceStub?: Promise<{[name: string]: Function}>; + + /** + * Construct an instance of TableServiceClient. + * + * @param {object} [options] - The configuration object. + * The options accepted by the constructor are described in detail + * in [this document](https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#creating-the-client-instance). + * The common options are: + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * using a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option below is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {string} [options.apiEndpoint] - The domain name of the + * API remote host. + * @param {gax.ClientConfig} [options.clientConfig] - Client configuration override. + * Follows the structure of {@link gapicConfig}. + * @param {boolean} [options.fallback] - Use HTTP/1.1 REST mode. + * For more information, please check the + * {@link https://github.com/googleapis/gax-nodejs/blob/main/client-libraries.md#http11-rest-api-mode documentation}. + * @param {gax} [gaxInstance]: loaded instance of `google-gax`. Useful if you + * need to avoid loading the default gRPC version and want to use the fallback + * HTTP implementation. Load only fallback version and pass it to the constructor: + * ``` + * const gax = require('google-gax/build/src/fallback'); // avoids loading google-gax with gRPC + * const client = new TableServiceClient({fallback: true}, gax); + * ``` + */ + constructor(opts?: ClientOptions, gaxInstance?: typeof gax | typeof gax.fallback) { + // Ensure that options include all the required fields. 
+ const staticMembers = this.constructor as typeof TableServiceClient; + if (opts?.universe_domain && opts?.universeDomain && opts?.universe_domain !== opts?.universeDomain) { + throw new Error('Please set either universe_domain or universeDomain, but not both.'); + } + const universeDomainEnvVar = (typeof process === 'object' && typeof process.env === 'object') ? process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] : undefined; + this._universeDomain = opts?.universeDomain ?? opts?.universe_domain ?? universeDomainEnvVar ?? 'googleapis.com'; + this._servicePath = 'bigquery.' + this._universeDomain; + const servicePath = opts?.servicePath || opts?.apiEndpoint || this._servicePath; + this._providedCustomServicePath = !!(opts?.servicePath || opts?.apiEndpoint); + const port = opts?.port || staticMembers.port; + const clientConfig = opts?.clientConfig ?? {}; + const fallback = opts?.fallback ?? (typeof window !== 'undefined' && typeof window?.fetch === 'function'); + opts = Object.assign({servicePath, port, clientConfig, fallback}, opts); + + // If scopes are unset in options and we're connecting to a non-default endpoint, set scopes just in case. + if (servicePath !== this._servicePath && !('scopes' in opts)) { + opts['scopes'] = staticMembers.scopes; + } + + // Load google-gax module synchronously if needed + if (!gaxInstance) { + gaxInstance = require('google-gax') as typeof gax; + } + + // Choose either gRPC or proto-over-HTTP implementation of google-gax. + this._gaxModule = opts.fallback ? gaxInstance.fallback : gaxInstance; + + // Create a `gaxGrpc` object, with any grpc-specific options sent to the client. + this._gaxGrpc = new this._gaxModule.GrpcClient(opts); + + // Save options to use in initialize() method. + this._opts = opts; + + // Save the auth object to the client, for use by other methods. + this.auth = (this._gaxGrpc.auth as gax.GoogleAuth); + + // Set useJWTAccessWithScope on the auth object. + this.auth.useJWTAccessWithScope = true; + + // Set defaultServicePath on the auth object. + this.auth.defaultServicePath = this._servicePath; + + // Set the default scopes in auth client if needed. + if (servicePath === this._servicePath) { + this.auth.defaultScopes = staticMembers.scopes; + } + + // Determine the client header string. + const clientHeader = [ + `gax/${this._gaxModule.version}`, + `gapic/${version}`, + ]; + if (typeof process === 'object' && 'versions' in process) { + clientHeader.push(`gl-node/${process.versions.node}`); + } else { + clientHeader.push(`gl-web/${this._gaxModule.version}`); + } + if (!opts.fallback) { + clientHeader.push(`grpc/${this._gaxGrpc.grpcVersion}`); + } else { + clientHeader.push(`rest/${this._gaxGrpc.grpcVersion}`); + } + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + // Load the applicable protos. + this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos); + + // Some of the methods on this service return "paged" results, + // (e.g. 50 results at a time, with tokens to get subsequent + // pages). Denote the keys used for pagination and results. + this.descriptors.page = { + listTables: + new this._gaxModule.PageDescriptor('pageToken', 'nextPageToken', 'tables') + }; + + // Put together the default options sent with requests. 
+ this._defaults = this._gaxGrpc.constructSettings( + 'google.cloud.bigquery.v2.TableService', gapicConfig as gax.ClientConfig, + opts.clientConfig || {}, {'x-goog-api-client': clientHeader.join(' ')}); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this.innerApiCalls = {}; + + // Add a warn function to the client constructor so it can be easily tested. + this.warn = this._gaxModule.warn; + } + + /** + * Initialize the client. + * Performs asynchronous operations (such as authentication) and prepares the client. + * This function will be called automatically when any class method is called for the + * first time, but if you need to initialize it before calling an actual method, + * feel free to call initialize() directly. + * + * You can await on this method if you want to make sure the client is initialized. + * + * @returns {Promise} A promise that resolves to an authenticated service stub. + */ + initialize() { + // If the client stub promise is already initialized, return immediately. + if (this.tableServiceStub) { + return this.tableServiceStub; + } + + // Put together the "service stub" for + // google.cloud.bigquery.v2.TableService. + this.tableServiceStub = this._gaxGrpc.createStub( + this._opts.fallback ? + (this._protos as protobuf.Root).lookupService('google.cloud.bigquery.v2.TableService') : + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (this._protos as any).google.cloud.bigquery.v2.TableService, + this._opts, this._providedCustomServicePath) as Promise<{[method: string]: Function}>; + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + const tableServiceStubMethods = + ['getTable', 'insertTable', 'patchTable', 'updateTable', 'deleteTable', 'listTables']; + for (const methodName of tableServiceStubMethods) { + const callPromise = this.tableServiceStub.then( + stub => (...args: Array<{}>) => { + if (this._terminated) { + return Promise.reject('The client has already been closed.'); + } + const func = stub[methodName]; + return func.apply(stub, args); + }, + (err: Error|null|undefined) => () => { + throw err; + }); + + const descriptor = + this.descriptors.page[methodName] || + undefined; + const apiCall = this._gaxModule.createApiCall( + callPromise, + this._defaults[methodName], + descriptor, + this._opts.fallback + ); + + this.innerApiCalls[methodName] = apiCall; + } + + return this.tableServiceStub; + } + + /** + * The DNS address for this API service. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. + */ + static get servicePath() { + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + process.emitWarning('Static servicePath is deprecated, please use the instance method instead.', 'DeprecationWarning'); + } + return 'bigquery.googleapis.com'; + } + + /** + * The DNS address for this API service - same as servicePath. + * @deprecated Use the apiEndpoint method of the client instance. + * @returns {string} The DNS address for this service. 
+ */
+  static get apiEndpoint() {
+    if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+      process.emitWarning('Static apiEndpoint is deprecated, please use the instance method instead.', 'DeprecationWarning');
+    }
+    return 'bigquery.googleapis.com';
+  }
+
+  /**
+   * The DNS address for this API service.
+   * @returns {string} The DNS address for this service.
+   */
+  get apiEndpoint() {
+    return this._servicePath;
+  }
+
+  get universeDomain() {
+    return this._universeDomain;
+  }
+
+  /**
+   * The port for this API service.
+   * @returns {number} The default port for this service.
+   */
+  static get port() {
+    return 443;
+  }
+
+  /**
+   * The scopes needed to make gRPC calls for every method defined
+   * in this service.
+   * @returns {string[]} List of default scopes.
+   */
+  static get scopes() {
+    return [
+      'https://www.googleapis.com/auth/bigquery',
+      'https://www.googleapis.com/auth/cloud-platform',
+      'https://www.googleapis.com/auth/cloud-platform.read-only'
+    ];
+  }
+
+  getProjectId(): Promise<string>;
+  getProjectId(callback: Callback<string, undefined, undefined>): void;
+  /**
+   * Return the project ID used by this class.
+   * @returns {Promise} A promise that resolves to string containing the project ID.
+   */
+  getProjectId(callback?: Callback<string, undefined, undefined>):
+      Promise<string>|void {
+    if (callback) {
+      this.auth.getProjectId(callback);
+      return;
+    }
+    return this.auth.getProjectId();
+  }
+
+  // -------------------
+  // -- Service calls --
+  // -------------------
+/**
+ * Gets the specified table resource by table ID.
+ * This method does not return the data in the table, it only returns the
+ * table resource, which describes the structure of this table.
+ *
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the requested table
+ * @param {string} request.datasetId
+ *   Required. Dataset ID of the requested table
+ * @param {string} request.tableId
+ *   Required. Table ID of the requested table
+ * @param {string} request.selectedFields
+ *   List of table schema fields to return (comma-separated).
+ *   If unspecified, all fields are returned.
+ *   A fieldMask cannot be used here because the fields will automatically be
+ *   converted from camelCase to snake_case and the conversion will fail if
+ *   there are underscores. Since these are fields in BigQuery table schemas,
+ *   underscores are allowed.
+ * @param {google.cloud.bigquery.v2.GetTableRequest.TableMetadataView} [request.view]
+ *   Optional. Specifies the view that determines which table information is
+ *   returned. By default, basic table information and storage statistics
+ *   (STORAGE_STATS) are returned.
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Promise} - The promise which resolves to an array.
+ *   The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Table|Table}.
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation }
+ *   for more details and examples.
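+ *
+ *   A minimal inline sketch with placeholder IDs (the generated sample linked
+ *   below is the canonical example):
+ *   ```
+ *   const [table] = await client.getTable({
+ *     projectId: 'my-project',       // placeholder
+ *     datasetId: 'my_dataset',       // placeholder
+ *     tableId: 'my_table',           // placeholder
+ *     selectedFields: 'schema,etag', // comma-separated, per the parameter doc above
+ *   });
+ *   ```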
+ * @example include:samples/generated/v2/table_service.get_table.js + * region_tag:bigquery_v2_generated_TableService_GetTable_async + */ + getTable( + request?: protos.google.cloud.bigquery.v2.IGetTableRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|undefined, {}|undefined + ]>; + getTable( + request: protos.google.cloud.bigquery.v2.IGetTableRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|null|undefined, + {}|null|undefined>): void; + getTable( + request: protos.google.cloud.bigquery.v2.IGetTableRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|null|undefined, + {}|null|undefined>): void; + getTable( + request?: protos.google.cloud.bigquery.v2.IGetTableRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IGetTableRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? '', + }); + this.initialize(); + return this.innerApiCalls.getTable(request, options, callback); + } +/** + * Creates a new, empty table in the dataset. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the new table + * @param {string} request.datasetId + * Required. Dataset ID of the new table + * @param {google.cloud.bigquery.v2.Table} request.table + * Required. A tables resource to insert + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Table|Table}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
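+ *
+ *   A minimal inline sketch with placeholder IDs; `tableReference` is assumed
+ *   here to be the Table field that names the new table:
+ *   ```
+ *   const [created] = await client.insertTable({
+ *     projectId: 'my-project', // placeholder
+ *     datasetId: 'my_dataset', // placeholder
+ *     table: {
+ *       tableReference: {projectId: 'my-project', datasetId: 'my_dataset', tableId: 'new_table'},
+ *     },
+ *   });
+ *   ```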
+ * @example include:samples/generated/v2/table_service.insert_table.js + * region_tag:bigquery_v2_generated_TableService_InsertTable_async + */ + insertTable( + request?: protos.google.cloud.bigquery.v2.IInsertTableRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|undefined, {}|undefined + ]>; + insertTable( + request: protos.google.cloud.bigquery.v2.IInsertTableRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|null|undefined, + {}|null|undefined>): void; + insertTable( + request: protos.google.cloud.bigquery.v2.IInsertTableRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|null|undefined, + {}|null|undefined>): void; + insertTable( + request?: protos.google.cloud.bigquery.v2.IInsertTableRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IInsertTableRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + }); + this.initialize(); + return this.innerApiCalls.insertTable(request, options, callback); + } +/** + * Updates information in an existing table. The update method replaces the + * entire table resource, whereas the patch method only replaces fields that + * are provided in the submitted table resource. + * This method supports RFC5789 patch semantics. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the table to update + * @param {string} request.datasetId + * Required. Dataset ID of the table to update + * @param {string} request.tableId + * Required. Table ID of the table to update + * @param {google.cloud.bigquery.v2.Table} request.table + * Required. A tables resource which will replace or patch the specified table + * @param {boolean} [request.autodetectSchema] + * Optional. When true will autodetect schema, else will keep original schema. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Table|Table}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
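+ *
+ *   A minimal inline sketch with placeholder IDs; per the patch semantics
+ *   above, only the fields present in `table` are replaced (the description
+ *   field is an assumed example):
+ *   ```
+ *   const [patched] = await client.patchTable({
+ *     projectId: 'my-project', // placeholder
+ *     datasetId: 'my_dataset', // placeholder
+ *     tableId: 'my_table',     // placeholder
+ *     table: {description: 'updated description'}, // assumed partial Table resource
+ *   });
+ *   ```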
+ * @example include:samples/generated/v2/table_service.patch_table.js + * region_tag:bigquery_v2_generated_TableService_PatchTable_async + */ + patchTable( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|undefined, {}|undefined + ]>; + patchTable( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): void; + patchTable( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): void; + patchTable( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? '', + }); + this.initialize(); + return this.innerApiCalls.patchTable(request, options, callback); + } +/** + * Updates information in an existing table. The update method replaces the + * entire Table resource, whereas the patch method only replaces fields that + * are provided in the submitted Table resource. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the table to update + * @param {string} request.datasetId + * Required. Dataset ID of the table to update + * @param {string} request.tableId + * Required. Table ID of the table to update + * @param {google.cloud.bigquery.v2.Table} request.table + * Required. A tables resource which will replace or patch the specified table + * @param {boolean} [request.autodetectSchema] + * Optional. When true will autodetect schema, else will keep original schema. + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.cloud.bigquery.v2.Table|Table}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
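+ *
+ *   Unlike patchTable, update replaces the stored resource wholesale, so a
+ *   sketch would submit a complete Table, for example one previously read via
+ *   getTable and then modified (IDs are placeholder variables; the description
+ *   field is assumed):
+ *   ```
+ *   const [existing] = await client.getTable({projectId, datasetId, tableId});
+ *   existing.description = 'updated description';
+ *   const [updated] = await client.updateTable({projectId, datasetId, tableId, table: existing});
+ *   ```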
+ * @example include:samples/generated/v2/table_service.update_table.js + * region_tag:bigquery_v2_generated_TableService_UpdateTable_async + */ + updateTable( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + options?: CallOptions): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|undefined, {}|undefined + ]>; + updateTable( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + options: CallOptions, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): void; + updateTable( + request: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + callback: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): void; + updateTable( + request?: protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.cloud.bigquery.v2.ITable, + protos.google.cloud.bigquery.v2.IUpdateOrPatchTableRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? '', + }); + this.initialize(); + return this.innerApiCalls.updateTable(request, options, callback); + } +/** + * Deletes the table specified by tableId from the dataset. + * If the table contains data, all the data will be deleted. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the table to delete + * @param {string} request.datasetId + * Required. Dataset ID of the table to delete + * @param {string} request.tableId + * Required. Table ID of the table to delete + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing {@link protos.google.protobuf.Empty|Empty}. + * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods | documentation } + * for more details and examples. 
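+ *
+ *   A minimal inline sketch with placeholder IDs; as noted above, any data in
+ *   the table is deleted along with it:
+ *   ```
+ *   await client.deleteTable({
+ *     projectId: 'my-project',   // placeholder
+ *     datasetId: 'my_dataset',   // placeholder
+ *     tableId: 'obsolete_table', // placeholder
+ *   });
+ *   ```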
+ * @example include:samples/generated/v2/table_service.delete_table.js + * region_tag:bigquery_v2_generated_TableService_DeleteTable_async + */ + deleteTable( + request?: protos.google.cloud.bigquery.v2.IDeleteTableRequest, + options?: CallOptions): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteTableRequest|undefined, {}|undefined + ]>; + deleteTable( + request: protos.google.cloud.bigquery.v2.IDeleteTableRequest, + options: CallOptions, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteTableRequest|null|undefined, + {}|null|undefined>): void; + deleteTable( + request: protos.google.cloud.bigquery.v2.IDeleteTableRequest, + callback: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteTableRequest|null|undefined, + {}|null|undefined>): void; + deleteTable( + request?: protos.google.cloud.bigquery.v2.IDeleteTableRequest, + optionsOrCallback?: CallOptions|Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteTableRequest|null|undefined, + {}|null|undefined>, + callback?: Callback< + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteTableRequest|null|undefined, + {}|null|undefined>): + Promise<[ + protos.google.protobuf.IEmpty, + protos.google.cloud.bigquery.v2.IDeleteTableRequest|undefined, {}|undefined + ]>|void { + request = request || {}; + let options: CallOptions; + if (typeof optionsOrCallback === 'function' && callback === undefined) { + callback = optionsOrCallback; + options = {}; + } + else { + options = optionsOrCallback as CallOptions; + } + options = options || {}; + options.otherArgs = options.otherArgs || {}; + options.otherArgs.headers = options.otherArgs.headers || {}; + options.otherArgs.headers[ + 'x-goog-request-params' + ] = this._gaxModule.routingHeader.fromParams({ + 'project_id': request.projectId ?? '', + 'dataset_id': request.datasetId ?? '', + 'table_id': request.tableId ?? '', + }); + this.initialize(); + return this.innerApiCalls.deleteTable(request, options, callback); + } + + /** + * Lists all tables in the specified dataset. Requires the READER dataset + * role. + * + * @param {Object} request + * The request object that will be sent. + * @param {string} request.projectId + * Required. Project ID of the tables to list + * @param {string} request.datasetId + * Required. Dataset ID of the tables to list + * @param {google.protobuf.UInt32Value| number } request.maxResults + * The maximum number of results to return in a single response page. + * Leverage the page tokens to iterate through the entire collection. + * @param {string} request.pageToken + * Page token, returned by a previous call, to request the next page of + * results + * @param {object} [options] + * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is Array of {@link protos.google.cloud.bigquery.v2.ListFormatTable|ListFormatTable}. + * The client library will perform auto-pagination by default: it will call the API as many + * times as needed and will merge results from all the pages into this array. + * Note that it can affect your quota. + * We recommend using `listTablesAsync()` + * method described below for async iteration which you can stop as needed. 
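+ *
+ *   A minimal inline sketch with placeholder IDs; a plain number is accepted
+ *   for `maxResults` and is wrapped into a UInt32Value by the implementation
+ *   shown below:
+ *   ```
+ *   const [tables] = await client.listTables({
+ *     projectId: 'my-project', // placeholder
+ *     datasetId: 'my_dataset', // placeholder
+ *     maxResults: 50,          // coerced to {value: 50}
+ *   });
+ *   ```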
+ *   Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ *   for more details and examples.
+ */
+  listTables(
+      request?: protos.google.cloud.bigquery.v2.IListTablesRequest,
+      options?: CallOptions):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IListFormatTable[],
+        protos.google.cloud.bigquery.v2.IListTablesRequest|null,
+        protos.google.cloud.bigquery.v2.ITableList
+      ]>;
+  listTables(
+      request: protos.google.cloud.bigquery.v2.IListTablesRequest,
+      options: CallOptions,
+      callback: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListTablesRequest,
+          protos.google.cloud.bigquery.v2.ITableList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatTable>): void;
+  listTables(
+      request: protos.google.cloud.bigquery.v2.IListTablesRequest,
+      callback: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListTablesRequest,
+          protos.google.cloud.bigquery.v2.ITableList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatTable>): void;
+  listTables(
+      request?: protos.google.cloud.bigquery.v2.IListTablesRequest,
+      optionsOrCallback?: CallOptions|PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListTablesRequest,
+          protos.google.cloud.bigquery.v2.ITableList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatTable>,
+      callback?: PaginationCallback<
+          protos.google.cloud.bigquery.v2.IListTablesRequest,
+          protos.google.cloud.bigquery.v2.ITableList|null|undefined,
+          protos.google.cloud.bigquery.v2.IListFormatTable>):
+      Promise<[
+        protos.google.cloud.bigquery.v2.IListFormatTable[],
+        protos.google.cloud.bigquery.v2.IListTablesRequest|null,
+        protos.google.cloud.bigquery.v2.ITableList
+      ]>|void {
+    request = request || {};
+    // Converts number to Uint32 or Int32 value for non-compliant APIs.
+    if(request.maxResults && typeof request.maxResults === "number"){
+      const maxResultsObject = {"value": request.maxResults}
+      request.maxResults = maxResultsObject
+    }
+    let options: CallOptions;
+    if (typeof optionsOrCallback === 'function' && callback === undefined) {
+      callback = optionsOrCallback;
+      options = {};
+    }
+    else {
+      options = optionsOrCallback as CallOptions;
+    }
+    options = options || {};
+    options.otherArgs = options.otherArgs || {};
+    options.otherArgs.headers = options.otherArgs.headers || {};
+    options.otherArgs.headers[
+      'x-goog-request-params'
+    ] = this._gaxModule.routingHeader.fromParams({
+      'project_id': request.projectId ?? '',
+      'dataset_id': request.datasetId ?? '',
+    });
+    this.initialize();
+    return this.innerApiCalls.listTables(request, options, callback);
+  }
+
+/**
+ * Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object.
+ * @param {Object} request
+ *   The request object that will be sent.
+ * @param {string} request.projectId
+ *   Required. Project ID of the tables to list
+ * @param {string} request.datasetId
+ *   Required. Dataset ID of the tables to list
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ *   The maximum number of results to return in a single response page.
+ *   Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ *   Page token, returned by a previous call, to request the next page of
+ *   results
+ * @param {object} [options]
+ *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
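+ *
+ *   A minimal consumption sketch with placeholder IDs:
+ *   ```
+ *   client.listTablesStream({projectId: 'my-project', datasetId: 'my_dataset'})
+ *     .on('data', table => console.log(table)) // one ListFormatTable per event
+ *     .on('error', console.error)
+ *     .on('end', () => console.log('done'));
+ *   ```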
+ * @returns {Stream}
+ * An object stream which emits an object representing {@link protos.google.cloud.bigquery.v2.ListFormatTable|ListFormatTable} on 'data' event.
+ * The client library will perform auto-pagination by default: it will call the API as many
+ * times as needed. Note that it can affect your quota.
+ * We recommend using `listTablesAsync()`
+ * method described below for async iteration which you can stop as needed.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ */
+ listTablesStream(
+ request?: protos.google.cloud.bigquery.v2.IListTablesRequest,
+ options?: CallOptions):
+ Transform{
+ request = request || {};
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ?? '',
+ });
+ const defaultCallSettings = this._defaults['listTables'];
+ const callSettings = defaultCallSettings.merge(options);
+ this.initialize();
+ return this.descriptors.page.listTables.createStream(
+ this.innerApiCalls.listTables as GaxCall,
+ request,
+ callSettings
+ );
+ }
+
+/**
+ * Equivalent to `listTables`, but returns an iterable object.
+ *
+ * `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand.
+ * @param {Object} request
+ * The request object that will be sent.
+ * @param {string} request.projectId
+ * Required. Project ID of the tables to list
+ * @param {string} request.datasetId
+ * Required. Dataset ID of the tables to list
+ * @param {google.protobuf.UInt32Value} request.maxResults
+ * The maximum number of results to return in a single response page.
+ * Leverage the page tokens to iterate through the entire collection.
+ * @param {string} request.pageToken
+ * Page token, returned by a previous call, to request the next page of
+ * results
+ * @param {object} [options]
+ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
+ * @returns {Object}
+ * An iterable Object that allows {@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols | async iteration }.
+ * When you iterate the returned iterable, each element will be an object representing
+ * {@link protos.google.cloud.bigquery.v2.ListFormatTable|ListFormatTable}. The API will be called under the hood as needed, once per the page,
+ * so you can stop the iteration when you don't need more results.
+ * Please see the {@link https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination | documentation }
+ * for more details and examples.
+ * @example include:samples/generated/v2/table_service.list_tables.js
+ * region_tag:bigquery_v2_generated_TableService_ListTables_async
+ */
+ listTablesAsync(
+ request?: protos.google.cloud.bigquery.v2.IListTablesRequest,
+ options?: CallOptions):
+ AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatTable>{
+ request = request || {};
+ options = options || {};
+ options.otherArgs = options.otherArgs || {};
+ options.otherArgs.headers = options.otherArgs.headers || {};
+ options.otherArgs.headers[
+ 'x-goog-request-params'
+ ] = this._gaxModule.routingHeader.fromParams({
+ 'project_id': request.projectId ?? '',
+ 'dataset_id': request.datasetId ?? '',
+ });
+ const defaultCallSettings = this._defaults['listTables'];
+ const callSettings = defaultCallSettings.merge(options);
+ this.initialize();
+ return this.descriptors.page.listTables.asyncIterate(
+ this.innerApiCalls['listTables'] as GaxCall,
+ request as {},
+ callSettings
+ ) as AsyncIterable<protos.google.cloud.bigquery.v2.IListFormatTable>;
+ }
+
+ /**
+ * Terminate the gRPC channel and close the client.
+ *
+ * The client will no longer be usable and all future behavior is undefined.
+ * @returns {Promise} A promise that resolves when the client is closed.
+ */
+ close(): Promise<void> {
+ if (this.tableServiceStub && !this._terminated) {
+ return this.tableServiceStub.then(stub => {
+ this._terminated = true;
+ stub.close();
+ });
+ }
+ return Promise.resolve();
+ }
+}
diff --git a/baselines/bigquery-v2/src/v2/table_service_client_config.json.baseline b/baselines/bigquery-v2/src/v2/table_service_client_config.json.baseline
new file mode 100644
index 000000000..3141de741
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/table_service_client_config.json.baseline
@@ -0,0 +1,50 @@
+{
+ "interfaces": {
+ "google.cloud.bigquery.v2.TableService": {
+ "retry_codes": {
+ "non_idempotent": [],
+ "idempotent": [
+ "DEADLINE_EXCEEDED",
+ "UNAVAILABLE"
+ ]
+ },
+ "retry_params": {
+ "default": {
+ "initial_retry_delay_millis": 100,
+ "retry_delay_multiplier": 1.3,
+ "max_retry_delay_millis": 60000,
+ "initial_rpc_timeout_millis": 60000,
+ "rpc_timeout_multiplier": 1,
+ "max_rpc_timeout_millis": 60000,
+ "total_timeout_millis": 600000
+ }
+ },
+ "methods": {
+ "GetTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "InsertTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "PatchTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "UpdateTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "DeleteTable": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ },
+ "ListTables": {
+ "retry_codes_name": "non_idempotent",
+ "retry_params_name": "default"
+ }
+ }
+ }
+ }
+}
diff --git a/baselines/bigquery-v2/src/v2/table_service_proto_list.json.baseline b/baselines/bigquery-v2/src/v2/table_service_proto_list.json.baseline
new file mode 100644
index 000000000..4878b9be1
--- /dev/null
+++ b/baselines/bigquery-v2/src/v2/table_service_proto_list.json.baseline
@@ -0,0 +1,46 @@
+[
+ "../../protos/google/cloud/bigquery/v2/biglake_config.proto",
+ "../../protos/google/cloud/bigquery/v2/clustering.proto",
+ "../../protos/google/cloud/bigquery/v2/data_format_options.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset.proto",
+ "../../protos/google/cloud/bigquery/v2/dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/decimal_target_types.proto",
+ "../../protos/google/cloud/bigquery/v2/encryption_config.proto",
+ "../../protos/google/cloud/bigquery/v2/error.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_catalog_table_options.proto",
+ "../../protos/google/cloud/bigquery/v2/external_data_config.proto",
+ "../../protos/google/cloud/bigquery/v2/external_dataset_reference.proto",
+ "../../protos/google/cloud/bigquery/v2/file_set_specification_type.proto",
+ "../../protos/google/cloud/bigquery/v2/hive_partitioning.proto",
+ "../../protos/google/cloud/bigquery/v2/job.proto",
+ "../../protos/google/cloud/bigquery/v2/job_config.proto",
+
"../../protos/google/cloud/bigquery/v2/job_creation_reason.proto", + "../../protos/google/cloud/bigquery/v2/job_reference.proto", + "../../protos/google/cloud/bigquery/v2/job_stats.proto", + "../../protos/google/cloud/bigquery/v2/job_status.proto", + "../../protos/google/cloud/bigquery/v2/json_extension.proto", + "../../protos/google/cloud/bigquery/v2/location_metadata.proto", + "../../protos/google/cloud/bigquery/v2/map_target_type.proto", + "../../protos/google/cloud/bigquery/v2/model.proto", + "../../protos/google/cloud/bigquery/v2/model_reference.proto", + "../../protos/google/cloud/bigquery/v2/partitioning_definition.proto", + "../../protos/google/cloud/bigquery/v2/privacy_policy.proto", + "../../protos/google/cloud/bigquery/v2/project.proto", + "../../protos/google/cloud/bigquery/v2/query_parameter.proto", + "../../protos/google/cloud/bigquery/v2/range_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/restriction_config.proto", + "../../protos/google/cloud/bigquery/v2/routine.proto", + "../../protos/google/cloud/bigquery/v2/routine_reference.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy.proto", + "../../protos/google/cloud/bigquery/v2/row_access_policy_reference.proto", + "../../protos/google/cloud/bigquery/v2/session_info.proto", + "../../protos/google/cloud/bigquery/v2/standard_sql.proto", + "../../protos/google/cloud/bigquery/v2/system_variable.proto", + "../../protos/google/cloud/bigquery/v2/table.proto", + "../../protos/google/cloud/bigquery/v2/table_constraints.proto", + "../../protos/google/cloud/bigquery/v2/table_reference.proto", + "../../protos/google/cloud/bigquery/v2/table_schema.proto", + "../../protos/google/cloud/bigquery/v2/time_partitioning.proto", + "../../protos/google/cloud/bigquery/v2/udf_resource.proto" +] diff --git a/baselines/bigquery-v2/system-test/fixtures/sample/src/index.js.baseline b/baselines/bigquery-v2/system-test/fixtures/sample/src/index.js.baseline new file mode 100644 index 000000000..5ce0cf564 --- /dev/null +++ b/baselines/bigquery-v2/system-test/fixtures/sample/src/index.js.baseline @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + + +/* eslint-disable node/no-missing-require, no-unused-vars */ +const bigquery = require('bigquery'); + +function main() { + const datasetServiceClient = new bigquery.DatasetServiceClient(); + const jobServiceClient = new bigquery.JobServiceClient(); + const modelServiceClient = new bigquery.ModelServiceClient(); + const projectServiceClient = new bigquery.ProjectServiceClient(); + const routineServiceClient = new bigquery.RoutineServiceClient(); + const rowAccessPolicyServiceClient = new bigquery.RowAccessPolicyServiceClient(); + const tableServiceClient = new bigquery.TableServiceClient(); +} + +main(); diff --git a/baselines/bigquery-v2/system-test/fixtures/sample/src/index.ts.baseline b/baselines/bigquery-v2/system-test/fixtures/sample/src/index.ts.baseline new file mode 100644 index 000000000..ccfb6fd38 --- /dev/null +++ b/baselines/bigquery-v2/system-test/fixtures/sample/src/index.ts.baseline @@ -0,0 +1,68 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {DatasetServiceClient, JobServiceClient, ModelServiceClient, ProjectServiceClient, RoutineServiceClient, RowAccessPolicyServiceClient, TableServiceClient} from 'bigquery'; + +// check that the client class type name can be used +function doStuffWithDatasetServiceClient(client: DatasetServiceClient) { + client.close(); +} +function doStuffWithJobServiceClient(client: JobServiceClient) { + client.close(); +} +function doStuffWithModelServiceClient(client: ModelServiceClient) { + client.close(); +} +function doStuffWithProjectServiceClient(client: ProjectServiceClient) { + client.close(); +} +function doStuffWithRoutineServiceClient(client: RoutineServiceClient) { + client.close(); +} +function doStuffWithRowAccessPolicyServiceClient(client: RowAccessPolicyServiceClient) { + client.close(); +} +function doStuffWithTableServiceClient(client: TableServiceClient) { + client.close(); +} + +function main() { + // check that the client instance can be created + const datasetServiceClient = new DatasetServiceClient(); + doStuffWithDatasetServiceClient(datasetServiceClient); + // check that the client instance can be created + const jobServiceClient = new JobServiceClient(); + doStuffWithJobServiceClient(jobServiceClient); + // check that the client instance can be created + const modelServiceClient = new ModelServiceClient(); + doStuffWithModelServiceClient(modelServiceClient); + // check that the client instance can be created + const projectServiceClient = new ProjectServiceClient(); + doStuffWithProjectServiceClient(projectServiceClient); + // check that the client instance can be created + const routineServiceClient = new RoutineServiceClient(); + doStuffWithRoutineServiceClient(routineServiceClient); + // check that the client instance can be created + const 
rowAccessPolicyServiceClient = new RowAccessPolicyServiceClient(); + doStuffWithRowAccessPolicyServiceClient(rowAccessPolicyServiceClient); + // check that the client instance can be created + const tableServiceClient = new TableServiceClient(); + doStuffWithTableServiceClient(tableServiceClient); +} + +main(); diff --git a/baselines/bigquery-v2/system-test/install.ts.baseline b/baselines/bigquery-v2/system-test/install.ts.baseline new file mode 100644 index 000000000..fd5bfdc71 --- /dev/null +++ b/baselines/bigquery-v2/system-test/install.ts.baseline @@ -0,0 +1,49 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import {packNTest} from 'pack-n-play'; +import {readFileSync} from 'fs'; +import {describe, it} from 'mocha'; + +describe('📦 pack-n-play test', () => { + + it('TypeScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'TypeScript user can use the type definitions', + ts: readFileSync('./system-test/fixtures/sample/src/index.ts').toString() + } + }; + await packNTest(options); + }); + + it('JavaScript code', async function() { + this.timeout(300000); + const options = { + packageDir: process.cwd(), + sample: { + description: 'JavaScript user can use the library', + ts: readFileSync('./system-test/fixtures/sample/src/index.js').toString() + } + }; + await packNTest(options); + }); + +}); diff --git a/baselines/bigquery-v2/test/gapic_dataset_service_v2.ts.baseline b/baselines/bigquery-v2/test/gapic_dataset_service_v2.ts.baseline new file mode 100644 index 000000000..8af981d79 --- /dev/null +++ b/baselines/bigquery-v2/test/gapic_dataset_service_v2.ts.baseline @@ -0,0 +1,1214 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
**
+
+import * as protos from '../protos/protos';
+import * as assert from 'assert';
+import * as sinon from 'sinon';
+import {SinonStub} from 'sinon';
+import {describe, it} from 'mocha';
+import * as datasetserviceModule from '../src';
+
+import {PassThrough} from 'stream';
+
+import {protobuf} from 'google-gax';
+
+// Dynamically loaded proto JSON is needed to get the type information
+// to fill in default values for request objects
+const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll();
+
+// eslint-disable-next-line @typescript-eslint/no-unused-vars
+function getTypeDefaultValue(typeName: string, fields: string[]) {
+ let type = root.lookupType(typeName) as protobuf.Type;
+ for (const field of fields.slice(0, -1)) {
+ type = type.fields[field]?.resolvedType as protobuf.Type;
+ }
+ return type.fields[fields[fields.length - 1]]?.defaultValue;
+}
+
+function generateSampleMessage<T extends protobuf.Message<T>>(instance: T) {
+ const filledObject = (instance.constructor as typeof protobuf.Message)
+ .toObject(instance as protobuf.Message, {defaults: true});
+ return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T;
+}
+
+function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) {
+ return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]);
+}
+
+function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) {
+ return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
+}
+
+function stubPageStreamingCall<ResponseType>(responses?: ResponseType[], error?: Error) {
+ const pagingStub = sinon.stub();
+ if (responses) {
+ for (let i = 0; i < responses.length; ++i) {
+ pagingStub.onCall(i).callsArgWith(2, null, responses[i]);
+ }
+ }
+ const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub;
+ const mockStream = new PassThrough({
+ objectMode: true,
+ transform: transformStub,
+ });
+ // trigger as many responses as needed
+ if (responses) {
+ for (let i = 0; i < responses.length; ++i) {
+ setImmediate(() => { mockStream.write({}); });
+ }
+ setImmediate(() => { mockStream.end(); });
+ } else {
+ setImmediate(() => { mockStream.write({}); });
+ setImmediate(() => { mockStream.end(); });
+ }
+ return sinon.stub().returns(mockStream);
+}
+
+function stubAsyncIterationCall<ResponseType>(responses?: ResponseType[], error?: Error) {
+ let counter = 0;
+ const asyncIterable = {
+ [Symbol.asyncIterator]() {
+ return {
+ async next() {
+ if (error) {
+ return Promise.reject(error);
+ }
+ if (counter >= responses!.length) {
+ return Promise.resolve({done: true, value: undefined});
+ }
+ return Promise.resolve({done: false, value: responses![counter++]});
+ }
+ };
+ }
+ };
+ return sinon.stub().returns(asyncIterable);
+}
+
+describe('v2.DatasetServiceClient', () => {
+ describe('Common methods', () => {
+ it('has apiEndpoint', () => {
+ const client = new datasetserviceModule.v2.DatasetServiceClient();
+ const apiEndpoint = client.apiEndpoint;
+ assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com');
+ });
+
+ it('has universeDomain', () => {
+ const client = new datasetserviceModule.v2.DatasetServiceClient();
+ const universeDomain = client.universeDomain;
+ assert.strictEqual(universeDomain, "googleapis.com");
+ });
+
+ if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+ it('throws DeprecationWarning if static servicePath is used', () => {
+ const stub = sinon.stub(process, 'emitWarning');
+ const servicePath = datasetserviceModule.v2.DatasetServiceClient.servicePath;
+ assert.strictEqual(servicePath, 'bigquery.googleapis.com');
+ assert(stub.called);
+ stub.restore();
+ });
+
+ it('throws DeprecationWarning if static apiEndpoint is used', () => {
+ const stub = sinon.stub(process, 'emitWarning');
+ const apiEndpoint = datasetserviceModule.v2.DatasetServiceClient.apiEndpoint;
+ assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com');
+ assert(stub.called);
+ stub.restore();
+ });
+ }
+ it('sets apiEndpoint according to universe domain camelCase', () => {
+ const client = new datasetserviceModule.v2.DatasetServiceClient({universeDomain: 'example.com'});
+ const servicePath = client.apiEndpoint;
+ assert.strictEqual(servicePath, 'bigquery.example.com');
+ });
+
+ it('sets apiEndpoint according to universe domain snakeCase', () => {
+ const client = new datasetserviceModule.v2.DatasetServiceClient({universe_domain: 'example.com'});
+ const servicePath = client.apiEndpoint;
+ assert.strictEqual(servicePath, 'bigquery.example.com');
+ });
+
+ if (typeof process === 'object' && 'env' in process) {
+ describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => {
+ it('sets apiEndpoint from environment variable', () => {
+ const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'];
+ process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com';
+ const client = new datasetserviceModule.v2.DatasetServiceClient();
+ const servicePath = client.apiEndpoint;
+ assert.strictEqual(servicePath, 'bigquery.example.com');
+ if (saved) {
+ process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved;
+ } else {
+ delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'];
+ }
+ });
+
+ it('value configured in code has priority over environment variable', () => {
+ const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'];
+
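// The test overrides the variable here and passes a different domain to the
+ // constructor below; the option set in code is expected to take precedence.
+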
process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new datasetserviceModule.v2.DatasetServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new datasetserviceModule.v2.DatasetServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = datasetserviceModule.v2.DatasetServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new datasetserviceModule.v2.DatasetServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.datasetServiceStub, undefined); + await client.initialize(); + assert(client.datasetServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.datasetServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.datasetServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getDataset', () => { + it('invokes getDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + 
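// initialize() builds the innerApiCalls map; the test then swaps the
+ // getDataset entry for a stub before making the call.
+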
client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.getDataset = stubSimpleCall(expectedResponse); + const [response] = await client.getDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.getDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getDataset(request), expectedError); + }); + }); + + describe('insertDataset', () => { + it('invokes insertDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.insertDataset = stubSimpleCall(expectedResponse); + const [response] = await client.insertDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.insertDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.insertDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.insertDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.insertDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.insertDataset(request), expectedError); + }); + }); + + describe('patchDataset', () => { + it('invokes patchDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.patchDataset = stubSimpleCall(expectedResponse); + const [response] = await client.patchDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.patchDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.patchDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.patchDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.patchDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.patchDataset(request), expectedError); + }); + }); + + describe('updateDataset', () => { + it('invokes updateDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.updateDataset = stubSimpleCall(expectedResponse); + const [response] = await client.updateDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.updateDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.updateDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.updateDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.updateDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.updateDataset(request), expectedError); + }); + }); + + describe('deleteDataset', () => { + it('invokes deleteDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteDataset = stubSimpleCall(expectedResponse); + const [response] = await client.deleteDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteDataset( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteDataset(request), expectedError); + }); + }); + + describe('undeleteDataset', () => { + it('invokes undeleteDataset without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UndeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.undeleteDataset = stubSimpleCall(expectedResponse); + const [response] = await client.undeleteDataset(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes undeleteDataset without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UndeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Dataset() + ); + client.innerApiCalls.undeleteDataset = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.undeleteDataset( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IDataset|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes undeleteDataset with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UndeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.undeleteDataset = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.undeleteDataset(request), expectedError); + const actualRequest = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.undeleteDataset as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes undeleteDataset with closed client', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UndeleteDatasetRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UndeleteDatasetRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.undeleteDataset(request), expectedError); + }); + }); + + describe('listDatasets', () => { + it('invokes listDatasets without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + ]; + client.innerApiCalls.listDatasets = stubSimpleCall(expectedResponse); + const [response] = await client.listDatasets(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listDatasets without error using callback', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + ]; + client.innerApiCalls.listDatasets = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listDatasets( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IListFormatDataset[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listDatasets with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listDatasets = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listDatasets(request), expectedError); + const actualRequest = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listDatasets as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listDatasetsStream without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + ]; + client.descriptors.page.listDatasets.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listDatasetsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatDataset[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatDataset) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listDatasets.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listDatasets, request)); + assert( + (client.descriptors.page.listDatasets.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listDatasetsStream with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listDatasets.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listDatasetsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatDataset[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatDataset) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listDatasets.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listDatasets, request)); + assert( + (client.descriptors.page.listDatasets.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listDatasets without error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatDataset()), + ]; + client.descriptors.page.listDatasets.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IListFormatDataset[] = []; + const iterable = client.listDatasetsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listDatasets.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listDatasets.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listDatasets with error', async () => { + const client = new datasetserviceModule.v2.DatasetServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListDatasetsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListDatasetsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listDatasets.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listDatasetsAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.bigquery.v2.IListFormatDataset[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listDatasets.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listDatasets.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); +}); diff --git a/baselines/bigquery-v2/test/gapic_job_service_v2.ts.baseline b/baselines/bigquery-v2/test/gapic_job_service_v2.ts.baseline new file mode 100644 index 000000000..637c98509 --- /dev/null +++ b/baselines/bigquery-v2/test/gapic_job_service_v2.ts.baseline @@ -0,0 +1,1202 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
**
+
+import * as protos from '../protos/protos';
+import * as assert from 'assert';
+import * as sinon from 'sinon';
+import {SinonStub} from 'sinon';
+import {describe, it} from 'mocha';
+import * as jobserviceModule from '../src';
+
+import {PassThrough} from 'stream';
+
+import {protobuf} from 'google-gax';
+
+// Dynamically loaded proto JSON is needed to get the type information
+// to fill in default values for request objects
+const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll();
+
+// eslint-disable-next-line @typescript-eslint/no-unused-vars
+function getTypeDefaultValue(typeName: string, fields: string[]) {
+    let type = root.lookupType(typeName) as protobuf.Type;
+    for (const field of fields.slice(0, -1)) {
+        type = type.fields[field]?.resolvedType as protobuf.Type;
+    }
+    return type.fields[fields[fields.length - 1]]?.defaultValue;
+}
+
+function generateSampleMessage<T extends object>(instance: T) {
+    const filledObject = (instance.constructor as typeof protobuf.Message)
+        .toObject(instance as protobuf.Message, {defaults: true});
+    return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T;
+}
+
+function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) {
+    return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]);
+}
+
+function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) {
+    return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
+}
+
+function stubPageStreamingCall<ResponseType>(responses?: ResponseType[], error?: Error) {
+    const pagingStub = sinon.stub();
+    if (responses) {
+        for (let i = 0; i < responses.length; ++i) {
+            pagingStub.onCall(i).callsArgWith(2, null, responses[i]);
+        }
+    }
+    const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub;
+    const mockStream = new PassThrough({
+        objectMode: true,
+        transform: transformStub,
+    });
+    // trigger as many responses as needed
+    if (responses) {
+        for (let i = 0; i < responses.length; ++i) {
+            setImmediate(() => { mockStream.write({}); });
+        }
+        setImmediate(() => { mockStream.end(); });
+    } else {
+        setImmediate(() => { mockStream.write({}); });
+        setImmediate(() => { mockStream.end(); });
+    }
+    return sinon.stub().returns(mockStream);
+}
+
+function stubAsyncIterationCall<ResponseType>(responses?: ResponseType[], error?: Error) {
+    let counter = 0;
+    const asyncIterable = {
+        [Symbol.asyncIterator]() {
+            return {
+                async next() {
+                    if (error) {
+                        return Promise.reject(error);
+                    }
+                    if (counter >= responses!.length) {
+                        return Promise.resolve({done: true, value: undefined});
+                    }
+                    return Promise.resolve({done: false, value: responses![counter++]});
+                }
+            };
+        }
+    };
+    return sinon.stub().returns(asyncIterable);
+}
+
+describe('v2.JobServiceClient', () => {
+    describe('Common methods', () => {
+        it('has apiEndpoint', () => {
+            const client = new jobserviceModule.v2.JobServiceClient();
+            const apiEndpoint = client.apiEndpoint;
+            assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com');
+        });
+
+        it('has universeDomain', () => {
+            const client = new jobserviceModule.v2.JobServiceClient();
+            const universeDomain = client.universeDomain;
+            assert.strictEqual(universeDomain, "googleapis.com");
+        });
+
+        if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+            it('throws DeprecationWarning if static servicePath is used', () => {
+                const stub = sinon.stub(process, 'emitWarning');
+                const servicePath = jobserviceModule.v2.JobServiceClient.servicePath;
+                assert.strictEqual(servicePath, 'bigquery.googleapis.com');
+                assert(stub.called);
+                stub.restore();
+            });
+
+            it('throws DeprecationWarning if static apiEndpoint is used', () => {
+                const stub = sinon.stub(process, 'emitWarning');
+                const apiEndpoint = jobserviceModule.v2.JobServiceClient.apiEndpoint;
+                assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com');
+                assert(stub.called);
+                stub.restore();
+            });
+        }
+        it('sets apiEndpoint according to universe domain camelCase', () => {
+            const client = new jobserviceModule.v2.JobServiceClient({universeDomain: 'example.com'});
+            const servicePath = client.apiEndpoint;
+            assert.strictEqual(servicePath, 'bigquery.example.com');
+        });
+
+        it('sets apiEndpoint according to universe domain snakeCase', () => {
+            const client = new jobserviceModule.v2.JobServiceClient({universe_domain: 'example.com'});
+            const servicePath = client.apiEndpoint;
+            assert.strictEqual(servicePath, 'bigquery.example.com');
+        });
+
+        if (typeof process === 'object' && 'env' in process) {
+            describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => {
+                it('sets apiEndpoint from environment variable', () => {
+                    const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'];
+                    process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com';
+                    const client = new jobserviceModule.v2.JobServiceClient();
+                    const servicePath = client.apiEndpoint;
+                    assert.strictEqual(servicePath, 'bigquery.example.com');
+                    if (saved) {
+                        process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved;
+                    } else {
+                        delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'];
+                    }
+                });
+
+                it('value configured in code has priority over environment variable', () => {
+                    const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'];
+                    process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com';
+                    const client =
+                        
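// a universeDomain passed in client options should take precedence over the environment variable
+                        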
new jobserviceModule.v2.JobServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new jobserviceModule.v2.JobServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = jobserviceModule.v2.JobServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new jobserviceModule.v2.JobServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new jobserviceModule.v2.JobServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.jobServiceStub, undefined); + await client.initialize(); + assert(client.jobServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.jobServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.jobServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('cancelJob', () => { + it('invokes cancelJob without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.CancelJobRequest() + ); + const defaultValue1 = + 
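// resolve the proto-declared default for project_id; the same value must appear in the routing header asserted below
+            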
getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.JobCancelResponse() + ); + client.innerApiCalls.cancelJob = stubSimpleCall(expectedResponse); + const [response] = await client.cancelJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes cancelJob without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.JobCancelResponse() + ); + client.innerApiCalls.cancelJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.cancelJob( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IJobCancelResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes cancelJob with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.cancelJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.cancelJob(request), expectedError); + const actualRequest = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.cancelJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes cancelJob with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.CancelJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.CancelJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.cancelJob(request), expectedError); + }); + }); + + describe('getJob', () => { + it('invokes getJob without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Job() + ); + client.innerApiCalls.getJob = stubSimpleCall(expectedResponse); + const [response] = await client.getJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getJob without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Job() + ); + client.innerApiCalls.getJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getJob( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IJob|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getJob with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getJob(request), expectedError); + const actualRequest = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getJob with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getJob(request), expectedError); + }); + }); + + describe('insertJob', () => { + it('invokes insertJob without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertJobRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Job() + ); + client.innerApiCalls.insertJob = stubSimpleCall(expectedResponse); + const [response] = await client.insertJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertJob without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertJobRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Job() + ); + client.innerApiCalls.insertJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.insertJob( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IJob|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertJob with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertJobRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.insertJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.insertJob(request), expectedError); + const actualRequest = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertJob with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertJobRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.insertJob(request), expectedError); + }); + }); + + describe('deleteJob', () => { + it('invokes deleteJob without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteJob = stubSimpleCall(expectedResponse); + const [response] = await client.deleteJob(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteJob without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteJob = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteJob( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteJob with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteJob = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteJob(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteJob as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteJob with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteJobRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteJobRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteJob(request), expectedError); + }); + }); + + describe('getQueryResults', () => { + it('invokes getQueryResults without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + 
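// job_id is resolved the same way and supplies the second routing-header segment
+            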
getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsResponse() + ); + client.innerApiCalls.getQueryResults = stubSimpleCall(expectedResponse); + const [response] = await client.getQueryResults(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getQueryResults without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsResponse() + ); + client.innerApiCalls.getQueryResults = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getQueryResults( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IGetQueryResultsResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getQueryResults with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&job_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getQueryResults = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getQueryResults(request), expectedError); + const actualRequest = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getQueryResults as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getQueryResults with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetQueryResultsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetQueryResultsRequest', ['jobId']); + request.jobId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getQueryResults(request), expectedError); + }); + }); + + describe('query', () => { + it('invokes query without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PostQueryRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PostQueryRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.QueryResponse() + ); + client.innerApiCalls.query = stubSimpleCall(expectedResponse); + const [response] = await client.query(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes query without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PostQueryRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PostQueryRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.QueryResponse() + ); + client.innerApiCalls.query = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.query( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IQueryResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes query with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PostQueryRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PostQueryRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.query = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.query(request), expectedError); + const actualRequest = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.query as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes query with closed client', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PostQueryRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PostQueryRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.query(request), expectedError); + }); + }); + + describe('listJobs', () => { + it('invokes listJobs without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + ]; + client.innerApiCalls.listJobs = stubSimpleCall(expectedResponse); + const [response] = await client.listJobs(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listJobs without error using callback', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + ]; + client.innerApiCalls.listJobs = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listJobs( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IListFormatJob[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listJobs with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listJobs = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listJobs(request), expectedError); + const actualRequest = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listJobs as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listJobsStream without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + ]; + client.descriptors.page.listJobs.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listJobsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatJob[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatJob) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listJobs, request)); + assert( + (client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listJobsStream with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listJobs.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listJobsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatJob[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatJob) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listJobs, request)); + assert( + (client.descriptors.page.listJobs.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listJobs without error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatJob()), + ]; + client.descriptors.page.listJobs.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IListFormatJob[] = []; + const iterable = client.listJobsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listJobs.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listJobs.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listJobs with error', async () => { + const client = new jobserviceModule.v2.JobServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListJobsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListJobsRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`;
+            const expectedError = new Error('expected');
+            client.descriptors.page.listJobs.asyncIterate = stubAsyncIterationCall(undefined, expectedError);
+            const iterable = client.listJobsAsync(request);
+            await assert.rejects(async () => {
+                const responses: protos.google.cloud.bigquery.v2.IListFormatJob[] = [];
+                for await (const resource of iterable) {
+                    responses.push(resource!);
+                }
+            });
+            assert.deepStrictEqual(
+                (client.descriptors.page.listJobs.asyncIterate as SinonStub)
+                    .getCall(0).args[1], request);
+            assert(
+                (client.descriptors.page.listJobs.asyncIterate as SinonStub)
+                    .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes(
+                        expectedHeaderRequestParams
+                    )
+            );
+        });
+    });
+});
diff --git a/baselines/bigquery-v2/test/gapic_model_service_v2.ts.baseline b/baselines/bigquery-v2/test/gapic_model_service_v2.ts.baseline
new file mode 100644
index 000000000..50883a6d0
--- /dev/null
+++ b/baselines/bigquery-v2/test/gapic_model_service_v2.ts.baseline
@@ -0,0 +1,923 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ** This file is automatically generated by gapic-generator-typescript. **
+// ** https://github.com/googleapis/gapic-generator-typescript **
+// ** All changes to this file may be overwritten. **
+
+import * as protos from '../protos/protos';
+import * as assert from 'assert';
+import * as sinon from 'sinon';
+import {SinonStub} from 'sinon';
+import {describe, it} from 'mocha';
+import * as modelserviceModule from '../src';
+
+import {PassThrough} from 'stream';
+
+import {protobuf} from 'google-gax';
+
+// Dynamically loaded proto JSON is needed to get the type information
+// to fill in default values for request objects
+const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll();
+
+// eslint-disable-next-line @typescript-eslint/no-unused-vars
+function getTypeDefaultValue(typeName: string, fields: string[]) {
+    let type = root.lookupType(typeName) as protobuf.Type;
+    for (const field of fields.slice(0, -1)) {
+        type = type.fields[field]?.resolvedType as protobuf.Type;
+    }
+    return type.fields[fields[fields.length - 1]]?.defaultValue;
+}
+
+function generateSampleMessage<T extends object>(instance: T) {
+    const filledObject = (instance.constructor as typeof protobuf.Message)
+        .toObject(instance as protobuf.Message, {defaults: true});
+    return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T;
+}
+
+function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) {
+    return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]);
+}
+
+function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) {
+    return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response);
+}
+
+function stubPageStreamingCall<ResponseType>(responses?: ResponseType[], error?: Error) {
+    const pagingStub = sinon.stub();
+    if (responses) {
+        for (let i = 0; i < responses.length; ++i) {
+            pagingStub.onCall(i).callsArgWith(2, null, responses[i]);
+        }
+    }
+    const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub;
+    const mockStream = new PassThrough({
+        objectMode: true,
+        transform: transformStub,
+    });
+    // trigger as many responses as needed
+    if (responses) {
+        for (let i = 0; i < responses.length; ++i) {
+            setImmediate(() => { mockStream.write({}); });
+        }
+        setImmediate(() => { mockStream.end(); });
+    } else {
+        setImmediate(() => { mockStream.write({}); });
+        setImmediate(() => { mockStream.end(); });
+    }
+    return sinon.stub().returns(mockStream);
+}
+
+function stubAsyncIterationCall<ResponseType>(responses?: ResponseType[], error?: Error) {
+    let counter = 0;
+    const asyncIterable = {
+        [Symbol.asyncIterator]() {
+            return {
+                async next() {
+                    if (error) {
+                        return Promise.reject(error);
+                    }
+                    if (counter >= responses!.length) {
+                        return Promise.resolve({done: true, value: undefined});
+                    }
+                    return Promise.resolve({done: false, value: responses![counter++]});
+                }
+            };
+        }
+    };
+    return sinon.stub().returns(asyncIterable);
+}
+
+describe('v2.ModelServiceClient', () => {
+    describe('Common methods', () => {
+        it('has apiEndpoint', () => {
+            const client = new modelserviceModule.v2.ModelServiceClient();
+            const apiEndpoint = client.apiEndpoint;
+            assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com');
+        });
+
+        it('has universeDomain', () => {
+            const client = new modelserviceModule.v2.ModelServiceClient();
+            const universeDomain = client.universeDomain;
+            assert.strictEqual(universeDomain, "googleapis.com");
+        });
+
+        if (typeof process === 'object' && typeof process.emitWarning === 'function') {
+            it('throws DeprecationWarning if static servicePath is used', () => {
+                const stub = sinon.stub(process, 'emitWarning');
+                const servicePath = modelserviceModule.v2.ModelServiceClient.servicePath;
+                assert.strictEqual(servicePath, 'bigquery.googleapis.com');
+                assert(stub.called);
+                stub.restore();
+            });
+
+            it('throws DeprecationWarning if static apiEndpoint is used', () => {
+                const stub = sinon.stub(process, 'emitWarning');
+                const apiEndpoint = modelserviceModule.v2.ModelServiceClient.apiEndpoint;
+                assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com');
+                assert(stub.called);
+                stub.restore();
+            });
+        }
+        it('sets apiEndpoint according to universe domain camelCase', () => {
+            const client = new modelserviceModule.v2.ModelServiceClient({universeDomain: 'example.com'});
+            const servicePath = client.apiEndpoint;
+            assert.strictEqual(servicePath, 'bigquery.example.com');
+        });
+
+        it('sets apiEndpoint according to universe domain snakeCase', () => {
+            const client = new modelserviceModule.v2.ModelServiceClient({universe_domain: 'example.com'});
+            const servicePath = client.apiEndpoint;
+            assert.strictEqual(servicePath, 'bigquery.example.com');
+        });
+
+        if (typeof process === 'object' && 'env' in process) {
+            describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => {
+                it('sets apiEndpoint from environment variable', () => {
+                    const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'];
+                    process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com';
+                    const client = new modelserviceModule.v2.ModelServiceClient();
+                    const servicePath = client.apiEndpoint;
+                    
assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new modelserviceModule.v2.ModelServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new modelserviceModule.v2.ModelServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = modelserviceModule.v2.ModelServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new modelserviceModule.v2.ModelServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.modelServiceStub, undefined); + await client.initialize(); + assert(client.modelServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.modelServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.modelServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + 
assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getModel', () => { + it('invokes getModel without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Model() + ); + client.innerApiCalls.getModel = stubSimpleCall(expectedResponse); + const [response] = await client.getModel(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getModel without error using callback', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Model() + ); + client.innerApiCalls.getModel = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getModel( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IModel|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getModel with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getModel = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getModel(request), expectedError); + const actualRequest = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getModel with closed client', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getModel(request), expectedError); + }); + }); + + describe('patchModel', () => { + it('invokes patchModel without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Model() + ); + client.innerApiCalls.patchModel = stubSimpleCall(expectedResponse); + const [response] = await client.patchModel(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchModel without error using callback', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Model() + ); + client.innerApiCalls.patchModel = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.patchModel( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IModel|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchModel with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.patchModel = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.patchModel(request), expectedError); + const actualRequest = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchModel with closed client', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.PatchModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.patchModel(request), expectedError); + }); + }); + + describe('deleteModel', () => { + it('invokes deleteModel without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteModel = stubSimpleCall(expectedResponse); + const [response] = await client.deleteModel(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteModel without error using callback', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteModel = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteModel( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteModel with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&model_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteModel = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteModel(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteModel as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteModel with closed client', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteModelRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteModelRequest', ['modelId']); + request.modelId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteModel(request), expectedError); + }); + }); + + describe('listModels', () => { + it('invokes listModels without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + ]; + client.innerApiCalls.listModels = stubSimpleCall(expectedResponse); + const [response] = await client.listModels(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listModels without error using callback', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + ]; + client.innerApiCalls.listModels = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listModels( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IModel[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listModels with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listModels = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listModels(request), expectedError); + const actualRequest = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listModels as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listModelsStream without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + ]; + client.descriptors.page.listModels.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listModelsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.Model[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.Model) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listModels.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listModels, request)); + assert( + (client.descriptors.page.listModels.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listModelsStream with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listModels.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listModelsStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.Model[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.Model) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listModels.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listModels, request)); + assert( + (client.descriptors.page.listModels.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listModels without error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Model()), + ]; + client.descriptors.page.listModels.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IModel[] = []; + const iterable = client.listModelsAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listModels.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listModels.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listModels with error', async () => { + const client = new modelserviceModule.v2.ModelServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListModelsRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListModelsRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listModels.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listModelsAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.bigquery.v2.IModel[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listModels.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listModels.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); +}); diff --git a/baselines/bigquery-v2/test/gapic_project_service_v2.ts.baseline b/baselines/bigquery-v2/test/gapic_project_service_v2.ts.baseline new file mode 100644 index 000000000..dbeb30ecb --- /dev/null +++ b/baselines/bigquery-v2/test/gapic_project_service_v2.ts.baseline @@ -0,0 +1,322 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as projectserviceModule from '../src'; + +import {protobuf} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? 
sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +describe('v2.ProjectServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new projectserviceModule.v2.ProjectServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new projectserviceModule.v2.ProjectServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = projectserviceModule.v2.ProjectServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = projectserviceModule.v2.ProjectServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new projectserviceModule.v2.ProjectServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new projectserviceModule.v2.ProjectServiceClient({universe_domain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new projectserviceModule.v2.ProjectServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new projectserviceModule.v2.ProjectServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new projectserviceModule.v2.ProjectServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = projectserviceModule.v2.ProjectServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new 
projectserviceModule.v2.ProjectServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.projectServiceStub, undefined); + await client.initialize(); + assert(client.projectServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.projectServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.projectServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getServiceAccount', () => { + it('invokes getServiceAccount without error', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetServiceAccountRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountResponse() + ); + client.innerApiCalls.getServiceAccount = stubSimpleCall(expectedResponse); + const [response] = await client.getServiceAccount(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getServiceAccount without error using callback', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetServiceAccountRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountResponse() + ); + client.innerApiCalls.getServiceAccount = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getServiceAccount( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IGetServiceAccountResponse|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getServiceAccount with error', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetServiceAccountRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getServiceAccount = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getServiceAccount(request), expectedError); + const actualRequest = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getServiceAccount as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getServiceAccount with closed client', async () => { + const client = new projectserviceModule.v2.ProjectServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetServiceAccountRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetServiceAccountRequest', ['projectId']); + request.projectId = defaultValue1; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getServiceAccount(request), expectedError); + }); + }); +}); diff --git a/baselines/bigquery-v2/test/gapic_routine_service_v2.ts.baseline b/baselines/bigquery-v2/test/gapic_routine_service_v2.ts.baseline new file mode 100644 index 000000000..852dde576 --- /dev/null +++ b/baselines/bigquery-v2/test/gapic_routine_service_v2.ts.baseline @@ -0,0 +1,1118 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as routineserviceModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? 
sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v2.RoutineServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new routineserviceModule.v2.RoutineServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new routineserviceModule.v2.RoutineServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = routineserviceModule.v2.RoutineServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = routineserviceModule.v2.RoutineServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new routineserviceModule.v2.RoutineServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new routineserviceModule.v2.RoutineServiceClient({universe_domain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new routineserviceModule.v2.RoutineServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + 
process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new routineserviceModule.v2.RoutineServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new routineserviceModule.v2.RoutineServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = routineserviceModule.v2.RoutineServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new routineserviceModule.v2.RoutineServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.routineServiceStub, undefined); + await client.initialize(); + assert(client.routineServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.routineServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.routineServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getRoutine', () => { + it('invokes getRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + 
client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.getRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.getRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.getRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getRoutine( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getRoutine = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getRoutine(request), expectedError); + const actualRequest = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getRoutine(request), expectedError); + }); + }); + + describe('insertRoutine', () => { + it('invokes insertRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.insertRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.insertRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.insertRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.insertRoutine( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.insertRoutine = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.insertRoutine(request), expectedError); + const actualRequest = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.insertRoutine(request), expectedError); + }); + }); + + describe('updateRoutine', () => { + it('invokes updateRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.updateRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.updateRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.updateRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.updateRoutine( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.updateRoutine = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.updateRoutine(request), expectedError); + const actualRequest = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.updateRoutine(request), expectedError); + }); + }); + + describe('patchRoutine', () => { + it('invokes patchRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchRoutineRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.patchRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.patchRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes patchRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchRoutineRequest() + ); + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Routine() + ); + client.innerApiCalls.patchRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.patchRoutine( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + }); + + it('invokes patchRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchRoutineRequest() + ); + const expectedError = new Error('expected'); + client.innerApiCalls.patchRoutine = 
stubSimpleCall(undefined, expectedError); + await assert.rejects(client.patchRoutine(request), expectedError); + }); + + it('invokes patchRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.PatchRoutineRequest() + ); + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.patchRoutine(request), expectedError); + }); + }); + + describe('deleteRoutine', () => { + it('invokes deleteRoutine without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteRoutine = stubSimpleCall(expectedResponse); + const [response] = await client.deleteRoutine(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteRoutine without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteRoutine = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteRoutine( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteRoutine with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&routine_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteRoutine = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteRoutine(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteRoutine as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteRoutine with closed client', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteRoutineRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteRoutineRequest', ['routineId']); + request.routineId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteRoutine(request), expectedError); + }); + }); + + describe('listRoutines', () => { + it('invokes listRoutines without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + ]; + client.innerApiCalls.listRoutines = stubSimpleCall(expectedResponse); + const [response] = await client.listRoutines(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRoutines without error using callback', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + ]; + client.innerApiCalls.listRoutines = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listRoutines( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRoutine[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRoutines with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listRoutines = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listRoutines(request), expectedError); + const actualRequest = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRoutines as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRoutinesStream without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + ]; + client.descriptors.page.listRoutines.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listRoutinesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.Routine[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.Routine) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listRoutines.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listRoutines, request)); + assert( + (client.descriptors.page.listRoutines.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listRoutinesStream with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listRoutines.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listRoutinesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.Routine[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.Routine) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listRoutines.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listRoutines, request)); + assert( + (client.descriptors.page.listRoutines.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listRoutines without error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.Routine()), + ]; + client.descriptors.page.listRoutines.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IRoutine[] = []; + const iterable = client.listRoutinesAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listRoutines.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listRoutines.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listRoutines with error', async () => { + const client = new routineserviceModule.v2.RoutineServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRoutinesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRoutinesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listRoutines.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listRoutinesAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.bigquery.v2.IRoutine[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listRoutines.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listRoutines.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); +}); diff --git a/baselines/bigquery-v2/test/gapic_row_access_policy_service_v2.ts.baseline b/baselines/bigquery-v2/test/gapic_row_access_policy_service_v2.ts.baseline new file mode 100644 index 000000000..245260c0e --- /dev/null +++ b/baselines/bigquery-v2/test/gapic_row_access_policy_service_v2.ts.baseline @@ -0,0 +1,548 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as rowaccesspolicyserviceModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage<T extends protobuf.Message<T>>(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message<T>, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall<ResponseType>(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback<ResponseType>(response?: ResponseType, error?: Error) { + return error ? 
sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubPageStreamingCall<ResponseType>(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall<ResponseType>(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v2.RowAccessPolicyServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({universe_domain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const 
client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.rowAccessPolicyServiceStub, undefined); + await client.initialize(); + assert(client.rowAccessPolicyServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.rowAccessPolicyServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.rowAccessPolicyServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', 
private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('listRowAccessPolicies', () => { + it('invokes listRowAccessPolicies without error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + ]; + client.innerApiCalls.listRowAccessPolicies = stubSimpleCall(expectedResponse); + const [response] = await client.listRowAccessPolicies(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRowAccessPolicies without error using callback', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + ]; + client.innerApiCalls.listRowAccessPolicies = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listRowAccessPolicies( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IRowAccessPolicy[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRowAccessPolicies with error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listRowAccessPolicies = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listRowAccessPolicies(request), expectedError); + const actualRequest = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listRowAccessPolicies as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listRowAccessPoliciesStream without error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + ]; + client.descriptors.page.listRowAccessPolicies.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listRowAccessPoliciesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.RowAccessPolicy[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.RowAccessPolicy) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listRowAccessPolicies.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listRowAccessPolicies, request)); + assert( + (client.descriptors.page.listRowAccessPolicies.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listRowAccessPoliciesStream with error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + 
request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listRowAccessPolicies.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listRowAccessPoliciesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.RowAccessPolicy[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.RowAccessPolicy) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listRowAccessPolicies.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listRowAccessPolicies, request)); + assert( + (client.descriptors.page.listRowAccessPolicies.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listRowAccessPolicies without error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.RowAccessPolicy()), + ]; + client.descriptors.page.listRowAccessPolicies.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IRowAccessPolicy[] = []; + const iterable = client.listRowAccessPoliciesAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listRowAccessPolicies.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listRowAccessPolicies.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listRowAccessPolicies with error', async () => { + const client = new rowaccesspolicyserviceModule.v2.RowAccessPolicyServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListRowAccessPoliciesRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listRowAccessPolicies.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listRowAccessPoliciesAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.bigquery.v2.IRowAccessPolicy[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listRowAccessPolicies.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listRowAccessPolicies.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); +}); diff --git a/baselines/bigquery-v2/test/gapic_table_service_v2.ts.baseline b/baselines/bigquery-v2/test/gapic_table_service_v2.ts.baseline new file mode 100644 index 000000000..000e8ad5c --- /dev/null +++ b/baselines/bigquery-v2/test/gapic_table_service_v2.ts.baseline @@ -0,0 +1,1175 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. ** + +import * as protos from '../protos/protos'; +import * as assert from 'assert'; +import * as sinon from 'sinon'; +import {SinonStub} from 'sinon'; +import {describe, it} from 'mocha'; +import * as tableserviceModule from '../src'; + +import {PassThrough} from 'stream'; + +import {protobuf} from 'google-gax'; + +// Dynamically loaded proto JSON is needed to get the type information +// to fill in default values for request objects +const root = protobuf.Root.fromJSON(require('../protos/protos.json')).resolveAll(); + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +function getTypeDefaultValue(typeName: string, fields: string[]) { + let type = root.lookupType(typeName) as protobuf.Type; + for (const field of fields.slice(0, -1)) { + type = type.fields[field]?.resolvedType as protobuf.Type; + } + return type.fields[fields[fields.length - 1]]?.defaultValue; +} + +function generateSampleMessage(instance: T) { + const filledObject = (instance.constructor as typeof protobuf.Message) + .toObject(instance as protobuf.Message, {defaults: true}); + return (instance.constructor as typeof protobuf.Message).fromObject(filledObject) as T; +} + +function stubSimpleCall(response?: ResponseType, error?: Error) { + return error ? sinon.stub().rejects(error) : sinon.stub().resolves([response]); +} + +function stubSimpleCallWithCallback(response?: ResponseType, error?: Error) { + return error ? sinon.stub().callsArgWith(2, error) : sinon.stub().callsArgWith(2, null, response); +} + +function stubPageStreamingCall(responses?: ResponseType[], error?: Error) { + const pagingStub = sinon.stub(); + if (responses) { + for (let i = 0; i < responses.length; ++i) { + pagingStub.onCall(i).callsArgWith(2, null, responses[i]); + } + } + const transformStub = error ? 
sinon.stub().callsArgWith(2, error) : pagingStub; + const mockStream = new PassThrough({ + objectMode: true, + transform: transformStub, + }); + // trigger as many responses as needed + if (responses) { + for (let i = 0; i < responses.length; ++i) { + setImmediate(() => { mockStream.write({}); }); + } + setImmediate(() => { mockStream.end(); }); + } else { + setImmediate(() => { mockStream.write({}); }); + setImmediate(() => { mockStream.end(); }); + } + return sinon.stub().returns(mockStream); +} + +function stubAsyncIterationCall(responses?: ResponseType[], error?: Error) { + let counter = 0; + const asyncIterable = { + [Symbol.asyncIterator]() { + return { + async next() { + if (error) { + return Promise.reject(error); + } + if (counter >= responses!.length) { + return Promise.resolve({done: true, value: undefined}); + } + return Promise.resolve({done: false, value: responses![counter++]}); + } + }; + } + }; + return sinon.stub().returns(asyncIterable); +} + +describe('v2.TableServiceClient', () => { + describe('Common methods', () => { + it('has apiEndpoint', () => { + const client = new tableserviceModule.v2.TableServiceClient(); + const apiEndpoint = client.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + }); + + it('has universeDomain', () => { + const client = new tableserviceModule.v2.TableServiceClient(); + const universeDomain = client.universeDomain; + assert.strictEqual(universeDomain, "googleapis.com"); + }); + + if (typeof process === 'object' && typeof process.emitWarning === 'function') { + it('throws DeprecationWarning if static servicePath is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const servicePath = tableserviceModule.v2.TableServiceClient.servicePath; + assert.strictEqual(servicePath, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + + it('throws DeprecationWarning if static apiEndpoint is used', () => { + const stub = sinon.stub(process, 'emitWarning'); + const apiEndpoint = tableserviceModule.v2.TableServiceClient.apiEndpoint; + assert.strictEqual(apiEndpoint, 'bigquery.googleapis.com'); + assert(stub.called); + stub.restore(); + }); + } + it('sets apiEndpoint according to universe domain camelCase', () => { + const client = new tableserviceModule.v2.TableServiceClient({universeDomain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + it('sets apiEndpoint according to universe domain snakeCase', () => { + const client = new tableserviceModule.v2.TableServiceClient({universe_domain: 'example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + }); + + if (typeof process === 'object' && 'env' in process) { + describe('GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variable', () => { + it('sets apiEndpoint from environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 'example.com'; + const client = new tableserviceModule.v2.TableServiceClient(); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + + it('value configured in code has priority over environment variable', () => { + const saved = process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = 
'example.com'; + const client = new tableserviceModule.v2.TableServiceClient({universeDomain: 'configured.example.com'}); + const servicePath = client.apiEndpoint; + assert.strictEqual(servicePath, 'bigquery.configured.example.com'); + if (saved) { + process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN'] = saved; + } else { + delete process.env['GOOGLE_CLOUD_UNIVERSE_DOMAIN']; + } + }); + }); + } + it('does not allow setting both universeDomain and universe_domain', () => { + assert.throws(() => { new tableserviceModule.v2.TableServiceClient({universe_domain: 'example.com', universeDomain: 'example.net'}); }); + }); + + it('has port', () => { + const port = tableserviceModule.v2.TableServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no option', () => { + const client = new tableserviceModule.v2.TableServiceClient(); + assert(client); + }); + + it('should create a client with gRPC fallback', () => { + const client = new tableserviceModule.v2.TableServiceClient({ + fallback: true, + }); + assert(client); + }); + + it('has initialize method and supports deferred initialization', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.tableServiceStub, undefined); + await client.initialize(); + assert(client.tableServiceStub); + }); + + it('has close method for the initialized client', done => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + assert(client.tableServiceStub); + client.close().then(() => { + done(); + }); + }); + + it('has close method for the non-initialized client', done => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + assert.strictEqual(client.tableServiceStub, undefined); + client.close().then(() => { + done(); + }); + }); + + it('has getProjectId method', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().resolves(fakeProjectId); + const result = await client.getProjectId(); + assert.strictEqual(result, fakeProjectId); + assert((client.auth.getProjectId as SinonStub).calledWithExactly()); + }); + + it('has getProjectId method with callback', async () => { + const fakeProjectId = 'fake-project-id'; + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.auth.getProjectId = sinon.stub().callsArgWith(0, null, fakeProjectId); + const promise = new Promise((resolve, reject) => { + client.getProjectId((err?: Error|null, projectId?: string|null) => { + if (err) { + reject(err); + } else { + resolve(projectId); + } + }); + }); + const result = await promise; + assert.strictEqual(result, fakeProjectId); + }); + }); + + describe('getTable', () => { + it('invokes getTable without error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new 
protos.google.cloud.bigquery.v2.GetTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Table() + ); + client.innerApiCalls.getTable = stubSimpleCall(expectedResponse); + const [response] = await client.getTable(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getTable without error using callback', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Table() + ); + client.innerApiCalls.getTable = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.getTable( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.ITable|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.getTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getTable with error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.getTable = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.getTable(request), expectedError); + const actualRequest = (client.innerApiCalls.getTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.getTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes getTable with closed client', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.GetTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.GetTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.getTable(request), expectedError); + }); + }); + + describe('insertTable', () => { + it('invokes insertTable without error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Table() + ); + client.innerApiCalls.insertTable = stubSimpleCall(expectedResponse); + const [response] = await client.insertTable(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertTable without error using callback', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Table() + ); + client.innerApiCalls.insertTable = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.insertTable( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.ITable|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.insertTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertTable with error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.insertTable = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.insertTable(request), expectedError); + const actualRequest = (client.innerApiCalls.insertTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.insertTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes insertTable with closed client', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.InsertTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.InsertTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.insertTable(request), expectedError); + }); + }); + + describe('patchTable', () => { + it('invokes patchTable without error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Table() + ); + client.innerApiCalls.patchTable = stubSimpleCall(expectedResponse); + const [response] = await client.patchTable(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchTable without error using callback', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Table() + ); + client.innerApiCalls.patchTable = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.patchTable( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.ITable|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.patchTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchTable with error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.patchTable = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.patchTable(request), expectedError); + const actualRequest = (client.innerApiCalls.patchTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.patchTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes patchTable with closed client', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.patchTable(request), expectedError); + }); + }); + + describe('updateTable', () => { + it('invokes updateTable without error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Table() + ); + client.innerApiCalls.updateTable = stubSimpleCall(expectedResponse); + const [response] = await client.updateTable(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateTable without error using callback', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.cloud.bigquery.v2.Table() + ); + client.innerApiCalls.updateTable = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.updateTable( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.ITable|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.updateTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateTable with error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.updateTable = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.updateTable(request), expectedError); + const actualRequest = (client.innerApiCalls.updateTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.updateTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes updateTable with closed client', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.UpdateOrPatchTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.UpdateOrPatchTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.updateTable(request), expectedError); + }); + }); + + describe('deleteTable', () => { + it('invokes deleteTable without error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteTable = stubSimpleCall(expectedResponse); + const [response] = await client.deleteTable(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteTable without error using callback', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? '' }`; + const expectedResponse = generateSampleMessage( + new protos.google.protobuf.Empty() + ); + client.innerApiCalls.deleteTable = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.deleteTable( + request, + (err?: Error|null, result?: protos.google.protobuf.IEmpty|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.deleteTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteTable with error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }&table_id=${defaultValue3 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.deleteTable = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.deleteTable(request), expectedError); + const actualRequest = (client.innerApiCalls.deleteTable as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.deleteTable as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes deleteTable with closed client', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.DeleteTableRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['datasetId']); + request.datasetId = defaultValue2; + const defaultValue3 = + getTypeDefaultValue('.google.cloud.bigquery.v2.DeleteTableRequest', ['tableId']); + request.tableId = defaultValue3; + const expectedError = new Error('The client has already been closed.'); + client.close(); + await assert.rejects(client.deleteTable(request), expectedError); + }); + }); + + describe('listTables', () => { + it('invokes listTables without error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListTablesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + ]; + client.innerApiCalls.listTables = stubSimpleCall(expectedResponse); + const [response] = await client.listTables(request); + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listTables as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listTables as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listTables without error using callback', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListTablesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`;const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + ]; + client.innerApiCalls.listTables = stubSimpleCallWithCallback(expectedResponse); + const promise = new Promise((resolve, reject) => { + client.listTables( + request, + (err?: Error|null, result?: protos.google.cloud.bigquery.v2.IListFormatTable[]|null) => { + if (err) { + reject(err); + } else { + resolve(result); + } + }); + }); + const response = await promise; + assert.deepStrictEqual(response, expectedResponse); + const actualRequest = (client.innerApiCalls.listTables as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listTables as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listTables with error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListTablesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.innerApiCalls.listTables = stubSimpleCall(undefined, expectedError); + await assert.rejects(client.listTables(request), expectedError); + const actualRequest = (client.innerApiCalls.listTables as SinonStub) + .getCall(0).args[0]; + assert.deepStrictEqual(actualRequest, request); + const actualHeaderRequestParams = (client.innerApiCalls.listTables as SinonStub) + .getCall(0).args[1].otherArgs.headers['x-goog-request-params']; + assert(actualHeaderRequestParams.includes(expectedHeaderRequestParams)); + }); + + it('invokes listTablesStream without error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListTablesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + ]; + client.descriptors.page.listTables.createStream = stubPageStreamingCall(expectedResponse); + const stream = client.listTablesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatTable[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatTable) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + const responses = await promise; + assert.deepStrictEqual(responses, expectedResponse); + assert((client.descriptors.page.listTables.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listTables, request)); + assert( + (client.descriptors.page.listTables.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('invokes listTablesStream with error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListTablesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listTables.createStream = stubPageStreamingCall(undefined, expectedError); + const stream = client.listTablesStream(request); + const promise = new Promise((resolve, reject) => { + const responses: protos.google.cloud.bigquery.v2.ListFormatTable[] = []; + stream.on('data', (response: protos.google.cloud.bigquery.v2.ListFormatTable) => { + responses.push(response); + }); + stream.on('end', () => { + resolve(responses); + }); + stream.on('error', (err: Error) => { + reject(err); + }); + }); + await assert.rejects(promise, expectedError); + assert((client.descriptors.page.listTables.createStream as SinonStub) + .getCall(0).calledWith(client.innerApiCalls.listTables, request)); + assert( + (client.descriptors.page.listTables.createStream as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listTables without error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListTablesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? '' }`; + const expectedResponse = [ + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + generateSampleMessage(new protos.google.cloud.bigquery.v2.ListFormatTable()), + ]; + client.descriptors.page.listTables.asyncIterate = stubAsyncIterationCall(expectedResponse); + const responses: protos.google.cloud.bigquery.v2.IListFormatTable[] = []; + const iterable = client.listTablesAsync(request); + for await (const resource of iterable) { + responses.push(resource!); + } + assert.deepStrictEqual(responses, expectedResponse); + assert.deepStrictEqual( + (client.descriptors.page.listTables.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listTables.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + + it('uses async iteration with listTables with error', async () => { + const client = new tableserviceModule.v2.TableServiceClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + client.initialize(); + const request = generateSampleMessage( + new protos.google.cloud.bigquery.v2.ListTablesRequest() + ); + const defaultValue1 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['projectId']); + request.projectId = defaultValue1; + const defaultValue2 = + getTypeDefaultValue('.google.cloud.bigquery.v2.ListTablesRequest', ['datasetId']); + request.datasetId = defaultValue2; + const expectedHeaderRequestParams = `project_id=${defaultValue1 ?? '' }&dataset_id=${defaultValue2 ?? 
'' }`; + const expectedError = new Error('expected'); + client.descriptors.page.listTables.asyncIterate = stubAsyncIterationCall(undefined, expectedError); + const iterable = client.listTablesAsync(request); + await assert.rejects(async () => { + const responses: protos.google.cloud.bigquery.v2.IListFormatTable[] = []; + for await (const resource of iterable) { + responses.push(resource!); + } + }); + assert.deepStrictEqual( + (client.descriptors.page.listTables.asyncIterate as SinonStub) + .getCall(0).args[1], request); + assert( + (client.descriptors.page.listTables.asyncIterate as SinonStub) + .getCall(0).args[2].otherArgs.headers['x-goog-request-params'].includes( + expectedHeaderRequestParams + ) + ); + }); + }); +}); diff --git a/baselines/bigquery-v2/tsconfig.json.baseline b/baselines/bigquery-v2/tsconfig.json.baseline new file mode 100644 index 000000000..c78f1c884 --- /dev/null +++ b/baselines/bigquery-v2/tsconfig.json.baseline @@ -0,0 +1,19 @@ +{ + "extends": "./node_modules/gts/tsconfig-google.json", + "compilerOptions": { + "rootDir": ".", + "outDir": "build", + "resolveJsonModule": true, + "lib": [ + "es2018", + "dom" + ] + }, + "include": [ + "src/*.ts", + "src/**/*.ts", + "test/*.ts", + "test/**/*.ts", + "system-test/*.ts" + ] +} diff --git a/baselines/bigquery-v2/webpack.config.js.baseline b/baselines/bigquery-v2/webpack.config.js.baseline new file mode 100644 index 000000000..f4947d41e --- /dev/null +++ b/baselines/bigquery-v2/webpack.config.js.baseline @@ -0,0 +1,64 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const path = require('path'); + +module.exports = { + entry: './src/index.ts', + output: { + library: 'DatasetService', + filename: './dataset-service.js', + }, + node: { + child_process: 'empty', + fs: 'empty', + crypto: 'empty', + }, + resolve: { + alias: { + '../../../package.json': path.resolve(__dirname, 'package.json'), + }, + extensions: ['.js', '.json', '.ts'], + }, + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/ + }, + { + test: /node_modules[\\/]@grpc[\\/]grpc-js/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]grpc/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]retry-request/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]https?-proxy-agent/, + use: 'null-loader' + }, + { + test: /node_modules[\\/]gtoken/, + use: 'null-loader' + }, + ], + }, + mode: 'production', +}; diff --git a/templates/cjs/typescript_gapic/_util.njk b/templates/cjs/typescript_gapic/_util.njk index 89802e385..65d1599e2 100644 --- a/templates/cjs/typescript_gapic/_util.njk +++ b/templates/cjs/typescript_gapic/_util.njk @@ -39,7 +39,7 @@ limitations under the License. 
 {%- macro printCommentsPageStream(method, generatedName) -%}
 {{- printPageStreamHeader(method) -}}
 {{- printRequest(method) -}}
-{{- printCommentsForParams(method) -}}
+{{- printCommentsForParams(method, "stream") -}}
 {{- printOptions() -}}
 {{- printReturnPageStream(method, generatedName) }}
 {%- endmacro -%}
@@ -47,7 +47,7 @@ limitations under the License.
 {%- macro printCommentsPageAsync(method, generatedName) -%}
 {{- printPageAsyncHeader(method) -}}
 {{- printRequest(method) -}}
-{{- printCommentsForParams(method) -}}
+{{- printCommentsForParams(method, "async") -}}
 {{- printOptions() -}}
 {{- printReturnPageAsync(method, generatedName) }}
 {%- endmacro -%}
@@ -93,13 +93,21 @@ limitations under the License.
 {%- endif %}
 {%- endmacro -%}
 
-{%- macro printCommentsForParams(method) -%}
+{%- macro printCommentsForParams(method, asyncOrStream) -%}
 {% if not method.clientStreaming %}
 {%- set commentsMap = method.paramComment -%}
 {%- for oneComment in commentsMap -%}
 {%- set type = oneComment.paramType -%}
 {%- if type.startsWith('.') %}
+ {%- if method.maxResultsParameter
+ and method.pagingResponseType
+ and printRequestField(oneComment)=="request.maxResults"
+ and not asyncOrStream
+ and (type == ".google.protobuf.UInt32Value" or type == ".google.protobuf.Int32Value")%}
+ * @param { {{- type.substring(1) -}} | number } {{ printRequestField(oneComment) }}
+ {%- else %}
 * @param { {{- type.substring(1) -}} } {{ printRequestField(oneComment) }}
+ {%- endif -%}
 {%- else %}
 * @param { {{- convertParamType(oneComment.paramType) -}} } {{ printRequestField(oneComment) }}
 {%- endif -%}
diff --git a/templates/cjs/typescript_gapic/src/$version/$service_client.ts.njk b/templates/cjs/typescript_gapic/src/$version/$service_client.ts.njk
index a67853369..838c85301 100644
--- a/templates/cjs/typescript_gapic/src/$version/$service_client.ts.njk
+++ b/templates/cjs/typescript_gapic/src/$version/$service_client.ts.njk
@@ -848,6 +848,7 @@ export class {{ service.name }}Client {
   }
 {%- endfor %}
 {%- for method in service.paging %}
+  {%- if not method.ignoreMapPagingMethod %}
 {%- if not method.pagingMapResponseType %}
 
 /**
@@ -893,10 +894,21 @@ export class {{ service.name }}Client {
       {{ util.toInterface(method.outputInterface) }}
     ]>|void {
     request = request || {};
+    {%- if method.maxResultsParameter %}
+    // Converts number to UInt32 or Int32 value for non-compliant APIs.
+    if(request.maxResults && typeof request.maxResults === "number"){
+      const maxResultsObject = {"value": request.maxResults}
+      request.maxResults = maxResultsObject
+    }
+
+    {%- endif %}
+
+    {%- for field in method.autoPopulatedFields %}
     if (!request.{{ field.toCamelCase() }}) {
       request.{{ field.toCamelCase() }} = gax.makeUUID();
-    }
-{%- endfor %}
+    }
+
+    {%- endfor %}
     let options: CallOptions;
     if (typeof optionsOrCallback === 'function' && callback === undefined) {
       callback = optionsOrCallback;
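The hunk above inlines a small normalization step into every generated paging method: a caller may pass `maxResults` as a plain JavaScript `number`, and the client boxes it into the protobuf wrapper shape before the request goes out. Below is a minimal standalone TypeScript sketch of what the inlined statements amount to; the interfaces are simplified stand-ins for the protobuf-generated types, and `normalizeMaxResults` is a hypothetical name (the template emits the statements directly in the method body rather than as a helper):

```typescript
// Simplified stand-in for google.protobuf.UInt32Value / Int32Value.
interface IUInt32Value {
  value?: number | null;
}

// Simplified stand-in for a BigQuery-style paging request message.
interface IListTablesRequest {
  projectId?: string;
  datasetId?: string;
  // Widened so callers can pass either the wrapper or a bare number.
  maxResults?: IUInt32Value | number;
}

// Mirrors the inlined template logic: a bare number is boxed into the
// wrapper shape, so a non-wrapper-aware ("non-compliant") API still
// receives {value: n} on the wire.
function normalizeMaxResults(request: IListTablesRequest): void {
  if (request.maxResults && typeof request.maxResults === 'number') {
    request.maxResults = {value: request.maxResults};
  }
}

const request: IListTablesRequest = {
  projectId: 'my-project',
  datasetId: 'my_dataset',
  maxResults: 50,
};
normalizeMaxResults(request);
// request.maxResults is now {value: 50}; a caller-supplied wrapper
// passes through untouched.
```

Like the template's `request.maxResults &&` guard, a `maxResults` of `0` (or `undefined`) is left as-is rather than boxed.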
+ if(request.maxResults && typeof request.maxResults === "number"){ + const maxResultsObject = {"value": request.maxResults} + request.maxResults = maxResultsObject + } + + {%- endif %} + {%- for field in method.autoPopulatedFields %} if (!request.{{ field.toCamelCase() }}) { request.{{ field.toCamelCase() }} = gax.makeUUID(); - } {%- endfor %} + } + + {%- endfor %} let options: CallOptions; if (typeof optionsOrCallback === 'function' && callback === undefined) { callback = optionsOrCallback; diff --git a/templates/cjs/typescript_samples/samples/generated/$version/_util.njk b/templates/cjs/typescript_samples/samples/generated/$version/_util.njk index e0ca0ed7d..c0c555eaf 100644 --- a/templates/cjs/typescript_samples/samples/generated/$version/_util.njk +++ b/templates/cjs/typescript_samples/samples/generated/$version/_util.njk @@ -44,7 +44,15 @@ {%- elif printParamFieldSample(oneComment) == 'region' %} // const {{ printParamFieldSample(oneComment) }} = 'us-central1' {%- elif type.startsWith('.') %} +{%- if method.maxResultsParameter + and method.pagingResponseType + and (printParamFieldSample(oneComment) | replace('[', '') | replace(']', '')).val=="maxResults" + and not asyncOrStream + and (type == ".google.protobuf.UInt32Value" or type == ".google.protobuf.Int32Value")%} + // const {{ printParamFieldSample(oneComment) | replace('[', '') | replace(']', '') }} = {{ printTypeExample('number') -}} {{-\n-}} + {%- else %} // const {{ printParamFieldSample(oneComment) | replace('[', '') | replace(']', '') }} = {{ printTypeExample(type.substring(1)) -}} {{-\n-}} + {%- endif -%} {%- else %} // const {{ printParamFieldSample(oneComment) | replace('[', '') | replace(']', '') }} = {{ printTypeExample(convertParamType(oneComment.paramType)) -}} {%- endif -%} diff --git a/templates/esm/typescript_gapic/_util.njk b/templates/esm/typescript_gapic/_util.njk index 89802e385..65d1599e2 100644 --- a/templates/esm/typescript_gapic/_util.njk +++ b/templates/esm/typescript_gapic/_util.njk @@ -39,7 +39,7 @@ limitations under the License. {%- macro printCommentsPageStream(method, generatedName) -%} {{- printPageStreamHeader(method) -}} {{- printRequest(method) -}} - {{- printCommentsForParams(method) -}} + {{- printCommentsForParams(method, "stream") -}} {{- printOptions() -}} {{- printReturnPageStream(method, generatedName) }} {%- endmacro -%} @@ -47,7 +47,7 @@ limitations under the License. {%- macro printCommentsPageAsync(method, generatedName) -%} {{- printPageAsyncHeader(method) -}} {{- printRequest(method) -}} - {{- printCommentsForParams(method) -}} + {{- printCommentsForParams(method, "async") -}} {{- printOptions() -}} {{- printReturnPageAsync(method, generatedName) }} {%- endmacro -%} @@ -93,13 +93,21 @@ limitations under the License. 
{%- endif %} {%- endmacro -%} -{%- macro printCommentsForParams(method) -%} +{%- macro printCommentsForParams(method, asyncOrStream) -%} {% if not method.clientStreaming %} {%- set commentsMap = method.paramComment -%} {%- for oneComment in commentsMap -%} {%- set type = oneComment.paramType -%} {%- if type.startsWith('.') %} + {%- if method.maxResultsParameter + and method.pagingResponseType + and printRequestField(oneComment)=="request.maxResults" + and not asyncOrStream + and (type == ".google.protobuf.UInt32Value" or type == ".google.protobuf.Int32Value")%} + * @param { {{- type.substring(1) -}} | number } {{ printRequestField(oneComment) }} + {%- else %} * @param { {{- type.substring(1) -}} } {{ printRequestField(oneComment) }} + {%- endif -%} {%- else %} * @param { {{- convertParamType(oneComment.paramType) -}} } {{ printRequestField(oneComment) }} {%- endif -%} diff --git a/templates/esm/typescript_gapic/esm/src/$version/$service_client.ts.njk b/templates/esm/typescript_gapic/esm/src/$version/$service_client.ts.njk index d0e547930..54e4b7daf 100644 --- a/templates/esm/typescript_gapic/esm/src/$version/$service_client.ts.njk +++ b/templates/esm/typescript_gapic/esm/src/$version/$service_client.ts.njk @@ -905,6 +905,13 @@ export class {{ service.name }}Client { {{ util.toInterface(method.outputInterface) }} ]>|void { request = request || {}; + {%- if method.maxResultsParameter %} + // Converts a number to a UInt32Value or Int32Value wrapper for non-compliant APIs. + if(request.maxResults && typeof request.maxResults === "number"){ + const maxResultsObject = {"value": request.maxResults} + request.maxResults = maxResultsObject + } + {%- endif %} {%- for field in method.autoPopulatedFields %} if (!request.{{ field.toCamelCase() }}) { request.{{ field.toCamelCase() }} = gax.makeUUID(); diff --git a/templates/esm/typescript_samples/samples/generated/$version/_util.njk b/templates/esm/typescript_samples/samples/generated/$version/_util.njk index e0ca0ed7d..c0299db56 100644 --- a/templates/esm/typescript_samples/samples/generated/$version/_util.njk +++ b/templates/esm/typescript_samples/samples/generated/$version/_util.njk @@ -44,8 +44,16 @@ {%- elif printParamFieldSample(oneComment) == 'region' %} // const {{ printParamFieldSample(oneComment) }} = 'us-central1' {%- elif type.startsWith('.') %} +{%- if method.maxResultsParameter + and method.pagingResponseType + and (printParamFieldSample(oneComment) | replace('[', '') | replace(']', '')).val=="maxResults" + and not asyncOrStream + and (type == ".google.protobuf.UInt32Value" or type == ".google.protobuf.Int32Value")%} + // const {{ printParamFieldSample(oneComment) | replace('[', '') | replace(']', '') }} = {{ printTypeExample('number') -}} {{-\n-}} + {%- else %} // const {{ printParamFieldSample(oneComment) | replace('[', '') | replace(']', '') }} = {{ printTypeExample(type.substring(1)) -}} {{-\n-}} -{%- else %} + {%- endif -%} + {%- else %} // const {{ printParamFieldSample(oneComment) | replace('[', '') | replace(']', '') }} = {{ printTypeExample(convertParamType(oneComment.paramType)) -}} {%- endif -%} {%- endif -%} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/biglake_config.proto b/test-fixtures/protos/google/cloud/bigquery/v2/biglake_config.proto new file mode 100644 index 000000000..fd076037b --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/biglake_config.proto @@ -0,0 +1,62 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "BigLakeConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Configuration for BigLake managed tables. +message BigLakeConfiguration { + // Supported file formats for BigLake tables. + enum FileFormat { + // Default Value. + FILE_FORMAT_UNSPECIFIED = 0; + + // Apache Parquet format. + PARQUET = 1; + } + + // Supported table formats for BigLake tables. + enum TableFormat { + // Default Value. + TABLE_FORMAT_UNSPECIFIED = 0; + + // Apache Iceberg format. + ICEBERG = 1; + } + + // Required. The connection specifying the credentials to be used to read and + // write to external storage, such as Cloud Storage. The connection_id can + // have the form `{project}.{location}.{connection_id}` or + // `projects/{project}/locations/{location}/connections/{connection_id}`. + string connection_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The fully qualified location prefix of the external folder where + // table data is stored. The '*' wildcard character is not allowed. The URI + // should be in the format `gs://bucket/path_to_table/`. + string storage_uri = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The file format the table data is stored in. + FileFormat file_format = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The table format the metadata only snapshots are stored in. + TableFormat table_format = 4 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/clustering.proto b/test-fixtures/protos/google/cloud/bigquery/v2/clustering.proto new file mode 100644 index 000000000..b871f41ec --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/clustering.proto @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ClusteringProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Configures table clustering. +message Clustering { + // One or more fields on which data should be clustered. Only top-level, + // non-repeated, simple-type fields are supported.
The ordering of the + // clustering fields should be prioritized from most to least important + // for filtering purposes. + // + // Additional information on limitations can be found here: + // https://cloud.google.com/bigquery/docs/creating-clustered-tables#limitations + repeated string fields = 1; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/data_format_options.proto b/test-fixtures/protos/google/cloud/bigquery/v2/data_format_options.proto new file mode 100644 index 000000000..e2c6fb67d --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/data_format_options.proto @@ -0,0 +1,29 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "DataFormatOptionsProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Options for data format adjustments. +message DataFormatOptions { + // Optional. Output timestamp as usec int64. Default is false. + bool use_int64_timestamp = 1 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/dataset.proto b/test-fixtures/protos/google/cloud/bigquery/v2/dataset.proto new file mode 100644 index 000000000..62968e292 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/dataset.proto @@ -0,0 +1,625 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/dataset_reference.proto"; +import "google/cloud/bigquery/v2/encryption_config.proto"; +import "google/cloud/bigquery/v2/external_catalog_dataset_options.proto"; +import "google/cloud/bigquery/v2/external_dataset_reference.proto"; +import "google/cloud/bigquery/v2/restriction_config.proto"; +import "google/cloud/bigquery/v2/routine_reference.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "DatasetProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Dataset Service. +// +// It should not be relied on for production use cases at this time. +service DatasetService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Returns the dataset specified by datasetID. + rpc GetDataset(GetDatasetRequest) returns (Dataset) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}" + }; + } + + // Creates a new empty dataset. + rpc InsertDataset(InsertDatasetRequest) returns (Dataset) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets" + body: "dataset" + }; + } + + // Updates information in an existing dataset. The update method replaces the + // entire dataset resource, whereas the patch method only replaces fields that + // are provided in the submitted dataset resource. + // This method supports RFC5789 patch semantics. + rpc PatchDataset(UpdateOrPatchDatasetRequest) returns (Dataset) { + option (google.api.http) = { + patch: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}" + body: "dataset" + }; + } + + // Updates information in an existing dataset. The update method replaces the + // entire dataset resource, whereas the patch method only replaces fields that + // are provided in the submitted dataset resource. + rpc UpdateDataset(UpdateOrPatchDatasetRequest) returns (Dataset) { + option (google.api.http) = { + put: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}" + body: "dataset" + }; + } + + // Deletes the dataset specified by the datasetId value. Before you can delete + // a dataset, you must delete all its tables, either manually or by specifying + // deleteContents. Immediately after deletion, you can create another dataset + // with the same name. + rpc DeleteDataset(DeleteDatasetRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}" + }; + } + + // Lists all datasets in the specified project to which the user has been + // granted the READER dataset role. 
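+  // Results are returned one page at a time. For example, a caller can pass + // maxResults together with the pageToken from a previous DatasetList response + // to iterate through the entire collection (see ListDatasetsRequest below).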
+ rpc ListDatasets(ListDatasetsRequest) returns (DatasetList) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets" + }; + } + + // Undeletes a dataset which is within time travel window based on datasetId. + // If a time is specified, the dataset version deleted at that time is + // undeleted, else the last live version is undeleted. + rpc UndeleteDataset(UndeleteDatasetRequest) returns (Dataset) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}:undelete" + body: "*" + }; + } +} + +// Grants all resources of particular types in a particular dataset read access +// to the current dataset. +// +// Similar to how individually authorized views work, updates to any resource +// granted through its dataset (including creation of new resources) requires +// read permission to referenced resources, plus write permission to the +// authorizing dataset. +message DatasetAccessEntry { + // Indicates the type of resources in a dataset that the entry applies to. + enum TargetType { + // Do not use. You must set a target type explicitly. + TARGET_TYPE_UNSPECIFIED = 0; + + // This entry applies to views in the dataset. + VIEWS = 1; + + // This entry applies to routines in the dataset. + ROUTINES = 2; + } + + // The dataset this entry applies to + DatasetReference dataset = 1; + + // Which resources in the dataset this entry applies to. Currently, only + // views are supported, but additional target types may be added in the + // future. + repeated TargetType target_types = 2; +} + +// An object that defines dataset access for an entity. +message Access { + // An IAM role ID that should be granted to the user, group, + // or domain specified in this access entry. + // The following legacy mappings will be applied: + // + // * `OWNER`: `roles/bigquery.dataOwner` + // * `WRITER`: `roles/bigquery.dataEditor` + // * `READER`: `roles/bigquery.dataViewer` + // + // This field will accept any of the above formats, but will return only + // the legacy format. For example, if you set this field to + // "roles/bigquery.dataOwner", it will be returned back as "OWNER". + string role = 1; + + // [Pick one] An email address of a user to grant access to. For example: + // fred@example.com. Maps to IAM policy member "user:EMAIL" or + // "serviceAccount:EMAIL". + string user_by_email = 2; + + // [Pick one] An email address of a Google Group to grant access to. + // Maps to IAM policy member "group:GROUP". + string group_by_email = 3; + + // [Pick one] A domain to grant access to. Any users signed in with the domain + // specified will be granted the specified access. Example: "example.com". + // Maps to IAM policy member "domain:DOMAIN". + string domain = 4; + + // [Pick one] A special group to grant access to. Possible values include: + // + // * projectOwners: Owners of the enclosing project. + // * projectReaders: Readers of the enclosing project. + // * projectWriters: Writers of the enclosing project. + // * allAuthenticatedUsers: All authenticated BigQuery users. + // + // Maps to similarly-named IAM members. + string special_group = 5; + + // [Pick one] Some other type of member that appears in the IAM Policy but + // isn't a user, group, domain, or special group. + string iam_member = 7; + + // [Pick one] A view from a different dataset to grant access to. Queries + // executed against that view will have read access to views/tables/routines + // in this dataset. + // The role field is not required when this field is set. 
If that view is + // updated by any user, access to the view needs to be granted again via an + // update operation. + TableReference view = 6; + + // [Pick one] A routine from a different dataset to grant access to. Queries + // executed against that routine will have read access to + // views/tables/routines in this dataset. Only UDF is supported for now. + // The role field is not required when this field is set. If that routine is + // updated by any user, access to the routine needs to be granted again via + // an update operation. + RoutineReference routine = 8; + + // [Pick one] A grant authorizing all resources of a particular type in a + // particular dataset access to this dataset. Only views are supported for + // now. The role field is not required when this field is set. If that dataset + // is deleted and re-created, its access needs to be granted again via an + // update operation. + DatasetAccessEntry dataset = 9; +} + +// Represents a BigQuery dataset. +message Dataset { + // Indicates the billing model that will be applied to the dataset. + enum StorageBillingModel { + // Value not set. + STORAGE_BILLING_MODEL_UNSPECIFIED = 0; + + // Billing for logical bytes. + LOGICAL = 1; + + // Billing for physical bytes. + PHYSICAL = 2; + } + + // Output only. The resource type. + string kind = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A hash of the resource. + string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The fully-qualified unique name of the dataset in the format + // projectId:datasetId. The dataset name without the project name is given in + // the datasetId field. When creating a new dataset, leave this field blank, + // and instead specify the datasetId field. + string id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A URL that can be used to access the resource again. You can + // use this URL in Get or Update requests to the resource. + string self_link = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. A reference that identifies the dataset. + DatasetReference dataset_reference = 5 + [(google.api.field_behavior) = REQUIRED]; + + // Optional. A descriptive name for the dataset. + google.protobuf.StringValue friendly_name = 6 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A user-friendly description of the dataset. + google.protobuf.StringValue description = 7 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The default lifetime of all tables in the dataset, in + // milliseconds. The minimum lifetime value is 3600000 milliseconds (one + // hour). To clear an existing default expiration with a PATCH request, set to + // 0. Once this property is set, all newly-created tables in the dataset will + // have an expirationTime property set to the creation time plus the value in + // this property, and changing the value will only affect new tables, not + // existing ones. When the expirationTime for a given table is reached, that + // table will be deleted automatically. + // If a table's expirationTime is modified or removed before the table + // expires, or if you provide an explicit expirationTime when creating a + // table, that value takes precedence over the default expiration time + // indicated by this property. + google.protobuf.Int64Value default_table_expiration_ms = 8 + [(google.api.field_behavior) = OPTIONAL]; + + // This default partition expiration, expressed in milliseconds. 
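+  // For example, a value of 86400000 (24 hours; an illustrative value) causes + // partitions in newly created tables to expire one day after their partition time.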
+ // + // When new time-partitioned tables are created in a dataset where this + // property is set, the table will inherit this value, propagated as the + // `TimePartitioning.expirationMs` property on the new table. If you set + // `TimePartitioning.expirationMs` explicitly when creating a table, + // the `defaultPartitionExpirationMs` of the containing dataset is ignored. + // + // When creating a partitioned table, if `defaultPartitionExpirationMs` + // is set, the `defaultTableExpirationMs` value is ignored and the table + // will not inherit a table expiration deadline. + google.protobuf.Int64Value default_partition_expiration_ms = 14; + + // The labels associated with this dataset. You can use these + // to organize and group your datasets. + // You can set this property when inserting or updating a dataset. + // See [Creating and Updating Dataset + // Labels](https://cloud.google.com/bigquery/docs/creating-managing-labels#creating_and_updating_dataset_labels) + // for more information. + map<string, string> labels = 9; + + // Optional. An array of objects that define dataset access for one or more + // entities. You can set this property when inserting or updating a dataset in + // order to control who is allowed to access the data. If unspecified at + // dataset creation time, BigQuery adds default dataset access for the + // following entities: access.specialGroup: projectReaders; access.role: + // READER; access.specialGroup: projectWriters; access.role: WRITER; + // access.specialGroup: projectOwners; access.role: OWNER; + // access.userByEmail: [dataset creator email]; access.role: OWNER; + // If you patch a dataset, then this field is overwritten by the patched + // dataset's access field. To add entities, you must supply the entire + // existing access array in addition to any new entities that you want to add. + repeated Access access = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The time when this dataset was created, in milliseconds since + // the epoch. + int64 creation_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The date when this dataset was last modified, in milliseconds + // since the epoch. + int64 last_modified_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The geographic location where the dataset should reside. See + // https://cloud.google.com/bigquery/docs/locations for supported + // locations. + string location = 13; + + // The default encryption key for all tables in the dataset. + // After this property is set, the encryption key of all newly-created tables + // in the dataset is set to this value unless the table creation request or + // query explicitly overrides the key. + EncryptionConfiguration default_encryption_configuration = 16; + + // Output only. Reserved for future use. + google.protobuf.BoolValue satisfies_pzs = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Reserved for future use. + google.protobuf.BoolValue satisfies_pzi = 31 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Same as `type` in `ListFormatDataset`. + // The type of the dataset, one of: + // + // * DEFAULT - only accessible by owner and authorized accounts, + // * PUBLIC - accessible by everyone, + // * LINKED - linked dataset, + // * EXTERNAL - dataset with definition in external metadata catalog. + string type = 18 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The source dataset reference when the dataset is of type LINKED.
+ // For all other dataset types it is not set. This field cannot be updated + // once it is set. Any attempt to update this field using Update and Patch API + // Operations will be ignored. + LinkedDatasetSource linked_dataset_source = 19 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Metadata about the LinkedDataset. Filled out when the dataset + // type is LINKED. + LinkedDatasetMetadata linked_dataset_metadata = 29 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Reference to a read-only external dataset defined in data + // catalogs outside of BigQuery. Filled out when the dataset type is EXTERNAL. + ExternalDatasetReference external_dataset_reference = 20 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Options defining open source compatible datasets living in the + // BigQuery catalog. Contains metadata of open source database, schema or + // namespace represented by the current dataset. + ExternalCatalogDatasetOptions external_catalog_dataset_options = 32 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. TRUE if the dataset and its table names are case-insensitive, + // otherwise FALSE. By default, this is FALSE, which means the dataset and its + // table names are case-sensitive. This field does not affect routine + // references. + google.protobuf.BoolValue is_case_insensitive = 21 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Defines the default collation specification of future tables + // created in the dataset. If a table is created in this dataset without + // table-level default collation, then the table inherits the dataset default + // collation, which is applied to the string fields that do not have explicit + // collation specified. A change to this field affects only tables created + // afterwards, and does not alter the existing tables. + // The following values are supported: + // + // * 'und:ci': undetermined locale, case insensitive. + // * '': empty string. Default to case-sensitive behavior. + google.protobuf.StringValue default_collation = 22 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Defines the default rounding mode specification of new tables + // created within this dataset. During table creation, if this field is + // specified, the table within this dataset will inherit the default rounding + // mode of the dataset. Setting the default rounding mode on a table overrides + // this option. Existing tables in the dataset are unaffected. + // If columns are defined during that table creation, + // they will immediately inherit the table's default rounding mode, + // unless otherwise specified. + TableFieldSchema.RoundingMode default_rounding_mode = 26 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Defines the time travel window in hours. The value can be from 48 + // to 168 hours (2 to 7 days). The default value is 168 hours if this is not + // set. + google.protobuf.Int64Value max_time_travel_hours = 23 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Tags for the dataset. To provide tags as inputs, use the + // `resourceTags` field. + repeated GcpTag tags = 24 + [deprecated = true, (google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. Updates storage_billing_model for the dataset. + StorageBillingModel storage_billing_model = 25 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Output only. Restriction config for all tables and dataset. 
If + // set, restrict certain accesses on the dataset and all its tables based on + // the config. See [Data + // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress) + // for more details. + RestrictionConfig restrictions = 27 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.field_behavior) = OUTPUT_ONLY + ]; + + // Optional. The [tags](https://cloud.google.com/bigquery/docs/tags) attached + // to this dataset. Tag keys are globally unique. Tag key is expected to be in + // the namespaced format, for example "123456789012/environment" where + // 123456789012 is the ID of the parent organization or project resource for + // this tag key. Tag value is expected to be the short name, for example + // "Production". See [Tag + // definitions](https://cloud.google.com/iam/docs/tags-access-control#definitions) + // for more details. + map<string, string> resource_tags = 30 + [(google.api.field_behavior) = OPTIONAL]; +} + +// A global tag managed by Resource Manager. +// https://cloud.google.com/iam/docs/tags-access-control#definitions +message GcpTag { + // Required. The namespaced friendly name of the tag key, e.g. + // "12345/environment" where 12345 is org id. + string tag_key = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The friendly short name of the tag value, e.g. "production". + string tag_value = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// A dataset source type which refers to another BigQuery dataset. +message LinkedDatasetSource { + // The source dataset reference contains project numbers and not project ids. + DatasetReference source_dataset = 1; +} + +// Metadata about the Linked Dataset. +message LinkedDatasetMetadata { + // Specifies whether Linked Dataset is currently in a linked state or not. + enum LinkState { + // The default value. + // Default to the LINKED state. + LINK_STATE_UNSPECIFIED = 0; + + // Normal Linked Dataset state. Data is queryable via the Linked Dataset. + LINKED = 1; + + // Data publisher or owner has unlinked this Linked Dataset. It means you + // can no longer query or see the data in the Linked Dataset. + UNLINKED = 2; + } + + // Output only. Specifies whether Linked Dataset is currently in a linked + // state or not. + LinkState link_state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request format for getting information about a dataset. +message GetDatasetRequest { + // DatasetView specifies which dataset information is returned. + enum DatasetView { + // The default value. + // Default to the FULL view. + DATASET_VIEW_UNSPECIFIED = 0; + + // Includes metadata information for the dataset, such as location, + // etag, lastModifiedTime, etc. + METADATA = 1; + + // Includes ACL information for the dataset, which defines dataset access + // for one or more entities. + ACL = 2; + + // Includes both dataset metadata and ACL information. + FULL = 3; + } + + // Required. Project ID of the requested dataset + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the requested dataset + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Specifies the view that determines which dataset information is + // returned. By default, metadata and ACL information are returned. + DatasetView dataset_view = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Request format for inserting a dataset. +message InsertDatasetRequest { + // Required.
Project ID of the new dataset + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Datasets resource to use for the new dataset + Dataset dataset = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Message for updating or patching a dataset. +message UpdateOrPatchDatasetRequest { + // Required. Project ID of the dataset being updated + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the dataset being updated + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Datasets resource which will replace or patch the specified + // dataset. + Dataset dataset = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Request format for deleting a dataset. +message DeleteDatasetRequest { + // Required. Project ID of the dataset being deleted + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of dataset being deleted + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // If True, delete all the tables in the dataset. + // If False and the dataset contains tables, the request will fail. + // Default is False + bool delete_contents = 3; +} + +message ListDatasetsRequest { + // Required. Project ID of the datasets to be listed + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // The maximum number of results to return in a single response page. + // Leverage the page tokens to iterate through the entire collection. + google.protobuf.UInt32Value max_results = 2; + + // Page token, returned by a previous call, to request the next page of + // results + string page_token = 3; + + // Whether to list all datasets, including hidden ones + bool all = 4; + + // An expression for filtering the results of the request by label. + // The syntax is `labels.<name>[:<value>]`. + // Multiple filters can be ANDed together by connecting with a space. + // Example: `labels.department:receiving labels.active`. + // See [Filtering datasets using + // labels](https://cloud.google.com/bigquery/docs/filtering-labels#filtering_datasets_using_labels) + // for details. + string filter = 5; +} + +// A dataset resource with only a subset of fields, to be returned in a list of +// datasets. +message ListFormatDataset { + // The resource type. + // This property always returns the value "bigquery#dataset" + string kind = 1; + + // The fully-qualified, unique, opaque ID of the dataset. + string id = 2; + + // The dataset reference. + // Use this property to access specific parts of the dataset's ID, such as + // project ID or dataset ID. + DatasetReference dataset_reference = 3; + + // The labels associated with this dataset. + // You can use these to organize and group your datasets. + map<string, string> labels = 4; + + // An alternate name for the dataset. The friendly name is purely + // decorative in nature. + google.protobuf.StringValue friendly_name = 5; + + // The geographic location where the dataset resides. + string location = 6; +} + +// Response format for a page of results when listing datasets. +message DatasetList { + // Output only. The resource type. + // This property always returns the value "bigquery#datasetList" + string kind = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A hash value of the results page. You can use this property to + // determine if the page has changed since the last request. + string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // A token that can be used to request the next results page.
This property is + // omitted on the final results page. + string next_page_token = 3; + + // An array of the dataset resources in the project. + // Each resource contains basic information. + // For full information about a particular dataset resource, use the Datasets: + // get method. This property is omitted when there are no datasets in the + // project. + repeated ListFormatDataset datasets = 4; + + // A list of skipped locations that were unreachable. For more information + // about BigQuery locations, see: + // https://cloud.google.com/bigquery/docs/locations. Example: "europe-west5" + repeated string unreachable = 5; +} + +// Request format for undeleting a dataset. +message UndeleteDatasetRequest { + // Required. Project ID of the dataset to be undeleted + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the dataset being undeleted + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The exact time when the dataset was deleted. If not specified, + // the most recently deleted version is undeleted. Undeleting a dataset + // using deletion time is not supported. + google.protobuf.Timestamp deletion_time = 3 + [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/dataset_reference.proto b/test-fixtures/protos/google/cloud/bigquery/v2/dataset_reference.proto new file mode 100644 index 000000000..03695a4c4 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/dataset_reference.proto @@ -0,0 +1,34 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "DatasetReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Identifier for a dataset. +message DatasetReference { + // Required. A unique ID for this dataset, without the project name. The ID + // must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + // The maximum length is 1,024 characters. + string dataset_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The ID of the project containing this dataset. + string project_id = 2 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/decimal_target_types.proto b/test-fixtures/protos/google/cloud/bigquery/v2/decimal_target_types.proto new file mode 100644 index 000000000..72266b110 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/decimal_target_types.proto @@ -0,0 +1,40 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "DecimalTargetTypesProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// The data types that could be used as a target type when converting decimal +// values. +enum DecimalTargetType { + // Invalid type. + DECIMAL_TARGET_TYPE_UNSPECIFIED = 0; + + // Decimal values could be converted to NUMERIC + // type. + NUMERIC = 1; + + // Decimal values could be converted to BIGNUMERIC + // type. + BIGNUMERIC = 2; + + // Decimal values could be converted to STRING type. + STRING = 3; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/encryption_config.proto b/test-fixtures/protos/google/cloud/bigquery/v2/encryption_config.proto new file mode 100644 index 000000000..ac7ee1679 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/encryption_config.proto @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "EncryptionConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Configuration for Cloud KMS encryption settings. +message EncryptionConfiguration { + // Optional. Describes the Cloud KMS encryption key that will be used to + // protect destination BigQuery table. The BigQuery Service Account associated + // with your project requires access to this encryption key. + google.protobuf.StringValue kms_key_name = 1 + [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/error.proto b/test-fixtures/protos/google/cloud/bigquery/v2/error.proto new file mode 100644 index 000000000..9cab21c4f --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/error.proto @@ -0,0 +1,36 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Error details. +message ErrorProto { + // A short error code that summarizes the error. + string reason = 1; + + // Specifies where the error occurred, if present. + string location = 2; + + // Debugging information. This property is internal to Google and should not + // be used. + string debug_info = 3; + + // A human-readable description of the error. + string message = 4; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto b/test-fixtures/protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto new file mode 100644 index 000000000..70d0f1f40 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/external_catalog_dataset_options.proto @@ -0,0 +1,39 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "ExternalCatalogDatasetOptionsProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Options defining open source compatible datasets living in the BigQuery +// catalog. Contains metadata of open source database, schema +// or namespace represented by the current dataset. +message ExternalCatalogDatasetOptions { + // Optional. A map of key value pairs defining the parameters and properties + // of the open source schema. Maximum size of 2 MiB. + map<string, string> parameters = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The storage location URI for all tables in the dataset. + // Equivalent to hive metastore's database locationUri. Maximum length of 1024 + // characters. + string default_storage_location_uri = 2 + [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/external_catalog_table_options.proto b/test-fixtures/protos/google/cloud/bigquery/v2/external_catalog_table_options.proto new file mode 100644 index 000000000..b0833d441 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/external_catalog_table_options.proto @@ -0,0 +1,87 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "ExternalCatalogTableOptionsProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Metadata about open source compatible table. The fields contained in +// these options correspond to hive metastore's table level properties. +message ExternalCatalogTableOptions { + // Optional. A map of key value pairs defining the parameters and properties + // of the open source table. Corresponds with hive meta store table + // parameters. Maximum size of 4 MiB. + map<string, string> parameters = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A storage descriptor containing information about the physical + // storage of this table. + StorageDescriptor storage_descriptor = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The connection specifying the credentials to be used to read + // external storage, such as Azure Blob, Cloud Storage, or S3. The connection + // is needed to read the open source table from BigQuery Engine. The + // connection_id can have the form + // `<project_id>.<location_id>.<connection_id>` or + // `projects/<project_id>/locations/<location_id>/connections/<connection_id>`. + string connection_id = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Contains information about how a table's data is stored and accessed by open +// source query engines. +message StorageDescriptor { + // Optional. The physical location of the table + // (e.g. `gs://spark-dataproc-data/pangea-data/case_sensitive/` or + // `gs://spark-dataproc-data/pangea-data/*`). + // The maximum length is 2056 bytes. + string location_uri = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the fully qualified class name of the InputFormat + // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"). + // The maximum length is 128 characters. + string input_format = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the fully qualified class name of the OutputFormat + // (e.g. "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"). + // The maximum length is 128 characters. + string output_format = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Serializer and deserializer information. + SerDeInfo serde_info = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Serializer and deserializer information. +message SerDeInfo { + // Optional. Name of the SerDe. + // The maximum length is 256 characters. + string name = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Required. Specifies a fully-qualified class name of the serialization + // library that is responsible for the translation of data between table + // representation and the underlying low-level input and output format + // structures. The maximum length is 256 characters. + string serialization_library = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Key-value pairs that define the initialization parameters for the + // serialization library. + // Maximum size 10 KiB.
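+  // For example, a Hive-style SerDe might be initialized with parameters such as + // {"field.delim": ","} (a hypothetical key/value pair, shown for illustration only).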
+  map<string, string> parameters = 3 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/external_data_config.proto b/test-fixtures/protos/google/cloud/bigquery/v2/external_data_config.proto new file mode 100644 index 000000000..610af982a --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/external_data_config.proto @@ -0,0 +1,499 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/decimal_target_types.proto"; +import "google/cloud/bigquery/v2/file_set_specification_type.proto"; +import "google/cloud/bigquery/v2/hive_partitioning.proto"; +import "google/cloud/bigquery/v2/json_extension.proto"; +import "google/cloud/bigquery/v2/map_target_type.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ExternalDataConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Options for external data sources. +message AvroOptions { + // Optional. If sourceFormat is set to "AVRO", indicates whether to interpret + // logical types as the corresponding BigQuery data type (for example, + // TIMESTAMP), instead of using the raw type (for example, INTEGER). + google.protobuf.BoolValue use_avro_logical_types = 1 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Parquet Options for load and make external tables. +message ParquetOptions { + // Optional. Indicates whether to infer Parquet ENUM logical type as STRING + // instead of BYTES by default. + google.protobuf.BoolValue enum_as_string = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates whether to use schema inference specifically for + // Parquet LIST logical type. + google.protobuf.BoolValue enable_list_inference = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates how to represent a Parquet map if present. + MapTargetType map_target_type = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Information related to a CSV data source. +message CsvOptions { + // Optional. The separator character for fields in a CSV file. The separator + // is interpreted as a single byte. For files encoded in ISO-8859-1, any + // single character can be used as a separator. For files encoded in UTF-8, + // characters represented in decimal range 1-127 (U+0001-U+007F) can be used + // without any modification. UTF-8 characters encoded with multiple bytes + // (i.e. U+0080 and above) will have only the first byte used for separating + // fields. The remaining bytes will be treated as a part of the field. + // BigQuery also supports the escape sequence "\t" (U+0009) to specify a tab + // separator. The default value is comma (",", U+002C). + string field_delimiter = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional.
The number of rows at the top of a CSV file that BigQuery will + // skip when reading the data. The default value is 0. This property is + // useful if you have header rows in the file that should be skipped. + // When autodetect is on, the behavior is the following: + // + // * skipLeadingRows unspecified - Autodetect tries to detect headers in the + // first row. If they are not detected, the row is read as data. Otherwise + // data is read starting from the second row. + // * skipLeadingRows is 0 - Instructs autodetect that there are no headers and + // data should be read starting from the first row. + // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect + // headers in row N. If headers are not detected, row N is just skipped. + // Otherwise row N is used to extract column names for the detected schema. + google.protobuf.Int64Value skip_leading_rows = 2 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The value that is used to quote data sections in a CSV file. + // BigQuery converts the string to ISO-8859-1 encoding, and then uses the + // first byte of the encoded string to split the data in its raw, binary + // state. + // The default value is a double-quote ("). + // If your data does not contain quoted sections, + // set the property value to an empty string. + // If your data contains quoted newline characters, you must also set the + // allowQuotedNewlines property to true. + // To include the specific quote character within a quoted value, precede it + // with an additional matching quote character. For example, if you want to + // escape the default character ' " ', use ' "" '. + google.protobuf.StringValue quote = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates if BigQuery should allow quoted data sections that + // contain newline characters in a CSV file. The default value is false. + google.protobuf.BoolValue allow_quoted_newlines = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates if BigQuery should accept rows that are missing + // trailing optional columns. If true, BigQuery treats missing trailing + // columns as null values. + // If false, records with missing trailing columns are treated as bad records, + // and if there are too many bad records, an invalid error is returned in the + // job result. The default value is false. + google.protobuf.BoolValue allow_jagged_rows = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The character encoding of the data. + // The supported values are UTF-8, ISO-8859-1, UTF-16BE, UTF-16LE, UTF-32BE, + // and UTF-32LE. The default value is UTF-8. + // BigQuery decodes the data after the raw, binary data has been split using + // the values of the quote and fieldDelimiter properties. + string encoding = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates if the embedded ASCII control characters (the first 32 + // characters in the ASCII-table, from '\x00' to '\x1F') are preserved. + google.protobuf.BoolValue preserve_ascii_control_characters = 7 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies a string that represents a null value in a CSV file. + // For example, if you specify "\N", BigQuery interprets "\N" as a null value + // when querying a CSV file. + // The default value is the empty string. If you set this property to a custom + // value, BigQuery throws an error if an empty string is present for all data + // types except for STRING and BYTE. 
For STRING and BYTE columns, BigQuery
+  // interprets the empty string as an empty value.
+  google.protobuf.StringValue null_marker = 8
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// JSON options for load jobs and external tables.
+message JsonOptions {
+  // Optional. The character encoding of the data.
+  // The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE,
+  // and UTF-32LE. The default value is UTF-8.
+  string encoding = 1 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Information related to a Bigtable column.
+message BigtableColumn {
+  // [Required] Qualifier of the column.
+  // Columns in the parent column family that have this exact qualifier are
+  // exposed as `.` fields.
+  // If the qualifier is a valid UTF-8 string, it can be specified in the
+  // qualifier_string field. Otherwise, a base-64 encoded value must be set to
+  // qualifier_encoded.
+  // The column field name is the same as the column qualifier. However, if the
+  // qualifier is not a valid BigQuery field identifier, i.e. does not match
+  // [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as field_name.
+  google.protobuf.BytesValue qualifier_encoded = 1;
+
+  // Qualifier string.
+  google.protobuf.StringValue qualifier_string = 2;
+
+  // Optional. If the qualifier is not a valid BigQuery field identifier, i.e.
+  // does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided
+  // as the column field name and is used as the field name in queries.
+  string field_name = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The type to which the values in cells of this column are
+  // converted. The values are expected to be encoded using the HBase
+  // Bytes.toBytes function when using the BINARY encoding value.
+  // The following BigQuery types are allowed (case-sensitive):
+  //
+  // * BYTES
+  // * STRING
+  // * INTEGER
+  // * FLOAT
+  // * BOOLEAN
+  // * JSON
+  //
+  // The default type is BYTES.
+  // 'type' can also be set at the column family level. However, the setting at
+  // this level takes precedence if 'type' is set at both levels.
+  string type = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The encoding of the values when the type is not STRING.
+  // Acceptable encoding values are:
+  // TEXT - indicates values are alphanumeric text strings.
+  // BINARY - indicates values are encoded using the HBase Bytes.toBytes family
+  // of functions.
+  // 'encoding' can also be set at the column family level. However, the
+  // setting at this level takes precedence if 'encoding' is set at both
+  // levels.
+  string encoding = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this is set, only the latest version of the value in this
+  // column is exposed.
+  // 'onlyReadLatest' can also be set at the column family level. However, the
+  // setting at this level takes precedence if 'onlyReadLatest' is set at both
+  // levels.
+  google.protobuf.BoolValue only_read_latest = 6
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Information related to a Bigtable column family.
+message BigtableColumnFamily {
+  // Identifier of the column family.
+  string family_id = 1;
+
+  // Optional. The type to which the values in cells of this column family are
+  // converted. The values are expected to be encoded using the HBase
+  // Bytes.toBytes function when using the BINARY encoding value.
+  // The following BigQuery types are allowed (case-sensitive):
+  //
+  // * BYTES
+  // * STRING
+  // * INTEGER
+  // * FLOAT
+  // * BOOLEAN
+  // * JSON
+  //
+  // The default type is BYTES.
+  // This can be overridden for a specific column by listing that column in
+  // 'columns' and specifying a type for it.
+  string type = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The encoding of the values when the type is not STRING.
+  // Acceptable encoding values are:
+  // TEXT - indicates values are alphanumeric text strings.
+  // BINARY - indicates values are encoded using the HBase Bytes.toBytes family
+  // of functions.
+  // This can be overridden for a specific column by listing that column in
+  // 'columns' and specifying an encoding for it.
+  string encoding = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. List of columns that should be exposed as individual fields as
+  // opposed to a list of (column name, value) pairs.
+  // All columns whose qualifier matches a qualifier in this list can be
+  // accessed as `.`.
+  // Other columns can be accessed as a list through
+  // the `.Column` field.
+  repeated BigtableColumn columns = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this is set, only the latest version of the value is exposed
+  // for all columns in this column family.
+  // This can be overridden for a specific column by listing that column in
+  // 'columns' and specifying a different setting
+  // for that column.
+  google.protobuf.BoolValue only_read_latest = 5
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Options specific to Google Cloud Bigtable data sources.
+message BigtableOptions {
+  // Optional. List of column families to expose in the table schema along with
+  // their types.
+  // This list restricts the column families that can be referenced in queries
+  // and specifies their value types.
+  // You can use this list to do type conversions - see the 'type' field for
+  // more details.
+  // If you leave this list empty, all column families are present in the table
+  // schema and their values are read as BYTES.
+  // During a query, only the column families referenced in that query are read
+  // from Bigtable.
+  repeated BigtableColumnFamily column_families = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this field is true, then the column families that are not
+  // specified in the columnFamilies list are not exposed in the table schema.
+  // Otherwise, they are read with BYTES type values.
+  // The default value is false.
+  google.protobuf.BoolValue ignore_unspecified_column_families = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this field is true, then the rowkey column families will be
+  // read and converted to string. Otherwise, they are read with BYTES type
+  // values and users need to manually cast them with CAST if necessary.
+  // The default value is false.
+  google.protobuf.BoolValue read_rowkey_as_string = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If this field is true, then each column family will be read as a
+  // single JSON column. Otherwise, they are read as a repeated cell structure
+  // containing timestamp/value tuples. The default value is false.
+  google.protobuf.BoolValue output_column_families_as_json = 4
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Options specific to Google Sheets data sources.
+message GoogleSheetsOptions {
+  // Optional. The number of rows at the top of a sheet that BigQuery will skip
+  // when reading the data. The default value is 0. This property is useful if
+  // you have header rows that should be skipped.
When autodetect is on,
+  // the behavior is the following:
+  // * skipLeadingRows unspecified - Autodetect tries to detect headers in the
+  // first row. If they are not detected, the row is read as data. Otherwise
+  // data is read starting from the second row.
+  // * skipLeadingRows is 0 - Instructs autodetect that there are no headers
+  // and data should be read starting from the first row.
+  // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect
+  // headers in row N. If headers are not detected, row N is just skipped.
+  // Otherwise row N is used to extract column names for the detected schema.
+  google.protobuf.Int64Value skip_leading_rows = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Range of a sheet to query from. Only used when non-empty.
+  // Typical format: sheet_name!top_left_cell_id:bottom_right_cell_id
+  // For example: sheet1!A1:B20
+  string range = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+message ExternalDataConfiguration {
+  // Supported Object Metadata Types.
+  enum ObjectMetadata {
+    // Unspecified by default.
+    OBJECT_METADATA_UNSPECIFIED = 0;
+
+    // A synonym for `SIMPLE`.
+    DIRECTORY = 1;
+
+    // Directory listing of objects.
+    SIMPLE = 2;
+  }
+
+  // MetadataCacheMode identifies if the table should use metadata caching for
+  // files from an external source (e.g., Google Cloud Storage).
+  enum MetadataCacheMode {
+    // Unspecified metadata cache mode.
+    METADATA_CACHE_MODE_UNSPECIFIED = 0;
+
+    // Set this mode to trigger automatic background refresh of metadata cache
+    // from the external source. Queries will use the latest available cache
+    // version within the table's maxStaleness interval.
+    AUTOMATIC = 1;
+
+    // Set this mode to enable triggering manual refresh of the metadata cache
+    // from the external source. Queries will use the latest manually triggered
+    // cache version within the table's maxStaleness interval.
+    MANUAL = 2;
+  }
+
+  // [Required] The fully-qualified URIs that point to your data in Google
+  // Cloud. For Google Cloud Storage URIs:
+  // Each URI can contain one '*' wildcard character and it must come after
+  // the 'bucket' name.
+  // Size limits related to load jobs apply to external data sources.
+  // For Google Cloud Bigtable URIs:
+  // Exactly one URI can be specified and it has to be a fully specified and
+  // valid HTTPS URL for a Google Cloud Bigtable table.
+  // For Google Cloud Datastore backups, exactly one URI can be specified. Also,
+  // the '*' wildcard character is not allowed.
+  repeated string source_uris = 1;
+
+  // Optional. Specifies how source URIs are interpreted for constructing the
+  // file set to load. By default, source URIs are expanded against the
+  // underlying storage. Other options include specifying manifest files. Only
+  // applicable to object storage systems.
+  FileSetSpecType file_set_spec_type = 25
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The schema for the data.
+  // Schema is required for CSV and JSON formats if autodetect is not on.
+  // Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups,
+  // Avro, ORC and Parquet formats.
+  TableSchema schema = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // [Required] The data format.
+  // For CSV files, specify "CSV".
+  // For Google Sheets, specify "GOOGLE_SHEETS".
+  // For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON".
+  // For Avro files, specify "AVRO".
+  // For Google Cloud Datastore backups, specify "DATASTORE_BACKUP".
+ // For Apache Iceberg tables, specify "ICEBERG". + // For ORC files, specify "ORC". + // For Parquet files, specify "PARQUET". + // [Beta] For Google Cloud Bigtable, specify "BIGTABLE". + string source_format = 3; + + // Optional. The maximum number of bad records that BigQuery can ignore when + // reading data. If the number of bad records exceeds this value, an invalid + // error is returned in the job result. The default value is 0, which requires + // that all records are valid. This setting is ignored for Google Cloud + // Bigtable, Google Cloud Datastore backups, Avro, ORC and Parquet formats. + google.protobuf.Int32Value max_bad_records = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Try to detect schema and format options automatically. + // Any option specified explicitly will be honored. + google.protobuf.BoolValue autodetect = 5; + + // Optional. Indicates if BigQuery should allow extra values that are not + // represented in the table schema. + // If true, the extra values are ignored. + // If false, records with extra columns are treated as bad records, and if + // there are too many bad records, an invalid error is returned in the job + // result. + // The default value is false. + // The sourceFormat property determines what BigQuery treats as an extra + // value: + // CSV: Trailing columns + // JSON: Named values that don't match any column names + // Google Cloud Bigtable: This setting is ignored. + // Google Cloud Datastore backups: This setting is ignored. + // Avro: This setting is ignored. + // ORC: This setting is ignored. + // Parquet: This setting is ignored. + google.protobuf.BoolValue ignore_unknown_values = 6 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The compression type of the data source. + // Possible values include GZIP and NONE. The default value is NONE. + // This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore + // backups, Avro, ORC and Parquet + // formats. An empty string is an invalid value. + string compression = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional properties to set if sourceFormat is set to CSV. + CsvOptions csv_options = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional properties to set if sourceFormat is set to JSON. + JsonOptions json_options = 26 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional options if sourceFormat is set to BIGTABLE. + BigtableOptions bigtable_options = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional options if sourceFormat is set to GOOGLE_SHEETS. + GoogleSheetsOptions google_sheets_options = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. When set, configures hive partitioning support. Not all storage + // formats support hive partitioning -- requesting hive partitioning on an + // unsupported format will lead to an error, as will providing an invalid + // specification. + HivePartitioningOptions hive_partitioning_options = 13 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The connection specifying the credentials to be used to read + // external storage, such as Azure Blob, Cloud Storage, or S3. The + // connection_id can have the form + // `{project_id}.{location_id};{connection_id}` or + // `projects/{project_id}/locations/{location_id}/connections/{connection_id}`. + string connection_id = 14 [(google.api.field_behavior) = OPTIONAL]; + + // Defines the list of possible SQL data types to which the source decimal + // values are converted. 
This list and the precision and the scale parameters
+  // of the decimal field determine the target type. In the order of NUMERIC,
+  // BIGNUMERIC, and STRING, a
+  // type is picked if it is in the specified list and if it supports the
+  // precision and the scale. STRING supports all precision and scale values.
+  // If none of the listed types supports the precision and the scale, the type
+  // supporting the widest range in the specified list is picked, and if a value
+  // exceeds the supported range when reading the data, an error will be thrown.
+  //
+  // Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"].
+  // If (precision,scale) is:
+  //
+  // * (38,9) -> NUMERIC;
+  // * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits);
+  // * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits);
+  // * (76,38) -> BIGNUMERIC;
+  // * (77,38) -> BIGNUMERIC (error if value exceeds supported range).
+  //
+  // This field cannot contain duplicate types. The order of the types in this
+  // field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as
+  // ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over
+  // BIGNUMERIC.
+  //
+  // Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other
+  // file formats.
+  repeated DecimalTargetType decimal_target_types = 16;
+
+  // Optional. Additional properties to set if sourceFormat is set to AVRO.
+  AvroOptions avro_options = 17 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Load option to be used together with source_format
+  // newline-delimited JSON to indicate that a variant of JSON is being loaded.
+  // To load newline-delimited GeoJSON, specify GEOJSON (and source_format must
+  // be set to NEWLINE_DELIMITED_JSON).
+  JsonExtension json_extension = 18 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Additional properties to set if sourceFormat is set to PARQUET.
+  ParquetOptions parquet_options = 19 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. ObjectMetadata is used to create Object Tables. Object Tables
+  // contain a listing of objects (with their metadata) found at the
+  // source_uris. If ObjectMetadata is set, source_format should be omitted.
+  //
+  // Currently SIMPLE is the only supported Object Metadata type.
+  optional ObjectMetadata object_metadata = 22
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. When creating an external table, the user can provide a reference
+  // file with the table schema. This is enabled for the following formats:
+  // AVRO, PARQUET, ORC.
+  google.protobuf.StringValue reference_file_schema_uri = 23
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Metadata Cache Mode for the table. Set this to enable caching of
+  // metadata from the external data source.
+  MetadataCacheMode metadata_cache_mode = 24
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/external_dataset_reference.proto b/test-fixtures/protos/google/cloud/bigquery/v2/external_dataset_reference.proto
new file mode 100644
index 000000000..8d3a3b4c9
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/external_dataset_reference.proto
@@ -0,0 +1,46 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "ExternalDatasetReferenceProto";
+option java_package = "com.google.cloud.bigquery.v2";
+option (google.api.resource_definition) = {
+  type: "bigqueryconnection.googleapis.com/Connection"
+  pattern: "projects/{project}/locations/{location}/connections/{connection}"
+};
+
+// Configures access to a dataset defined in an external metadata storage.
+message ExternalDatasetReference {
+  // Required. External source that backs this dataset.
+  string external_source = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The connection id that is used to access the external_source.
+  //
+  // Format:
+  // projects/{project_id}/locations/{location_id}/connections/{connection_id}
+  string connection = 3 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "bigqueryconnection.googleapis.com/Connection"
+    }
+  ];
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/file_set_specification_type.proto b/test-fixtures/protos/google/cloud/bigquery/v2/file_set_specification_type.proto
new file mode 100644
index 000000000..1068d20eb
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/file_set_specification_type.proto
@@ -0,0 +1,34 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "FileSetSpecificationTypeProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// This enum defines how to interpret source URIs for load jobs and external
+// tables.
+enum FileSetSpecType {
+  // This option expands source URIs by listing files from the object store. It
+  // is the default behavior if FileSetSpecType is not set.
+  FILE_SET_SPEC_TYPE_FILE_SYSTEM_MATCH = 0;
+
+  // This option indicates that the provided URIs are newline-delimited manifest
+  // files, with one URI per line. Wildcard URIs are not supported.
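+  // For example, a manifest at an illustrative location such as
+  // gs://bucket/manifest.txt would itself be listed in the source URIs and
+  // might contain:
+  //
+  //   gs://bucket/data/file-000.csv
+  //   gs://bucket/data/file-001.csv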
+  FILE_SET_SPEC_TYPE_NEW_LINE_DELIMITED_MANIFEST = 1;
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/hive_partitioning.proto b/test-fixtures/protos/google/cloud/bigquery/v2/hive_partitioning.proto
new file mode 100644
index 000000000..76872bd1a
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/hive_partitioning.proto
@@ -0,0 +1,86 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "HivePartitioningProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Options for configuring hive partitioning detection.
+message HivePartitioningOptions {
+  // Optional. When set, what mode of hive partitioning to use when reading
+  // data. The following modes are supported:
+  //
+  // * AUTO: automatically infer partition key name(s) and type(s).
+  //
+  // * STRINGS: automatically infer partition key name(s). All types are
+  // strings.
+  //
+  // * CUSTOM: partition key schema is encoded in the source URI prefix.
+  //
+  // Not all storage formats support hive partitioning. Requesting hive
+  // partitioning on an unsupported format will lead to an error.
+  // Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
+  string mode = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. When hive partition detection is requested, a common prefix for
+  // all source uris must be provided. The prefix must end immediately before
+  // the partition key encoding begins. For example, consider files following
+  // this data layout:
+  //
+  // gs://bucket/path_to_table/dt=2019-06-01/country=USA/id=7/file.avro
+  //
+  // gs://bucket/path_to_table/dt=2019-05-31/country=CA/id=3/file.avro
+  //
+  // When hive partitioning is requested with either AUTO or STRINGS detection,
+  // the common prefix can be either of gs://bucket/path_to_table or
+  // gs://bucket/path_to_table/.
+  //
+  // CUSTOM detection requires encoding the partitioning schema immediately
+  // after the common prefix. For CUSTOM, any of
+  //
+  // * gs://bucket/path_to_table/{dt:DATE}/{country:STRING}/{id:INTEGER}
+  //
+  // * gs://bucket/path_to_table/{dt:STRING}/{country:STRING}/{id:INTEGER}
+  //
+  // * gs://bucket/path_to_table/{dt:DATE}/{country:STRING}/{id:STRING}
+  //
+  // would all be valid source URI prefixes.
+  string source_uri_prefix = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If set to true, queries over this table require a partition
+  // filter that can be used for partition elimination to be specified.
+  //
+  // Note that this field should only be true when creating a permanent
+  // external table or querying a temporary external table.
+  //
+  // Hive-partitioned loads with require_partition_filter explicitly set to
+  // true will fail.
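+  //
+  // For example, with the dt/country/id layout shown above, a query over such
+  // a table would need to include a predicate on a partition key, such as the
+  // illustrative filter `WHERE dt = "2019-06-01"`, before it is accepted.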
+ google.protobuf.BoolValue require_partition_filter = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Output only. For permanent external tables, this field is populated with + // the hive partition keys in the order they were inferred. The types of the + // partition keys can be deduced by checking the table schema (which will + // include the partition keys). Not every API will populate this field in the + // output. For example, Tables.Get will populate it, but Tables.List will not + // contain this field. + repeated string fields = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/job.proto b/test-fixtures/protos/google/cloud/bigquery/v2/job.proto new file mode 100644 index 000000000..b15e1fb42 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/job.proto @@ -0,0 +1,738 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/v2/data_format_options.proto"; +import "google/cloud/bigquery/v2/dataset_reference.proto"; +import "google/cloud/bigquery/v2/error.proto"; +import "google/cloud/bigquery/v2/job_config.proto"; +import "google/cloud/bigquery/v2/job_creation_reason.proto"; +import "google/cloud/bigquery/v2/job_reference.proto"; +import "google/cloud/bigquery/v2/job_stats.proto"; +import "google/cloud/bigquery/v2/job_status.proto"; +import "google/cloud/bigquery/v2/query_parameter.proto"; +import "google/cloud/bigquery/v2/session_info.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Job Service. +// +// It should not be relied on for production use cases at this time. +service JobService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only," + "https://www.googleapis.com/auth/devstorage.full_control," + "https://www.googleapis.com/auth/devstorage.read_only," + "https://www.googleapis.com/auth/devstorage.read_write"; + + // Requests that a job be cancelled. This call will return immediately, and + // the client will need to poll for the job status to see if the cancel + // completed successfully. Cancelled jobs may still incur costs. 
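+  //
+  // For example, given the HTTP binding below, a cancellation maps to a
+  // request such as
+  // `POST /bigquery/v2/projects/my-project/jobs/my-job/cancel` (illustrative
+  // project and job IDs), typically followed by polling GetJob until the
+  // job status reports that it is done.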
+ rpc CancelJob(CancelJobRequest) returns (JobCancelResponse) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/jobs/{job_id=*}/cancel" + }; + } + + // Returns information about a specific job. Job information is available for + // a six month period after creation. Requires that you're the person who ran + // the job, or have the Is Owner project role. + rpc GetJob(GetJobRequest) returns (Job) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/jobs/{job_id=*}" + }; + } + + // Starts a new asynchronous job. + // + // This API has two different kinds of endpoint URIs, as this method supports + // a variety of use cases. + // + // * The *Metadata* URI is used for most interactions, as it accepts the job + // configuration directly. + // * The *Upload* URI is ONLY for the case when you're sending both a load job + // configuration and a data stream together. In this case, the Upload URI + // accepts the job configuration and the data as two distinct multipart MIME + // parts. + rpc InsertJob(InsertJobRequest) returns (Job) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/jobs" + body: "job" + }; + } + + // Requests the deletion of the metadata of a job. This call returns when the + // job's metadata is deleted. + rpc DeleteJob(DeleteJobRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/bigquery/v2/projects/{project_id=*}/jobs/{job_id=*}/delete" + }; + } + + // Lists all jobs that you started in the specified project. Job information + // is available for a six month period after creation. The job list is sorted + // in reverse chronological order, by job creation time. Requires the Can View + // project role, or the Is Owner project role if you set the allUsers + // property. + rpc ListJobs(ListJobsRequest) returns (JobList) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/jobs" + }; + } + + // RPC to get the results of a query job. + rpc GetQueryResults(GetQueryResultsRequest) + returns (GetQueryResultsResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/queries/{job_id=*}" + }; + } + + // Runs a BigQuery SQL query synchronously and returns query results if the + // query completes within a specified timeout. + rpc Query(PostQueryRequest) returns (QueryResponse) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/queries" + body: "query_request" + }; + } +} + +message Job { + // Output only. The type of the resource. + string kind = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A hash of this resource. + string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Opaque ID field of the job. + string id = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A URL that can be used to access the resource again. + string self_link = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Email address of the user who ran the job. + string user_email = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Describes the job configuration. + JobConfiguration configuration = 6 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Reference describing the unique-per-user name of the job. + JobReference job_reference = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. Information about the job, including starting time and ending + // time of the job. 
+ JobStatistics statistics = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The status of this job. Examine this value when polling an + // asynchronous job to see if the job is complete. + JobStatus status = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. [Full-projection-only] String representation of identity of + // requesting party. Populated for both first- and third-party identities. + // Only present for APIs that support third-party identities. + string principal_subject = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The reason why a Job was created. + // [Preview](https://cloud.google.com/products/#product-launch-stages) + JobCreationReason job_creation_reason = 14 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Describes format of a jobs cancellation request. +message CancelJobRequest { + // Required. Project ID of the job to cancel + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the job to cancel + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The geographic location of the job. You must specify the location to run + // the job for the following scenarios: + // + // * If the location to run a job is not in the `us` or + // the `eu` multi-regional location + // * If the job's location is in a single region (for example, + // `us-central1`) + // + // For more information, see + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 3; +} + +// Describes format of a jobs cancellation response. +message JobCancelResponse { + // The resource type of the response. + string kind = 1; + + // The final state of the job. + Job job = 2; +} + +// Describes format of a jobs get request. +message GetJobRequest { + // Required. Project ID of the requested job. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the requested job. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The geographic location of the job. You must specify the location to run + // the job for the following scenarios: + // + // * If the location to run a job is not in the `us` or + // the `eu` multi-regional location + // * If the job's location is in a single region (for example, + // `us-central1`) + // + // For more information, see + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 3; +} + +// Describes format of a job insertion request. +message InsertJobRequest { + // Project ID of project that will be billed for the job. + string project_id = 1; + + // Jobs resource to insert. + Job job = 3; +} + +// Describes the format of a jobs deletion request. +message DeleteJobRequest { + // Required. Project ID of the job for which metadata is to be deleted. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the job for which metadata is to be deleted. If this is + // a parent job which has child jobs, the metadata from all child jobs will be + // deleted as well. Direct deletion of the metadata of child jobs is not + // allowed. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The geographic location of the job. Required. + // See details at: + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 3; +} + +// Describes the format of the list jobs request. 
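+//
+// The fields below correspond to the query parameters of the REST call, so an
+// illustrative request might look like
+// `GET /bigquery/v2/projects/my-project/jobs?projection=full&stateFilter=running`.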
+message ListJobsRequest { + // Projection is used to control what job information is returned. + enum Projection { + option allow_alias = true; + + // Does not include the job configuration + minimal = 0; + + // Does not include the job configuration + MINIMAL = 0; + + // Includes all job data + full = 1; + + // Includes all job data + FULL = 1; + } + + // StateFilter allows filtration by job execution state. + enum StateFilter { + option allow_alias = true; + + // Finished jobs + done = 0; + + // Finished jobs + DONE = 0; + + // Pending jobs + pending = 1; + + // Pending jobs + PENDING = 1; + + // Running jobs + running = 2; + + // Running jobs. + RUNNING = 2; + } + + // Project ID of the jobs to list. + string project_id = 1; + + // Whether to display jobs owned by all users in the project. Default False. + bool all_users = 2; + + // The maximum number of results to return in a single response page. + // Leverage the page tokens to iterate through the entire collection. + google.protobuf.Int32Value max_results = 3; + + // Min value for job creation time, in milliseconds since the POSIX epoch. + // If set, only jobs created after or at this timestamp are returned. + uint64 min_creation_time = 4; + + // Max value for job creation time, in milliseconds since the POSIX epoch. + // If set, only jobs created before or at this timestamp are returned. + google.protobuf.UInt64Value max_creation_time = 5; + + // Page token, returned by a previous call, to request the next page of + // results. + string page_token = 6; + + // Restrict information returned to a set of selected fields + Projection projection = 7; + + // Filter for job state + repeated StateFilter state_filter = 8; + + // If set, show only child jobs of the specified parent. Otherwise, show all + // top-level jobs. + string parent_job_id = 9; +} + +// ListFormatJob is a partial projection of job information returned as part +// of a jobs.list response. +message ListFormatJob { + // Unique opaque ID of the job. + string id = 1; + + // The resource type. + string kind = 2; + + // Unique opaque ID of the job. + JobReference job_reference = 3; + + // Running state of the job. When the state is DONE, errorResult can be + // checked to determine whether the job succeeded or failed. + string state = 4; + + // A result object that will be present only if the job has failed. + ErrorProto error_result = 5; + + // Output only. Information about the job, including starting time and ending + // time of the job. + JobStatistics statistics = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Describes the job configuration. + JobConfiguration configuration = 7 [(google.api.field_behavior) = REQUIRED]; + + // [Full-projection-only] Describes the status of this job. + JobStatus status = 8; + + // [Full-projection-only] Email address of the user who ran the job. + string user_email = 9; + + // [Full-projection-only] String representation of identity of requesting + // party. Populated for both first- and third-party identities. Only present + // for APIs that support third-party identities. + string principal_subject = 10; +} + +// JobList is the response format for a jobs.list call. +message JobList { + // A hash of this page of results. + string etag = 1; + + // The resource type of the response. + string kind = 2; + + // A token to request the next page of results. + string next_page_token = 3; + + // List of jobs that were requested. + repeated ListFormatJob jobs = 4; + + // A list of skipped locations that were unreachable. 
For more information + // about BigQuery locations, see: + // https://cloud.google.com/bigquery/docs/locations. Example: "europe-west5" + repeated string unreachable = 5; +} + +// Request object of GetQueryResults. +message GetQueryResultsRequest { + // Required. Project ID of the query job. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Job ID of the query job. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Zero-based index of the starting row. + google.protobuf.UInt64Value start_index = 3; + + // Page token, returned by a previous call, to request the next page of + // results. + string page_token = 4; + + // Maximum number of results to read. + google.protobuf.UInt32Value max_results = 5; + + // Optional: Specifies the maximum amount of time, in milliseconds, that the + // client is willing to wait for the query to complete. By default, this limit + // is 10 seconds (10,000 milliseconds). If the query is complete, the + // jobComplete field in the response is true. If the query has not yet + // completed, jobComplete is false. + // + // You can request a longer timeout period in the timeoutMs field. However, + // the call is not guaranteed to wait for the specified timeout; it typically + // returns after around 200 seconds (200,000 milliseconds), even if the query + // is not complete. + // + // If jobComplete is false, you can continue to wait for the query to complete + // by calling the getQueryResults method until the jobComplete field in the + // getQueryResults response is true. + google.protobuf.UInt32Value timeout_ms = 6; + + // The geographic location of the job. You must specify the location to run + // the job for the following scenarios: + // + // * If the location to run a job is not in the `us` or + // the `eu` multi-regional location + // * If the job's location is in a single region (for example, + // `us-central1`) + // + // For more information, see + // https://cloud.google.com/bigquery/docs/locations#specifying_your_location. + string location = 7; + + // Optional. Output format adjustments. + DataFormatOptions format_options = 8 [(google.api.field_behavior) = OPTIONAL]; +} + +// Response object of GetQueryResults. +message GetQueryResultsResponse { + // The resource type of the response. + string kind = 1; + + // A hash of this response. + string etag = 2; + + // The schema of the results. Present only when the query completes + // successfully. + TableSchema schema = 3; + + // Reference to the BigQuery Job that was created to run the query. This field + // will be present even if the original request timed out, in which case + // GetQueryResults can be used to read the results once the query has + // completed. Since this API only returns the first page of results, + // subsequent pages can be fetched via the same mechanism (GetQueryResults). + JobReference job_reference = 4; + + // The total number of rows in the complete query result set, which can be + // more than the number of rows in this single page of results. Present only + // when the query completes successfully. + google.protobuf.UInt64Value total_rows = 5; + + // A token used for paging results. When this token is non-empty, it + // indicates additional results are available. + string page_token = 6; + + // An object with as many results as can be contained within the maximum + // permitted reply size. To get any additional rows, you can call + // GetQueryResults and specify the jobReference returned above. 
Present only + // when the query completes successfully. + // + // The REST-based representation of this data leverages a series of + // JSON f,v objects for indicating fields and values. + repeated google.protobuf.Struct rows = 7; + + // The total number of bytes processed for this query. + google.protobuf.Int64Value total_bytes_processed = 8; + + // Whether the query has completed or not. If rows or totalRows are present, + // this will always be true. If this is false, totalRows will not be + // available. + google.protobuf.BoolValue job_complete = 9; + + // Output only. The first errors or warnings encountered during the running + // of the job. The final message includes the number of errors that caused the + // process to stop. Errors here do not necessarily mean that the job has + // completed or was unsuccessful. For more information about error messages, + // see [Error + // messages](https://cloud.google.com/bigquery/docs/error-messages). + repeated ErrorProto errors = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Whether the query result was fetched from the query cache. + google.protobuf.BoolValue cache_hit = 11; + + // Output only. The number of rows affected by a DML statement. Present only + // for DML statements INSERT, UPDATE or DELETE. + google.protobuf.Int64Value num_dml_affected_rows = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Request format for the query request. +message PostQueryRequest { + // Required. Project ID of the query request. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // The query request body. + QueryRequest query_request = 2; +} + +// Describes the format of the jobs.query request. +message QueryRequest { + // Job Creation Mode provides different options on job creation. + enum JobCreationMode { + // If unspecified JOB_CREATION_REQUIRED is the default. + JOB_CREATION_MODE_UNSPECIFIED = 0; + + // Default. Job creation is always required. + JOB_CREATION_REQUIRED = 1; + + // Job creation is optional. Returning immediate results is prioritized. + // BigQuery will automatically determine if a Job needs to be created. + // The conditions under which BigQuery can decide to not create a Job are + // subject to change. If Job creation is required, JOB_CREATION_REQUIRED + // mode should be used, which is the default. + JOB_CREATION_OPTIONAL = 2; + } + + // The resource type of the request. + string kind = 2; + + // Required. A query string to execute, using Google Standard SQL or legacy + // SQL syntax. Example: "SELECT COUNT(f1) FROM + // myProjectId.myDatasetId.myTableId". + string query = 3 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The maximum number of rows of data to return per page of + // results. Setting this flag to a small value such as 1000 and then paging + // through results might improve reliability when the query result set is + // large. In addition to this limit, responses are also limited to 10 MB. By + // default, there is no maximum row count, and only the byte limit applies. + google.protobuf.UInt32Value max_results = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the default datasetId and projectId to assume for any + // unqualified table names in the query. If not set, all table names in the + // query string must be qualified in the format 'datasetId.tableId'. + DatasetReference default_dataset = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
Specifies the maximum amount of time, in milliseconds,
+  // that the client is willing to wait for the query to complete. By default,
+  // this limit is 10 seconds (10,000 milliseconds). If the query is complete,
+  // the jobComplete field in the response is true. If the query has not yet
+  // completed, jobComplete is false.
+  //
+  // You can request a longer timeout period in the timeoutMs field. However,
+  // the call is not guaranteed to wait for the specified timeout; it typically
+  // returns after around 200 seconds (200,000 milliseconds), even if the query
+  // is not complete.
+  //
+  // If jobComplete is false, you can continue to wait for the query to complete
+  // by calling the getQueryResults method until the jobComplete field in the
+  // getQueryResults response is true.
+  google.protobuf.UInt32Value timeout_ms = 6
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If set to true, BigQuery doesn't run the job. Instead, if the
+  // query is valid, BigQuery returns statistics about the job such as how many
+  // bytes would be processed. If the query is invalid, an error is returned.
+  // The default value is false.
+  bool dry_run = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Whether to look for the result in the query cache. The query
+  // cache is a best-effort cache that will be flushed whenever tables in the
+  // query are modified. The default value is true.
+  google.protobuf.BoolValue use_query_cache = 9
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Specifies whether to use BigQuery's legacy SQL dialect for this query. The
+  // default value is true. If set to false, the query will use BigQuery's
+  // GoogleSQL: https://cloud.google.com/bigquery/sql-reference/ When
+  // useLegacySql is set to false, the value of flattenResults is ignored; the
+  // query will be run as if flattenResults is false.
+  google.protobuf.BoolValue use_legacy_sql = 10;
+
+  // GoogleSQL only. Set to POSITIONAL to use positional (?) query parameters
+  // or to NAMED to use named (@myparam) query parameters in this query.
+  string parameter_mode = 11;
+
+  // Query parameters for GoogleSQL queries.
+  repeated QueryParameter query_parameters = 12;
+
+  // The geographic location where the job should run. See details at
+  // https://cloud.google.com/bigquery/docs/locations#specifying_your_location.
+  string location = 13;
+
+  // Optional. Output format adjustments.
+  DataFormatOptions format_options = 15
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Connection properties which can modify the query behavior.
+  repeated ConnectionProperty connection_properties = 16
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The labels associated with this query.
+  // Labels can be used to organize and group query jobs.
+  // Label keys and values can be no longer than 63 characters, can only contain
+  // lowercase letters, numeric characters, underscores and dashes.
+  // International characters are allowed. Label keys must start with a letter
+  // and each label in the list must have a different key.
+  map<string, string> labels = 17 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Limits the bytes billed for this query. Queries with
+  // bytes billed above this limit will fail (without incurring a charge).
+  // If unspecified, the project default is used.
+  google.protobuf.Int64Value maximum_bytes_billed = 18
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A unique user-provided identifier to ensure idempotent behavior
+  // for queries.
Note that this is different from the job_id. It has the
+  // following properties:
+  //
+  // 1. It is case-sensitive, limited to up to 36 ASCII characters. A UUID is
+  // recommended.
+  //
+  // 2. Read-only queries can ignore this token since they are nullipotent by
+  // definition.
+  //
+  // 3. For the purposes of idempotency ensured by the request_id, a request
+  // is considered a duplicate of another only if they have the same request_id
+  // and are actually duplicates. When determining whether a request is a
+  // duplicate of another request, all parameters in the request that
+  // may affect the result are considered. For example, query,
+  // connection_properties, query_parameters, use_legacy_sql are parameters
+  // that affect the result and are considered when determining whether a
+  // request is a duplicate, but properties like timeout_ms don't
+  // affect the result and are thus not considered. Dry run query
+  // requests are never considered duplicates of another request.
+  //
+  // 4. When a duplicate mutating query request is detected, it returns:
+  // a. the results of the mutation if it completes successfully within
+  // the timeout.
+  // b. the running operation if it is still in progress at the end of the
+  // timeout.
+  //
+  // 5. Its lifetime is limited to 15 minutes. In other words, if two
+  // requests are sent with the same request_id, but more than 15 minutes
+  // apart, idempotency is not guaranteed.
+  string request_id = 19 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If true, creates a new session using a randomly generated
+  // session_id. If false, runs the query with an existing session_id passed in
+  // ConnectionProperty, otherwise runs the query in non-session mode.
+  //
+  // The session location will be set to QueryRequest.location if it is present,
+  // otherwise it's set to the default location based on existing routing logic.
+  google.protobuf.BoolValue create_session = 20
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If not set, jobs are always required.
+  //
+  // If set, the query request will follow the behavior described by
+  // JobCreationMode.
+  // [Preview](https://cloud.google.com/products/#product-launch-stages)
+  JobCreationMode job_creation_mode = 22
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+message QueryResponse {
+  // The resource type.
+  string kind = 1;
+
+  // The schema of the results. Present only when the query completes
+  // successfully.
+  TableSchema schema = 2;
+
+  // Reference to the Job that was created to run the query. This field will be
+  // present even if the original request timed out, in which case
+  // GetQueryResults can be used to read the results once the query has
+  // completed. Since this API only returns the first page of results,
+  // subsequent pages can be fetched via the same mechanism (GetQueryResults).
+  //
+  // If job_creation_mode was set to `JOB_CREATION_OPTIONAL` and the query
+  // completes without creating a job, this field will be empty.
+  JobReference job_reference = 3;
+
+  // Optional. The reason why a Job was created.
+  //
+  // Only relevant when a job_reference is present in the response.
+  // If job_reference is not present, it will always be unset.
+  // [Preview](https://cloud.google.com/products/#product-launch-stages)
+  JobCreationReason job_creation_reason = 15
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Auto-generated ID for the query.
+ // [Preview](https://cloud.google.com/products/#product-launch-stages) + string query_id = 14; + + // The total number of rows in the complete query result set, which can be + // more than the number of rows in this single page of results. + google.protobuf.UInt64Value total_rows = 4; + + // A token used for paging results. A non-empty token indicates that + // additional results are available. To see additional results, + // query the + // [`jobs.getQueryResults`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults) + // method. For more information, see [Paging through table + // data](https://cloud.google.com/bigquery/docs/paging-results). + string page_token = 5; + + // An object with as many results as can be contained within the maximum + // permitted reply size. To get any additional rows, you can call + // GetQueryResults and specify the jobReference returned above. + repeated google.protobuf.Struct rows = 6; + + // The total number of bytes processed for this query. If this query was a dry + // run, this is the number of bytes that would be processed if the query were + // run. + google.protobuf.Int64Value total_bytes_processed = 7; + + // Whether the query has completed or not. If rows or totalRows are present, + // this will always be true. If this is false, totalRows will not be + // available. + google.protobuf.BoolValue job_complete = 8; + + // Output only. The first errors or warnings encountered during the running of + // the job. The final message includes the number of errors that caused the + // process to stop. Errors here do not necessarily mean that the job has + // completed or was unsuccessful. For more information about error messages, + // see [Error + // messages](https://cloud.google.com/bigquery/docs/error-messages). + repeated ErrorProto errors = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Whether the query result was fetched from the query cache. + google.protobuf.BoolValue cache_hit = 10; + + // Output only. The number of rows affected by a DML statement. Present only + // for DML statements INSERT, UPDATE or DELETE. + google.protobuf.Int64Value num_dml_affected_rows = 11 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Information of the session if this job is part of one. + SessionInfo session_info = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Detailed statistics for DML statements INSERT, UPDATE, DELETE, + // MERGE or TRUNCATE. + DmlStats dml_stats = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/job_config.proto b/test-fixtures/protos/google/cloud/bigquery/v2/job_config.proto new file mode 100644 index 000000000..9f42488c8 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/job_config.proto @@ -0,0 +1,814 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/bigquery/v2/clustering.proto";
+import "google/cloud/bigquery/v2/dataset_reference.proto";
+import "google/cloud/bigquery/v2/decimal_target_types.proto";
+import "google/cloud/bigquery/v2/encryption_config.proto";
+import "google/cloud/bigquery/v2/external_data_config.proto";
+import "google/cloud/bigquery/v2/file_set_specification_type.proto";
+import "google/cloud/bigquery/v2/hive_partitioning.proto";
+import "google/cloud/bigquery/v2/json_extension.proto";
+import "google/cloud/bigquery/v2/model_reference.proto";
+import "google/cloud/bigquery/v2/query_parameter.proto";
+import "google/cloud/bigquery/v2/range_partitioning.proto";
+import "google/cloud/bigquery/v2/system_variable.proto";
+import "google/cloud/bigquery/v2/table_reference.proto";
+import "google/cloud/bigquery/v2/table_schema.proto";
+import "google/cloud/bigquery/v2/time_partitioning.proto";
+import "google/cloud/bigquery/v2/udf_resource.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "JobConfigProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Properties for the destination table.
+message DestinationTableProperties {
+  // Optional. Friendly name for the destination table. If the table already
+  // exists, it should be the same as the existing friendly name.
+  google.protobuf.StringValue friendly_name = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The description for the destination table.
+  // This will only be used if the destination table is newly created.
+  // If the table already exists and a value different from the current
+  // description is provided, the job will fail.
+  google.protobuf.StringValue description = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The labels associated with this table. You can use these to
+  // organize and group your tables. This will only be used if the destination
+  // table is newly created. If the table already exists and labels
+  // different from the current labels are provided, the job will fail.
+  map<string, string> labels = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A connection-level property to customize query behavior. Under JDBC, these
+// correspond directly to connection properties passed to the DriverManager.
+// Under ODBC, these correspond to properties in the connection string.
+//
+// Currently supported connection properties:
+//
+// * **dataset_project_id**: represents the default project for datasets that
+// are used in the query. Setting the
+// system variable `@@dataset_project_id` achieves the same behavior. For
+// more information about system variables, see:
+// https://cloud.google.com/bigquery/docs/reference/system-variables
+//
+// * **time_zone**: represents the default timezone used to run the query.
+//
+// * **session_id**: associates the query with a given session.
+//
+// * **query_label**: associates the query with a given job label. If set,
+// all subsequent queries in a script or session will have this label.
+ +// JobConfigurationQuery configures a BigQuery query job. +message JobConfigurationQuery { + // [Required] SQL query text to execute. The useLegacySql field can be used + // to indicate whether the query uses legacy SQL or GoogleSQL. + string query = 1; + + // Optional. Describes the table where the query results should be stored. + // This property must be set for large results that exceed the maximum + // response size. For queries that produce anonymous (cached) results, this + // field will be populated by BigQuery. + TableReference destination_table = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. You can specify external table definitions, which operate as + // ephemeral tables that can be queried. These definitions are configured + // using a JSON map, where the string key represents the table identifier, and + // the value is the corresponding external data configuration object. + map<string, ExternalDataConfiguration> external_table_definitions = 23 + [(google.api.field_behavior) = OPTIONAL]; + + // Describes user-defined function resources used in the query. + repeated UserDefinedFunctionResource user_defined_function_resources = 4; + + // Optional. Specifies whether the job is allowed to create new tables. + // The following values are supported: + // + // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the + // table. + // * CREATE_NEVER: The table must already exist. If it does not, + // a 'notFound' error is returned in the job result. + // + // The default value is CREATE_IF_NEEDED. + // Creation, truncation and append actions occur as one atomic update + // upon job completion. + string create_disposition = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the action that occurs if the destination table + // already exists. The following values are supported: + // + // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the + // data, removes the constraints, and uses the schema from the query result. + // * WRITE_APPEND: If the table already exists, BigQuery appends the data to + // the table. + // * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' + // error is returned in the job result. + // + // The default value is WRITE_EMPTY. Each action is atomic and only occurs if + // BigQuery is able to complete the job successfully. Creation, truncation and + // append actions occur as one atomic update upon job completion. + string write_disposition = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the default dataset to use for unqualified + // table names in the query. This setting does not alter behavior of + // unqualified dataset names. Setting the system variable + // `@@dataset_id` achieves the same behavior.
See + // https://cloud.google.com/bigquery/docs/reference/system-variables for more + // information on system variables. + DatasetReference default_dataset = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies a priority for the query. Possible values include + // INTERACTIVE and BATCH. The default value is INTERACTIVE. + string priority = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true and query uses legacy SQL dialect, allows the query + // to produce arbitrarily large result tables at a slight cost in performance. + // Requires destinationTable to be set. + // For GoogleSQL queries, this flag is ignored and large results are + // always allowed. However, you must still set destinationTable when result + // size exceeds the allowed maximum response size. + google.protobuf.BoolValue allow_large_results = 10 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Whether to look for the result in the query cache. The query + // cache is a best-effort cache that will be flushed whenever tables in the + // query are modified. Moreover, the query cache is only available when a + // query does not have a destination table specified. The default value is + // true. + google.protobuf.BoolValue use_query_cache = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true and query uses legacy SQL dialect, flattens all nested + // and repeated fields in the query results. + // allowLargeResults must be true if this is set to false. + // For GoogleSQL queries, this flag is ignored and results are never + // flattened. + google.protobuf.BoolValue flatten_results = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Limits the bytes billed for this job. Queries that will have + // bytes billed beyond this limit will fail (without incurring a charge). + // If unspecified, this will be set to your project default. + google.protobuf.Int64Value maximum_bytes_billed = 14; + + // Optional. Specifies whether to use BigQuery's legacy SQL dialect for this + // query. The default value is true. If set to false, the query will use + // BigQuery's GoogleSQL: + // https://cloud.google.com/bigquery/sql-reference/ + // + // When useLegacySql is set to false, the value of flattenResults is ignored; + // query will be run as if flattenResults is false. + google.protobuf.BoolValue use_legacy_sql = 15 + [(google.api.field_behavior) = OPTIONAL]; + + // GoogleSQL only. Set to POSITIONAL to use positional (?) query parameters + // or to NAMED to use named (@myparam) query parameters in this query. + string parameter_mode = 16; + + // Query parameters for GoogleSQL queries. + repeated QueryParameter query_parameters = 17; + + // Output only. System variables for GoogleSQL queries. A system variable is + // output if the variable is settable and its value differs from the system + // default. + // "@@" prefix is not included in the name of the System variables. + optional SystemVariables system_variables = 35 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Allows the schema of the destination table to be updated as a side effect + // of the query job. Schema update options are supported in two cases: + // when writeDisposition is WRITE_APPEND; + // when writeDisposition is WRITE_TRUNCATE and the destination table is a + // partition of a table, specified by partition decorators. For normal tables, + // WRITE_TRUNCATE will always overwrite the schema. 
+ // One or more of the following values are specified: + // + // * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. + // * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original + // schema to nullable. + repeated string schema_update_options = 18; + + // Time-based partitioning specification for the destination table. Only one + // of timePartitioning and rangePartitioning should be specified. + TimePartitioning time_partitioning = 19; + + // Range partitioning specification for the destination table. + // Only one of timePartitioning and rangePartitioning should be specified. + RangePartitioning range_partitioning = 22; + + // Clustering specification for the destination table. + Clustering clustering = 20; + + // Custom encryption configuration (e.g., Cloud KMS keys) + EncryptionConfiguration destination_encryption_configuration = 21; + + // Options controlling the execution of scripts. + ScriptOptions script_options = 24; + + // Connection properties which can modify the query behavior. + repeated ConnectionProperty connection_properties = 33; + + // If this property is true, the job creates a new session using a randomly + // generated session_id. To continue using a created session with + // subsequent queries, pass the existing session identifier as a + // `ConnectionProperty` value. The session identifier is returned as part of + // the `SessionInfo` message within the query statistics. + // + // The new session's location will be set to `Job.JobReference.location` if it + // is present, otherwise it's set to the default location based on existing + // routing logic. + google.protobuf.BoolValue create_session = 34; + + // Optional. Whether to run the query as continuous or a regular query. + // Continuous query is currently in experimental stage and not ready for + // general usage. + google.protobuf.BoolValue continuous = 36 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Options related to script execution. +message ScriptOptions { + // KeyResultStatementKind controls how the key result is determined. + enum KeyResultStatementKind { + // Default value. + KEY_RESULT_STATEMENT_KIND_UNSPECIFIED = 0; + + // The last result determines the key result. + LAST = 1; + + // The first SELECT statement determines the key result. + FIRST_SELECT = 2; + } + + // Timeout period for each statement in a script. + google.protobuf.Int64Value statement_timeout_ms = 1; + + // Limit on the number of bytes billed per statement. Exceeding this budget + // results in an error. + google.protobuf.Int64Value statement_byte_budget = 2; + + // Determines which statement in the script represents the "key result", + // used to populate the schema and query results of the script job. + // Default is LAST. + KeyResultStatementKind key_result_statement = 4; +} + +// JobConfigurationLoad contains the configuration properties for loading data +// into a destination table. +message JobConfigurationLoad { + // Indicates the character map used for column names. + enum ColumnNameCharacterMap { + // Unspecified column name character map. + COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED = 0; + + // Support flexible column name and reject invalid column names. + STRICT = 1; + + // Support alphanumeric + underscore characters and names must start with a + // letter or underscore. Invalid column names will be normalized. + V1 = 2; + + // Support flexible column name. Invalid column names will be normalized. 
+ V2 = 3; + } + + // [Required] The fully-qualified URIs that point to your data in Google + // Cloud. + // For Google Cloud Storage URIs: + // Each URI can contain one '*' wildcard character and it must come after + // the 'bucket' name. Size limits related to load jobs apply to external + // data sources. + // For Google Cloud Bigtable URIs: + // Exactly one URI can be specified and it has to be a fully specified and + // valid HTTPS URL for a Google Cloud Bigtable table. + // For Google Cloud Datastore backups: + // Exactly one URI can be specified. Also, the '*' wildcard character is not + // allowed. + repeated string source_uris = 1; + + // Optional. Specifies how source URIs are interpreted for constructing the + // file set to load. By default, source URIs are expanded against the + // underlying storage. You can also specify manifest files to control how the + // file set is constructed. This option is only applicable to object storage + // systems. + FileSetSpecType file_set_spec_type = 49 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The schema for the destination table. The schema can be + // omitted if the destination table already exists, or if you're loading data + // from Google Cloud Datastore. + TableSchema schema = 2 [(google.api.field_behavior) = OPTIONAL]; + + // [Required] The destination table to load the data into. + TableReference destination_table = 3; + + // Optional. [Experimental] Properties with which to create the destination + // table if it is new. + DestinationTableProperties destination_table_properties = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies whether the job is allowed to create new tables. + // The following values are supported: + // + // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the + // table. + // * CREATE_NEVER: The table must already exist. If it does not, + // a 'notFound' error is returned in the job result. + // The default value is CREATE_IF_NEEDED. + // Creation, truncation and append actions occur as one atomic update + // upon job completion. + string create_disposition = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the action that occurs if the destination table + // already exists. The following values are supported: + // + // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the + // data, removes the constraints and uses the schema from the load job. + // * WRITE_APPEND: If the table already exists, BigQuery appends the data to + // the table. + // * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' + // error is returned in the job result. + // + // The default value is WRITE_APPEND. + // Each action is atomic and only occurs if BigQuery is able to complete the + // job successfully. + // Creation, truncation and append actions occur as one atomic update + // upon job completion. + string write_disposition = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies a string that represents a null value in a CSV file. + // For example, if you specify "\N", BigQuery interprets "\N" as a null value + // when loading a CSV file. + // The default value is the empty string. If you set this property to a custom + // value, BigQuery throws an error if an empty string is present for all data + // types except for STRING and BYTE. For STRING and BYTE columns, BigQuery + // interprets the empty string as an empty value.
+ google.protobuf.StringValue null_marker = 7 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The separator character for fields in a CSV file. The separator + // is interpreted as a single byte. For files encoded in ISO-8859-1, any + // single character can be used as a separator. For files encoded in UTF-8, + // characters represented in decimal range 1-127 (U+0001-U+007F) can be used + // without any modification. UTF-8 characters encoded with multiple bytes + // (i.e. U+0080 and above) will have only the first byte used for separating + // fields. The remaining bytes will be treated as a part of the field. + // BigQuery also supports the escape sequence "\t" (U+0009) to specify a tab + // separator. The default value is comma (",", U+002C). + string field_delimiter = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The number of rows at the top of a CSV file that BigQuery will + // skip when loading the data. The default value is 0. This property is useful + // if you have header rows in the file that should be skipped. When autodetect + // is on, the behavior is the following: + // + // * skipLeadingRows unspecified - Autodetect tries to detect headers in the + // first row. If they are not detected, the row is read as data. Otherwise + // data is read starting from the second row. + // * skipLeadingRows is 0 - Instructs autodetect that there are no headers and + // data should be read starting from the first row. + // * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect + // headers in row N. If headers are not detected, row N is just skipped. + // Otherwise row N is used to extract column names for the detected schema. + google.protobuf.Int32Value skip_leading_rows = 9 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The character encoding of the data. + // The supported values are UTF-8, ISO-8859-1, UTF-16BE, UTF-16LE, UTF-32BE, + // and UTF-32LE. The default value is UTF-8. BigQuery decodes the data after + // the raw, binary data has been split using the values of the `quote` and + // `fieldDelimiter` properties. + // + // If you don't specify an encoding, or if you specify a UTF-8 encoding when + // the CSV file is not UTF-8 encoded, BigQuery attempts to convert the data to + // UTF-8. Generally, your data loads successfully, but it may not match + // byte-for-byte what you expect. To avoid this, specify the correct encoding + // by using the `--encoding` flag. + // + // If BigQuery can't convert a character other than the ASCII `0` character, + // BigQuery converts the character to the standard Unicode replacement + // character: �. + string encoding = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The value that is used to quote data sections in a CSV file. + // BigQuery converts the string to ISO-8859-1 encoding, and then uses the + // first byte of the encoded string to split the data in its raw, binary + // state. + // The default value is a double-quote ('"'). + // If your data does not contain quoted sections, set the property value to an + // empty string. + // If your data contains quoted newline characters, you must also set the + // allowQuotedNewlines property to true. + // To include the specific quote character within a quoted value, precede it + // with an additional matching quote character. For example, if you want to + // escape the default character ' " ', use ' "" '. + // @default " + google.protobuf.StringValue quote = 11 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
The maximum number of bad records that BigQuery can ignore when + // running the job. If the number of bad records exceeds this value, an + // invalid error is returned in the job result. + // The default value is 0, which requires that all records are valid. + // This is only supported for CSV and NEWLINE_DELIMITED_JSON file formats. + google.protobuf.Int32Value max_bad_records = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Indicates if BigQuery should allow quoted data sections that contain + // newline characters in a CSV file. The default value is false. + google.protobuf.BoolValue allow_quoted_newlines = 15; + + // Optional. The format of the data files. + // For CSV files, specify "CSV". For datastore backups, + // specify "DATASTORE_BACKUP". For newline-delimited JSON, + // specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". + // For parquet, specify "PARQUET". For orc, specify "ORC". + // The default value is CSV. + string source_format = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Accept rows that are missing trailing optional columns. + // The missing values are treated as nulls. + // If false, records with missing trailing columns are treated as bad records, + // and if there are too many bad records, an invalid error is returned in the + // job result. + // The default value is false. + // Only applicable to CSV, ignored for other formats. + google.protobuf.BoolValue allow_jagged_rows = 17 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Indicates if BigQuery should allow extra values that are not + // represented in the table schema. + // If true, the extra values are ignored. + // If false, records with extra columns are treated as bad records, and if + // there are too many bad records, an invalid error is returned in the job + // result. The default value is false. + // The sourceFormat property determines what BigQuery treats as an extra + // value: + // CSV: Trailing columns + // JSON: Named values that don't match any column names in the table schema + // Avro, Parquet, ORC: Fields in the file schema that don't exist in the + // table schema. + google.protobuf.BoolValue ignore_unknown_values = 18 + [(google.api.field_behavior) = OPTIONAL]; + + // If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity + // properties to load into BigQuery from a Cloud Datastore backup. Property + // names are case sensitive and must be top-level properties. If no properties + // are specified, BigQuery loads all properties. If any named property isn't + // found in the Cloud Datastore backup, an invalid error is returned in the + // job result. + repeated string projection_fields = 19; + + // Optional. Indicates if we should automatically infer the options and + // schema for CSV and JSON sources. + google.protobuf.BoolValue autodetect = 20 + [(google.api.field_behavior) = OPTIONAL]; + + // Allows the schema of the destination table to be updated as a side effect + // of the load job if a schema is autodetected or supplied in the job + // configuration. + // Schema update options are supported in two cases: + // when writeDisposition is WRITE_APPEND; + // when writeDisposition is WRITE_TRUNCATE and the destination table is a + // partition of a table, specified by partition decorators. For normal tables, + // WRITE_TRUNCATE will always overwrite the schema. + // One or more of the following values are specified: + // + // * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. 
+ // * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original + // schema to nullable. + repeated string schema_update_options = 21; + + // Time-based partitioning specification for the destination table. Only one + // of timePartitioning and rangePartitioning should be specified. + TimePartitioning time_partitioning = 22; + + // Range partitioning specification for the destination table. + // Only one of timePartitioning and rangePartitioning should be specified. + RangePartitioning range_partitioning = 26; + + // Clustering specification for the destination table. + Clustering clustering = 23; + + // Custom encryption configuration (e.g., Cloud KMS keys) + EncryptionConfiguration destination_encryption_configuration = 24; + + // Optional. If sourceFormat is set to "AVRO", indicates whether to interpret + // logical types as the corresponding BigQuery data type (for example, + // TIMESTAMP), instead of using the raw type (for example, INTEGER). + google.protobuf.BoolValue use_avro_logical_types = 25 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The user can provide a reference file with the reader schema. + // This file is only loaded if it is part of source URIs, but is not loaded + // otherwise. It is enabled for the following formats: AVRO, PARQUET, ORC. + google.protobuf.StringValue reference_file_schema_uri = 45 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. When set, configures hive partitioning support. + // Not all storage formats support hive partitioning -- requesting hive + // partitioning on an unsupported format will lead to an error, as will + // providing an invalid specification. + HivePartitioningOptions hive_partitioning_options = 37 + [(google.api.field_behavior) = OPTIONAL]; + + // Defines the list of possible SQL data types to which the source decimal + // values are converted. This list and the precision and the scale parameters + // of the decimal field determine the target type. In the order of NUMERIC, + // BIGNUMERIC, and STRING, a + // type is picked if it is in the specified list and if it supports the + // precision and the scale. STRING supports all precision and scale values. + // If none of the listed types supports the precision and the scale, the type + // supporting the widest range in the specified list is picked, and if a value + // exceeds the supported range when reading the data, an error will be thrown. + // + // Example: Suppose the value of this field is ["NUMERIC", "BIGNUMERIC"]. + // If (precision,scale) is: + // + // * (38,9) -> NUMERIC; + // * (39,9) -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); + // * (38,10) -> BIGNUMERIC (NUMERIC cannot hold 10 fractional digits); + // * (76,38) -> BIGNUMERIC; + // * (77,38) -> BIGNUMERIC (error if value exceeds supported range). + // + // This field cannot contain duplicate types. The order of the types in this + // field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as + // ["NUMERIC", "BIGNUMERIC"] and NUMERIC always takes precedence over + // BIGNUMERIC. + // + // Defaults to ["NUMERIC", "STRING"] for ORC and ["NUMERIC"] for the other + // file formats. + repeated DecimalTargetType decimal_target_types = 39; + + // Optional. Load option to be used together with source_format + // newline-delimited JSON to indicate that a variant of JSON is being loaded. + // To load newline-delimited GeoJSON, specify GEOJSON (and source_format must + // be set to NEWLINE_DELIMITED_JSON). + JsonExtension json_extension = 41 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Additional properties to set if sourceFormat is set to PARQUET. + ParquetOptions parquet_options = 42 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. When sourceFormat is set to "CSV", this indicates whether the + // embedded ASCII control characters (the first 32 characters in the + // ASCII-table, from + // '\x00' to '\x1F') are preserved. + google.protobuf.BoolValue preserve_ascii_control_characters = 44 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Connection properties which can modify the load job behavior. + // Currently, only the 'session_id' connection property is supported, and is + // used to resolve _SESSION appearing as the dataset id. + repeated ConnectionProperty connection_properties = 46 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If this property is true, the job creates a new session using a + // randomly generated session_id. To continue using a created session with + // subsequent queries, pass the existing session identifier as a + // `ConnectionProperty` value. The session identifier is returned as part of + // the `SessionInfo` message within the query statistics. + // + // The new session's location will be set to `Job.JobReference.location` if it + // is present, otherwise it's set to the default location based on existing + // routing logic. + google.protobuf.BoolValue create_session = 47 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Character map supported for column names in CSV/Parquet loads. + // Defaults to STRICT and can be overridden by Project Config Service. Using + // this option with unsupported load formats will result in an error. + ColumnNameCharacterMap column_name_character_map = 50 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. [Experimental] Configures the load job to copy files directly to + // the destination BigLake managed table, bypassing file content reading and + // rewriting. + // + // Copying files only is supported when all the following are true: + // + // * `source_uris` are located in the same Cloud Storage location as the + // destination table's `storage_uri` location. + // * `source_format` is `PARQUET`. + // * `destination_table` is an existing BigLake managed table. The table's + // schema does not have flexible column names. The table's columns do not + // have type parameters other than precision and scale. + // * No options other than the above are specified. + google.protobuf.BoolValue copy_files_only = 51 + [(google.api.field_behavior) = OPTIONAL]; +}
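To make the CSV-related knobs above concrete, a hedged TypeScript sketch of a load configuration in its JSON (camelCase) form; every field corresponds to one defined above, while the names and values are invented for illustration:

    // Hypothetical CSV load configuration (JSON/camelCase field names).
    const loadConfig = {
      sourceUris: ['gs://my-bucket/exports/*.csv'], // one '*' wildcard, after the bucket name
      destinationTable: {
        projectId: 'my-project',
        datasetId: 'my_dataset',
        tableId: 'events',
      },
      sourceFormat: 'CSV',
      fieldDelimiter: ',',       // the documented default separator
      skipLeadingRows: 1,        // skip a single header row
      nullMarker: '\\N',         // treat "\N" in the file as NULL
      maxBadRecords: 0,          // default: every record must be valid
      createDisposition: 'CREATE_IF_NEEDED',
      writeDisposition: 'WRITE_APPEND', // the default for load jobs per the comment above
    };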
+ +// JobConfigurationTableCopy configures a job that copies data from one table +// to another. +// For more information on copying tables, see [Copy a +// table](https://cloud.google.com/bigquery/docs/managing-tables#copy-table). +message JobConfigurationTableCopy { + // Indicates different operation types supported in table copy job. + enum OperationType { + // Unspecified operation type. + OPERATION_TYPE_UNSPECIFIED = 0; + + // The source and destination table have the same table type. + COPY = 1; + + // The source table type is TABLE and + // the destination table type is SNAPSHOT. + SNAPSHOT = 2; + + // The source table type is SNAPSHOT and + // the destination table type is TABLE. + RESTORE = 3; + + // The source and destination table have the same table type, + // but only bill for unique data. + CLONE = 4; + } + + // [Pick one] Source table to copy.
+ TableReference source_table = 1; + + // [Pick one] Source tables to copy. + repeated TableReference source_tables = 2; + + // [Required] The destination table. + TableReference destination_table = 3; + + // Optional. Specifies whether the job is allowed to create new tables. + // The following values are supported: + // + // * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the + // table. + // * CREATE_NEVER: The table must already exist. If it does not, + // a 'notFound' error is returned in the job result. + // + // The default value is CREATE_IF_NEEDED. + // Creation, truncation and append actions occur as one atomic update + // upon job completion. + string create_disposition = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the action that occurs if the destination table + // already exists. The following values are supported: + // + // * WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the + // table data and uses the schema and table constraints from the source table. + // * WRITE_APPEND: If the table already exists, BigQuery appends the data to + // the table. + // * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' + // error is returned in the job result. + // + // The default value is WRITE_EMPTY. Each action is atomic and only occurs if + // BigQuery is able to complete the job successfully. Creation, truncation and + // append actions occur as one atomic update upon job completion. + string write_disposition = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Custom encryption configuration (e.g., Cloud KMS keys). + EncryptionConfiguration destination_encryption_configuration = 6; + + // Optional. Supported operation types in table copy job. + OperationType operation_type = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The time when the destination table expires. Expired tables will + // be deleted and their storage reclaimed. + google.protobuf.Timestamp destination_expiration_time = 9 + [(google.api.field_behavior) = OPTIONAL]; +} + +// JobConfigurationExtract configures a job that exports data from a BigQuery +// table into Google Cloud Storage. +message JobConfigurationExtract { + // Options related to model extraction. + message ModelExtractOptions { + // The 1-based ID of the trial to be exported from a hyperparameter tuning + // model. If not specified, the trial with id = + // [Model](https://cloud.google.com/bigquery/docs/reference/rest/v2/models#resource:-model).defaultTrialId + // is exported. This field is ignored for models not trained with + // hyperparameter tuning. + google.protobuf.Int64Value trial_id = 1; + } + + // Required. Source reference for the export. + oneof source { + // A reference to the table being exported. + TableReference source_table = 1; + + // A reference to the model being exported. + ModelReference source_model = 9; + } + + // [Pick one] A list of fully-qualified Google Cloud Storage URIs where the + // extracted table should be written. + repeated string destination_uris = 3; + + // Optional. Whether to print out a header row in the results. + // Default is true. Not applicable when extracting models. + google.protobuf.BoolValue print_header = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. When extracting data in CSV format, this defines the + // delimiter to use between fields in the exported data. + // Default is ','. Not applicable when extracting models. 
+ string field_delimiter = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The exported file format. Possible values include CSV, + // NEWLINE_DELIMITED_JSON, PARQUET, or AVRO for tables and ML_TF_SAVED_MODEL + // or ML_XGBOOST_BOOSTER for models. The default value for tables is CSV. + // Tables with nested or repeated fields cannot be exported as CSV. The + // default value for models is ML_TF_SAVED_MODEL. + string destination_format = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The compression type to use for exported files. Possible values + // include DEFLATE, GZIP, NONE, SNAPPY, and ZSTD. The default value is NONE. + // Not all compression formats are supported for all file formats. DEFLATE is + // only supported for Avro. ZSTD is only supported for Parquet. Not applicable + // when extracting models. + string compression = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Whether to use logical types when extracting to AVRO format. Not applicable + // when extracting models. + google.protobuf.BoolValue use_avro_logical_types = 13; + + // Optional. Model extract options only applicable when extracting models. + ModelExtractOptions model_extract_options = 14 + [(google.api.field_behavior) = OPTIONAL]; +} + +message JobConfiguration { + // Output only. The type of the job. Can be QUERY, LOAD, EXTRACT, COPY or + // UNKNOWN. + string job_type = 8; + + // [Pick one] Configures a query job. + JobConfigurationQuery query = 1; + + // [Pick one] Configures a load job. + JobConfigurationLoad load = 2; + + // [Pick one] Copies a table. + JobConfigurationTableCopy copy = 3; + + // [Pick one] Configures an extract job. + JobConfigurationExtract extract = 4; + + // Optional. If set, don't actually run this job. A valid query will return + // a mostly empty response with some processing statistics, while an invalid + // query will return the same error it would if it wasn't a dry run. Behavior + // of non-query jobs is undefined. + google.protobuf.BoolValue dry_run = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Job timeout in milliseconds. If this time limit is exceeded, + // BigQuery will attempt to stop a longer job, but may not always succeed in + // canceling it before the job completes. For example, a job that takes more + // than 60 seconds to complete has a better chance of being stopped than a job + // that takes 10 seconds to complete. + google.protobuf.Int64Value job_timeout_ms = 6 + [(google.api.field_behavior) = OPTIONAL]; + + // The labels associated with this job. You can use these to organize and + // group your jobs. + // Label keys and values can be no longer than 63 characters, can only contain + // lowercase letters, numeric characters, underscores and dashes. + // International characters are allowed. Label values are optional. Label + // keys must start with a letter and each label in the list must have a + // different key. + map<string, string> labels = 7; +}
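Putting the pieces together, a hedged TypeScript sketch of a top-level JobConfiguration wrapping a query, with dryRun used for validation and byte estimation; the object follows the JSON mapping of the message above and is not a generated type:

    // Hypothetical dry-run job configuration (JSON/camelCase form).
    const jobConfiguration = {
      query: {
        query: 'SELECT name FROM `my-project.my_dataset.users`',
        useLegacySql: false,        // run as GoogleSQL
      },
      dryRun: true,                 // validate and estimate; nothing is executed
      jobTimeoutMs: '600000',       // Int64 wrapper values travel as strings in JSON
      labels: { team: 'data-eng' }, // lowercase letters, digits, _ and - only
    };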
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/job_creation_reason.proto b/test-fixtures/protos/google/cloud/bigquery/v2/job_creation_reason.proto new file mode 100644 index 000000000..0cede032b --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/job_creation_reason.proto @@ -0,0 +1,60 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "JobCreationReasonProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Reason why a Job was created from a +// [`jobs.query`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query) +// method when used with `JOB_CREATION_OPTIONAL` Job creation mode. +// +// For +// [`jobs.insert`](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert) +// method calls it will always be `REQUESTED`. +// +// [Preview](https://cloud.google.com/products/#product-launch-stages) +message JobCreationReason { + // Indicates the high level reason why a job was created. + enum Code { + // Reason is not specified. + CODE_UNSPECIFIED = 0; + + // Job creation was requested. + REQUESTED = 1; + + // The query request ran beyond a system defined timeout specified by the + // [timeoutMs field in the + // QueryRequest](https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#queryrequest). + // As a result it was considered a long running operation for which a job + // was created. + LONG_RUNNING = 2; + + // The results from the query cannot fit in the response. + LARGE_RESULTS = 3; + + // BigQuery has determined that the query needs to be executed as a Job. + OTHER = 4; + } + + // Output only. Specifies the high level reason why a Job was created. + Code code = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/job_reference.proto b/test-fixtures/protos/google/cloud/bigquery/v2/job_reference.proto new file mode 100644 index 000000000..d7f3ece6f --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/job_reference.proto @@ -0,0 +1,45 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// A job reference is a fully qualified identifier for referring to a job. +message JobReference { + // Required. The ID of the project containing this job. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the job.
The ID must contain only letters (a-z, A-Z), + // numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 + // characters. + string job_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The geographic location of the job. The default value is US. + // + // For more information about BigQuery locations, see: + // https://cloud.google.com/bigquery/docs/locations + google.protobuf.StringValue location = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // This field should not be used. + repeated string location_alternative = 5; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/job_stats.proto b/test-fixtures/protos/google/cloud/bigquery/v2/job_stats.proto new file mode 100644 index 000000000..877e00392 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/job_stats.proto @@ -0,0 +1,1439 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/v2/dataset_reference.proto"; +import "google/cloud/bigquery/v2/model.proto"; +import "google/cloud/bigquery/v2/query_parameter.proto"; +import "google/cloud/bigquery/v2/routine_reference.proto"; +import "google/cloud/bigquery/v2/row_access_policy_reference.proto"; +import "google/cloud/bigquery/v2/session_info.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobStatsProto"; +option java_package = "com.google.cloud.bigquery.v2"; +option (google.api.resource_definition) = { + type: "cloudkms.googleapis.com/CryptoKey" + pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}" +}; +option (google.api.resource_definition) = { + type: "storage.googleapis.com/Bucket" + pattern: "*" +}; + +// An operation within a stage. +message ExplainQueryStep { + // Machine-readable operation type. + string kind = 1; + + // Human-readable description of the step(s). + repeated string substeps = 2; +} + +// A single stage of query execution. +message ExplainQueryStage { + // Indicates the type of compute mode. + enum ComputeMode { + // ComputeMode type not specified. + COMPUTE_MODE_UNSPECIFIED = 0; + + // This stage was processed using BigQuery slots. + BIGQUERY = 1; + + // This stage was processed using BI Engine compute. + BI_ENGINE = 2; + } + + // Human-readable name for the stage. + string name = 1; + + // Unique ID for the stage within the plan. + google.protobuf.Int64Value id = 2; + + // Stage start time represented as milliseconds since the epoch. + int64 start_ms = 3; + + // Stage end time represented as milliseconds since the epoch. 
+ int64 end_ms = 4; + + // IDs for stages that are inputs to this stage. + repeated int64 input_stages = 5; + + // Relative amount of time the average shard spent waiting to be + // scheduled. + google.protobuf.DoubleValue wait_ratio_avg = 6; + + // Milliseconds the average shard spent waiting to be scheduled. + google.protobuf.Int64Value wait_ms_avg = 7; + + // Relative amount of time the slowest shard spent waiting to be + // scheduled. + google.protobuf.DoubleValue wait_ratio_max = 8; + + // Milliseconds the slowest shard spent waiting to be scheduled. + google.protobuf.Int64Value wait_ms_max = 9; + + // Relative amount of time the average shard spent reading input. + google.protobuf.DoubleValue read_ratio_avg = 10; + + // Milliseconds the average shard spent reading input. + google.protobuf.Int64Value read_ms_avg = 11; + + // Relative amount of time the slowest shard spent reading input. + google.protobuf.DoubleValue read_ratio_max = 12; + + // Milliseconds the slowest shard spent reading input. + google.protobuf.Int64Value read_ms_max = 13; + + // Relative amount of time the average shard spent on CPU-bound tasks. + google.protobuf.DoubleValue compute_ratio_avg = 14; + + // Milliseconds the average shard spent on CPU-bound tasks. + google.protobuf.Int64Value compute_ms_avg = 15; + + // Relative amount of time the slowest shard spent on CPU-bound tasks. + google.protobuf.DoubleValue compute_ratio_max = 16; + + // Milliseconds the slowest shard spent on CPU-bound tasks. + google.protobuf.Int64Value compute_ms_max = 17; + + // Relative amount of time the average shard spent on writing output. + google.protobuf.DoubleValue write_ratio_avg = 18; + + // Milliseconds the average shard spent on writing output. + google.protobuf.Int64Value write_ms_avg = 19; + + // Relative amount of time the slowest shard spent on writing output. + google.protobuf.DoubleValue write_ratio_max = 20; + + // Milliseconds the slowest shard spent on writing output. + google.protobuf.Int64Value write_ms_max = 21; + + // Total number of bytes written to shuffle. + google.protobuf.Int64Value shuffle_output_bytes = 22; + + // Total number of bytes written to shuffle and spilled to disk. + google.protobuf.Int64Value shuffle_output_bytes_spilled = 23; + + // Number of records read into the stage. + google.protobuf.Int64Value records_read = 24; + + // Number of records written by the stage. + google.protobuf.Int64Value records_written = 25; + + // Number of parallel input segments to be processed + google.protobuf.Int64Value parallel_inputs = 26; + + // Number of parallel input segments completed. + google.protobuf.Int64Value completed_parallel_inputs = 27; + + // Current status for this stage. + string status = 28; + + // List of operations within the stage in dependency order (approximately + // chronological). + repeated ExplainQueryStep steps = 29; + + // Slot-milliseconds used by the stage. + google.protobuf.Int64Value slot_ms = 30; + + // Output only. Compute mode for this stage. + ComputeMode compute_mode = 31 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Summary of the state of query execution at a given time. +message QueryTimelineSample { + // Milliseconds elapsed since the start of query execution. + google.protobuf.Int64Value elapsed_ms = 1; + + // Cumulative slot-ms consumed by the query. + google.protobuf.Int64Value total_slot_ms = 2; + + // Total units of work remaining for the query. This number can be revised + // (increased or decreased) while the query is running. 
+ google.protobuf.Int64Value pending_units = 3; + + // Total parallel units of work completed by this query. + google.protobuf.Int64Value completed_units = 4; + + // Total number of active workers. This does not correspond directly to + // slot usage. This is the largest value observed since the last sample. + google.protobuf.Int64Value active_units = 5; + + // Units of work that can be scheduled immediately. Providing additional slots + // for these units of work will accelerate the query, if no other query in + // the reservation needs additional slots. + google.protobuf.Int64Value estimated_runnable_units = 7; +} + +// The external service cost is a portion of the total cost; these costs are not +// additive with total_bytes_billed. Moreover, this field only tracks external +// service costs that will show up as BigQuery costs (e.g. training BigQuery +// ML job with google cloud CAIP or Automl Tables services), not other costs +// which may be accrued by running the query (e.g. reading from Bigtable or +// Cloud Storage). The external service costs with different billing sku (e.g. +// CAIP job is charged based on VM usage) are converted to BigQuery +// billed_bytes and slot_ms with equivalent amount of US dollars. Services may +// not directly correlate to these metrics, but these are the equivalents for +// billing purposes. +// Output only. +message ExternalServiceCost { + // External service name. + string external_service = 1; + + // External service cost in terms of bigquery bytes processed. + google.protobuf.Int64Value bytes_processed = 2; + + // External service cost in terms of bigquery bytes billed. + google.protobuf.Int64Value bytes_billed = 3; + + // External service cost in terms of bigquery slot milliseconds. + google.protobuf.Int64Value slot_ms = 4; + + // Non-preemptable reserved slots used for external job. + // For example, reserved slots for Cloud AI Platform job are the VM usages + // converted to BigQuery slot with equivalent amount of price. + int64 reserved_slot_count = 5; +} + +// Statistics for the EXPORT DATA statement as part of Query Job. EXTRACT +// JOB statistics are populated in JobStatistics4. +message ExportDataStatistics { + // Number of destination files generated in case of EXPORT DATA + // statement only. + google.protobuf.Int64Value file_count = 1; + + // [Alpha] Number of destination rows generated in case of EXPORT DATA + // statement only. + google.protobuf.Int64Value row_count = 2; +} + +// Reason why BI Engine didn't accelerate the query (or sub-query). +message BiEngineReason { + // Indicates the high-level reason for no/partial acceleration + enum Code { + // BiEngineReason not specified. + CODE_UNSPECIFIED = 0; + + // No reservation available for BI Engine acceleration. + NO_RESERVATION = 1; + + // Not enough memory available for BI Engine acceleration. + INSUFFICIENT_RESERVATION = 2; + + // This particular SQL text is not supported for acceleration by BI Engine. + UNSUPPORTED_SQL_TEXT = 4; + + // Input too large for acceleration by BI Engine. + INPUT_TOO_LARGE = 5; + + // Catch-all code for all other cases for partial or disabled acceleration. + OTHER_REASON = 6; + + // One or more tables were not eligible for BI Engine acceleration. + TABLE_EXCLUDED = 7; + } + + // Output only. High-level BI Engine reason for partial or disabled + // acceleration + Code code = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Free form human-readable reason for partial or disabled + // acceleration.
+ string message = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a BI Engine specific query. +// Populated as part of JobStatistics2 +message BiEngineStatistics { + // Indicates the type of BI Engine acceleration. + enum BiEngineMode { + // BiEngineMode type not specified. + ACCELERATION_MODE_UNSPECIFIED = 0; + + // BI Engine disabled the acceleration. bi_engine_reasons + // specifies a more detailed reason. + DISABLED = 1; + + // Part of the query was accelerated using BI Engine. + // See bi_engine_reasons for why parts of the query were not + // accelerated. + PARTIAL = 2; + + // All of the query was accelerated using BI Engine. + FULL = 3; + } + + // Indicates the type of BI Engine acceleration. + enum BiEngineAccelerationMode { + // BiEngineMode type not specified. + BI_ENGINE_ACCELERATION_MODE_UNSPECIFIED = 0; + + // BI Engine acceleration was attempted but disabled. bi_engine_reasons + // specifies a more detailed reason. + BI_ENGINE_DISABLED = 1; + + // Some inputs were accelerated using BI Engine. + // See bi_engine_reasons for why parts of the query were not + // accelerated. + PARTIAL_INPUT = 2; + + // All of the query inputs were accelerated using BI Engine. + FULL_INPUT = 3; + + // All of the query was accelerated using BI Engine. + FULL_QUERY = 4; + } + + // Output only. Specifies which mode of BI Engine acceleration was performed + // (if any). + BiEngineMode bi_engine_mode = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Specifies which mode of BI Engine acceleration was performed + // (if any). + BiEngineAccelerationMode acceleration_mode = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // In case of DISABLED or PARTIAL bi_engine_mode, these contain the + // explanatory reasons as to why BI Engine could not accelerate. + // In case the full query was accelerated, this field is not populated. + repeated BiEngineReason bi_engine_reasons = 2; +} + +// Reason about why no search index was used in the search query (or +// sub-query). +message IndexUnusedReason { + // Indicates the high-level reason for the scenario when no search index was + // used. + enum Code { + // Code not specified. + CODE_UNSPECIFIED = 0; + + // Indicates the search index configuration has not been created. + INDEX_CONFIG_NOT_AVAILABLE = 1; + + // Indicates the search index creation has not been completed. + PENDING_INDEX_CREATION = 2; + + // Indicates the base table has been truncated (rows have been removed + // from table with TRUNCATE TABLE statement) since the last time the search + // index was refreshed. + BASE_TABLE_TRUNCATED = 3; + + // Indicates the search index configuration has been changed since the last + // time the search index was refreshed. + INDEX_CONFIG_MODIFIED = 4; + + // Indicates the search query accesses data at a timestamp before the last + // time the search index was refreshed. + TIME_TRAVEL_QUERY = 5; + + // Indicates the usage of search index will not contribute to any pruning + // improvement for the search function, e.g. when the search predicate is in + // a disjunction with other non-search predicates. + NO_PRUNING_POWER = 6; + + // Indicates the search index does not cover all fields in the search + // function. + UNINDEXED_SEARCH_FIELDS = 7; + + // Indicates the search index does not support the given search query + // pattern. + UNSUPPORTED_SEARCH_PATTERN = 8; + + // Indicates the query has been optimized by using a materialized view. 
+ OPTIMIZED_WITH_MATERIALIZED_VIEW = 9; + + // Indicates the query has been secured by data masking, and thus search + // indexes are not applicable. + SECURED_BY_DATA_MASKING = 11; + + // Indicates that the search index and the search function call do not + // have the same text analyzer. + MISMATCHED_TEXT_ANALYZER = 12; + + // Indicates the base table is too small (below a certain threshold). + // The index does not provide noticeable search performance gains + // when the base table is too small. + BASE_TABLE_TOO_SMALL = 13; + + // Indicates that the total size of indexed base tables in your organization + // exceeds your region's limit and the index is not used in the query. To + // index larger base tables, you can + // use + // your own reservation for index-management jobs. + BASE_TABLE_TOO_LARGE = 14; + + // Indicates that the estimated performance gain from using the search index + // is too low for the given search query. + ESTIMATED_PERFORMANCE_GAIN_TOO_LOW = 15; + + // Indicates that search indexes can not be used for search query with + // STANDARD edition. + NOT_SUPPORTED_IN_STANDARD_EDITION = 17; + + // Indicates that an option in the search function that cannot make use of + // the index has been selected. + INDEX_SUPPRESSED_BY_FUNCTION_OPTION = 18; + + // Indicates that the query was cached, and thus the search index was not + // used. + QUERY_CACHE_HIT = 19; + + // The index cannot be used in the search query because it is stale. + STALE_INDEX = 20; + + // Indicates an internal error that causes the search index to be unused. + INTERNAL_ERROR = 10; + + // Indicates that the reason search indexes cannot be used in the query is + // not covered by any of the other IndexUnusedReason options. + OTHER_REASON = 16; + } + + // Specifies the high-level reason for the scenario when no search index was + // used. + optional Code code = 1; + + // Free form human-readable reason for the scenario when no search index was + // used. + optional string message = 2; + + // Specifies the base table involved in the reason that no search index was + // used. + optional TableReference base_table = 3; + + // Specifies the name of the unused search index, if available. + optional string index_name = 4; +} + +// Statistics for a search query. +// Populated as part of JobStatistics2. +message SearchStatistics { + // Indicates the type of search index usage in the entire search query. + enum IndexUsageMode { + // Index usage mode not specified. + INDEX_USAGE_MODE_UNSPECIFIED = 0; + + // No search indexes were used in the search query. See + // [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for detailed reasons. + UNUSED = 1; + + // Part of the search query used search indexes. See [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for why other parts of the query did not use search indexes. + PARTIALLY_USED = 2; + + // The entire search query used search indexes. + FULLY_USED = 4; + } + + // Specifies the index usage mode for the query. + IndexUsageMode index_usage_mode = 1; + + // When `indexUsageMode` is `UNUSED` or `PARTIALLY_USED`, this field explains + // why indexes were not used in all or part of the search query. If + // `indexUsageMode` is `FULLY_USED`, this field is not populated. + repeated IndexUnusedReason index_unused_reasons = 2; +} + +// Statistics for a vector search query. +// Populated as part of JobStatistics2. 
+message VectorSearchStatistics { + // Indicates the type of vector index usage in the entire vector search query. + enum IndexUsageMode { + // Index usage mode not specified. + INDEX_USAGE_MODE_UNSPECIFIED = 0; + + // No vector indexes were used in the vector search query. See + // [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for detailed reasons. + UNUSED = 1; + + // Part of the vector search query used vector indexes. See + // [`indexUnusedReasons`] + // (/bigquery/docs/reference/rest/v2/Job#IndexUnusedReason) + // for why other parts of the query did not use vector indexes. + PARTIALLY_USED = 2; + + // The entire vector search query used vector indexes. + FULLY_USED = 4; + } + + // Specifies the index usage mode for the query. + IndexUsageMode index_usage_mode = 1; + + // When `indexUsageMode` is `UNUSED` or `PARTIALLY_USED`, this field explains + // why indexes were not used in all or part of the vector search query. If + // `indexUsageMode` is `FULLY_USED`, this field is not populated. + repeated IndexUnusedReason index_unused_reasons = 2; +} + +// Query optimization information for a QUERY job. +message QueryInfo { + // Output only. Information about query optimizations. + google.protobuf.Struct optimization_details = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a LOAD query. +message LoadQueryStatistics { + // Output only. Number of source files in a LOAD query. + google.protobuf.Int64Value input_files = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of bytes of source data in a LOAD query. + google.protobuf.Int64Value input_file_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of rows imported in a LOAD query. + // Note that while a LOAD query is in the running state, this value may + // change. + google.protobuf.Int64Value output_rows = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Size of the loaded data in bytes. Note that while a LOAD query + // is in the running state, this value may change. + google.protobuf.Int64Value output_bytes = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of bad records encountered while processing a LOAD + // query. Note that if the job has failed because of more bad records + // encountered than the maximum allowed in the load job configuration, then + // this number can be less than the total number of bad records present in the + // input data. + google.protobuf.Int64Value bad_records = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a query job. +message JobStatistics2 { + // Output only. Describes execution plan for the query. + repeated ExplainQueryStage query_plan = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The original estimate of bytes processed for the job. + google.protobuf.Int64Value estimated_bytes_processed = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes a timeline of job execution. + repeated QueryTimelineSample timeline = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total number of partitions processed from all partitioned + // tables referenced in the job. + google.protobuf.Int64Value total_partitions_processed = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total bytes processed for the job. + google.protobuf.Int64Value total_bytes_processed = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
For dry-run jobs, totalBytesProcessed is an estimate and this + // field specifies the accuracy of the estimate. Possible values can be: + // UNKNOWN: accuracy of the estimate is unknown. + // PRECISE: estimate is precise. + // LOWER_BOUND: estimate is lower bound of what the query would cost. + // UPPER_BOUND: estimate is upper bound of what the query would cost. + string total_bytes_processed_accuracy = 21 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If the project is configured to use on-demand pricing, + // then this field contains the total bytes billed for the job. + // If the project is configured to use flat-rate pricing, then you are + // not billed for bytes and this field is informational only. + google.protobuf.Int64Value total_bytes_billed = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Billing tier for the job. This is a BigQuery-specific concept + // which is not related to the Google Cloud notion of "free tier". The value + // here is a measure of the query's resource consumption relative to the + // amount of data scanned. For on-demand queries, the limit is 100, and all + // queries within this limit are billed at the standard on-demand rates. + // On-demand queries that exceed this limit will fail with a + // billingTierLimitExceeded error. + google.protobuf.Int32Value billing_tier = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Slot-milliseconds for the job. + google.protobuf.Int64Value total_slot_ms = 8 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Whether the query result was fetched from the query cache. + google.protobuf.BoolValue cache_hit = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced tables for the job. Queries that reference more + // than 50 tables will not have a complete list. + repeated TableReference referenced_tables = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced routines for the job. + repeated RoutineReference referenced_routines = 24 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The schema of the results. Present only for successful dry + // run of non-legacy SQL queries. + TableSchema schema = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of rows affected by a DML statement. Present + // only for DML statements INSERT, UPDATE or DELETE. + google.protobuf.Int64Value num_dml_affected_rows = 12 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Detailed statistics for DML statements INSERT, UPDATE, DELETE, + // MERGE or TRUNCATE. + DmlStats dml_stats = 32 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. GoogleSQL only: list of undeclared query + // parameters detected during a dry run validation. + repeated QueryParameter undeclared_query_parameters = 13 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The type of query statement, if valid. + // Possible values: + // + // * `SELECT`: + // [`SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_list) + // statement. + // * `ASSERT`: + // [`ASSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements#assert) + // statement. + // * `INSERT`: + // [`INSERT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#insert_statement) + // statement. 
+ // * `UPDATE`: + // [`UPDATE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#update_statement) + // statement. + // * `DELETE`: + // [`DELETE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) + // statement. + // * `MERGE`: + // [`MERGE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) + // statement. + // * `CREATE_TABLE`: [`CREATE + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement) + // statement, without `AS SELECT`. + // * `CREATE_TABLE_AS_SELECT`: [`CREATE TABLE AS + // SELECT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#query_statement) + // statement. + // * `CREATE_VIEW`: [`CREATE + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_view_statement) + // statement. + // * `CREATE_MODEL`: [`CREATE + // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#create_model_statement) + // statement. + // * `CREATE_MATERIALIZED_VIEW`: [`CREATE MATERIALIZED + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement) + // statement. + // * `CREATE_FUNCTION`: [`CREATE + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_function_statement) + // statement. + // * `CREATE_TABLE_FUNCTION`: [`CREATE TABLE + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_function_statement) + // statement. + // * `CREATE_PROCEDURE`: [`CREATE + // PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure) + // statement. + // * `CREATE_ROW_ACCESS_POLICY`: [`CREATE ROW ACCESS + // POLICY`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_row_access_policy_statement) + // statement. + // * `CREATE_SCHEMA`: [`CREATE + // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_schema_statement) + // statement. + // * `CREATE_SNAPSHOT_TABLE`: [`CREATE SNAPSHOT + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_snapshot_table_statement) + // statement. + // * `CREATE_SEARCH_INDEX`: [`CREATE SEARCH + // INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_search_index_statement) + // statement. + // * `DROP_TABLE`: [`DROP + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_statement) + // statement. + // * `DROP_EXTERNAL_TABLE`: [`DROP EXTERNAL + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_external_table_statement) + // statement. + // * `DROP_VIEW`: [`DROP + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_view_statement) + // statement. + // * `DROP_MODEL`: [`DROP + // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model) + // statement. + // * `DROP_MATERIALIZED_VIEW`: [`DROP MATERIALIZED + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement) + // statement. 
+ // * `DROP_FUNCTION` : [`DROP + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_function_statement) + // statement. + // * `DROP_TABLE_FUNCTION` : [`DROP TABLE + // FUNCTION`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_table_function) + // statement. + // * `DROP_PROCEDURE`: [`DROP + // PROCEDURE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_procedure_statement) + // statement. + // * `DROP_SEARCH_INDEX`: [`DROP SEARCH + // INDEX`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_search_index) + // statement. + // * `DROP_SCHEMA`: [`DROP + // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_schema_statement) + // statement. + // * `DROP_SNAPSHOT_TABLE`: [`DROP SNAPSHOT + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_snapshot_table_statement) + // statement. + // * `DROP_ROW_ACCESS_POLICY`: [`DROP [ALL] ROW ACCESS + // POLICY|POLICIES`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_row_access_policy_statement) + // statement. + // * `ALTER_TABLE`: [`ALTER + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement) + // statement. + // * `ALTER_VIEW`: [`ALTER + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement) + // statement. + // * `ALTER_MATERIALIZED_VIEW`: [`ALTER MATERIALIZED + // VIEW`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement) + // statement. + // * `ALTER_SCHEMA`: [`ALTER + // SCHEMA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#aalter_schema_set_options_statement) + // statement. + // * `SCRIPT`: + // [`SCRIPT`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language). + // * `TRUNCATE_TABLE`: [`TRUNCATE + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#truncate_table_statement) + // statement. + // * `CREATE_EXTERNAL_TABLE`: [`CREATE EXTERNAL + // TABLE`](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement) + // statement. + // * `EXPORT_DATA`: [`EXPORT + // DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement) + // statement. + // * `EXPORT_MODEL`: [`EXPORT + // MODEL`](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-export-model) + // statement. + // * `LOAD_DATA`: [`LOAD + // DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#load_data_statement) + // statement. + // * `CALL`: + // [`CALL`](https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call) + // statement. + string statement_type = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL operation performed, possibly + // dependent on the pre-existence of the DDL target. + string ddl_operation_performed = 15 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL target table. Present only for + // CREATE/DROP TABLE/VIEW and DROP ALL ROW ACCESS POLICIES queries. 
+ TableReference ddl_target_table = 16 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The table after rename. Present only for ALTER TABLE RENAME TO + // query. + TableReference ddl_destination_table = 31 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL target row access policy. Present only for + // CREATE/DROP ROW ACCESS POLICY queries. + RowAccessPolicyReference ddl_target_row_access_policy = 26 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of row access policies affected by a DDL statement. + // Present only for DROP ALL ROW ACCESS POLICIES queries. + google.protobuf.Int64Value ddl_affected_row_access_policy_count = 27 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. [Beta] The DDL target routine. Present only for + // CREATE/DROP FUNCTION/PROCEDURE queries. + RoutineReference ddl_target_routine = 22 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The DDL target dataset. Present only for CREATE/ALTER/DROP + // SCHEMA(dataset) queries. + DatasetReference ddl_target_dataset = 30 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of a BigQuery ML training job. + MlStatistics ml_statistics = 23 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Stats for EXPORT DATA statement. + ExportDataStatistics export_data_statistics = 25 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Job cost breakdown as bigquery internal cost and external + // service costs. + repeated ExternalServiceCost external_service_costs = 28 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. BI Engine specific Statistics. + BiEngineStatistics bi_engine_statistics = 29 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for a LOAD query. + LoadQueryStatistics load_query_statistics = 33 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced table for DCL statement. + TableReference dcl_target_table = 34 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced view for DCL statement. + TableReference dcl_target_view = 35 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Referenced dataset for DCL statement. + DatasetReference dcl_target_dataset = 36 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Search query specific statistics. + SearchStatistics search_statistics = 37 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Vector Search query specific statistics. + VectorSearchStatistics vector_search_statistics = 44 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Performance insights. + PerformanceInsights performance_insights = 38 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Query optimization information for a QUERY job. + QueryInfo query_info = 39 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of a Spark procedure job. + SparkStatistics spark_statistics = 40 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total bytes transferred for cross-cloud queries such as Cross + // Cloud Transfer and CREATE TABLE AS SELECT (CTAS). + google.protobuf.Int64Value transferred_bytes = 41 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics of materialized views of a query job. + MaterializedViewStatistics materialized_view_statistics = 42 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
Statistics of metadata cache usage in a query for BigLake + // tables. + MetadataCacheStatistics metadata_cache_statistics = 43 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a load job. +message JobStatistics3 { + // Output only. Number of source files in a load job. + google.protobuf.Int64Value input_files = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of bytes of source data in a load job. + google.protobuf.Int64Value input_file_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of rows imported in a load job. + // Note that while an import job is in the running state, this + // value may change. + google.protobuf.Int64Value output_rows = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Size of the loaded data in bytes. Note + // that while a load job is in the running state, this value may change. + google.protobuf.Int64Value output_bytes = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The number of bad records encountered. Note that if the job + // has failed because of more bad records encountered than the maximum + // allowed in the load job configuration, then this number can be less than + // the total number of bad records present in the input data. + google.protobuf.Int64Value bad_records = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes a timeline of job execution. + repeated QueryTimelineSample timeline = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for an extract job. +message JobStatistics4 { + // Output only. Number of files per destination URI or URI pattern + // specified in the extract configuration. These values will be in the same + // order as the URIs specified in the 'destinationUris' field. + repeated int64 destination_uri_file_counts = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of user bytes extracted into the result. This is the + // byte count as computed by BigQuery for billing purposes + // and doesn't have any relationship with the number of actual + // result bytes extracted in the desired format. + google.protobuf.Int64Value input_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Describes a timeline of job execution. + repeated QueryTimelineSample timeline = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a copy job. +message CopyJobStatistics { + // Output only. Number of rows copied to the destination table. + google.protobuf.Int64Value copied_rows = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of logical bytes copied to the destination table. + google.protobuf.Int64Value copied_logical_bytes = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Job statistics specific to a BigQuery ML training job. +message MlStatistics { + // Training type. + enum TrainingType { + // Unspecified training type. + TRAINING_TYPE_UNSPECIFIED = 0; + + // Single training with fixed parameter space. + SINGLE_TRAINING = 1; + + // [Hyperparameter tuning + // training](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview). + HPARAM_TUNING = 2; + } + + // Output only. Maximum number of iterations specified as max_iterations in + // the 'CREATE MODEL' query. The actual number of iterations may be less than + // this number due to early stop. 
+ int64 max_iterations = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Results for all completed iterations. + // Empty for [hyperparameter tuning + // jobs](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview). + repeated Model.TrainingRun.IterationResult iteration_results = 2; + + // Output only. The type of the model that is being trained. + Model.ModelType model_type = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Training type of the job. + TrainingType training_type = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Trials of a [hyperparameter tuning + // job](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview) + // sorted by trial_id. + repeated Model.HparamTuningTrial hparam_trials = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Job statistics specific to the child job of a script. +message ScriptStatistics { + // Describes how the job is evaluated. + enum EvaluationKind { + // Default value. + EVALUATION_KIND_UNSPECIFIED = 0; + + // The statement appears directly in the script. + STATEMENT = 1; + + // The statement evaluates an expression that appears in the script. + EXPRESSION = 2; + } + + // Represents the location of the statement/expression being evaluated. + // Line and column numbers are defined as follows: + // + // - Line and column numbers start with one. That is, line 1 column 1 denotes + // the start of the script. + // - When inside a stored procedure, all line/column numbers are relative + // to the procedure body, not the script in which the procedure was defined. + // - Start/end positions exclude leading/trailing comments and whitespace. + // The end position always ends with a ";", when present. + // - Multi-byte Unicode characters are treated as just one column. + // - If the original script (or procedure definition) contains TAB characters, + // a tab "snaps" the indentation forward to the nearest multiple of 8 + // characters, plus 1. For example, a TAB on column 1, 2, 3, 4, 5, 6, 7, or 8 + // will advance the next character to column 9. A TAB on column 9, 10, 11, + // 12, 13, 14, 15, or 16 will advance the next character to column 17. + message ScriptStackFrame { + // Output only. One-based start line. + int32 start_line = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. One-based start column. + int32 start_column = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. One-based end line. + int32 end_line = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. One-based end column. + int32 end_column = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Name of the active procedure, empty if in a top-level + // script. + string procedure_id = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Text of the current statement/expression. + string text = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Whether this child job was a statement or expression. + EvaluationKind evaluation_kind = 1; + + // Stack trace showing the line/column/procedure name of each frame on the + // stack at the point where the current evaluation happened. The leaf frame + // is first, the primary script is last. Never empty. + repeated ScriptStackFrame stack_frames = 2; +}
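The TAB rule in the `ScriptStackFrame` comment above reduces to a one-line computation; a minimal sketch in TypeScript (the helper name is hypothetical):

```ts
// A tab "snaps" the indentation forward to the nearest multiple of 8
// characters, plus 1 (columns are one-based), per the comment above.
function columnAfterTab(column: number): number {
  return Math.floor((column - 1) / 8 + 1) * 8 + 1;
}

// columnAfterTab(1)  === 9    columnAfterTab(8)  === 9
// columnAfterTab(9)  === 17   columnAfterTab(16) === 17
```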
+ +// Statistics for row-level security. +message RowLevelSecurityStatistics { + // Whether any accessed data was protected by row access policies. + bool row_level_security_applied = 1; +} + +// Statistics for data-masking. +message DataMaskingStatistics { + // Whether any accessed data was protected by data masking. + bool data_masking_applied = 1; +} + +// Statistics for a single job execution. +message JobStatistics { + // [Alpha] Information of a multi-statement transaction. + message TransactionInfo { + // Output only. [Alpha] Id of the transaction. + string transaction_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Output only. Creation time of this job, in milliseconds since the epoch. + // This field will be present on all jobs. + int64 creation_time = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Start time of this job, in milliseconds since the epoch. + // This field will be present when the job transitions from the PENDING state + // to either RUNNING or DONE. + int64 start_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. End time of this job, in milliseconds since the epoch. This + // field will be present whenever a job is in the DONE state. + int64 end_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Total bytes processed for the job. + google.protobuf.Int64Value total_bytes_processed = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. [TrustedTester] Job progress (0.0 -> 1.0) for LOAD and + // EXTRACT jobs. + google.protobuf.DoubleValue completion_ratio = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Quotas which delayed this job's start time. + repeated string quota_deferments = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for a query job. + JobStatistics2 query = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for a load job. + JobStatistics3 load = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for an extract job. + JobStatistics4 extract = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for a copy job. + CopyJobStatistics copy = 21 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Slot-milliseconds for the job. + google.protobuf.Int64Value total_slot_ms = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Name of the primary reservation assigned to this job. Note + // that this could be different from reservations reported in the reservation + // usage field if parent reservations were used to execute this job. + string reservation_id = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of child jobs executed. + int64 num_child_jobs = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If this is a child job, specifies the job ID of the parent. + string parent_job_id = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If this is a child job of a script, specifies information about + // the context of this job within the script. + ScriptStatistics script_statistics = 14 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for row-level security. Present only for query and + // extract jobs. + RowLevelSecurityStatistics row_level_security_statistics = 16 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Statistics for data-masking. Present only for query and + // extract jobs. + DataMaskingStatistics data_masking_statistics = 20 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only.
[Alpha] Information of the multi-statement transaction if this + // job is part of one. + // + // This property is only expected on a child job or a job that is in a + // session. A script parent job is not part of the transaction started in the + // script. + TransactionInfo transaction_info = 17 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Information of the session if this job is part of one. + SessionInfo session_info = 18 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The duration in milliseconds of the execution of the final + // attempt of this job, as BigQuery may internally re-attempt to execute the + // job. + int64 final_execution_duration_ms = 22 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Name of edition corresponding to the reservation for this job + // at the time of this update. + ReservationEdition edition = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Detailed statistics for DML statements. +message DmlStats { + // Output only. Number of inserted rows. Populated by DML INSERT and MERGE + // statements. + google.protobuf.Int64Value inserted_row_count = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of deleted rows. Populated by DML DELETE, MERGE and + // TRUNCATE statements. + google.protobuf.Int64Value deleted_row_count = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Number of updated rows. Populated by DML UPDATE and MERGE + // statements. + google.protobuf.Int64Value updated_row_count = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Performance insights for the job. +message PerformanceInsights { + // Output only. Average execution ms of previous runs. Indicates the job ran + // slowly compared to previous executions. To find previous executions, use + // INFORMATION_SCHEMA tables and filter jobs with the same query hash. + int64 avg_previous_execution_ms = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Standalone query stage performance insights, for exploring + // potential improvements. + repeated StagePerformanceStandaloneInsight + stage_performance_standalone_insights = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Query stage performance insights compared to previous runs, + // for diagnosing performance regression. + repeated StagePerformanceChangeInsight stage_performance_change_insights = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Performance insights compared to the previous executions for a specific +// stage. +message StagePerformanceChangeInsight { + // Output only. The stage id that the insight mapped to. + int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Input data change insight of the query stage. + optional InputDataChange input_data_change = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Details about the input data change insight. +message InputDataChange { + // Output only. Records read difference percentage compared to a previous run. + float records_read_diff_percentage = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Standalone performance insights for a specific stage. +message StagePerformanceStandaloneInsight { + // Output only. The stage id that the insight mapped to. + int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. True if the stage has a slot contention issue. + optional bool slot_contention = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only.
True if the stage has insufficient shuffle quota. + optional bool insufficient_shuffle_quota = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. If present, the stage had the following reasons for being + // disqualified from BI Engine execution. + repeated BiEngineReason bi_engine_reasons = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. High cardinality joins in the stage. + repeated HighCardinalityJoin high_cardinality_joins = 6 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Partition skew in the stage. + optional PartitionSkew partition_skew = 7 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// High cardinality join detailed information. +message HighCardinalityJoin { + // Output only. Count of left input rows. + int64 left_rows = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Count of right input rows. + int64 right_rows = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Count of the output rows. + int64 output_rows = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The index of the join operator in the ExplainQueryStep lists. + int32 step_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Partition skew detailed information. +message PartitionSkew { + // Details about source stages which produce skewed data. + message SkewSource { + // Output only. Stage id of the skew source stage. + int64 stage_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Output only. Source stages which produce skewed data. + repeated SkewSource skew_sources = 1 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Statistics for a BigSpark query. +// Populated as part of JobStatistics2. +message SparkStatistics { + // Spark job logs can be filtered by these fields in Cloud Logging. + message LoggingInfo { + // Output only. Resource type used for logging. + string resource_type = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Project ID where the Spark logs were written. + string project_id = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Output only. Spark job ID if a Spark job is created successfully. + optional string spark_job_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Location where the Spark job is executed. + // A location is selected by BigQuery for jobs configured to run in a + // multi-region. + optional string spark_job_location = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Endpoints returned from Dataproc. + // Key list: + // - history_server_endpoint: A link to Spark job UI. + map<string, string> endpoints = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Logging info is used to generate a link to Cloud Logging. + optional LoggingInfo logging_info = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The Cloud KMS encryption key that is used to protect the + // resources created by the Spark job. If the Spark procedure uses the invoker + // security mode, the Cloud KMS encryption key is either inferred from the + // provided system variable, + // `@@spark_proc_properties.kms_key_name`, or the default key of the BigQuery + // job's project (if the CMEK organization policy is enforced). Otherwise, the + // Cloud KMS key is either inferred from the Spark connection associated with + // the procedure (if it is provided), or from the default key of the Spark + // connection's project if the CMEK organization policy is enforced.
+ // + // Example: + // + // * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]` + optional string kms_key_name = 5 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + } + ]; + + // Output only. The Google Cloud Storage bucket that is used as the default + // file system by the Spark application. This field is only filled when the + // Spark procedure uses the invoker security mode. The `gcsStagingBucket` + // bucket is inferred from the `@@spark_proc_properties.staging_bucket` system + // variable (if it is provided). Otherwise, BigQuery creates a default staging + // bucket for the job and returns the bucket name in this field. + // + // Example: + // + // * `gs://[bucket_name]` + optional string gcs_staging_bucket = 6 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = { type: "storage.googleapis.com/Bucket" } + ]; +} + +// Statistics of materialized views considered in a query job. +message MaterializedViewStatistics { + // Materialized views considered for the query job. Only certain materialized + // views are used. For a detailed list, see the child message. + // + // If many materialized views are considered, then the list might be + // incomplete. + repeated MaterializedView materialized_view = 1; +} + +// A materialized view considered for a query job. +message MaterializedView { + // Reason why a materialized view was not chosen for a query. For more + // information, see [Understand why materialized views were + // rejected](https://cloud.google.com/bigquery/docs/materialized-views-use#understand-rejected). + enum RejectedReason { + // Default unspecified value. + REJECTED_REASON_UNSPECIFIED = 0; + + // View has no cached data because it has not refreshed yet. + NO_DATA = 1; + + // The estimated cost of the view is more expensive than another view or the + // base table. + // + // Note: The estimated cost might not match the billed cost. + COST = 2; + + // View has no cached data because a base table is truncated. + BASE_TABLE_TRUNCATED = 3; + + // View is invalidated because of a data change in one or more base tables. + // It could be any recent change if the + // [`max_staleness`](https://cloud.google.com/bigquery/docs/materialized-views-create#max_staleness) + // option is not set for the view, or otherwise any change outside of the + // staleness window. + BASE_TABLE_DATA_CHANGE = 4; + + // View is invalidated because a base table's partition expiration has + // changed. + BASE_TABLE_PARTITION_EXPIRATION_CHANGE = 5; + + // View is invalidated because a base table's partition has expired. + BASE_TABLE_EXPIRED_PARTITION = 6; + + // View is invalidated because a base table has an incompatible metadata + // change. + BASE_TABLE_INCOMPATIBLE_METADATA_CHANGE = 7; + + // View is invalidated because it was refreshed with a time zone other than + // that of the current job. + TIME_ZONE = 8; + + // View is outside the time travel window. + OUT_OF_TIME_TRAVEL_WINDOW = 9; + + // View is inaccessible to the user because of a fine-grained security + // policy on one of its base tables. + BASE_TABLE_FINE_GRAINED_SECURITY_POLICY = 10; + + // One of the view's base tables is too stale. For example, the cached + // metadata of a BigLake external table needs to be updated. + BASE_TABLE_TOO_STALE = 11; + } + + // The candidate materialized view.
+ optional TableReference table_reference = 1; + + // Whether the materialized view is chosen for the query. + // + // A materialized view can be chosen to rewrite multiple parts of the same + // query. If a materialized view is chosen to rewrite any part of the query, + // then this field is true, even if the materialized view was not chosen to + // rewrite other parts. + optional bool chosen = 2; + + // If present, specifies a best-effort estimation of the bytes saved by using + // the materialized view rather than its base tables. + optional int64 estimated_bytes_saved = 3; + + // If present, specifies the reason why the materialized view was not chosen + // for the query. + optional RejectedReason rejected_reason = 4; +} + +// Table level detail on the usage of metadata caching. Only set for Metadata +// caching eligible tables referenced in the query. +message TableMetadataCacheUsage { + // Reasons for not using metadata caching. + enum UnusedReason { + // Unused reasons not specified. + UNUSED_REASON_UNSPECIFIED = 0; + + // Metadata cache was outside the table's maxStaleness. + EXCEEDED_MAX_STALENESS = 1; + + // Metadata caching feature is not enabled. [Update BigLake tables] + // (/bigquery/docs/create-cloud-storage-table-biglake#update-biglake-tables) + // to enable the metadata caching. + METADATA_CACHING_NOT_ENABLED = 3; + + // Other unknown reason. + OTHER_REASON = 2; + } + + // Metadata caching eligible table referenced in the query. + optional TableReference table_reference = 1; + + // Reason for not using metadata caching for the table. + optional UnusedReason unused_reason = 2; + + // Free form human-readable reason why metadata caching was unused for + // the job. + optional string explanation = 3; + + // Duration since last refresh as of this job for managed tables (indicates + // metadata cache staleness as seen by this job). + google.protobuf.Duration staleness = 5; + + // [Table + // type](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.type). + string table_type = 6; +} + +// Statistics for metadata caching in BigLake tables. +message MetadataCacheStatistics { + // Set for the Metadata caching eligible tables referenced in the query. + repeated TableMetadataCacheUsage table_metadata_cache_usage = 1; +} + +// The type of editions. +// Different features and behaviors are provided to different editions. +// Capacity commitments and reservations are linked to editions. +enum ReservationEdition { + // Default value, which will be treated as ENTERPRISE. + RESERVATION_EDITION_UNSPECIFIED = 0; + + // Standard edition. + STANDARD = 1; + + // Enterprise edition. + ENTERPRISE = 2; + + // Enterprise plus edition. + ENTERPRISE_PLUS = 3; +}
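Closing out job_stats.proto: a minimal TypeScript sketch of reporting why the materialized views above were considered but not chosen, assuming the proto3 JSON mapping (camelCase fields, int64 serialized as a decimal string). The interfaces are hand-written stand-ins, not the generated client surface.

```ts
interface TableReference { projectId?: string; datasetId?: string; tableId?: string; }

interface MaterializedView {
  tableReference?: TableReference;
  chosen?: boolean;
  estimatedBytesSaved?: string; // int64 arrives as a decimal string in JSON
  rejectedReason?: string;      // e.g. 'BASE_TABLE_DATA_CHANGE'
}

function describeRejectedViews(views: MaterializedView[]): string[] {
  return views
    .filter(v => !v.chosen)
    .map(v => {
      const t = v.tableReference;
      const name = t ? `${t.projectId}.${t.datasetId}.${t.tableId}` : '<unknown view>';
      return `${name}: ${v.rejectedReason ?? 'REJECTED_REASON_UNSPECIFIED'}`;
    });
}
```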
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/job_status.proto b/test-fixtures/protos/google/cloud/bigquery/v2/job_status.proto new file mode 100644 index 000000000..71f0a33dc --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/job_status.proto @@ -0,0 +1,40 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/error.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "JobStatusProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message JobStatus { + // Output only. Final error result of the job. If present, indicates that the + // job has completed and was unsuccessful. + ErrorProto error_result = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The first errors encountered during the running of the job. + // The final message includes the number of errors that caused the process to + // stop. Errors here do not necessarily mean that the job has not completed or + // was unsuccessful. + repeated ErrorProto errors = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Running state of the job. Valid states include 'PENDING', + // 'RUNNING', and 'DONE'. + string state = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/json_extension.proto b/test-fixtures/protos/google/cloud/bigquery/v2/json_extension.proto new file mode 100644 index 000000000..49338d746 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/json_extension.proto @@ -0,0 +1,34 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "JsonExtensionProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Used to indicate that a JSON variant, rather than normal JSON, is being used +// as the source_format. This should only be used in combination with the +// JSON source format. +enum JsonExtension { + // The default if the provided value is not one included in the enum, or the value + // is not specified. The source format is parsed without any modification. + JSON_EXTENSION_UNSPECIFIED = 0; + + // Use GeoJSON variant of JSON. See https://tools.ietf.org/html/rfc7946. + GEOJSON = 1; +}
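The `JobStatus` message above drives the usual poll-until-DONE loop. A minimal TypeScript sketch (`getJob` is a caller-supplied fetcher, and the interfaces are hand-written stand-ins for the generated types):

```ts
interface ErrorProto { reason?: string; message?: string; }

interface JobStatus {
  errorResult?: ErrorProto;   // set only if the job completed unsuccessfully
  errors?: ErrorProto[];      // per the comments above, not necessarily fatal
  state?: string;             // 'PENDING' | 'RUNNING' | 'DONE'
}

async function waitForDone(
  getJob: () => Promise<{status?: JobStatus}>,
  pollMs = 1000,
): Promise<JobStatus> {
  for (;;) {
    const {status} = await getJob();
    if (status?.state === 'DONE') {
      if (status.errorResult) {
        throw new Error(status.errorResult.message ?? 'BigQuery job failed');
      }
      return status;
    }
    await new Promise(resolve => setTimeout(resolve, pollMs));
  }
}
```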
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/location_metadata.proto b/test-fixtures/protos/google/cloud/bigquery/v2/location_metadata.proto new file mode 100644 index 000000000..391bd5ae4 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/location_metadata.proto @@ -0,0 +1,30 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "LocationMetadataProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// BigQuery-specific metadata about a location. This will be set on +// google.cloud.location.Location.metadata in Cloud Location API +// responses. +message LocationMetadata { + // The legacy BigQuery location ID, e.g. “EU” for the “europe” location. + // This is for any API consumers that need the legacy “US” and “EU” locations. + string legacy_location_id = 1; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/map_target_type.proto b/test-fixtures/protos/google/cloud/bigquery/v2/map_target_type.proto new file mode 100644 index 000000000..dc66e7d7a --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/map_target_type.proto @@ -0,0 +1,33 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "MapTargetTypeProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Indicates the map target type. Only applies to Parquet maps. +enum MapTargetType { + // In this mode, the map will have the following schema: + // struct map_field_name { repeated struct key_value { key value } }. + MAP_TARGET_TYPE_UNSPECIFIED = 0; + + // In this mode, the map will have the following schema: + // repeated struct map_field_name { key value }. + ARRAY_OF_STRUCT = 1; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/model.proto b/test-fixtures/protos/google/cloud/bigquery/v2/model.proto new file mode 100644 index 000000000..dc3311876 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/model.proto @@ -0,0 +1,2040 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/encryption_config.proto"; +import "google/cloud/bigquery/v2/model_reference.proto"; +import "google/cloud/bigquery/v2/standard_sql.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ModelProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Model Service. +// +// It should not be relied on for production use cases at this time. +service ModelService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Gets the specified model resource by model ID. + rpc GetModel(GetModelRequest) returns (Model) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models/{model_id=*}" + }; + option (google.api.method_signature) = "project_id,dataset_id,model_id"; + } + + // Lists all models in the specified dataset. Requires the READER dataset + // role. After retrieving the list of models, you can get information about a + // particular model by calling the models.get method. + rpc ListModels(ListModelsRequest) returns (ListModelsResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models" + }; + option (google.api.method_signature) = "project_id,dataset_id,max_results"; + } + + // Patch specific fields in the specified model. + rpc PatchModel(PatchModelRequest) returns (Model) { + option (google.api.http) = { + patch: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models/{model_id=*}" + body: "model" + }; + option (google.api.method_signature) = + "project_id,dataset_id,model_id,model"; + } + + // Deletes the model specified by modelId from the dataset. + rpc DeleteModel(DeleteModelRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/models/{model_id=*}" + }; + option (google.api.method_signature) = "project_id,dataset_id,model_id"; + } +} + +// Remote Model Info +message RemoteModelInfo { + // Supported service type for remote model. + enum RemoteServiceType { + // Unspecified remote service type. + REMOTE_SERVICE_TYPE_UNSPECIFIED = 0; + + // V3 Cloud AI Translation API. See more details at [Cloud Translation API] + // (https://cloud.google.com/translate/docs/reference/rest). + CLOUD_AI_TRANSLATE_V3 = 1; + + // V1 Cloud AI Vision API. See more details at [Cloud Vision API] + // (https://cloud.google.com/vision/docs/reference/rest). + CLOUD_AI_VISION_V1 = 2; + + // V1 Cloud AI Natural Language API. See more details at [REST Resource: + // documents](https://cloud.google.com/natural-language/docs/reference/rest/v1/documents). + CLOUD_AI_NATURAL_LANGUAGE_V1 = 3; + + // V2 Speech-to-Text API.
See more details at [Google Cloud Speech-to-Text + // V2 API](https://cloud.google.com/speech-to-text/v2/docs) + CLOUD_AI_SPEECH_TO_TEXT_V2 = 7; + } + + // Remote services are services outside of BigQuery used by remote models for + // predictions. A remote service is backed by either an arbitrary endpoint or + // a selected remote service type, but not both. + oneof remote_service { + // Output only. The endpoint for remote model. + string endpoint = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The remote service type for remote model. + RemoteServiceType remote_service_type = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Output only. Fully qualified name of the user-provided connection object of + // the remote model. Format: + // ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"``` + string connection = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Max number of rows in each batch sent to the remote service. + // If unset, the number of rows in each batch is set dynamically. + int64 max_batching_rows = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The model version for LLM. + string remote_model_version = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The name of the speech recognizer to use for speech + // recognition. The expected format is + // `projects/{project}/locations/{location}/recognizers/{recognizer}`. + // Customers can specify this field at model creation. If not specified, a + // default recognizer `projects/{model + // project}/locations/global/recognizers/_` will be used. See more details at + // [recognizers](https://cloud.google.com/speech-to-text/v2/docs/reference/rest/v2/projects.locations.recognizers) + string speech_recognizer = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Information about a single transform column. +message TransformColumn { + // Output only. Name of the column. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Data type of the column after the transform. + StandardSqlDataType type = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The SQL expression used in the column transform. + string transform_sql = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +message Model { + // Indicates the type of the Model. + enum ModelType { + // Default value. + MODEL_TYPE_UNSPECIFIED = 0; + + // Linear regression model. + LINEAR_REGRESSION = 1; + + // Logistic regression based classification model. + LOGISTIC_REGRESSION = 2; + + // K-means clustering model. + KMEANS = 3; + + // Matrix factorization model. + MATRIX_FACTORIZATION = 4; + + // DNN classifier model. + DNN_CLASSIFIER = 5; + + // An imported TensorFlow model. + TENSORFLOW = 6; + + // DNN regressor model. + DNN_REGRESSOR = 7; + + // An imported XGBoost model. + XGBOOST = 8; + + // Boosted tree regressor model. + BOOSTED_TREE_REGRESSOR = 9; + + // Boosted tree classifier model. + BOOSTED_TREE_CLASSIFIER = 10; + + // ARIMA model. + ARIMA = 11; + + // AutoML Tables regression model. + AUTOML_REGRESSOR = 12; + + // AutoML Tables classification model. + AUTOML_CLASSIFIER = 13; + + // Principal Component Analysis model. + PCA = 14; + + // Wide-and-deep classifier model. + DNN_LINEAR_COMBINED_CLASSIFIER = 16; + + // Wide-and-deep regressor model. + DNN_LINEAR_COMBINED_REGRESSOR = 17; + + // Autoencoder model. + AUTOENCODER = 18; + + // New name for the ARIMA model.
+ ARIMA_PLUS = 19; + + // ARIMA with external regressors. + ARIMA_PLUS_XREG = 23; + + // Random forest regressor model. + RANDOM_FOREST_REGRESSOR = 24; + + // Random forest classifier model. + RANDOM_FOREST_CLASSIFIER = 25; + + // An imported TensorFlow Lite model. + TENSORFLOW_LITE = 26; + + // An imported ONNX model. + ONNX = 28; + + // Model to capture the columns and logic in the TRANSFORM clause along with + // statistics useful for ML analytic functions. + TRANSFORM_ONLY = 29; + } + + // Loss metric to evaluate model training performance. + enum LossType { + // Default value. + LOSS_TYPE_UNSPECIFIED = 0; + + // Mean squared loss, used for linear regression. + MEAN_SQUARED_LOSS = 1; + + // Mean log loss, used for logistic regression. + MEAN_LOG_LOSS = 2; + } + + // Distance metric used to compute the distance between two points. + enum DistanceType { + // Default value. + DISTANCE_TYPE_UNSPECIFIED = 0; + + // Euclidean distance. + EUCLIDEAN = 1; + + // Cosine distance. + COSINE = 2; + } + + // Indicates the method to split input data into multiple tables. + enum DataSplitMethod { + // Default value. + DATA_SPLIT_METHOD_UNSPECIFIED = 0; + + // Splits data randomly. + RANDOM = 1; + + // Splits data with the user-provided tags. + CUSTOM = 2; + + // Splits data sequentially. + SEQUENTIAL = 3; + + // Data split will be skipped. + NO_SPLIT = 4; + + // Splits data automatically: Uses NO_SPLIT if the data size is small. + // Otherwise uses RANDOM. + AUTO_SPLIT = 5; + } + + // Type of supported data frequency for time series forecasting models. + enum DataFrequency { + // Default value. + DATA_FREQUENCY_UNSPECIFIED = 0; + + // Automatically inferred from timestamps. + AUTO_FREQUENCY = 1; + + // Yearly data. + YEARLY = 2; + + // Quarterly data. + QUARTERLY = 3; + + // Monthly data. + MONTHLY = 4; + + // Weekly data. + WEEKLY = 5; + + // Daily data. + DAILY = 6; + + // Hourly data. + HOURLY = 7; + + // Per-minute data. + PER_MINUTE = 8; + } + + // Type of supported holiday regions for time series forecasting models. + enum HolidayRegion { + // Holiday region unspecified. + HOLIDAY_REGION_UNSPECIFIED = 0; + + // Global. + GLOBAL = 1; + + // North America. + NA = 2; + + // Japan and Asia Pacific: Korea, Greater China, India, Australia, and New + // Zealand. + JAPAC = 3; + + // Europe, the Middle East and Africa. + EMEA = 4; + + // Latin America and the Caribbean.
+ LAC = 5; + + // United Arab Emirates + AE = 6; + + // Argentina + AR = 7; + + // Austria + AT = 8; + + // Australia + AU = 9; + + // Belgium + BE = 10; + + // Brazil + BR = 11; + + // Canada + CA = 12; + + // Switzerland + CH = 13; + + // Chile + CL = 14; + + // China + CN = 15; + + // Colombia + CO = 16; + + // Czechoslovakia + CS = 17; + + // Czech Republic + CZ = 18; + + // Germany + DE = 19; + + // Denmark + DK = 20; + + // Algeria + DZ = 21; + + // Ecuador + EC = 22; + + // Estonia + EE = 23; + + // Egypt + EG = 24; + + // Spain + ES = 25; + + // Finland + FI = 26; + + // France + FR = 27; + + // Great Britain (United Kingdom) + GB = 28; + + // Greece + GR = 29; + + // Hong Kong + HK = 30; + + // Hungary + HU = 31; + + // Indonesia + ID = 32; + + // Ireland + IE = 33; + + // Israel + IL = 34; + + // India + IN = 35; + + // Iran + IR = 36; + + // Italy + IT = 37; + + // Japan + JP = 38; + + // Korea (South) + KR = 39; + + // Latvia + LV = 40; + + // Morocco + MA = 41; + + // Mexico + MX = 42; + + // Malaysia + MY = 43; + + // Nigeria + NG = 44; + + // Netherlands + NL = 45; + + // Norway + NO = 46; + + // New Zealand + NZ = 47; + + // Peru + PE = 48; + + // Philippines + PH = 49; + + // Pakistan + PK = 50; + + // Poland + PL = 51; + + // Portugal + PT = 52; + + // Romania + RO = 53; + + // Serbia + RS = 54; + + // Russian Federation + RU = 55; + + // Saudi Arabia + SA = 56; + + // Sweden + SE = 57; + + // Singapore + SG = 58; + + // Slovenia + SI = 59; + + // Slovakia + SK = 60; + + // Thailand + TH = 61; + + // Turkey + TR = 62; + + // Taiwan + TW = 63; + + // Ukraine + UA = 64; + + // United States + US = 65; + + // Venezuela + VE = 66; + + // Viet Nam + VN = 67; + + // South Africa + ZA = 68; + } + + // Enums for seasonal period. + message SeasonalPeriod { + // Seasonal period type. + enum SeasonalPeriodType { + // Unspecified seasonal period. + SEASONAL_PERIOD_TYPE_UNSPECIFIED = 0; + + // No seasonality + NO_SEASONALITY = 1; + + // Daily period, 24 hours. + DAILY = 2; + + // Weekly period, 7 days. + WEEKLY = 3; + + // Monthly period, 30 days or irregular. + MONTHLY = 4; + + // Quarterly period, 90 days or irregular. + QUARTERLY = 5; + + // Yearly period, 365 days or irregular. + YEARLY = 6; + } + } + + // Enums for color space, used for processing images in Object Table. + // See more details at + // https://www.tensorflow.org/io/tutorials/colorspace. + enum ColorSpace { + // Unspecified color space + COLOR_SPACE_UNSPECIFIED = 0; + + // RGB + RGB = 1; + + // HSV + HSV = 2; + + // YIQ + YIQ = 3; + + // YUV + YUV = 4; + + // GRAYSCALE + GRAYSCALE = 5; + } + + // Enums for kmeans model type. + message KmeansEnums { + // Indicates the method used to initialize the centroids for KMeans + // clustering algorithm. + enum KmeansInitializationMethod { + // Unspecified initialization method. + KMEANS_INITIALIZATION_METHOD_UNSPECIFIED = 0; + + // Initializes the centroids randomly. + RANDOM = 1; + + // Initializes the centroids using data specified in + // kmeans_initialization_column. + CUSTOM = 2; + + // Initializes with kmeans++. + KMEANS_PLUS_PLUS = 3; + } + } + + // Enums for XGBoost model type. + message BoostedTreeOptionEnums { + // Booster types supported. Refer to booster parameter in XGBoost. + enum BoosterType { + // Unspecified booster type. + BOOSTER_TYPE_UNSPECIFIED = 0; + + // Gbtree booster. + GBTREE = 1; + + // Dart booster. + DART = 2; + } + + // Type of normalization algorithm for boosted tree models using dart + // booster. Refer to normalize_type in XGBoost. 
+    enum DartNormalizeType {
+      // Unspecified dart normalize type.
+      DART_NORMALIZE_TYPE_UNSPECIFIED = 0;
+
+      // New trees have the same weight as each of the dropped trees.
+      TREE = 1;
+
+      // New trees have the same weight as the sum of the dropped trees.
+      FOREST = 2;
+    }
+
+    // Tree construction algorithm used in boosted tree models.
+    // Refer to tree_method in XGBoost.
+    enum TreeMethod {
+      // Unspecified tree method.
+      TREE_METHOD_UNSPECIFIED = 0;
+
+      // Use heuristic to choose the fastest method.
+      AUTO = 1;
+
+      // Exact greedy algorithm.
+      EXACT = 2;
+
+      // Approximate greedy algorithm using quantile sketch and gradient
+      // histogram.
+      APPROX = 3;
+
+      // Fast histogram optimized approximate greedy algorithm.
+      HIST = 4;
+    }
+  }
+
+  // Enums for hyperparameter tuning.
+  message HparamTuningEnums {
+    // Available evaluation metrics used as hyperparameter tuning objectives.
+    enum HparamTuningObjective {
+      // Unspecified evaluation metric.
+      HPARAM_TUNING_OBJECTIVE_UNSPECIFIED = 0;
+
+      // Mean absolute error.
+      // mean_absolute_error = AVG(ABS(label - predicted))
+      MEAN_ABSOLUTE_ERROR = 1;
+
+      // Mean squared error.
+      // mean_squared_error = AVG(POW(label - predicted, 2))
+      MEAN_SQUARED_ERROR = 2;
+
+      // Mean squared log error.
+      // mean_squared_log_error = AVG(POW(LN(1 + label) - LN(1 + predicted), 2))
+      MEAN_SQUARED_LOG_ERROR = 3;
+
+      // Median absolute error.
+      // median_absolute_error = APPROX_QUANTILES(absolute_error, 2)[OFFSET(1)]
+      MEDIAN_ABSOLUTE_ERROR = 4;
+
+      // R^2 score. This corresponds to r2_score in ML.EVALUATE.
+      // r_squared = 1 - SUM(squared_error)/(COUNT(label)*VAR_POP(label))
+      R_SQUARED = 5;
+
+      // Explained variance.
+      // explained_variance = 1 - VAR_POP(label_error)/VAR_POP(label)
+      EXPLAINED_VARIANCE = 6;
+
+      // Precision is the fraction of actual positive predictions that had
+      // positive actual labels. For multiclass this is a macro-averaged metric
+      // treating each class as a binary classifier.
+      PRECISION = 7;
+
+      // Recall is the fraction of actual positive labels that were given a
+      // positive prediction. For multiclass this is a macro-averaged metric.
+      RECALL = 8;
+
+      // Accuracy is the fraction of predictions given the correct label. For
+      // multiclass this is a globally micro-averaged metric.
+      ACCURACY = 9;
+
+      // The F1 score is an average of recall and precision. For multiclass this
+      // is a macro-averaged metric.
+      F1_SCORE = 10;
+
+      // Logarithmic Loss. For multiclass this is a macro-averaged metric.
+      LOG_LOSS = 11;
+
+      // Area Under an ROC Curve. For multiclass this is a macro-averaged
+      // metric.
+      ROC_AUC = 12;
+
+      // Davies-Bouldin Index.
+      DAVIES_BOULDIN_INDEX = 13;
+
+      // Mean Average Precision.
+      MEAN_AVERAGE_PRECISION = 14;
+
+      // Normalized Discounted Cumulative Gain.
+      NORMALIZED_DISCOUNTED_CUMULATIVE_GAIN = 15;
+
+      // Average Rank.
+      AVERAGE_RANK = 16;
+    }
+  }
+
+  // Indicates the learning rate optimization strategy to use.
+  enum LearnRateStrategy {
+    // Default value.
+    LEARN_RATE_STRATEGY_UNSPECIFIED = 0;
+
+    // Use line search to determine learning rate.
+    LINE_SEARCH = 1;
+
+    // Use a constant learning rate.
+    CONSTANT = 2;
+  }
+
+  // Indicates the optimization strategy used for training.
+  enum OptimizationStrategy {
+    // Default value.
+    OPTIMIZATION_STRATEGY_UNSPECIFIED = 0;
+
+    // Uses an iterative batch gradient descent algorithm.
+    BATCH_GRADIENT_DESCENT = 1;
+
+    // Uses a normal equation to solve linear regression problem.
+    NORMAL_EQUATION = 2;
+  }
+
+  // Indicates the training algorithm to use for matrix factorization models.
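+  // For example (illustrative): an explicit signal is a user-entered rating
+  // such as 4 out of 5 stars, while an implicit signal is an observed
+  // interaction such as a click or a purchase, treated as a 0/1 preference.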
+  enum FeedbackType {
+    // Default value.
+    FEEDBACK_TYPE_UNSPECIFIED = 0;
+
+    // Use weighted-als for implicit feedback problems.
+    IMPLICIT = 1;
+
+    // Use nonweighted-als for explicit feedback problems.
+    EXPLICIT = 2;
+  }
+
+  // Evaluation metrics for regression and explicit feedback type matrix
+  // factorization models.
+  message RegressionMetrics {
+    // Mean absolute error.
+    google.protobuf.DoubleValue mean_absolute_error = 1;
+
+    // Mean squared error.
+    google.protobuf.DoubleValue mean_squared_error = 2;
+
+    // Mean squared log error.
+    google.protobuf.DoubleValue mean_squared_log_error = 3;
+
+    // Median absolute error.
+    google.protobuf.DoubleValue median_absolute_error = 4;
+
+    // R^2 score. This corresponds to r2_score in ML.EVALUATE.
+    google.protobuf.DoubleValue r_squared = 5;
+  }
+
+  // Aggregate metrics for classification/classifier models. For multi-class
+  // models, the metrics are either macro-averaged or micro-averaged. When
+  // macro-averaged, the metrics are calculated for each label and then an
+  // unweighted average is taken of those values. When micro-averaged, the
+  // metric is calculated globally by counting the total number of correctly
+  // predicted rows.
+  message AggregateClassificationMetrics {
+    // Precision is the fraction of actual positive predictions that had
+    // positive actual labels. For multiclass this is a macro-averaged
+    // metric treating each class as a binary classifier.
+    google.protobuf.DoubleValue precision = 1;
+
+    // Recall is the fraction of actual positive labels that were given a
+    // positive prediction. For multiclass this is a macro-averaged metric.
+    google.protobuf.DoubleValue recall = 2;
+
+    // Accuracy is the fraction of predictions given the correct label. For
+    // multiclass this is a micro-averaged metric.
+    google.protobuf.DoubleValue accuracy = 3;
+
+    // Threshold at which the metrics are computed. For binary
+    // classification models this is the positive class threshold.
+    // For multi-class classification models this is the confidence
+    // threshold.
+    google.protobuf.DoubleValue threshold = 4;
+
+    // The F1 score is an average of recall and precision. For multiclass
+    // this is a macro-averaged metric.
+    google.protobuf.DoubleValue f1_score = 5;
+
+    // Logarithmic Loss. For multiclass this is a macro-averaged metric.
+    google.protobuf.DoubleValue log_loss = 6;
+
+    // Area Under a ROC Curve. For multiclass this is a macro-averaged
+    // metric.
+    google.protobuf.DoubleValue roc_auc = 7;
+  }
+
+  // Evaluation metrics for binary classification/classifier models.
+  message BinaryClassificationMetrics {
+    // Confusion matrix for binary classification models.
+    message BinaryConfusionMatrix {
+      // Threshold value used when computing each of the following metrics.
+      google.protobuf.DoubleValue positive_class_threshold = 1;
+
+      // Number of true samples predicted as true.
+      google.protobuf.Int64Value true_positives = 2;
+
+      // Number of false samples predicted as true.
+      google.protobuf.Int64Value false_positives = 3;
+
+      // Number of true samples predicted as false.
+      google.protobuf.Int64Value true_negatives = 4;
+
+      // Number of false samples predicted as false.
+      google.protobuf.Int64Value false_negatives = 5;
+
+      // The fraction of actual positive predictions that had positive actual
+      // labels.
+      google.protobuf.DoubleValue precision = 6;
+
+      // The fraction of actual positive labels that were given a positive
+      // prediction.
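+      // For example (illustrative): with 8 true positives and 2 false
+      // negatives, recall = 8 / (8 + 2) = 0.8.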
+      google.protobuf.DoubleValue recall = 7;
+
+      // The equally weighted average of recall and precision.
+      google.protobuf.DoubleValue f1_score = 8;
+
+      // The fraction of predictions given the correct label.
+      google.protobuf.DoubleValue accuracy = 9;
+    }
+
+    // Aggregate classification metrics.
+    AggregateClassificationMetrics aggregate_classification_metrics = 1;
+
+    // Binary confusion matrix at multiple thresholds.
+    repeated BinaryConfusionMatrix binary_confusion_matrix_list = 2;
+
+    // Label representing the positive class.
+    string positive_label = 3;
+
+    // Label representing the negative class.
+    string negative_label = 4;
+  }
+
+  // Evaluation metrics for multi-class classification/classifier models.
+  message MultiClassClassificationMetrics {
+    // Confusion matrix for multi-class classification models.
+    message ConfusionMatrix {
+      // A single entry in the confusion matrix.
+      message Entry {
+        // The predicted label. For confidence_threshold > 0, we will
+        // also add an entry indicating the number of items under the
+        // confidence threshold.
+        string predicted_label = 1;
+
+        // Number of items being predicted as this label.
+        google.protobuf.Int64Value item_count = 2;
+      }
+
+      // A single row in the confusion matrix.
+      message Row {
+        // The original label of this row.
+        string actual_label = 1;
+
+        // Info describing predicted label distribution.
+        repeated Entry entries = 2;
+      }
+
+      // Confidence threshold used when computing the entries of the
+      // confusion matrix.
+      google.protobuf.DoubleValue confidence_threshold = 1;
+
+      // One row per actual label.
+      repeated Row rows = 2;
+    }
+
+    // Aggregate classification metrics.
+    AggregateClassificationMetrics aggregate_classification_metrics = 1;
+
+    // Confusion matrix at different thresholds.
+    repeated ConfusionMatrix confusion_matrix_list = 2;
+  }
+
+  // Evaluation metrics for clustering models.
+  message ClusteringMetrics {
+    // Message containing the information about one cluster.
+    message Cluster {
+      // Representative value of a single feature within the cluster.
+      message FeatureValue {
+        // Representative value of a categorical feature.
+        message CategoricalValue {
+          // Represents the count of a single category within the cluster.
+          message CategoryCount {
+            // The name of the category.
+            string category = 1;
+
+            // The count of training samples matching the category within the
+            // cluster.
+            google.protobuf.Int64Value count = 2;
+          }
+
+          // Counts of all categories for the categorical feature. If there are
+          // more than ten categories, we return the top ten (by count) and one
+          // more CategoryCount with category "_OTHER_" and count as the
+          // aggregate count of the remaining categories.
+          repeated CategoryCount category_counts = 1;
+        }
+
+        // The feature column name.
+        string feature_column = 1;
+
+        // Value.
+        oneof value {
+          // The numerical feature value. This is the centroid value for this
+          // feature.
+          google.protobuf.DoubleValue numerical_value = 2;
+
+          // The categorical feature value.
+          CategoricalValue categorical_value = 3;
+        }
+      }
+
+      // Centroid id.
+      int64 centroid_id = 1;
+
+      // Values of highly variant features for this cluster.
+      repeated FeatureValue feature_values = 2;
+
+      // Count of training data rows that were assigned to this cluster.
+      google.protobuf.Int64Value count = 3;
+    }
+
+    // Davies-Bouldin index.
+    google.protobuf.DoubleValue davies_bouldin_index = 1;
+
+    // Mean of squared distances from each sample to its cluster centroid.
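+    // For example (illustrative): samples at squared distances 1.0, 4.0, and
+    // 1.0 from their centroids give a mean squared distance of
+    // (1.0 + 4.0 + 1.0) / 3 = 2.0.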
+    google.protobuf.DoubleValue mean_squared_distance = 2;
+
+    // Information for all clusters.
+    repeated Cluster clusters = 3;
+  }
+
+  // Evaluation metrics used by weighted-ALS models specified by
+  // feedback_type=implicit.
+  message RankingMetrics {
+    // Calculates a precision per user for all the items by ranking them and
+    // then averages all the precisions across all the users.
+    google.protobuf.DoubleValue mean_average_precision = 1;
+
+    // Similar to the mean squared error computed in regression and explicit
+    // recommendation models except instead of computing the rating directly,
+    // the output from evaluate is computed against a preference which is 1 or 0
+    // depending on whether the rating exists or not.
+    google.protobuf.DoubleValue mean_squared_error = 2;
+
+    // A metric to determine the goodness of a ranking calculated from the
+    // predicted confidence by comparing it to an ideal rank measured by the
+    // original ratings.
+    google.protobuf.DoubleValue normalized_discounted_cumulative_gain = 3;
+
+    // Determines the goodness of a ranking by computing the percentile rank
+    // from the predicted confidence and dividing it by the original rank.
+    google.protobuf.DoubleValue average_rank = 4;
+  }
+
+  // Model evaluation metrics for ARIMA forecasting models.
+  message ArimaForecastingMetrics {
+    // Model evaluation metrics for a single ARIMA forecasting model.
+    message ArimaSingleModelForecastingMetrics {
+      // Non-seasonal order.
+      ArimaOrder non_seasonal_order = 1;
+
+      // Arima fitting metrics.
+      ArimaFittingMetrics arima_fitting_metrics = 2;
+
+      // Whether the ARIMA model is fitted with drift. It is always false
+      // when d is not 1.
+      google.protobuf.BoolValue has_drift = 3;
+
+      // The time_series_id value for this time series. It will be one of
+      // the unique values from the time_series_id_column specified during
+      // ARIMA model training. Only present when the time_series_id_column
+      // training option was used.
+      string time_series_id = 4;
+
+      // The tuple of time_series_ids identifying this time series. It will
+      // be one of the unique tuples of values present in the
+      // time_series_id_columns specified during ARIMA model training. Only
+      // present when the time_series_id_columns training option was used and
+      // the order of values here is the same as the order of
+      // time_series_id_columns.
+      repeated string time_series_ids = 9;
+
+      // Seasonal periods. Repeated because multiple periods are supported
+      // for one time series.
+      repeated SeasonalPeriod.SeasonalPeriodType seasonal_periods = 5;
+
+      // If true, holiday_effect is a part of time series decomposition result.
+      google.protobuf.BoolValue has_holiday_effect = 6;
+
+      // If true, spikes_and_dips is a part of time series decomposition result.
+      google.protobuf.BoolValue has_spikes_and_dips = 7;
+
+      // If true, step_changes is a part of time series decomposition result.
+      google.protobuf.BoolValue has_step_changes = 8;
+    }
+
+    // Repeated as there can be many metric sets (one for each model) in
+    // auto-arima and the large-scale case.
+    repeated ArimaSingleModelForecastingMetrics
+        arima_single_model_forecasting_metrics = 6;
+  }
+
+  // Model evaluation metrics for dimensionality reduction models.
+  message DimensionalityReductionMetrics {
+    // Total percentage of variance explained by the selected principal
+    // components.
+    google.protobuf.DoubleValue total_explained_variance_ratio = 1;
+  }
+
+  // Evaluation metrics of a model. These are either computed on all training
+  // data or just the eval data based on whether eval data was used during
+  // training. These are not present for imported models.
+  message EvaluationMetrics {
+    // Metrics.
+    oneof metrics {
+      // Populated for regression models and explicit feedback type matrix
+      // factorization models.
+      RegressionMetrics regression_metrics = 1;
+
+      // Populated for binary classification/classifier models.
+      BinaryClassificationMetrics binary_classification_metrics = 2;
+
+      // Populated for multi-class classification/classifier models.
+      MultiClassClassificationMetrics multi_class_classification_metrics = 3;
+
+      // Populated for clustering models.
+      ClusteringMetrics clustering_metrics = 4;
+
+      // Populated for implicit feedback type matrix factorization models.
+      RankingMetrics ranking_metrics = 5;
+
+      // Populated for ARIMA models.
+      ArimaForecastingMetrics arima_forecasting_metrics = 6;
+
+      // Evaluation metrics when the model is a dimensionality reduction model,
+      // which currently includes PCA.
+      DimensionalityReductionMetrics dimensionality_reduction_metrics = 7;
+    }
+  }
+
+  // Data split result. This contains references to the training and evaluation
+  // data tables that were used to train the model.
+  message DataSplitResult {
+    // Table reference of the training data after split.
+    TableReference training_table = 1;
+
+    // Table reference of the evaluation data after split.
+    TableReference evaluation_table = 2;
+
+    // Table reference of the test data after split.
+    TableReference test_table = 3;
+  }
+
+  // ARIMA order; can be used for both non-seasonal and seasonal parts.
+  message ArimaOrder {
+    // Order of the autoregressive part.
+    google.protobuf.Int64Value p = 1;
+
+    // Order of the differencing part.
+    google.protobuf.Int64Value d = 2;
+
+    // Order of the moving-average part.
+    google.protobuf.Int64Value q = 3;
+  }
+
+  // ARIMA model fitting metrics.
+  message ArimaFittingMetrics {
+    // Log-likelihood.
+    google.protobuf.DoubleValue log_likelihood = 1;
+
+    // AIC.
+    google.protobuf.DoubleValue aic = 2;
+
+    // Variance.
+    google.protobuf.DoubleValue variance = 3;
+  }
+
+  // Global explanations containing the top most important features
+  // after training.
+  message GlobalExplanation {
+    // Explanation for a single feature.
+    message Explanation {
+      // The full feature name. For non-numerical features, will be formatted
+      // like `<column_name>.<encoded_category_name>`. Overall size of feature
+      // name will always be truncated to first 120 characters.
+      string feature_name = 1;
+
+      // Attribution of feature.
+      google.protobuf.DoubleValue attribution = 2;
+    }
+
+    // A list of the top global explanations. Sorted by absolute value of
+    // attribution in descending order.
+    repeated Explanation explanations = 1;
+
+    // Class label for this set of global explanations. Will be empty/null for
+    // binary logistic and linear regression models. Sorted alphabetically in
+    // descending order.
+    string class_label = 2;
+  }
+
+  // Encoding methods for categorical features.
+  message CategoryEncodingMethod {
+    // Supported encoding methods for categorical features.
+    enum EncodingMethod {
+      // Unspecified encoding method.
+      ENCODING_METHOD_UNSPECIFIED = 0;
+
+      // Applies one-hot encoding.
+      ONE_HOT_ENCODING = 1;
+
+      // Applies label encoding.
+      LABEL_ENCODING = 2;
+
+      // Applies dummy encoding.
+      DUMMY_ENCODING = 3;
+    }
+  }
+
+  // PCA solver options.
+  message PcaSolverOptionEnums {
+    // Enums for supported PCA solvers.
+    enum PcaSolver {
+      // Default value.
+      UNSPECIFIED = 0;
+
+      // Full eigen-decomposition.
+      FULL = 1;
+
+      // Randomized SVD.
+      RANDOMIZED = 2;
+
+      // Auto.
+      AUTO = 3;
+    }
+  }
+
+  // Model registry options.
+  message ModelRegistryOptionEnums {
+    // Enums for supported model registries.
+    enum ModelRegistry {
+      // Default value.
+      MODEL_REGISTRY_UNSPECIFIED = 0;
+
+      // Vertex AI.
+      VERTEX_AI = 1;
+    }
+  }
+
+  // Information about a single training query run for the model.
+  message TrainingRun {
+    // Options used in model training.
+    message TrainingOptions {
+      // The maximum number of iterations in training. Used only for iterative
+      // training algorithms.
+      int64 max_iterations = 1;
+
+      // Type of loss function used during training run.
+      LossType loss_type = 2;
+
+      // Learning rate in training. Used only for iterative training algorithms.
+      double learn_rate = 3;
+
+      // L1 regularization coefficient.
+      google.protobuf.DoubleValue l1_regularization = 4;
+
+      // L2 regularization coefficient.
+      google.protobuf.DoubleValue l2_regularization = 5;
+
+      // When early_stop is true, stops training when accuracy improvement is
+      // less than 'min_relative_progress'. Used only for iterative training
+      // algorithms.
+      google.protobuf.DoubleValue min_relative_progress = 6;
+
+      // Whether to train a model from the last checkpoint.
+      google.protobuf.BoolValue warm_start = 7;
+
+      // Whether to stop early when the loss doesn't improve significantly
+      // any more (compared to min_relative_progress). Used only for iterative
+      // training algorithms.
+      google.protobuf.BoolValue early_stop = 8;
+
+      // Name of input label columns in training data.
+      repeated string input_label_columns = 9;
+
+      // The data split type for training and evaluation, e.g. RANDOM.
+      DataSplitMethod data_split_method = 10;
+
+      // The fraction of evaluation data over the whole input data. The rest
+      // of data will be used as training data. The format should be double.
+      // Accurate to two decimal places.
+      // Default value is 0.2.
+      double data_split_eval_fraction = 11;
+
+      // The column to split data with. This column won't be used as a
+      // feature.
+      // 1. When data_split_method is CUSTOM, the corresponding column should
+      // be boolean. The rows with true value tag are eval data, and the false
+      // are training data.
+      // 2. When data_split_method is SEQ, the first DATA_SPLIT_EVAL_FRACTION
+      // rows (from smallest to largest) in the corresponding column are used
+      // as training data, and the rest are eval data. It respects the order
+      // in Orderable data types:
+      // https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#data-type-properties
+      string data_split_column = 12;
+
+      // The strategy to determine learn rate for the current iteration.
+      LearnRateStrategy learn_rate_strategy = 13;
+
+      // Specifies the initial learning rate for the line search learn rate
+      // strategy.
+      double initial_learn_rate = 16;
+
+      // Weights associated with each label class, for rebalancing the
+      // training data. Only applicable for classification models.
+      map<string, double> label_class_weights = 17;
+
+      // User column specified for matrix factorization models.
+      string user_column = 18;
+
+      // Item column specified for matrix factorization models.
+      string item_column = 19;
+
+      // Distance type for clustering models.
+      DistanceType distance_type = 20;
+
+      // Number of clusters for clustering models.
+      int64 num_clusters = 21;
+
+      // Google Cloud Storage URI from which the model was imported. Only
+      // applicable for imported models.
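+      // For example (hypothetical bucket and path):
+      // `gs://example-bucket/models/my_saved_model/`.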
+      string model_uri = 22;
+
+      // Optimization strategy for training linear regression models.
+      OptimizationStrategy optimization_strategy = 23;
+
+      // Hidden units for dnn models.
+      repeated int64 hidden_units = 24;
+
+      // Batch size for dnn models.
+      int64 batch_size = 25;
+
+      // Dropout probability for dnn models.
+      google.protobuf.DoubleValue dropout = 26;
+
+      // Maximum depth of a tree for boosted tree models.
+      int64 max_tree_depth = 27;
+
+      // Subsample fraction of the training data to grow tree to prevent
+      // overfitting for boosted tree models.
+      double subsample = 28;
+
+      // Minimum split loss for boosted tree models.
+      google.protobuf.DoubleValue min_split_loss = 29;
+
+      // Booster type for boosted tree models.
+      BoostedTreeOptionEnums.BoosterType booster_type = 60;
+
+      // Number of parallel trees constructed during each iteration for boosted
+      // tree models.
+      google.protobuf.Int64Value num_parallel_tree = 61;
+
+      // Type of normalization algorithm for boosted tree models using
+      // dart booster.
+      BoostedTreeOptionEnums.DartNormalizeType dart_normalize_type = 62;
+
+      // Tree construction algorithm for boosted tree models.
+      BoostedTreeOptionEnums.TreeMethod tree_method = 63;
+
+      // Minimum sum of instance weight needed in a child for boosted tree
+      // models.
+      google.protobuf.Int64Value min_tree_child_weight = 64;
+
+      // Subsample ratio of columns when constructing each tree for boosted tree
+      // models.
+      google.protobuf.DoubleValue colsample_bytree = 65;
+
+      // Subsample ratio of columns for each level for boosted tree models.
+      google.protobuf.DoubleValue colsample_bylevel = 66;
+
+      // Subsample ratio of columns for each node (split) for boosted tree
+      // models.
+      google.protobuf.DoubleValue colsample_bynode = 67;
+
+      // Num factors specified for matrix factorization models.
+      int64 num_factors = 30;
+
+      // Feedback type that specifies which algorithm to run for matrix
+      // factorization.
+      FeedbackType feedback_type = 31;
+
+      // Hyperparameter for matrix factorization when implicit feedback type is
+      // specified.
+      google.protobuf.DoubleValue wals_alpha = 32;
+
+      // The method used to initialize the centroids for kmeans algorithm.
+      KmeansEnums.KmeansInitializationMethod kmeans_initialization_method = 33;
+
+      // The column used to provide the initial centroids for kmeans algorithm
+      // when kmeans_initialization_method is CUSTOM.
+      string kmeans_initialization_column = 34;
+
+      // Column to be designated as time series timestamp for ARIMA model.
+      string time_series_timestamp_column = 35;
+
+      // Column to be designated as time series data for ARIMA model.
+      string time_series_data_column = 36;
+
+      // Whether to enable auto ARIMA or not.
+      google.protobuf.BoolValue auto_arima = 37;
+
+      // A specification of the non-seasonal part of the ARIMA model: the three
+      // components (p, d, q) are the AR order, the degree of differencing, and
+      // the MA order.
+      ArimaOrder non_seasonal_order = 38;
+
+      // The data frequency of a time series.
+      DataFrequency data_frequency = 39;
+
+      // Whether or not p-value test should be computed for this model. Only
+      // available for linear and logistic regression models.
+      google.protobuf.BoolValue calculate_p_values = 40;
+
+      // Include drift when fitting an ARIMA model.
+      google.protobuf.BoolValue include_drift = 41;
+
+      // The geographical region based on which the holidays are considered in
+      // time series modeling. If a valid value is specified, then holiday
+      // effects modeling is enabled.
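+      // For example (illustrative): a value of US enables holiday effects
+      // based on United States holidays.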
+      HolidayRegion holiday_region = 42;
+
+      // A list of geographical regions that are used for time series modeling.
+      repeated HolidayRegion holiday_regions = 71;
+
+      // The time series id column that was used during ARIMA model training.
+      string time_series_id_column = 43;
+
+      // The time series id columns that were used during ARIMA model training.
+      repeated string time_series_id_columns = 51;
+
+      // The number of periods ahead that need to be forecasted.
+      int64 horizon = 44;
+
+      // The max value of the sum of non-seasonal p and q.
+      int64 auto_arima_max_order = 46;
+
+      // The min value of the sum of non-seasonal p and q.
+      int64 auto_arima_min_order = 83;
+
+      // Number of trials to run this hyperparameter tuning job.
+      int64 num_trials = 47;
+
+      // Maximum number of trials to run in parallel.
+      int64 max_parallel_trials = 48;
+
+      // The target evaluation metrics to optimize the hyperparameters for.
+      repeated HparamTuningEnums.HparamTuningObjective
+          hparam_tuning_objectives = 54;
+
+      // If true, decompose the time series and save the results.
+      google.protobuf.BoolValue decompose_time_series = 50;
+
+      // If true, clean spikes and dips in the input time series.
+      google.protobuf.BoolValue clean_spikes_and_dips = 52;
+
+      // If true, detect step changes and make data adjustment in the input time
+      // series.
+      google.protobuf.BoolValue adjust_step_changes = 53;
+
+      // If true, enable global explanation during training.
+      google.protobuf.BoolValue enable_global_explain = 55;
+
+      // Number of paths for the sampled Shapley explain method.
+      int64 sampled_shapley_num_paths = 56;
+
+      // Number of integral steps for the integrated gradients explain method.
+      int64 integrated_gradients_num_steps = 57;
+
+      // Categorical feature encoding method.
+      CategoryEncodingMethod.EncodingMethod category_encoding_method = 58;
+
+      // Based on the selected TF version, the corresponding docker image is
+      // used to train external models.
+      string tf_version = 70;
+
+      // Enums for color space, used for processing images in Object Table.
+      // See more details at
+      // https://www.tensorflow.org/io/tutorials/colorspace.
+      ColorSpace color_space = 72;
+
+      // Name of the instance weight column for training data.
+      // This column isn't used as a feature.
+      string instance_weight_column = 73;
+
+      // Smoothing window size for the trend component. When a positive value is
+      // specified, a center moving average smoothing is applied on the history
+      // trend. When the smoothing window is out of the boundary at the
+      // beginning or the end of the trend, the first element or the last
+      // element is padded to fill the smoothing window before the average is
+      // applied.
+      int64 trend_smoothing_window_size = 74;
+
+      // The fraction of the interpolated length of the time series that's used
+      // to model the time series trend component. All of the time points of the
+      // time series are used to model the non-trend component. This training
+      // option accelerates modeling training without sacrificing much
+      // forecasting accuracy. You can use this option with
+      // `minTimeSeriesLength` but not with `maxTimeSeriesLength`.
+      double time_series_length_fraction = 75;
+
+      // The minimum number of time points in a time series that are used in
+      // modeling the trend component of the time series. If you use this option
+      // you must also set the `timeSeriesLengthFraction` option. This training
+      // option ensures that enough time points are available when you use
+      // `timeSeriesLengthFraction` in trend modeling. This is particularly
+      // important when forecasting multiple time series in a single query using
+      // `timeSeriesIdColumn`. If the total number of time points is less than
+      // the `minTimeSeriesLength` value, then the query uses all available time
+      // points.
+      int64 min_time_series_length = 76;
+
+      // The maximum number of time points in a time series that can be used in
+      // modeling the trend component of the time series. Don't use this option
+      // with the `timeSeriesLengthFraction` or `minTimeSeriesLength` options.
+      int64 max_time_series_length = 77;
+
+      // User-selected XGBoost versions for training of XGBoost models.
+      string xgboost_version = 78;
+
+      // Whether to use approximate feature contribution method in XGBoost model
+      // explanation for global explain.
+      google.protobuf.BoolValue approx_global_feature_contrib = 84;
+
+      // Whether the model should include intercept during model training.
+      google.protobuf.BoolValue fit_intercept = 85;
+
+      // Number of principal components to keep in the PCA model. Must be <= the
+      // number of features.
+      int64 num_principal_components = 86;
+
+      // The minimum ratio of cumulative explained variance that needs to be
+      // given by the PCA model.
+      double pca_explained_variance_ratio = 87;
+
+      // If true, scale the feature values by dividing the feature standard
+      // deviation. Currently only applies to PCA.
+      google.protobuf.BoolValue scale_features = 88;
+
+      // The solver for PCA.
+      PcaSolverOptionEnums.PcaSolver pca_solver = 89;
+
+      // Whether to calculate class weights automatically based on the
+      // popularity of each label.
+      google.protobuf.BoolValue auto_class_weights = 90;
+
+      // Activation function of the neural nets.
+      string activation_fn = 91;
+
+      // Optimizer used for training the neural nets.
+      string optimizer = 92;
+
+      // Budget in hours for AutoML training.
+      double budget_hours = 93;
+
+      // Whether to standardize numerical features. Defaults to true.
+      google.protobuf.BoolValue standardize_features = 94;
+
+      // L1 regularization coefficient to activations.
+      double l1_reg_activation = 95;
+
+      // The model registry.
+      ModelRegistryOptionEnums.ModelRegistry model_registry = 96;
+
+      // The version aliases to apply in Vertex AI model registry. Always
+      // overwrite if the version aliases exist in an existing model.
+      repeated string vertex_ai_model_version_aliases = 97;
+    }
+
+    // Information about a single iteration of the training run.
+    message IterationResult {
+      // Information about a single cluster for clustering model.
+      message ClusterInfo {
+        // Centroid id.
+        int64 centroid_id = 1;
+
+        // Cluster radius, the average distance from centroid
+        // to each point assigned to the cluster.
+        google.protobuf.DoubleValue cluster_radius = 2;
+
+        // Cluster size, the total number of points assigned to the cluster.
+        google.protobuf.Int64Value cluster_size = 3;
+      }
+
+      // (Auto-)arima fitting result. Wrap everything in ArimaResult for easier
+      // refactoring if we want to use model-specific iteration results.
+      message ArimaResult {
+        // Arima coefficients.
+        message ArimaCoefficients {
+          // Auto-regressive coefficients, an array of double.
+          repeated double auto_regressive_coefficients = 1;
+
+          // Moving-average coefficients, an array of double.
+          repeated double moving_average_coefficients = 2;
+
+          // Intercept coefficient, just a double, not an array.
+          google.protobuf.DoubleValue intercept_coefficient = 3;
+        }
+
+        // Arima model information.
+        message ArimaModelInfo {
+          // Non-seasonal order.
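+          // For example (illustrative): (p, d, q) = (2, 1, 1) denotes an AR
+          // order of 2, first-order differencing, and an MA order of 1.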
+          ArimaOrder non_seasonal_order = 1;
+
+          // Arima coefficients.
+          ArimaCoefficients arima_coefficients = 2;
+
+          // Arima fitting metrics.
+          ArimaFittingMetrics arima_fitting_metrics = 3;
+
+          // Whether the ARIMA model is fitted with drift. It is always false
+          // when d is not 1.
+          google.protobuf.BoolValue has_drift = 4;
+
+          // The time_series_id value for this time series. It will be one of
+          // the unique values from the time_series_id_column specified during
+          // ARIMA model training. Only present when the time_series_id_column
+          // training option was used.
+          string time_series_id = 5;
+
+          // The tuple of time_series_ids identifying this time series. It will
+          // be one of the unique tuples of values present in the
+          // time_series_id_columns specified during ARIMA model training. Only
+          // present when the time_series_id_columns training option was used
+          // and the order of values here is the same as the order of
+          // time_series_id_columns.
+          repeated string time_series_ids = 10;
+
+          // Seasonal periods. Repeated because multiple periods are supported
+          // for one time series.
+          repeated SeasonalPeriod.SeasonalPeriodType seasonal_periods = 6;
+
+          // If true, holiday_effect is a part of time series decomposition
+          // result.
+          google.protobuf.BoolValue has_holiday_effect = 7;
+
+          // If true, spikes_and_dips is a part of time series decomposition
+          // result.
+          google.protobuf.BoolValue has_spikes_and_dips = 8;
+
+          // If true, step_changes is a part of time series decomposition
+          // result.
+          google.protobuf.BoolValue has_step_changes = 9;
+        }
+
+        // This message is repeated because there are multiple arima models
+        // fitted in auto-arima. For non-auto-arima model, its size is one.
+        repeated ArimaModelInfo arima_model_info = 1;
+
+        // Seasonal periods. Repeated because multiple periods are supported for
+        // one time series.
+        repeated SeasonalPeriod.SeasonalPeriodType seasonal_periods = 2;
+      }
+
+      // Principal component infos, used only for eigen-decomposition-based
+      // models, e.g., PCA. Ordered by explained_variance in descending
+      // order.
+      message PrincipalComponentInfo {
+        // Id of the principal component.
+        google.protobuf.Int64Value principal_component_id = 1;
+
+        // Explained variance by this principal component, which is simply the
+        // eigenvalue.
+        google.protobuf.DoubleValue explained_variance = 2;
+
+        // Explained_variance over the total explained variance.
+        google.protobuf.DoubleValue explained_variance_ratio = 3;
+
+        // The explained_variance is pre-ordered in descending order to
+        // compute the cumulative explained variance ratio.
+        google.protobuf.DoubleValue cumulative_explained_variance_ratio = 4;
+      }
+
+      // Index of the iteration, 0 based.
+      google.protobuf.Int32Value index = 1;
+
+      // Time taken to run the iteration in milliseconds.
+      google.protobuf.Int64Value duration_ms = 4;
+
+      // Loss computed on the training data at the end of the iteration.
+      google.protobuf.DoubleValue training_loss = 5;
+
+      // Loss computed on the eval data at the end of the iteration.
+      google.protobuf.DoubleValue eval_loss = 6;
+
+      // Learn rate used for this iteration.
+      double learn_rate = 7;
+
+      // Information about top clusters for clustering models.
+      repeated ClusterInfo cluster_infos = 8;
+
+      // Arima result.
+      ArimaResult arima_result = 9;
+
+      // The information of the principal components.
+      repeated PrincipalComponentInfo principal_component_infos = 10;
+    }
+
+    // Output only. Options that were used for this training run, including
+    // user-specified and default options that were used.
+    TrainingOptions training_options = 1
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The start time of this training run.
+    google.protobuf.Timestamp start_time = 8
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Output of each iteration run, results.size() <=
+    // max_iterations.
+    repeated IterationResult results = 6
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The evaluation metrics over training/eval data that were
+    // computed at the end of training.
+    EvaluationMetrics evaluation_metrics = 7
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Data split result of the training run. Only set when the
+    // input data is actually split.
+    DataSplitResult data_split_result = 9
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Global explanation contains the explanation of top features
+    // on the model level. Applies to both regression and classification models.
+    GlobalExplanation model_level_global_explanation = 11
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Global explanation contains the explanation of top features
+    // on the class level. Applies to classification models only.
+    repeated GlobalExplanation class_level_global_explanations = 12
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // The model id in the [Vertex AI Model
+    // Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction)
+    // for this training run.
+    string vertex_ai_model_id = 14;
+
+    // Output only. The model version in the [Vertex AI Model
+    // Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction)
+    // for this training run.
+    string vertex_ai_model_version = 15
+        [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // Search space for a double hyperparameter.
+  message DoubleHparamSearchSpace {
+    // Range of a double hyperparameter.
+    message DoubleRange {
+      // Min value of the double parameter.
+      google.protobuf.DoubleValue min = 1;
+
+      // Max value of the double parameter.
+      google.protobuf.DoubleValue max = 2;
+    }
+
+    // Discrete candidates of a double hyperparameter.
+    message DoubleCandidates {
+      // Candidates for the double parameter in increasing order.
+      repeated google.protobuf.DoubleValue candidates = 1;
+    }
+
+    // Search space.
+    oneof search_space {
+      // Range of the double hyperparameter.
+      DoubleRange range = 1;
+
+      // Candidates of the double hyperparameter.
+      DoubleCandidates candidates = 2;
+    }
+  }
+
+  // Search space for an int hyperparameter.
+  message IntHparamSearchSpace {
+    // Range of an int hyperparameter.
+    message IntRange {
+      // Min value of the int parameter.
+      google.protobuf.Int64Value min = 1;
+
+      // Max value of the int parameter.
+      google.protobuf.Int64Value max = 2;
+    }
+
+    // Discrete candidates of an int hyperparameter.
+    message IntCandidates {
+      // Candidates for the int parameter in increasing order.
+      repeated google.protobuf.Int64Value candidates = 1;
+    }
+
+    // Search space.
+    oneof search_space {
+      // Range of the int hyperparameter.
+      IntRange range = 1;
+
+      // Candidates of the int hyperparameter.
+      IntCandidates candidates = 2;
+    }
+  }
+
+  // Search space for string and enum.
+  message StringHparamSearchSpace {
+    // Candidates for the string or enum parameter in lower case.
+    repeated string candidates = 1;
+  }
+
+  // Search space for int array.
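+  // For example (illustrative): candidates [[64], [128, 64]] for hidden_units
+  // would try one hidden layer of 64 units and two hidden layers of 128 and
+  // 64 units.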
+  message IntArrayHparamSearchSpace {
+    // An array of int.
+    message IntArray {
+      // Elements in the int array.
+      repeated int64 elements = 1;
+    }
+
+    // Candidates for the int array parameter.
+    repeated IntArray candidates = 1;
+  }
+
+  // Hyperparameter search spaces.
+  // These should be a subset of training_options.
+  message HparamSearchSpaces {
+    // Learning rate of training jobs.
+    DoubleHparamSearchSpace learn_rate = 2;
+
+    // L1 regularization coefficient.
+    DoubleHparamSearchSpace l1_reg = 3;
+
+    // L2 regularization coefficient.
+    DoubleHparamSearchSpace l2_reg = 4;
+
+    // Number of clusters for k-means.
+    IntHparamSearchSpace num_clusters = 26;
+
+    // Number of latent factors to train on.
+    IntHparamSearchSpace num_factors = 31;
+
+    // Hidden units for neural network models.
+    IntArrayHparamSearchSpace hidden_units = 34;
+
+    // Mini batch sample size.
+    IntHparamSearchSpace batch_size = 37;
+
+    // Dropout probability for dnn model training and boosted tree models
+    // using dart booster.
+    DoubleHparamSearchSpace dropout = 38;
+
+    // Maximum depth of a tree for boosted tree models.
+    IntHparamSearchSpace max_tree_depth = 41;
+
+    // Subsample the training data to grow tree to prevent overfitting for
+    // boosted tree models.
+    DoubleHparamSearchSpace subsample = 42;
+
+    // Minimum split loss for boosted tree models.
+    DoubleHparamSearchSpace min_split_loss = 43;
+
+    // Hyperparameter for matrix factorization when implicit feedback type is
+    // specified.
+    DoubleHparamSearchSpace wals_alpha = 49;
+
+    // Booster type for boosted tree models.
+    StringHparamSearchSpace booster_type = 56;
+
+    // Number of parallel trees for boosted tree models.
+    IntHparamSearchSpace num_parallel_tree = 57;
+
+    // Dart normalization type for boosted tree models.
+    StringHparamSearchSpace dart_normalize_type = 58;
+
+    // Tree construction algorithm for boosted tree models.
+    StringHparamSearchSpace tree_method = 59;
+
+    // Minimum sum of instance weight needed in a child for boosted tree models.
+    IntHparamSearchSpace min_tree_child_weight = 60;
+
+    // Subsample ratio of columns when constructing each tree for boosted tree
+    // models.
+    DoubleHparamSearchSpace colsample_bytree = 61;
+
+    // Subsample ratio of columns for each level for boosted tree models.
+    DoubleHparamSearchSpace colsample_bylevel = 62;
+
+    // Subsample ratio of columns for each node (split) for boosted tree models.
+    DoubleHparamSearchSpace colsample_bynode = 63;
+
+    // Activation functions of neural network models.
+    StringHparamSearchSpace activation_fn = 67;
+
+    // Optimizer of TF models.
+    StringHparamSearchSpace optimizer = 68;
+  }
+
+  // Training info of a trial in [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models.
+  message HparamTuningTrial {
+    // Current status of the trial.
+    enum TrialStatus {
+      // Default value.
+      TRIAL_STATUS_UNSPECIFIED = 0;
+
+      // Scheduled but not started.
+      NOT_STARTED = 1;
+
+      // Running state.
+      RUNNING = 2;
+
+      // The trial succeeded.
+      SUCCEEDED = 3;
+
+      // The trial failed.
+      FAILED = 4;
+
+      // The trial is infeasible due to the invalid params.
+      INFEASIBLE = 5;
+
+      // Trial stopped early because it's not promising.
+      STOPPED_EARLY = 6;
+    }
+
+    // 1-based index of the trial.
+    int64 trial_id = 1;
+
+    // Starting time of the trial.
+    int64 start_time_ms = 2;
+
+    // Ending time of the trial.
+    int64 end_time_ms = 3;
+
+    // The hyperparameters selected for this trial.
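+    // For example (illustrative): a trial might select learn_rate = 0.1 and
+    // max_tree_depth = 6 from the configured search spaces.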
+ TrainingRun.TrainingOptions hparams = 4; + + // Evaluation metrics of this trial calculated on the test data. + // Empty in Job API. + EvaluationMetrics evaluation_metrics = 5; + + // The status of the trial. + TrialStatus status = 6; + + // Error message for FAILED and INFEASIBLE trial. + string error_message = 7; + + // Loss computed on the training data at the end of trial. + google.protobuf.DoubleValue training_loss = 8; + + // Loss computed on the eval data at the end of trial. + google.protobuf.DoubleValue eval_loss = 9; + + // Hyperparameter tuning evaluation metrics of this trial calculated on the + // eval data. Unlike evaluation_metrics, only the fields corresponding to + // the hparam_tuning_objectives are set. + EvaluationMetrics hparam_tuning_evaluation_metrics = 10; + } + + // Output only. A hash of this resource. + string etag = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Unique identifier for this model. + ModelReference model_reference = 2 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The time when this model was created, in millisecs since the + // epoch. + int64 creation_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when this model was last modified, in millisecs since + // the epoch. + int64 last_modified_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. A user-friendly description of this model. + string description = 12 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A descriptive name for this model. + string friendly_name = 14 [(google.api.field_behavior) = OPTIONAL]; + + // The labels associated with this model. You can use these to organize + // and group your models. Label keys and values can be no longer + // than 63 characters, can only contain lowercase letters, numeric + // characters, underscores and dashes. International characters are allowed. + // Label values are optional. Label keys must start with a letter and each + // label in the list must have a different key. + map labels = 15; + + // Optional. The time when this model expires, in milliseconds since the + // epoch. If not present, the model will persist indefinitely. Expired models + // will be deleted and their storage reclaimed. The defaultTableExpirationMs + // property of the encapsulating dataset can be used to set a default + // expirationTime on newly created models. + int64 expiration_time = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The geographic location where the model resides. This value + // is inherited from the dataset. + string location = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Custom encryption configuration (e.g., Cloud KMS keys). This shows the + // encryption configuration of the model data while stored in BigQuery + // storage. This field can be used with PatchModel to update encryption key + // for an already encrypted model. + EncryptionConfiguration encryption_configuration = 17; + + // Output only. Type of the model resource. + ModelType model_type = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Information for all training runs in increasing order of start_time. + repeated TrainingRun training_runs = 9; + + // Output only. Input feature columns for the model inference. If the model is + // trained with TRANSFORM clause, these are the input of the TRANSFORM clause. + repeated StandardSqlField feature_columns = 10 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
Label columns that were used to train this model.
+  // The output of the model will have a "predicted_" prefix to these columns.
+  repeated StandardSqlField label_columns = 11
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. This field will be populated if a TRANSFORM clause was used to
+  // train a model. TRANSFORM clause (if used) takes feature_columns as input
+  // and outputs transform_columns. transform_columns are then used to train the
+  // model.
+  repeated TransformColumn transform_columns = 26
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. All hyperparameter search spaces in this model.
+  HparamSearchSpaces hparam_search_spaces = 18
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The default trial_id to use in TVFs when the trial_id is not
+  // passed in. For single-objective [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, this is the best trial ID. For multi-objective [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, this is the smallest trial ID among all Pareto optimal trials.
+  int64 default_trial_id = 21 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Trials of a [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // model sorted by trial_id.
+  repeated HparamTuningTrial hparam_trials = 20
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. For single-objective [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, it only contains the best trial. For multi-objective
+  // [hyperparameter
+  // tuning](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-hp-tuning-overview)
+  // models, it contains all Pareto optimal trials sorted by trial_id.
+  repeated int64 optimal_trial_ids = 22
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Remote model info.
+  RemoteModelInfo remote_model_info = 25
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Request format for getting information about a BigQuery ML model.
+message GetModelRequest {
+  // Required. Project ID of the requested model.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the requested model.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Model ID of the requested model.
+  string model_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for patching a BigQuery ML model.
+message PatchModelRequest {
+  // Required. Project ID of the model to patch.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the model to patch.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Model ID of the model to patch.
+  string model_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Patched model.
+  // Follows RFC5789 patch semantics. Missing fields are not updated.
+  // To clear a field, explicitly set it to its default value.
+  Model model = 4 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for deleting BigQuery ML models.
+message DeleteModelRequest {
+  // Required. Project ID of the model to delete.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required.
Dataset ID of the model to delete. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Model ID of the model to delete. + string model_id = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Request format for listing BigQuery ML models. +message ListModelsRequest { + // Required. Project ID of the models to list. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the models to list. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The maximum number of results to return in a single response page. + // Leverage the page tokens to iterate through the entire collection. + google.protobuf.UInt32Value max_results = 3; + + // Page token, returned by a previous call to request the next page of + // results + string page_token = 4; +} + +// Response format for a single page when listing BigQuery ML models. +message ListModelsResponse { + // Models in the requested dataset. Only the following fields are populated: + // model_reference, model_type, creation_time, last_modified_time and + // labels. + repeated Model models = 1; + + // A token to request the next page of results. + string next_page_token = 2; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/model_reference.proto b/test-fixtures/protos/google/cloud/bigquery/v2/model_reference.proto new file mode 100644 index 000000000..9f190254e --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/model_reference.proto @@ -0,0 +1,37 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ModelReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Id path of a model. +message ModelReference { + // Required. The ID of the project containing this model. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the dataset containing this model. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the model. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 1,024 characters. + string model_id = 3 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/partitioning_definition.proto b/test-fixtures/protos/google/cloud/bigquery/v2/partitioning_definition.proto new file mode 100644 index 000000000..f331cb56a --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/partitioning_definition.proto @@ -0,0 +1,49 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "PartitioningDefinitionProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// The partitioning information, which includes managed table, external table +// and metastore partitioned table partition information. +message PartitioningDefinition { + // Optional. Details about each partitioning column. This field is output only + // for all partitioning types other than metastore partitioned tables. + // BigQuery native tables only support 1 partitioning column. Other table + // types may support 0, 1 or more partitioning columns. + // For metastore partitioned tables, the order must match the definition order + // in the Hive Metastore, where it must match the physical layout of the + // table. For example, + // + // CREATE TABLE a_table(id BIGINT, name STRING) + // PARTITIONED BY (city STRING, state STRING). + // + // In this case the values must be ['city', 'state'] in that order. + repeated PartitionedColumn partitioned_column = 1 + [(google.api.field_behavior) = OPTIONAL]; +} + +// The partitioning column information. +message PartitionedColumn { + // Required. The name of the partition column. + optional string field = 1 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/privacy_policy.proto b/test-fixtures/protos/google/cloud/bigquery/v2/privacy_policy.proto new file mode 100644 index 000000000..35f40a0a0 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/privacy_policy.proto @@ -0,0 +1,169 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "PrivacyPolicyProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Represents privacy policy associated with "aggregation threshold" method. +message AggregationThresholdPolicy { + // Optional. The threshold for the "aggregation threshold" policy. + optional int64 threshold = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The privacy unit column(s) associated with this policy. + // For now, only one column per data source object (table, view) is allowed as + // a privacy unit column. 
+ // Representing as a repeated field in metadata for extensibility to + // multiple columns in future. + // Duplicates and Repeated struct fields are not allowed. + // For nested fields, use dot notation ("outer.inner") + repeated string privacy_unit_columns = 2 + [(google.api.field_behavior) = OPTIONAL]; +} + +// Represents privacy policy associated with "differential privacy" method. +message DifferentialPrivacyPolicy { + // Optional. The maximum epsilon value that a query can consume. If the + // subscriber specifies epsilon as a parameter in a SELECT query, it must be + // less than or equal to this value. The epsilon parameter controls the amount + // of noise that is added to the groups — a higher epsilon means less noise. + optional double max_epsilon_per_query = 1 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The delta value that is used per query. Delta represents the + // probability that any row will fail to be epsilon differentially private. + // Indicates the risk associated with exposing aggregate rows in the result of + // a query. + optional double delta_per_query = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The maximum groups contributed value that is used per query. + // Represents the maximum number of groups to which each protected entity can + // contribute. Changing this value does not improve or worsen privacy. The + // best value for accuracy and utility depends on the query and data. + optional int64 max_groups_contributed = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The privacy unit column associated with this policy. Differential + // privacy policies can only have one privacy unit column per data source + // object (table, view). + optional string privacy_unit_column = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The total epsilon budget for all queries against the + // privacy-protected view. Each subscriber query against this view charges the + // amount of epsilon they request in their query. If there is sufficient + // budget, then the subscriber query attempts to complete. It might still fail + // due to other reasons, in which case the charge is refunded. If there is + // insufficient budget the query is rejected. There might be multiple charge + // attempts if a single query references multiple views. In this case there + // must be sufficient budget for all charges or the query is rejected and + // charges are refunded in best effort. The budget does not have a refresh + // policy and can only be updated via ALTER VIEW or circumvented by creating a + // new view that can be queried with a fresh budget. + optional double epsilon_budget = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The total delta budget for all queries against the + // privacy-protected view. Each subscriber query against this view charges the + // amount of delta that is pre-defined by the contributor through the privacy + // policy delta_per_query field. If there is sufficient budget, then the + // subscriber query attempts to complete. It might still fail due to other + // reasons, in which case the charge is refunded. If there is insufficient + // budget the query is rejected. There might be multiple charge attempts if a + // single query references multiple views. In this case there must be + // sufficient budget for all charges or the query is rejected and charges are + // refunded in best effort. 
+  // The budget does not have a refresh policy and can only be updated via
+  // ALTER VIEW, or circumvented by creating a new view that can be queried
+  // with a fresh budget.
+  optional double delta_budget = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The epsilon budget remaining. If the budget is exhausted, no
+  // more queries are allowed. Note that the budget for queries that are in
+  // progress is deducted before the query executes. If the query fails or is
+  // cancelled, then the budget is refunded. In this case the amount of budget
+  // remaining can increase.
+  optional double epsilon_budget_remaining = 7
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The delta budget remaining. If the budget is exhausted, no
+  // more queries are allowed. Note that the budget for queries that are in
+  // progress is deducted before the query executes. If the query fails or is
+  // cancelled, then the budget is refunded. In this case the amount of budget
+  // remaining can increase.
+  optional double delta_budget_remaining = 8
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Represents the privacy policy associated with "join restrictions". A join
+// restriction gives data providers the ability to enforce joins on the
+// 'join_allowed_columns' when data is queried from a privacy-protected view.
+message JoinRestrictionPolicy {
+  // Enum for the join restrictions policy.
+  enum JoinCondition {
+    // A join is neither required nor restricted on any column. Default value.
+    JOIN_CONDITION_UNSPECIFIED = 0;
+
+    // A join is required on at least one of the specified columns.
+    JOIN_ANY = 1;
+
+    // A join is required on all specified columns.
+    JOIN_ALL = 2;
+
+    // A join is not required, but if present it is only permitted on
+    // 'join_allowed_columns'.
+    JOIN_NOT_REQUIRED = 3;
+
+    // Joins are blocked for all queries.
+    JOIN_BLOCKED = 4;
+  }
+
+  // Optional. Specifies whether a join is required on queries for the view.
+  // Default is JOIN_CONDITION_UNSPECIFIED.
+  optional JoinCondition join_condition = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The only columns that joins are allowed on.
+  // This field must be specified for the join_condition values JOIN_ANY and
+  // JOIN_ALL, and it cannot be set for JOIN_BLOCKED.
+  repeated string join_allowed_columns = 2
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Represents the privacy policy that contains the privacy requirements
+// specified by the data owner. Currently, this is only supported on views.
+message PrivacyPolicy {
+  // Privacy policy associated with this requirement specification. Only one of
+  // the privacy methods is allowed per data source object.
+  oneof privacy_policy {
+    // Optional. Policy used for aggregation thresholds.
+    AggregationThresholdPolicy aggregation_threshold_policy = 2
+        [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Policy used for differential privacy.
+    DifferentialPrivacyPolicy differential_privacy_policy = 3
+        [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // Optional. The join restriction policy sits outside of the oneof of
+  // policies, since it can be set along with the other policies. This policy
+  // gives data providers the ability to enforce joins on the
+  // 'join_allowed_columns' when data is queried from a privacy-protected view.
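+  //
+  // A minimal illustrative sketch (JSON field names follow the standard proto
+  // JSON mapping; the column name is hypothetical) of a policy that only
+  // permits joins on `state`:
+  //
+  //     "joinRestrictionPolicy": {
+  //       "joinCondition": "JOIN_ANY",
+  //       "joinAllowedColumns": ["state"]
+  //     }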
+ optional JoinRestrictionPolicy join_restriction_policy = 1 + [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/project.proto b/test-fixtures/protos/google/cloud/bigquery/v2/project.proto new file mode 100644 index 000000000..f04d3962e --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/project.proto @@ -0,0 +1,61 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "ProjectProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Project Service. +// +// It should not be relied on for production use cases at this time. +service ProjectService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // RPC to get the service account for a project used for interactions with + // Google Cloud KMS + rpc GetServiceAccount(GetServiceAccountRequest) + returns (GetServiceAccountResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/serviceAccount" + }; + } +} + +// Request object of GetServiceAccount +message GetServiceAccountRequest { + // Required. ID of the project. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Response object of GetServiceAccount +message GetServiceAccountResponse { + // The resource type of the response. + string kind = 1; + + // The service account email address. + string email = 2; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/query_parameter.proto b/test-fixtures/protos/google/cloud/bigquery/v2/query_parameter.proto new file mode 100644 index 000000000..e65a95b80 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/query_parameter.proto @@ -0,0 +1,101 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
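+
+// Query parameters are referenced from GoogleSQL either by name
+// (`@parameter_name`) or by position (`?`). A minimal illustrative statement
+// (the table and parameter names are hypothetical):
+//
+//     SELECT name FROM `my-project.my_dataset.people` WHERE id = @target_id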
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "QueryParameterProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// The type of a struct parameter.
+message QueryParameterStructType {
+  // Optional. The name of this field.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. The type of this field.
+  QueryParameterType type = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Human-oriented description of the field.
+  string description = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The type of a query parameter.
+message QueryParameterType {
+  // Required. The top level type of this field.
+  string type = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The type of the array's elements, if this is an array.
+  QueryParameterType array_type = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The types of the fields of this struct, in order, if this is a
+  // struct.
+  repeated QueryParameterStructType struct_types = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The element type of the range, if this is a range.
+  QueryParameterType range_element_type = 4
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Represents the value of a range.
+message RangeValue {
+  // Optional. The start value of the range. A missing value represents an
+  // unbounded start.
+  QueryParameterValue start = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The end value of the range. A missing value represents an
+  // unbounded end.
+  QueryParameterValue end = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The value of a query parameter.
+message QueryParameterValue {
+  // Optional. The value of this parameter, if a simple scalar type.
+  google.protobuf.StringValue value = 1
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The array values, if this is an array type.
+  repeated QueryParameterValue array_values = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // The struct field values.
+  map<string, QueryParameterValue> struct_values = 3;
+
+  // Optional. The range value, if this is a range type.
+  RangeValue range_value = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // This field should not be used.
+  repeated google.protobuf.Value alt_struct_values = 5;
+}
+
+// A parameter given to a query.
+message QueryParameter {
+  // Optional. If unset, this is a positional parameter. Otherwise, should be
+  // unique within a query.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. The type of this parameter.
+  QueryParameterType parameter_type = 2
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The value of this parameter.
+  QueryParameterValue parameter_value = 3
+      [(google.api.field_behavior) = REQUIRED];
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/range_partitioning.proto b/test-fixtures/protos/google/cloud/bigquery/v2/range_partitioning.proto
new file mode 100644
index 000000000..1cfded0c9
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/range_partitioning.proto
@@ -0,0 +1,47 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "RangePartitioningProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message RangePartitioning { + // Defines the ranges for range partitioning. + message Range { + // Required. The start of range partitioning, inclusive. This field is an + // INT64 value represented as a string. + string start = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The end of range partitioning, exclusive. This field is an + // INT64 value represented as a string. + string end = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The width of each interval. This field is an INT64 value + // represented as a string. + string interval = 3 [(google.api.field_behavior) = REQUIRED]; + } + + // Required. The name of the column to partition the table on. It must be a + // top-level, INT64 column whose mode is NULLABLE or REQUIRED. + string field = 1 [(google.api.field_behavior) = REQUIRED]; + + // Defines the ranges for range partitioning. + Range range = 2; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/restriction_config.proto b/test-fixtures/protos/google/cloud/bigquery/v2/restriction_config.proto new file mode 100644 index 000000000..5d9422db3 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/restriction_config.proto @@ -0,0 +1,40 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "RestrictionConfigProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message RestrictionConfig { + // RestrictionType specifies the type of dataset/table restriction. + enum RestrictionType { + // Should never be used. + RESTRICTION_TYPE_UNSPECIFIED = 0; + + // Restrict data egress. See [Data + // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress) + // for more details. + RESTRICTED_DATA_EGRESS = 1; + } + + // Output only. Specifies the type of dataset/table restriction. 
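+  //
+  // For example, a table carrying this restriction might render in the JSON
+  // API as (illustrative):
+  //
+  //     "restrictions": { "type": "RESTRICTED_DATA_EGRESS" }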
+ RestrictionType type = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/routine.proto b/test-fixtures/protos/google/cloud/bigquery/v2/routine.proto new file mode 100644 index 000000000..352b74524 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/routine.proto @@ -0,0 +1,540 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/routine_reference.proto"; +import "google/cloud/bigquery/v2/standard_sql.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "RoutineProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Routine Service. +// +// It should not be relied on for production use cases at this time. +service RoutineService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Gets the specified routine resource by routine ID. + rpc GetRoutine(GetRoutineRequest) returns (Routine) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines/{routine_id=*}" + }; + } + + // Creates a new routine in the dataset. + rpc InsertRoutine(InsertRoutineRequest) returns (Routine) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines" + body: "routine" + }; + } + + // Updates information in an existing routine. The update method replaces the + // entire Routine resource. + rpc UpdateRoutine(UpdateRoutineRequest) returns (Routine) { + option (google.api.http) = { + put: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines/{routine_id=*}" + body: "routine" + }; + } + + // Patches information in an existing routine. The patch method does a partial + // update to an existing Routine resource. + rpc PatchRoutine(PatchRoutineRequest) returns (Routine) {} + + // Deletes the routine specified by routineId from the dataset. + rpc DeleteRoutine(DeleteRoutineRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines/{routine_id=*}" + }; + } + + // Lists all routines in the specified dataset. Requires the READER dataset + // role. 
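+  //
+  // For example, the HTTP mapping below translates to a request like the
+  // following (the project and dataset names are hypothetical):
+  //
+  //     GET /bigquery/v2/projects/my-project/datasets/my_dataset/routines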
+  rpc ListRoutines(ListRoutinesRequest) returns (ListRoutinesResponse) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/routines"
+    };
+  }
+}
+
+// A user-defined function or a stored procedure.
+message Routine {
+  // The fine-grained type of the routine.
+  enum RoutineType {
+    // Default value.
+    ROUTINE_TYPE_UNSPECIFIED = 0;
+
+    // Non-built-in persistent scalar function.
+    SCALAR_FUNCTION = 1;
+
+    // Stored procedure.
+    PROCEDURE = 2;
+
+    // Non-built-in persistent TVF.
+    TABLE_VALUED_FUNCTION = 3;
+
+    // Non-built-in persistent aggregate function.
+    AGGREGATE_FUNCTION = 4;
+  }
+
+  // The language of the routine.
+  enum Language {
+    // Default value.
+    LANGUAGE_UNSPECIFIED = 0;
+
+    // SQL language.
+    SQL = 1;
+
+    // JavaScript language.
+    JAVASCRIPT = 2;
+
+    // Python language.
+    PYTHON = 3;
+
+    // Java language.
+    JAVA = 4;
+
+    // Scala language.
+    SCALA = 5;
+  }
+
+  // Input/output argument of a function or a stored procedure.
+  message Argument {
+    // Represents the kind of a given argument.
+    enum ArgumentKind {
+      // Default value.
+      ARGUMENT_KIND_UNSPECIFIED = 0;
+
+      // The argument is a variable with fully specified type, which can be a
+      // struct or an array, but not a table.
+      FIXED_TYPE = 1;
+
+      // The argument is any type, including struct or array, but not a table.
+      // To be added: FIXED_TABLE, ANY_TABLE
+      ANY_TYPE = 2;
+    }
+
+    // The input/output mode of the argument.
+    enum Mode {
+      // Default value.
+      MODE_UNSPECIFIED = 0;
+
+      // The argument is input-only.
+      IN = 1;
+
+      // The argument is output-only.
+      OUT = 2;
+
+      // The argument is both an input and an output.
+      INOUT = 3;
+    }
+
+    // Optional. The name of this argument. Can be absent for the function
+    // return argument.
+    string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Defaults to FIXED_TYPE.
+    ArgumentKind argument_kind = 2 [(google.api.field_behavior) = OPTIONAL];
+
+    // Optional. Specifies whether the argument is input or output.
+    // Can be set for procedures only.
+    Mode mode = 3;
+
+    // Required unless argument_kind = ANY_TYPE.
+    StandardSqlDataType data_type = 4;
+
+    // Optional. Whether the argument is an aggregate function parameter.
+    // Must be unset for routine types other than AGGREGATE_FUNCTION.
+    // For AGGREGATE_FUNCTION, if set to false, it is equivalent to adding the
+    // "NOT AGGREGATE" clause in DDL; otherwise, it is equivalent to omitting
+    // the "NOT AGGREGATE" clause in DDL.
+    google.protobuf.BoolValue is_aggregate = 6
+        [(google.api.field_behavior) = OPTIONAL];
+  }
+
+  // JavaScript UDF determinism levels.
+  //
+  // If all JavaScript UDFs are DETERMINISTIC, the query result is
+  // potentially cacheable (see below). If any JavaScript UDF is
+  // NOT_DETERMINISTIC, the query result is not cacheable.
+  //
+  // Even if a JavaScript UDF is deterministic, many other factors can prevent
+  // usage of cached query results. Example factors include, but are not
+  // limited to: DDL/DML, non-deterministic SQL function calls, and updates of
+  // referenced tables/views/UDFs or imported JavaScript libraries.
+  //
+  // SQL UDFs cannot have determinism specified. Their determinism is
+  // automatically determined.
+  enum DeterminismLevel {
+    // The determinism of the UDF is unspecified.
+    DETERMINISM_LEVEL_UNSPECIFIED = 0;
+
+    // The UDF is deterministic, meaning that 2 function calls with the same
+    // inputs always produce the same result, even across 2 query runs.
+    DETERMINISTIC = 1;
+
+    // The UDF is not deterministic.
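+    // For example, a JavaScript body that calls `Math.random()` or
+    // `Date.now()` can return different results for the same inputs, so it
+    // should be marked NOT_DETERMINISTIC (the function names are standard
+    // JavaScript, shown purely for illustration).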
+    NOT_DETERMINISTIC = 2;
+  }
+
+  // Security mode.
+  enum SecurityMode {
+    // The security mode of the routine is unspecified.
+    SECURITY_MODE_UNSPECIFIED = 0;
+
+    // The routine is to be executed with the privileges of the user who
+    // defines it.
+    DEFINER = 1;
+
+    // The routine is to be executed with the privileges of the user who
+    // invokes it.
+    INVOKER = 2;
+  }
+
+  // Options for a remote user-defined function.
+  message RemoteFunctionOptions {
+    // Endpoint of the user-provided remote service, e.g.
+    // ```https://us-east1-my_gcf_project.cloudfunctions.net/remote_add```
+    string endpoint = 1;
+
+    // Fully qualified name of the user-provided connection object which holds
+    // the authentication information to send requests to the remote service.
+    // Format:
+    // ```"projects/{projectId}/locations/{locationId}/connections/{connectionId}"```
+    string connection = 2;
+
+    // User-defined context as a set of key/value pairs, which will be sent as
+    // function invocation context together with batched arguments in the
+    // requests to the remote service. The total number of bytes of keys and
+    // values must be less than 8KB.
+    map<string, string> user_defined_context = 3;
+
+    // Maximum number of rows in each batch sent to the remote service.
+    // If absent or if 0, BigQuery dynamically decides the number of rows in a
+    // batch.
+    int64 max_batching_rows = 4;
+  }
+
+  // Data governance type values. Only supports `DATA_MASKING`.
+  enum DataGovernanceType {
+    // The data governance type is unspecified.
+    DATA_GOVERNANCE_TYPE_UNSPECIFIED = 0;
+
+    // The data governance type is data masking.
+    DATA_MASKING = 1;
+  }
+
+  // Output only. A hash of this resource.
+  string etag = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. Reference describing the ID of this routine.
+  RoutineReference routine_reference = 2
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The type of routine.
+  RoutineType routine_type = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The time when this routine was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time when this routine was last modified, in milliseconds
+  // since the epoch.
+  int64 last_modified_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Defaults to "SQL" if the remote_function_options field is
+  // absent; not set otherwise.
+  Language language = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional.
+  repeated Argument arguments = 7;
+
+  // Optional if language = "SQL"; required otherwise.
+  // Cannot be set if routine_type = "TABLE_VALUED_FUNCTION".
+  //
+  // If absent, the return type is inferred from definition_body at query time
+  // in each query that references this routine. If present, then the evaluated
+  // result will be cast to the specified return type at query time.
+  //
+  // For example, for the functions created with the following statements:
+  //
+  // * `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);`
+  //
+  // * `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));`
+  //
+  // * `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));`
+  //
+  // The return_type is `{type_kind: "FLOAT64"}` for `Add` and `Decrement`, and
+  // is absent for `Increment` (inferred as FLOAT64 at query time).
+ // + // Suppose the function `Add` is replaced by + // `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);` + // + // Then the inferred return type of `Increment` is automatically changed to + // INT64 at query time, while the return type of `Decrement` remains FLOAT64. + StandardSqlDataType return_type = 10; + + // Optional. Can be set only if routine_type = "TABLE_VALUED_FUNCTION". + // + // If absent, the return table type is inferred from definition_body at query + // time in each query that references this routine. If present, then the + // columns in the evaluated table result will be cast to match the column + // types specified in return table type, at query time. + StandardSqlTableType return_table_type = 13 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If language = "JAVASCRIPT", this field stores the path of the + // imported JAVASCRIPT libraries. + repeated string imported_libraries = 8; + + // Required. The body of the routine. + // + // For functions, this is the expression in the AS clause. + // + // If language=SQL, it is the substring inside (but excluding) the + // parentheses. For example, for the function created with the following + // statement: + // + // `CREATE FUNCTION JoinLines(x string, y string) as (concat(x, "\n", y))` + // + // The definition_body is `concat(x, "\n", y)` (\n is not replaced with + // linebreak). + // + // If language=JAVASCRIPT, it is the evaluated string in the AS clause. + // For example, for the function created with the following statement: + // + // `CREATE FUNCTION f() RETURNS STRING LANGUAGE js AS 'return "\n";\n'` + // + // The definition_body is + // + // `return "\n";\n` + // + // Note that both \n are replaced with linebreaks. + string definition_body = 9; + + // Optional. The description of the routine, if defined. + string description = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The determinism level of the JavaScript UDF, if defined. + DeterminismLevel determinism_level = 12 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The security mode of the routine, if defined. If not defined, the + // security mode is automatically determined from the routine's configuration. + SecurityMode security_mode = 18 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Use this option to catch many common errors. Error checking is + // not exhaustive, and successfully creating a procedure doesn't guarantee + // that the procedure will successfully execute at runtime. If `strictMode` is + // set to `TRUE`, the procedure body is further checked for errors such as + // non-existent tables or columns. The `CREATE PROCEDURE` statement fails if + // the body fails any of these checks. + // + // If `strictMode` is set to `FALSE`, the procedure body is checked only for + // syntax. For procedures that invoke themselves recursively, specify + // `strictMode=FALSE` to avoid non-existent procedure errors during + // validation. + // + // Default value is `TRUE`. + google.protobuf.BoolValue strict_mode = 14 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Remote function specific options. + RemoteFunctionOptions remote_function_options = 15 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Spark specific options. + SparkOptions spark_options = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set to `DATA_MASKING`, the function is validated and made + // available as a masking function. 
+  // For more information, see [Create custom masking
+  // routines](https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask).
+  DataGovernanceType data_governance_type = 17
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Options for a user-defined Spark routine.
+message SparkOptions {
+  // Fully qualified name of the user-provided Spark connection object. Format:
+  // ```"projects/{project_id}/locations/{location_id}/connections/{connection_id}"```
+  string connection = 1;
+
+  // Runtime version. If not specified, the default runtime version is used.
+  string runtime_version = 2;
+
+  // Custom container image for the runtime environment.
+  string container_image = 3;
+
+  // Configuration properties as a set of key/value pairs, which will be passed
+  // on to the Spark application. For more information, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html) and the
+  // [procedure option
+  // list](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#procedure_option_list).
+  map<string, string> properties = 4;
+
+  // The main file/jar URI of the Spark application. Exactly one of the
+  // definition_body field and the main_file_uri field must be set for Python.
+  // Exactly one of the main_class and main_file_uri fields
+  // should be set for the Java/Scala language type.
+  string main_file_uri = 5;
+
+  // Python files to be placed on the PYTHONPATH for a PySpark application.
+  // Supported file types: `.py`, `.egg`, and `.zip`. For more information
+  // about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string py_file_uris = 6;
+
+  // JARs to include on the driver and executor CLASSPATH.
+  // For more information about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string jar_uris = 7;
+
+  // Files to be placed in the working directory of each executor.
+  // For more information about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string file_uris = 8;
+
+  // Archive files to be extracted into the working directory of each executor.
+  // For more information about Apache Spark, see
+  // [Apache Spark](https://spark.apache.org/docs/latest/index.html).
+  repeated string archive_uris = 9;
+
+  // The fully qualified name of a class in jar_uris, for example,
+  // com.example.wordcount. Exactly one of the main_class and main_file_uri
+  // fields should be set for the Java/Scala language type.
+  string main_class = 10;
+}
+
+// Describes the format for getting information about a routine.
+message GetRoutineRequest {
+  // Required. Project ID of the requested routine
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the requested routine
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Routine ID of the requested routine
+  string routine_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Describes the format for inserting a routine.
+message InsertRoutineRequest {
+  // Required. Project ID of the new routine
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the new routine
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. A routine resource to insert
+  Routine routine = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Describes the format for updating a routine.
+message UpdateRoutineRequest {
+  // Required.
Project ID of the routine to update + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routine to update + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the routine to update + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. A routine resource which will replace the specified routine + Routine routine = 4 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for the partial update (patch) of a routine. +message PatchRoutineRequest { + // Required. Project ID of the routine to update + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routine to update + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the routine to update + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. A routine resource which will be used to partially + // update the specified routine + Routine routine = 4 [(google.api.field_behavior) = REQUIRED]; + + // Only the Routine fields in the field mask are updated + // by the given routine. Repeated routine fields will be fully replaced + // if contained in the field mask. + google.protobuf.FieldMask field_mask = 5; +} + +// Describes the format for deleting a routine. +message DeleteRoutineRequest { + // Required. Project ID of the routine to delete + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routine to delete + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Routine ID of the routine to delete + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Describes the format for listing routines. +message ListRoutinesRequest { + // Required. Project ID of the routines to list + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of the routines to list + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // The maximum number of results to return in a single response page. + // Leverage the page tokens to iterate through the entire collection. + google.protobuf.UInt32Value max_results = 3; + + // Page token, returned by a previous call, to request the next page of + // results + string page_token = 4; + + // If set, then only the Routines matching this filter are returned. + // The supported format is `routineType:{RoutineType}`, where `{RoutineType}` + // is a RoutineType enum. For example: `routineType:SCALAR_FUNCTION`. + string filter = 6; +} + +// Describes the format of a single result page when listing routines. +message ListRoutinesResponse { + // Routines in the requested dataset. Unless read_mask is set in the request, + // only the following fields are populated: + // etag, project_id, dataset_id, routine_id, routine_type, creation_time, + // last_modified_time, language, and remote_function_options. + repeated Routine routines = 1; + + // A token to request the next page of results. 
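+  //
+  // A minimal paging sketch (the token value is hypothetical): pass this value
+  // back as `page_token` on the next ListRoutines call, and stop when the
+  // response no longer includes a token.
+  //
+  //     page 1: {}                      -> routines + next_page_token "abc"
+  //     page 2: { page_token: "abc" }   -> routines, no next_page_token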
+ string next_page_token = 2; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/routine_reference.proto b/test-fixtures/protos/google/cloud/bigquery/v2/routine_reference.proto new file mode 100644 index 000000000..65ab1ae8d --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/routine_reference.proto @@ -0,0 +1,37 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "RoutineReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Id path of a routine. +message RoutineReference { + // Required. The ID of the project containing this routine. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the dataset containing this routine. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the routine. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 256 characters. + string routine_id = 3 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/row_access_policy.proto b/test-fixtures/protos/google/cloud/bigquery/v2/row_access_policy.proto new file mode 100644 index 000000000..c6eb2e9b7 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/row_access_policy.proto @@ -0,0 +1,108 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/row_access_policy_reference.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "RowAccessPolicyProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Service for interacting with row access policies. 
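+//
+// Row access policies themselves are created and dropped with DDL rather than
+// through this service; a minimal illustrative statement (all names are
+// hypothetical):
+//
+//     CREATE ROW ACCESS POLICY us_filter
+//     ON my_dataset.my_table
+//     GRANT TO ("group:sales@example.com")
+//     FILTER USING (region = "US");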
+service RowAccessPolicyService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Lists all row access policies on the specified table. + rpc ListRowAccessPolicies(ListRowAccessPoliciesRequest) + returns (ListRowAccessPoliciesResponse) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}/rowAccessPolicies" + }; + } +} + +// Request message for the ListRowAccessPolicies method. +message ListRowAccessPoliciesRequest { + // Required. Project ID of the row access policies to list. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Dataset ID of row access policies to list. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Table ID of the table to list row access policies. + string table_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Page token, returned by a previous call, to request the next page of + // results. + string page_token = 4; + + // The maximum number of results to return in a single response page. Leverage + // the page tokens to iterate through the entire collection. + int32 page_size = 5; +} + +// Response message for the ListRowAccessPolicies method. +message ListRowAccessPoliciesResponse { + // Row access policies on the requested table. + repeated RowAccessPolicy row_access_policies = 1; + + // A token to request the next page of results. + string next_page_token = 2; +} + +// Represents access on a subset of rows on the specified table, defined by its +// filter predicate. Access to the subset of rows is controlled by its IAM +// policy. +message RowAccessPolicy { + // Output only. A hash of this resource. + string etag = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Reference describing the ID of this row access policy. + RowAccessPolicyReference row_access_policy_reference = 2 + [(google.api.field_behavior) = REQUIRED]; + + // Required. A SQL boolean expression that represents the rows defined by this + // row access policy, similar to the boolean expression in a WHERE clause of a + // SELECT query on a table. + // References to other tables, routines, and temporary functions are not + // supported. + // + // Examples: region="EU" + // date_field = CAST('2019-9-27' as DATE) + // nullable_field is not NULL + // numeric_field BETWEEN 1.0 AND 5.0 + string filter_predicate = 3 [(google.api.field_behavior) = REQUIRED]; + + // Output only. The time when this row access policy was created, in + // milliseconds since the epoch. + google.protobuf.Timestamp creation_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time when this row access policy was last modified, in + // milliseconds since the epoch. 
+ google.protobuf.Timestamp last_modified_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/row_access_policy_reference.proto b/test-fixtures/protos/google/cloud/bigquery/v2/row_access_policy_reference.proto new file mode 100644 index 000000000..28028dab0 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/row_access_policy_reference.proto @@ -0,0 +1,41 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "RowAccessPolicyReferenceProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// Id path of a row access policy. +message RowAccessPolicyReference { + // Required. The ID of the project containing this row access policy. + string project_id = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the dataset containing this row access policy. + string dataset_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the table containing this row access policy. + string table_id = 3 [(google.api.field_behavior) = REQUIRED]; + + // Required. The ID of the row access policy. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 256 characters. + string policy_id = 4 [(google.api.field_behavior) = REQUIRED]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/session_info.proto b/test-fixtures/protos/google/cloud/bigquery/v2/session_info.proto new file mode 100644 index 000000000..333ab3b7e --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/session_info.proto @@ -0,0 +1,30 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_multiple_files = true; +option java_outer_classname = "SessionInfoProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// [Preview] Information related to sessions. +message SessionInfo { + // Output only. The id of the session. 
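+  //
+  // For example (illustrative flow): a query that starts a session returns
+  // this id, and follow-up queries can rejoin the session by passing the id
+  // back as the `session_id` connection property.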
+  string session_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/standard_sql.proto b/test-fixtures/protos/google/cloud/bigquery/v2/standard_sql.proto
new file mode 100644
index 000000000..0f63b2d5f
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/standard_sql.proto
@@ -0,0 +1,166 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "StandardSqlProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// The data type of a variable such as a function argument.
+// Examples include:
+//
+// * INT64: `{"typeKind": "INT64"}`
+//
+// * ARRAY<STRING>:
+//
+//     {
+//       "typeKind": "ARRAY",
+//       "arrayElementType": {"typeKind": "STRING"}
+//     }
+//
+// * STRUCT<x STRING, y ARRAY<DATE>>:
+//
+//     {
+//       "typeKind": "STRUCT",
+//       "structType":
+//       {
+//         "fields":
+//         [
+//           {
+//             "name": "x",
+//             "type": {"typeKind": "STRING"}
+//           },
+//           {
+//             "name": "y",
+//             "type":
+//             {
+//               "typeKind": "ARRAY",
+//               "arrayElementType": {"typeKind": "DATE"}
+//             }
+//           }
+//         ]
+//       }
+//     }
+//
+// * RANGE<DATE>:
+//
+//     {
+//       "typeKind": "RANGE",
+//       "rangeElementType": {"typeKind": "DATE"}
+//     }
+message StandardSqlDataType {
+  // The kind of the datatype.
+  enum TypeKind {
+    // Invalid type.
+    TYPE_KIND_UNSPECIFIED = 0;
+
+    // Encoded as a string in decimal format.
+    INT64 = 2;
+
+    // Encoded as a boolean "false" or "true".
+    BOOL = 5;
+
+    // Encoded as a number, or string "NaN", "Infinity" or "-Infinity".
+    FLOAT64 = 7;
+
+    // Encoded as a string value.
+    STRING = 8;
+
+    // Encoded as a base64 string per RFC 4648, section 4.
+    BYTES = 9;
+
+    // Encoded as an RFC 3339 timestamp with mandatory "Z" time zone string:
+    // 1985-04-12T23:20:50.52Z
+    TIMESTAMP = 19;
+
+    // Encoded as RFC 3339 full-date format string: 1985-04-12
+    DATE = 10;
+
+    // Encoded as RFC 3339 partial-time format string: 23:20:50.52
+    TIME = 20;
+
+    // Encoded as RFC 3339 full-date "T" partial-time: 1985-04-12T23:20:50.52
+    DATETIME = 21;
+
+    // Encoded as fully qualified 3 part: 0-5 15 2:30:45.6
+    INTERVAL = 26;
+
+    // Encoded as WKT.
+    GEOGRAPHY = 22;
+
+    // Encoded as a decimal string.
+    NUMERIC = 23;
+
+    // Encoded as a decimal string.
+    BIGNUMERIC = 24;
+
+    // Encoded as a string.
+    JSON = 25;
+
+    // Encoded as a list with types matching Type.array_type.
+    ARRAY = 16;
+
+    // Encoded as a list with fields of type Type.struct_type[i]. List is used
+    // because a JSON object cannot have duplicate field names.
+    STRUCT = 17;
+
+    // Encoded as a pair with types matching range_element_type. Pairs must
+    // begin with "[", end with ")", and be separated by ", ".
+    RANGE = 29;
+  }
+
+  // Required. The top level type of this field.
+  // Can be any GoogleSQL data type (e.g., "INT64", "DATE", "ARRAY<STRING>").
+  TypeKind type_kind = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // For complex types, the sub type information.
+  oneof sub_type {
+    // The type of the array's elements, if type_kind = "ARRAY".
+    StandardSqlDataType array_element_type = 2;
+
+    // The fields of this struct, in order, if type_kind = "STRUCT".
+    StandardSqlStructType struct_type = 3;
+
+    // The type of the range's elements, if type_kind = "RANGE".
+    StandardSqlDataType range_element_type = 4;
+  }
+}
+
+// A field or a column.
+message StandardSqlField {
+  // Optional. The name of this field. Can be absent for struct fields.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The type of this parameter. Absent if not explicitly
+  // specified (e.g., CREATE FUNCTION statement can omit the return type;
+  // in this case the output parameter does not have this "type" field).
+  StandardSqlDataType type = 2 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The representation of a SQL STRUCT type.
+message StandardSqlStructType {
+  // Fields within the struct.
+  repeated StandardSqlField fields = 1;
+}
+
+// A table type.
+message StandardSqlTableType {
+  // The columns in this table type.
+  repeated StandardSqlField columns = 1;
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/system_variable.proto b/test-fixtures/protos/google/cloud/bigquery/v2/system_variable.proto
new file mode 100644
index 000000000..4437f0f4e
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/system_variable.proto
@@ -0,0 +1,36 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/standard_sql.proto";
+import "google/protobuf/struct.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_multiple_files = true;
+option java_outer_classname = "SystemVariableProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// System variables given to a query.
+message SystemVariables {
+  // Output only. Data type for each system variable.
+  map<string, StandardSqlDataType> types = 1
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Value for each system variable.
+  google.protobuf.Struct values = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/table.proto b/test-fixtures/protos/google/cloud/bigquery/v2/table.proto
new file mode 100644
index 000000000..9f93ae5df
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/table.proto
@@ -0,0 +1,730 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/v2/biglake_config.proto"; +import "google/cloud/bigquery/v2/clustering.proto"; +import "google/cloud/bigquery/v2/encryption_config.proto"; +import "google/cloud/bigquery/v2/error.proto"; +import "google/cloud/bigquery/v2/external_catalog_table_options.proto"; +import "google/cloud/bigquery/v2/external_data_config.proto"; +import "google/cloud/bigquery/v2/partitioning_definition.proto"; +import "google/cloud/bigquery/v2/privacy_policy.proto"; +import "google/cloud/bigquery/v2/range_partitioning.proto"; +import "google/cloud/bigquery/v2/restriction_config.proto"; +import "google/cloud/bigquery/v2/table_constraints.proto"; +import "google/cloud/bigquery/v2/table_reference.proto"; +import "google/cloud/bigquery/v2/table_schema.proto"; +import "google/cloud/bigquery/v2/time_partitioning.proto"; +import "google/cloud/bigquery/v2/udf_resource.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "TableProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +// This is an experimental RPC service definition for the BigQuery +// Table Service. +// +// It should not be relied on for production use cases at this time. +service TableService { + option (google.api.default_host) = "bigquery.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + + // Gets the specified table resource by table ID. + // This method does not return the data in the table, it only returns the + // table resource, which describes the structure of this table. + rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { + get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}" + }; + } + + // Creates a new, empty table in the dataset. + rpc InsertTable(InsertTableRequest) returns (Table) { + option (google.api.http) = { + post: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables" + body: "table" + }; + } + + // Updates information in an existing table. The update method replaces the + // entire table resource, whereas the patch method only replaces fields that + // are provided in the submitted table resource. + // This method supports RFC5789 patch semantics. + rpc PatchTable(UpdateOrPatchTableRequest) returns (Table) { + option (google.api.http) = { + patch: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}" + body: "table" + }; + } + + // Updates information in an existing table. The update method replaces the + // entire Table resource, whereas the patch method only replaces fields that + // are provided in the submitted Table resource. 
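+  //
+  // An illustrative contrast (the request body is hypothetical): sending only
+  // `{"description": "new"}` to PatchTable updates just the description,
+  // while sending the same body to UpdateTable (a full replacement) resets
+  // every field that is not supplied.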
+  rpc UpdateTable(UpdateOrPatchTableRequest) returns (Table) {
+    option (google.api.http) = {
+      put: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}"
+      body: "table"
+    };
+  }
+
+  // Deletes the table specified by tableId from the dataset.
+  // If the table contains data, all the data will be deleted.
+  rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      delete: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables/{table_id=*}"
+    };
+  }
+
+  // Lists all tables in the specified dataset. Requires the READER dataset
+  // role.
+  rpc ListTables(ListTablesRequest) returns (TableList) {
+    option (google.api.http) = {
+      get: "/bigquery/v2/projects/{project_id=*}/datasets/{dataset_id=*}/tables"
+    };
+  }
+}
+
+// Replication info of a table created using `AS REPLICA` DDL like:
+// `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`
+message TableReplicationInfo {
+  // Replication status of the table created using `AS REPLICA` like:
+  // `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`
+  enum ReplicationStatus {
+    // Default value.
+    REPLICATION_STATUS_UNSPECIFIED = 0;
+
+    // Replication is active with no errors.
+    ACTIVE = 1;
+
+    // The source object is deleted.
+    SOURCE_DELETED = 2;
+
+    // The source revoked replication permissions.
+    PERMISSION_DENIED = 3;
+
+    // The source configuration doesn't allow replication.
+    UNSUPPORTED_CONFIGURATION = 4;
+  }
+
+  // Required. Source table reference that is replicated.
+  TableReference source_table = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Specifies the interval at which the source table is polled for
+  // updates. If not specified, the default replication interval is applied.
+  int64 replication_interval_ms = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. If the source is a materialized view, this field
+  // signifies the last refresh time of the source.
+  int64 replicated_source_last_refresh_time = 3 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.field_behavior) = OPTIONAL
+  ];
+
+  // Optional. Output only. Replication status of the configured replication.
+  ReplicationStatus replication_status = 4 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.field_behavior) = OPTIONAL
+  ];
+
+  // Optional. Output only. The replication error that permanently stopped
+  // table replication.
+  ErrorProto replication_error = 5 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.field_behavior) = OPTIONAL
+  ];
+}
+
+// Describes the definition of a logical view.
+message ViewDefinition {
+  // Required. A query that BigQuery executes when the view is referenced.
+  string query = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Describes user-defined function resources used in the query.
+  repeated UserDefinedFunctionResource user_defined_function_resources = 2;
+
+  // Specifies whether to use BigQuery's legacy SQL for this view.
+  // The default value is true. If set to false, the view will use
+  // BigQuery's GoogleSQL:
+  // https://cloud.google.com/bigquery/sql-reference/
+  //
+  // Queries and views that reference this view must use the same flag value.
+  // A wrapper is used here because the default value is true.
+  google.protobuf.BoolValue use_legacy_sql = 3;
+
+  // True if the column names are explicitly specified, for example by using
+  // the 'CREATE VIEW v(c1, c2) AS ...' syntax.
+  // Can only be set for GoogleSQL views.
+  bool use_explicit_column_names = 4;
+
+  // Optional. Specifies the privacy policy for the view.
+  PrivacyPolicy privacy_policy = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Foreign view representations.
+  repeated ForeignViewDefinition foreign_definitions = 6
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A view can be represented in multiple ways. Each representation has its own
+// dialect. This message stores the metadata required for these
+// representations.
+message ForeignViewDefinition {
+  // Required. The query that defines the view.
+  string query = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Represents the dialect of the query.
+  string dialect = 7 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Definition and configuration of a materialized view.
+message MaterializedViewDefinition {
+  // Required. A query whose results are persisted.
+  string query = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The time when this materialized view was last refreshed, in
+  // milliseconds since the epoch.
+  int64 last_refresh_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Enable automatic refresh of the materialized view when the base
+  // table is updated. The default value is "true".
+  google.protobuf.BoolValue enable_refresh = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The maximum frequency at which this materialized view will be
+  // refreshed. The default value is "1800000" (30 minutes).
+  google.protobuf.UInt64Value refresh_interval_ms = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. This option declares the intention to construct a materialized
+  // view that isn't refreshed incrementally.
+  google.protobuf.BoolValue allow_non_incremental_definition = 6
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Status of a materialized view.
+// The last refresh timestamp status is omitted here, but is present in the
+// MaterializedViewDefinition message.
+message MaterializedViewStatus {
+  // Output only. Refresh watermark of materialized view. The base tables' data
+  // were collected into the materialized view cache until this time.
+  google.protobuf.Timestamp refresh_watermark = 1
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Error result of the last automatic refresh. If present,
+  // indicates that the last automatic refresh was unsuccessful.
+  ErrorProto last_refresh_status = 2
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Information about base table and snapshot time of the snapshot.
+message SnapshotDefinition {
+  // Required. Reference describing the ID of the table that was snapshot.
+  TableReference base_table_reference = 1
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The time at which the base table was snapshot. This value is
+  // reported in the JSON response using RFC3339 format.
+  google.protobuf.Timestamp snapshot_time = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+// Information about base table and clone time of a table clone.
+message CloneDefinition {
+  // Required. Reference describing the ID of the table that was cloned.
+  TableReference base_table_reference = 1
+      [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The time at which the base table was cloned. This value is
+  // reported in the JSON response using RFC3339 format.
+  google.protobuf.Timestamp clone_time = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+message Streamingbuffer {
+  // Output only. A lower-bound estimate of the number of bytes currently in
+  // the streaming buffer.
+  uint64 estimated_bytes = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A lower-bound estimate of the number of rows currently in the
+  // streaming buffer.
+  uint64 estimated_rows = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Contains the timestamp of the oldest entry in the streaming
+  // buffer, in milliseconds since the epoch, if the streaming buffer is
+  // available.
+  fixed64 oldest_entry_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+message Table {
+  // The type of resource ID.
+  string kind = 1;
+
+  // Output only. A hash of this resource.
+  string etag = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. An opaque ID uniquely identifying the table.
+  string id = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A URL that can be used to access this resource again.
+  string self_link = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. Reference describing the ID of this table.
+  TableReference table_reference = 5 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. A descriptive name for this table.
+  google.protobuf.StringValue friendly_name = 6
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A user-friendly description of this table.
+  google.protobuf.StringValue description = 7
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // The labels associated with this table. You can use these to organize and
+  // group your tables. Label keys and values can be no longer than 63
+  // characters, can only contain lowercase letters, numeric characters,
+  // underscores and dashes. International characters are allowed. Label values
+  // are optional. Label keys must start with a letter and each label in the
+  // list must have a different key.
+  map<string, string> labels = 8;
+
+  // Optional. Describes the schema of this table.
+  TableSchema schema = 9 [(google.api.field_behavior) = OPTIONAL];
+
+  // If specified, configures time-based partitioning for this table.
+  TimePartitioning time_partitioning = 10;
+
+  // If specified, configures range partitioning for this table.
+  RangePartitioning range_partitioning = 27;
+
+  // Clustering specification for the table. Must be specified with time-based
+  // partitioning; data in the table will be first partitioned and subsequently
+  // clustered.
+  Clustering clustering = 23;
+
+  // Optional. If set to true, queries over this table require
+  // a partition filter that can be used for partition elimination to be
+  // specified.
+  google.protobuf.BoolValue require_partition_filter = 28
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The partition information for all table formats, including
+  // managed partitioned tables, hive partitioned tables, iceberg partitioned,
+  // and metastore partitioned tables. This field is only populated for
+  // metastore partitioned tables. For other table formats, this is an output
+  // only field.
+  optional PartitioningDefinition partition_definition = 51
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The size of this table in logical bytes, excluding any data
+  // in the streaming buffer.
+  google.protobuf.Int64Value num_bytes = 11
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The physical size of this table in bytes. This includes
+  // storage used for time travel.
+  google.protobuf.Int64Value num_physical_bytes = 26
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The number of logical bytes in the table that are considered
+  // "long-term storage".
+  google.protobuf.Int64Value num_long_term_bytes = 12
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The number of rows of data in this table, excluding any data
+  // in the streaming buffer.
+  google.protobuf.UInt64Value num_rows = 13
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The time when this table was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The time when this table expires, in milliseconds since the
+  // epoch. If not present, the table will persist indefinitely. Expired tables
+  // will be deleted and their storage reclaimed. The defaultTableExpirationMs
+  // property of the encapsulating dataset can be used to set a default
+  // expirationTime on newly created tables.
+  google.protobuf.Int64Value expiration_time = 15
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The time when this table was last modified, in milliseconds
+  // since the epoch.
+  fixed64 last_modified_time = 16 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Describes the table type. The following values are supported:
+  //
+  // * `TABLE`: A normal BigQuery table.
+  // * `VIEW`: A virtual table defined by a SQL query.
+  // * `EXTERNAL`: A table that references data stored in an external storage
+  //   system, such as Google Cloud Storage.
+  // * `MATERIALIZED_VIEW`: A precomputed view defined by a SQL query.
+  // * `SNAPSHOT`: An immutable BigQuery table that preserves the contents of a
+  //   base table at a particular time. See additional information on
+  //   [table
+  //   snapshots](https://cloud.google.com/bigquery/docs/table-snapshots-intro).
+  //
+  // The default value is `TABLE`.
+  string type = 17 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The view definition.
+  ViewDefinition view = 18 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The materialized view definition.
+  MaterializedViewDefinition materialized_view = 25
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The materialized view status.
+  MaterializedViewStatus materialized_view_status = 42
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Describes the data format, location, and other properties of
+  // a table stored outside of BigQuery. By defining these properties, the data
+  // source can then be queried as if it were a standard BigQuery table.
+  ExternalDataConfiguration external_data_configuration = 19
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Specifies the configuration of a BigLake managed table.
+  BigLakeConfiguration biglake_configuration = 45
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The geographic location where the table resides. This value
+  // is inherited from the dataset.
+  string location = 20 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Contains information regarding this table's streaming buffer,
+  // if one is present. This field will be absent if the table is not being
+  // streamed to or if there is no data in the streaming buffer.
+  Streamingbuffer streaming_buffer = 21
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Custom encryption configuration (e.g., Cloud KMS keys).
+  EncryptionConfiguration encryption_configuration = 22;
+
+  // Output only. Contains information about the snapshot. This value is set
+  // via snapshot creation.
+  SnapshotDefinition snapshot_definition = 29
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Defines the default collation specification of new STRING fields
+  // in the table. During table creation or update, if a STRING field is added
+  // to this table without explicit collation specified, then the field
+  // inherits the table default collation. A change to this field affects only
+  // fields added afterwards, and does not alter the existing fields.
+  // The following values are supported:
+  //
+  // * 'und:ci': undetermined locale, case insensitive.
+  // * '': empty string. Defaults to case-sensitive behavior.
+  google.protobuf.StringValue default_collation = 30
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Defines the default rounding mode specification of new decimal
+  // fields (NUMERIC OR BIGNUMERIC) in the table. During table creation or
+  // update, if a decimal field is added to this table without an explicit
+  // rounding mode specified, then the field inherits the table default
+  // rounding mode. Changing this field doesn't affect existing fields.
+  TableFieldSchema.RoundingMode default_rounding_mode = 44
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. Contains information about the clone. This value is set via
+  // the clone operation.
+  CloneDefinition clone_definition = 31
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Number of physical bytes used by time travel storage (deleted
+  // or changed data). This data is not kept in real time, and might be delayed
+  // by a few seconds to a few minutes.
+  google.protobuf.Int64Value num_time_travel_physical_bytes = 33
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Total number of logical bytes in the table or materialized
+  // view.
+  google.protobuf.Int64Value num_total_logical_bytes = 34
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Number of logical bytes that are less than 90 days old.
+  google.protobuf.Int64Value num_active_logical_bytes = 35
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Number of logical bytes that are more than 90 days old.
+  google.protobuf.Int64Value num_long_term_logical_bytes = 36
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Number of physical bytes used by current live data storage.
+  // This data is not kept in real time, and might be delayed by a few seconds
+  // to a few minutes.
+  google.protobuf.Int64Value num_current_physical_bytes = 53
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The physical size of this table in bytes. This also includes
+  // storage used for time travel. This data is not kept in real time, and
+  // might be delayed by a few seconds to a few minutes.
+  google.protobuf.Int64Value num_total_physical_bytes = 37
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Number of physical bytes less than 90 days old. This data is
+  // not kept in real time, and might be delayed by a few seconds to a few
+  // minutes.
+  google.protobuf.Int64Value num_active_physical_bytes = 38
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Number of physical bytes more than 90 days old.
+  // This data is not kept in real time, and might be delayed by a few seconds
+  // to a few minutes.
+  google.protobuf.Int64Value num_long_term_physical_bytes = 39
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The number of partitions present in the table or materialized
+  // view. This data is not kept in real time, and might be delayed by a few
+  // seconds to a few minutes.
+  google.protobuf.Int64Value num_partitions = 40
+      [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The maximum staleness of data that could be returned when the
+  // table (or stale MV) is queried. Staleness is encoded as a string encoding
+  // of the SQL IntervalValue type.
+  string max_staleness = 41 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. Restriction config for table. If set, restrict
+  // certain accesses on the table based on the config. See [Data
+  // egress](https://cloud.google.com/bigquery/docs/analytics-hub-introduction#data_egress)
+  // for more details.
+  RestrictionConfig restrictions = 46 [
+    (google.api.field_behavior) = OPTIONAL,
+    (google.api.field_behavior) = OUTPUT_ONLY
+  ];
+
+  // Optional. The table's primary key and foreign key information.
+  TableConstraints table_constraints = 47
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The [tags](https://cloud.google.com/bigquery/docs/tags) attached
+  // to this table. Tag keys are globally unique. Tag key is expected to be in
+  // the namespaced format, for example "123456789012/environment" where
+  // 123456789012 is the ID of the parent organization or project resource for
+  // this tag key. Tag value is expected to be the short name, for example
+  // "Production". See [Tag
+  // definitions](https://cloud.google.com/iam/docs/tags-access-control#definitions)
+  // for more details.
+  map<string, string> resource_tags = 48
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Table replication info for a table created using `AS REPLICA`
+  // DDL like: `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`
+  TableReplicationInfo table_replication_info = 49
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Output only. Table references of all replicas currently active
+  // on the table.
+  repeated TableReference replicas = 50 [
+    (google.api.field_behavior) = OPTIONAL,
+    (google.api.field_behavior) = OUTPUT_ONLY
+  ];
+
+  // Optional. Options defining open source compatible table.
+  ExternalCatalogTableOptions external_catalog_table_options = 54
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for getting table metadata.
+message GetTableRequest {
+  // TableMetadataView specifies which table information is returned.
+  enum TableMetadataView {
+    // The default value.
+    // Defaults to the STORAGE_STATS view.
+    TABLE_METADATA_VIEW_UNSPECIFIED = 0;
+
+    // Includes basic table information including schema and
+    // partitioning specification. This view does not include storage
+    // statistics such as numRows or numBytes. This view is significantly more
+    // efficient and should be used to support high query rates.
+    BASIC = 1;
+
+    // Includes all information in the BASIC view as well as storage statistics
+    // (numBytes, numLongTermBytes, numRows and lastModifiedTime).
+    STORAGE_STATS = 2;
+
+    // Includes all table information, including storage statistics.
+    // It returns the same information as the STORAGE_STATS view, but may
+    // contain additional information in the future.
+    FULL = 3;
+  }
+
+  // Required. Project ID of the requested table
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the requested table
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Table ID of the requested table
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // List of table schema fields to return (comma-separated).
+  // If unspecified, all fields are returned.
+  // A fieldMask cannot be used here because the fields will automatically be
+  // converted from camelCase to snake_case and the conversion will fail if
+  // there are underscores. Since these are fields in BigQuery table schemas,
+  // underscores are allowed.
+  string selected_fields = 4;
+
+  // Optional. Specifies the view that determines which table information is
+  // returned. By default, basic table information and storage statistics
+  // (STORAGE_STATS) are returned.
+  TableMetadataView view = 5 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for inserting table metadata.
+message InsertTableRequest {
+  // Required. Project ID of the new table
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the new table
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. A tables resource to insert
+  Table table = 4 [(google.api.field_behavior) = REQUIRED];
+}
+
+message UpdateOrPatchTableRequest {
+  // Required. Project ID of the table to update
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the table to update
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Table ID of the table to update
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. A tables resource which will replace or patch the specified
+  // table
+  Table table = 4 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. When true, the schema will be autodetected; otherwise, the
+  // original schema will be kept.
+  bool autodetect_schema = 5 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Request format for deleting a table.
+message DeleteTableRequest {
+  // Required. Project ID of the table to delete
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the table to delete
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Table ID of the table to delete
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request format for enumerating tables.
+message ListTablesRequest {
+  // Required. Project ID of the tables to list
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset ID of the tables to list
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // The maximum number of results to return in a single response page.
+  // Leverage the page tokens to iterate through the entire collection.
+  google.protobuf.UInt32Value max_results = 3;
+
+  // Page token, returned by a previous call, to request the next page of
+  // results
+  string page_token = 4;
+}
+
+// Information about a logical view.
+message ListFormatView {
+  // True if view is defined in legacy SQL dialect,
+  // false if in GoogleSQL.
+  google.protobuf.BoolValue use_legacy_sql = 1;
+
+  // Specifies the privacy policy for the view.
+  PrivacyPolicy privacy_policy = 2;
+}
+
+message ListFormatTable {
+  // The resource type.
+  string kind = 1;
+
+  // An opaque ID of the table.
+  string id = 2;
+
+  // A reference uniquely identifying the table.
+  TableReference table_reference = 3;
+
+  // The user-friendly name for this table.
+  google.protobuf.StringValue friendly_name = 4;
+
+  // The type of table.
+  string type = 5;
+
+  // The time-based partitioning for this table.
+  TimePartitioning time_partitioning = 6;
+
+  // The range partitioning for this table.
+  RangePartitioning range_partitioning = 12;
+
+  // Clustering specification for this table, if configured.
+  Clustering clustering = 11;
+
+  // The labels associated with this table. You can use these to organize
+  // and group your tables.
+  map<string, string> labels = 7;
+
+  // Additional details for a view.
+  ListFormatView view = 8;
+
+  // Output only. The time when this table was created, in milliseconds since
+  // the epoch.
+  int64 creation_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The time when this table expires, in milliseconds since the
+  // epoch. If not present, the table will persist indefinitely. Expired tables
+  // will be deleted and their storage reclaimed.
+  int64 expiration_time = 10;
+
+  // Optional. If set to true, queries including this table must specify a
+  // partition filter. This filter is used for partition elimination.
+  google.protobuf.BoolValue require_partition_filter = 14
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Partial projection of the metadata for a given table in a list response.
+message TableList {
+  // The type of list.
+  string kind = 1;
+
+  // A hash of this page of results.
+  string etag = 2;
+
+  // A token to request the next page of results.
+  string next_page_token = 3;
+
+  // Tables in the requested dataset.
+  repeated ListFormatTable tables = 4;
+
+  // The total number of tables in the dataset.
+  google.protobuf.Int32Value total_items = 5;
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/table_constraints.proto b/test-fixtures/protos/google/cloud/bigquery/v2/table_constraints.proto
new file mode 100644
index 000000000..13edc6bb9
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/table_constraints.proto
@@ -0,0 +1,66 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/bigquery/v2/table_reference.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "TableConstraintsProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Represents the primary key constraint on a table's columns.
+message PrimaryKey {
+  // Required. The columns that compose the primary key constraint.
+  repeated string columns = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The pair of the foreign key column and primary key column.
+message ColumnReference {
+  // Required. The column that composes the foreign key.
+  string referencing_column = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The column in the primary key that is referenced by the
+  // referencing_column.
+  string referenced_column = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Represents a foreign key constraint on a table's columns.
+message ForeignKey {
+  // Optional. Set only if the foreign key constraint is named.
+  string name = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. The table that holds the primary key and is referenced by this
+  // foreign key.
+  TableReference referenced_table = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The columns that compose the foreign key.
+  repeated ColumnReference column_references = 3
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+// The TableConstraints message defines the primary key and foreign key.
+message TableConstraints {
+  // Optional. Represents a primary key constraint on a table's columns.
+  // Present only if the table has a primary key.
+  // The primary key is not enforced.
+  PrimaryKey primary_key = 1 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Present only if the table has a foreign key.
+  // The foreign key is not enforced.
+  repeated ForeignKey foreign_keys = 2 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/table_reference.proto b/test-fixtures/protos/google/cloud/bigquery/v2/table_reference.proto
new file mode 100644
index 000000000..e6e9a1b35
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/table_reference.proto
@@ -0,0 +1,40 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "TableReferenceProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+message TableReference {
+  // Required. The ID of the project containing this table.
+  string project_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The ID of the dataset containing this table.
+  string dataset_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The ID of the table. The ID can contain Unicode characters in
+  // category L (letter), M (mark), N (number), Pc (connector, including
+  // underscore), Pd (dash), and Zs (space). For more information, see [General
+  // Category](https://wikipedia.org/wiki/Unicode_character_property#General_Category).
+  // The maximum length is 1,024 characters. Certain operations allow suffixing
+  // of the table ID with a partition decorator, such as
+  // `sample_table$20190123`.
+  string table_id = 3 [(google.api.field_behavior) = REQUIRED];
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/table_schema.proto b/test-fixtures/protos/google/cloud/bigquery/v2/table_schema.proto
new file mode 100644
index 000000000..8a56f8e87
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/table_schema.proto
@@ -0,0 +1,233 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "TableSchemaProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+// Schema of a table
+message TableSchema {
+  // Describes the fields in a table.
+  repeated TableFieldSchema fields = 1;
+
+  // Optional. Specifies metadata of the foreign data type definition in field
+  // schema
+  // ([TableFieldSchema.foreign_type_definition][google.cloud.bigquery.v2.TableFieldSchema.foreign_type_definition]).
+  ForeignTypeInfo foreign_type_info = 3
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Metadata about the foreign data type definition such as the system
+// in which the type is defined.
+message ForeignTypeInfo {
+  // External systems, such as query engines or table formats, that have their
+  // own data types.
+  enum TypeSystem {
+    // TypeSystem not specified.
+    TYPE_SYSTEM_UNSPECIFIED = 0;
+
+    // Represents Hive data types.
+    HIVE = 1;
+  }
+
+  // Required. Specifies the system which defines the foreign data type.
+  TypeSystem type_system = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Data policy option proto; it currently supports name only and will support
+// precedence later.
+message DataPolicyOption {
+  // Data policy resource name in the form of
+  // projects/project_id/locations/location_id/dataPolicies/data_policy_id.
+  optional string name = 1;
+}
+
+// A field in TableSchema
+message TableFieldSchema {
+  message PolicyTagList {
+    // A list of policy tag resource names. For example,
+    // "projects/1/locations/eu/taxonomies/2/policyTags/3". At most 1 policy tag
+    // is currently allowed.
+    repeated string names = 1;
+  }
+
+  // Rounding mode options that can be used when storing NUMERIC
+  // or BIGNUMERIC values.
+  enum RoundingMode {
+    // Unspecified will default to using ROUND_HALF_AWAY_FROM_ZERO.
+    ROUNDING_MODE_UNSPECIFIED = 0;
+
+    // ROUND_HALF_AWAY_FROM_ZERO rounds half values away from zero
+    // when applying precision and scale upon writing of NUMERIC and BIGNUMERIC
+    // values.
+    // For Scale: 0
+    // 1.1, 1.2, 1.3, 1.4 => 1
+    // 1.5, 1.6, 1.7, 1.8, 1.9 => 2
+    ROUND_HALF_AWAY_FROM_ZERO = 1;
+
+    // ROUND_HALF_EVEN rounds half values to the nearest even value
+    // when applying precision and scale upon writing of NUMERIC and BIGNUMERIC
+    // values.
+    // For Scale: 0
+    // 1.1, 1.2, 1.3, 1.4 => 1
+    // 1.5 => 2
+    // 1.6, 1.7, 1.8, 1.9 => 2
+    // 2.5 => 2
+    ROUND_HALF_EVEN = 2;
+  }
+
+  // Represents the type of a field element.
+  message FieldElementType {
+    // Required. The type of a field element. For more information, see
+    // [TableFieldSchema.type][google.cloud.bigquery.v2.TableFieldSchema.type].
+    string type = 1 [(google.api.field_behavior) = REQUIRED];
+  }
+
+  // Required. The field name. The name must contain only letters (a-z, A-Z),
+  // numbers (0-9), or underscores (_), and must start with a letter or
+  // underscore. The maximum length is 300 characters.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The field data type. Possible values include:
+  //
+  // * STRING
+  // * BYTES
+  // * INTEGER (or INT64)
+  // * FLOAT (or FLOAT64)
+  // * BOOLEAN (or BOOL)
+  // * TIMESTAMP
+  // * DATE
+  // * TIME
+  // * DATETIME
+  // * GEOGRAPHY
+  // * NUMERIC
+  // * BIGNUMERIC
+  // * JSON
+  // * RECORD (or STRUCT)
+  // * RANGE
+  //
+  // Use of RECORD/STRUCT indicates that the field contains a nested schema.
+  string type = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The field mode. Possible values include NULLABLE, REQUIRED and
+  // REPEATED. The default value is NULLABLE.
+  string mode = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Describes the nested schema fields if the type property is set
+  // to RECORD.
+  repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The field description. The maximum length is 1,024 characters.
+  google.protobuf.StringValue description = 6
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The policy tags attached to this field, used for field-level
+  // access control. If not set, defaults to empty policy_tags.
+  PolicyTagList policy_tags = 9 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Data policy options; will replace the data_policies.
+  repeated DataPolicyOption data_policies = 21
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Maximum length of values of this field for STRINGS or BYTES.
+  //
+  // If max_length is not specified, no maximum length constraint is imposed
+  // on this field.
+  //
+  // If type = "STRING", then max_length represents the maximum UTF-8
+  // length of strings in this field.
+  //
+  // If type = "BYTES", then max_length represents the maximum number of
+  // bytes in this field.
+  //
+  // It is invalid to set this field if type ≠ "STRING" and ≠ "BYTES".
+  int64 max_length = 10 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Precision (maximum number of total digits in base 10) and scale
+  // (maximum number of digits in the fractional part in base 10) constraints
+  // for values of this field for NUMERIC or BIGNUMERIC.
+  //
+  // It is invalid to set precision or scale if type ≠ "NUMERIC" and ≠
+  // "BIGNUMERIC".
+  //
+  // If precision and scale are not specified, no value range constraint is
+  // imposed on this field insofar as values are permitted by the type.
+  //
+  // Values of this NUMERIC or BIGNUMERIC field must be in this range when:
+  //
+  // * Precision (P) and scale (S) are specified:
+  //   [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)]
+  // * Precision (P) is specified but not scale (and thus scale is
+  //   interpreted to be equal to zero):
+  //   [-10^P + 1, 10^P - 1].
+  //
+  // Acceptable values for precision and scale if both are specified:
+  //
+  // * If type = "NUMERIC":
+  //   1 ≤ precision - scale ≤ 29 and 0 ≤ scale ≤ 9.
+ // * If type = "BIGNUMERIC": + // 1 ≤ precision - scale ≤ 38 and 0 ≤ scale ≤ 38. + // + // Acceptable values for precision if only precision is specified but not + // scale (and thus scale is interpreted to be equal to zero): + // + // * If type = "NUMERIC": 1 ≤ precision ≤ 29. + // * If type = "BIGNUMERIC": 1 ≤ precision ≤ 38. + // + // If scale is specified but not precision, then it is invalid. + int64 precision = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. See documentation for precision. + int64 scale = 12 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Specifies the rounding mode to be used when storing values of + // NUMERIC and BIGNUMERIC type. + RoundingMode rounding_mode = 15 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Field collation can be set only when the type of field is STRING. + // The following values are supported: + // + // * 'und:ci': undetermined locale, case insensitive. + // * '': empty string. Default to case-sensitive behavior. + google.protobuf.StringValue collation = 13 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A SQL expression to specify the [default value] + // (https://cloud.google.com/bigquery/docs/default-values) for this field. + google.protobuf.StringValue default_value_expression = 14 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The subtype of the RANGE, if the type of this field is RANGE. If + // the type is RANGE, this field is required. Values for the field element + // type can be the following: + // + // * DATE + // * DATETIME + // * TIMESTAMP + FieldElementType range_element_type = 18 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Definition of the foreign data type. + // Only valid for top-level schema fields (not nested fields). + // If the type is FOREIGN, this field is required. + string foreign_type_definition = 23 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/time_partitioning.proto b/test-fixtures/protos/google/cloud/bigquery/v2/time_partitioning.proto new file mode 100644 index 000000000..440a20983 --- /dev/null +++ b/test-fixtures/protos/google/cloud/bigquery/v2/time_partitioning.proto @@ -0,0 +1,44 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.bigquery.v2; + +import "google/api/field_behavior.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb"; +option java_outer_classname = "TimePartitioningProto"; +option java_package = "com.google.cloud.bigquery.v2"; + +message TimePartitioning { + // Required. The supported types are DAY, HOUR, MONTH, and YEAR, which will + // generate one partition per day, hour, month, and year, respectively. + string type = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Number of milliseconds for which to keep the storage for a + // partition. 
+  // A wrapper is used here because 0 is an invalid value.
+  google.protobuf.Int64Value expiration_ms = 2
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If not set, the table is partitioned by pseudo
+  // column '_PARTITIONTIME'; if set, the table is partitioned by this field.
+  // The field must be a top-level TIMESTAMP or DATE field. Its mode must be
+  // NULLABLE or REQUIRED.
+  // A wrapper is used here because an empty string is an invalid value.
+  google.protobuf.StringValue field = 3
+      [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/test-fixtures/protos/google/cloud/bigquery/v2/udf_resource.proto b/test-fixtures/protos/google/cloud/bigquery/v2/udf_resource.proto
new file mode 100644
index 000000000..d630bf9c7
--- /dev/null
+++ b/test-fixtures/protos/google/cloud/bigquery/v2/udf_resource.proto
@@ -0,0 +1,42 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.bigquery.v2;
+
+import "google/protobuf/wrappers.proto";
+
+option go_package = "cloud.google.com/go/bigquery/apiv2/bigquerypb;bigquerypb";
+option java_outer_classname = "UdfProto";
+option java_package = "com.google.cloud.bigquery.v2";
+
+//
+// This is used for defining User Defined Function (UDF) resources only when
+// using legacy SQL. Users of GoogleSQL should leverage either DDL (e.g.
+// CREATE [TEMPORARY] FUNCTION ... ) or the Routines API to define UDF
+// resources.
+//
+// For additional information on migrating, see:
+// https://cloud.google.com/bigquery/docs/reference/standard-sql/migrating-from-legacy-sql#differences_in_user-defined_javascript_functions
+message UserDefinedFunctionResource {
+  // [Pick one] A code resource to load from a Google Cloud Storage URI
+  // (gs://bucket/path).
+  google.protobuf.StringValue resource_uri = 1;
+
+  // [Pick one] An inline resource that contains code for a user-defined
+  // function (UDF). Providing an inline code resource is equivalent to
+  // providing a URI for a file containing the same code.
+  google.protobuf.StringValue inline_code = 2;
+}
diff --git a/typescript/src/schema/proto.ts b/typescript/src/schema/proto.ts
index f63ad6a54..9d5715006 100644
--- a/typescript/src/schema/proto.ts
+++ b/typescript/src/schema/proto.ts
@@ -35,6 +35,12 @@ const COMMON_PROTO_LIST = [
   'google.type',
 ];
 
+// Services that are allowed to use Int32Value and UInt32Value protobuf wrapper
+// types instead of "number" for pageSize/maxResults,
+// keyed by proto package name, e.g. "google.cloud.foo.v1".
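+// For illustration only (hypothetical entry, not part of this change): adding
+//   'google.cloud.foo.v1': true,
+// to this map would opt that package's paginated RPCs into the same
+// wrapper-type handling of max_results that BigQuery v2 receives below.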
+const ENABLE_WRAPPER_TYPES_FOR_PAGE_SIZE = {
+  'google.cloud.bigquery.v2': true,
+};
 export interface MethodDescriptorProto
   extends protos.google.protobuf.IMethodDescriptorProto {
   autoPopulatedFields?: string[];
@@ -71,6 +77,8 @@ export interface MethodDescriptorProto
   bundleConfig?: BundleConfig;
   toJSON: Function | undefined;
   isDiregapicLRO?: boolean;
+  // true if wrappers are allowed and the request has a max_results parameter
+  maxResultsParameter?: boolean;
 }
 
 export interface ServiceDescriptorProto
@@ -266,11 +274,27 @@ function streaming(method: MethodDescriptorProto) {
   return undefined;
 }
 
+// Returns true if the method has UInt32Value wrappers enabled and is a
+// paginated call with a max_results parameter instead of page_size;
+// otherwise it returns undefined. As of its creation, this should only
+// be true for BigQuery.
+function wrappersHasMaxResultsParameter(
+  messages: MessagesMap,
+  method: MethodDescriptorProto,
+  wrappersAllowed?: boolean
+) {
+  const inputType = messages[method.inputType!];
+  const hasMaxResults =
+    inputType &&
+    inputType.field &&
+    inputType.field.some(field => field.name === 'max_results');
+  if (wrappersAllowed && hasMaxResults) {
+    return true;
+  }
+}
+// determines the actual field from the service that needs to be paginated
 function pagingField(
   messages: MessagesMap,
   method: MethodDescriptorProto,
   service?: ServiceDescriptorProto,
-  diregapic?: boolean
+  diregapic?: boolean,
+  wrappersAllowed?: boolean
 ) {
   // TODO: remove this once the next version of the Talent API is published.
   //
@@ -295,17 +319,34 @@ function pagingField(
     inputType &&
     inputType.field &&
     inputType.field.some(field => field.name === 'page_token');
-  // Support paginated methods defined in Discovery-based APIs,
+
+  // isPageSizeField evaluates whether a particular field is a page size
+  // field, and whether this field will require a dependency on wrapper types
+  // in the generator.
+  //
+  // https://google.aip.dev/158 guidance is to use `page_size`, but older APIs
+  // like compute and bigquery use `max_results`. Similarly, `int32` is the
+  // expected scalar type, but there's more variance here in implementations,
+  // so int32 and uint32 are allowed.
+  //
+  // Additionally, we support paginated methods defined in Discovery-based
+  // APIs, where "max_results" defines the maximum number of paginated
+  // resources to return.
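+  //
+  // As a rough summary of the predicate below (illustrative, not the exact
+  // code): a field qualifies as a page-size field when
+  //   field.name === 'page_size', or
+  //   field.name === 'max_results' && diregapic (Discovery-based APIs), or
+  //   field.name === 'max_results' && wrappersAllowed (allowlisted packages
+  //   such as google.cloud.bigquery.v2).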
-  const hasPageSize =
-    inputType &&
-    inputType.field &&
-    inputType.field.some(
-      field =>
-        field.name === 'page_size' ||
-        (diregapic && field.name === 'max_results')
-    );
+  const isPageSizeField = () =>
+    !!(
+      inputType &&
+      inputType.field &&
+      inputType.field.some(
+        field =>
+          field.name === 'page_size' ||
+          (diregapic && field.name === 'max_results') ||
+          (wrappersAllowed && field.name === 'max_results')
+      )
+    );
+  const hasPageSize = isPageSizeField();
+
   const hasNextPageToken =
     outputType &&
     outputType.field &&
@@ -319,6 +360,7 @@ function pagingField(
   if (repeatedFields.length === 0) {
     return undefined;
   }
+
   if (repeatedFields.length === 1) {
     return repeatedFields[0];
   }
@@ -355,18 +397,32 @@ function pagingFieldName(
   messages: MessagesMap,
   method: MethodDescriptorProto,
   service?: ServiceDescriptorProto,
-  diregapic?: boolean
+  diregapic?: boolean,
+  wrappersAllowed?: boolean // whether a service is allowed to use UInt32Value wrappers - generally this is only BigQuery
 ) {
-  const field = pagingField(messages, method, service, diregapic);
+  const field = pagingField(
+    messages,
+    method,
+    service,
+    diregapic,
+    wrappersAllowed
+  );
   return field?.name;
 }
 
 function pagingResponseType(
   messages: MessagesMap,
   method: MethodDescriptorProto,
-  diregapic?: boolean
+  diregapic?: boolean,
+  wrappersAllowed?: boolean // whether a service is allowed to use UInt32Value wrappers - generally this is only BigQuery
 ) {
-  const field = pagingField(messages, method, undefined, diregapic);
+  const field = pagingField(
+    messages,
+    method,
+    undefined,
+    diregapic,
+    wrappersAllowed
+  );
   if (!field || !field.type) {
     return undefined;
   }
@@ -376,6 +432,7 @@ function pagingResponseType(
     return field.typeName; //.google.showcase.v1beta1.EchoResponse
   }
   const type = Type[field.type];
+  // .google.protobuf.FieldDescriptorProto.Type.TYPE_STRING
   return '.google.protobuf.FieldDescriptorProto.Type.' + type;
 }
 
@@ -384,9 +441,16 @@ function ignoreMapPagingMethod(
   messages: MessagesMap,
   method: MethodDescriptorProto,
-  diregapic?: boolean
+  diregapic?: boolean,
+  wrappersAllowed?: boolean // whether a service is allowed to use UInt32Value wrappers - generally this is only BigQuery
 ) {
-  const pagingfield = pagingField(messages, method, undefined, diregapic);
+  const pagingfield = pagingField(
+    messages,
+    method,
+    undefined,
+    diregapic,
+    wrappersAllowed
+  );
   const outputType = messages[method.outputType!];
   if (!pagingfield?.type || !outputType.nestedType) {
     return undefined;
   }
@@ -406,9 +470,17 @@ function pagingMapResponseType(
   messages: MessagesMap,
   method: MethodDescriptorProto,
-  diregapic?: boolean
+  diregapic?: boolean,
+  wrappersAllowed?: boolean // whether a service is allowed to use UInt32Value wrappers - generally this is only BigQuery
 ) {
-  const pagingfield = pagingField(messages, method, undefined, diregapic);
+  const pagingfield = pagingField(
+    messages,
+    method,
+    undefined,
+    diregapic,
+    wrappersAllowed
+  );
+
   const outputType = messages[method.outputType!];
   if (!pagingfield?.type || !diregapic || !outputType.nestedType) {
     return undefined;
   }
@@ -491,6 +563,11 @@ function augmentMethod(
   parameters: AugmentMethodParameters,
   method: MethodDescriptorProto
 ) {
+  // Whether a service is allowed to use Int32Value and UInt32Value wrappers;
+  // generally this is only BigQuery. This affects how pagination fields are
+  // determined and lets users pass a "number" for the page size instead of a protobuf wrapper type.
+  const wrappersAllowed =
+    ENABLE_WRAPPER_TYPES_FOR_PAGE_SIZE[parameters.service.packageName];
   method = Object.assign(
     {
       longRunning: longrunning(parameters.service, method),
@@ -513,22 +590,26 @@ function augmentMethod(
       pagingFieldName: pagingFieldName(
         parameters.allMessages,
         method,
         parameters.service,
-        parameters.diregapic
+        parameters.diregapic,
+        wrappersAllowed
       ),
       pagingResponseType: pagingResponseType(
         parameters.allMessages,
        method,
-        parameters.diregapic
+        parameters.diregapic,
+        wrappersAllowed
      ),
      pagingMapResponseType: pagingMapResponseType(
        parameters.allMessages,
        method,
-        parameters.diregapic
+        parameters.diregapic,
+        wrappersAllowed
      ),
      ignoreMapPagingMethod: ignoreMapPagingMethod(
        parameters.allMessages,
        method,
-        parameters.diregapic
+        parameters.diregapic,
+        wrappersAllowed
      ),
      inputInterface: method.inputType!,
      outputInterface: method.outputType!,
@@ -543,6 +624,11 @@ function augmentMethod(
       ),
       retryableCodesName: defaultNonIdempotentRetryCodesName,
       retryParamsName: defaultParametersName,
+      maxResultsParameter: wrappersHasMaxResultsParameter(
+        parameters.allMessages,
+        method,
+        wrappersAllowed
+      ),
     },
     method
   ) as MethodDescriptorProto;
@@ -592,6 +678,7 @@ function augmentMethod(
       );
       paramComment.push(comment);
     }
+
     method.paramComment = paramComment;
   }
   if (method.methodConfig.retryPolicy?.retryableStatusCodes) {
diff --git a/typescript/test/unit/baselines-esm.ts b/typescript/test/unit/baselines-esm.ts
index 32e436ba6..ef6f85c07 100644
--- a/typescript/test/unit/baselines-esm.ts
+++ b/typescript/test/unit/baselines-esm.ts
@@ -16,6 +16,13 @@ import {describe} from 'mocha';
 import {runBaselineTest} from '../util.js';
 
 describe('Baseline tests: ESM', () => {
+  runBaselineTest({
+    baselineName: 'bigquery-v2-esm',
+    outputDir: '.test-out-bigquery-v2-esm',
+    protoPath: 'google/cloud/bigquery/v2/*.proto',
+    useCommonProto: true,
+    format: 'esm',
+  });
   runBaselineTest({
     baselineName: 'dlp-esm',
     outputDir: '.test-out-dlp-esm',
diff --git a/typescript/test/unit/baselines.ts b/typescript/test/unit/baselines.ts
index d928b8138..6688c57bd 100644
--- a/typescript/test/unit/baselines.ts
+++ b/typescript/test/unit/baselines.ts
@@ -16,6 +16,12 @@ import {describe} from 'mocha';
 import {runBaselineTest} from '../util.js';
 
 describe('Baseline tests', () => {
+  runBaselineTest({
+    baselineName: 'bigquery-v2',
+    outputDir: '.test-out-bigquery-v2',
+    protoPath: 'google/cloud/bigquery/v2/*.proto',
+    useCommonProto: true,
+  });
   runBaselineTest({
     baselineName: 'dlp',
     outputDir: '.test-out-dlp',
diff --git a/typescript/test/unit/proto.ts b/typescript/test/unit/proto.ts
index 1fa4f013c..8e09abf0c 100644
--- a/typescript/test/unit/proto.ts
+++ b/typescript/test/unit/proto.ts
@@ -1179,7 +1179,7 @@ describe('src/schema/proto.ts', () => {
       '.google.cloud.showcase.v1beta1.Address'
     );
   });
-  it('should not be page field if api is not google discovery api but use "max_result"', () => {
+  it('should not be page field if the api is not a google discovery api and the method is not allowlisted but uses "max_results"', () => {
     const fd = {} as protos.google.protobuf.FileDescriptorProto;
     fd.name = 'google/cloud/showcase/v1beta1/test.proto';
     fd.package = 'google.cloud.showcase.v1beta1';
@@ -1240,4 +1240,157 @@
       assert.deepStrictEqual(proto.services['service'].paging.length, 0);
     });
   });
+  describe('should support pagination for allowlisted APIs that use UInt32/Int32 wrappers and max_results', () => {
+    it('should be page field if allowlisted with wrappers and "max_results" is used as the field name', () => {
+      const fd = {} as protos.google.protobuf.FileDescriptorProto;
+      fd.name = 'google/cloud/bigquery/v2/cats.proto';
+      // bq is the only service where this behavior is currently allowlisted
+      fd.package = 'google.cloud.bigquery.v2';
+      fd.service = [{} as protos.google.protobuf.ServiceDescriptorProto];
+      fd.service[0].name = 'CatService';
+      fd.service[0].method = [
+        {} as protos.google.protobuf.MethodDescriptorProto,
+      ];
+      fd.service[0].method[0] =
+        {} as protos.google.protobuf.MethodDescriptorProto;
+      fd.service[0].method[0].name = 'ListCats';
+      fd.service[0].method[0].outputType = '.google.cloud.bigquery.v2.CatList';
+      fd.service[0].method[0].inputType =
+        '.google.cloud.bigquery.v2.ListCatsRequest';
+
+      fd.messageType = [{} as protos.google.protobuf.DescriptorProto];
+      fd.messageType[0] = {} as protos.google.protobuf.DescriptorProto;
+      fd.messageType[1] = {} as protos.google.protobuf.DescriptorProto;
+
+      fd.messageType[0].name = 'CatList';
+      fd.messageType[1].name = 'ListCatsRequest';
+
+      fd.messageType[0].field = [
+        {} as protos.google.protobuf.FieldDescriptorProto,
+      ];
+      fd.messageType[0].field[0] =
+        {} as protos.google.protobuf.FieldDescriptorProto;
+      fd.messageType[0].field[0].name = 'next_page_token';
+      fd.messageType[0].field[0].label = 3; // LABEL_REPEATED
+      fd.messageType[0].field[0].type = 11; // TYPE_MESSAGE
+      fd.messageType[0].field[0].typeName = '.google.cloud.bigquery.v2.Cat';
+      fd.messageType[1].field = [
+        {} as protos.google.protobuf.FieldDescriptorProto,
+      ];
+      fd.messageType[1].field[0] =
+        {} as protos.google.protobuf.FieldDescriptorProto;
+      fd.messageType[1].field[0].name = 'max_results';
+      fd.messageType[1].field[1] =
+        {} as protos.google.protobuf.FieldDescriptorProto;
+      fd.messageType[1].field[1].name = 'page_token';
+      const options: Options = {
+        grpcServiceConfig: {} as protos.grpc.service_config.ServiceConfig,
+        diregapic: false,
+      };
+      const allMessages: MessagesMap = {};
+      fd.messageType
+        ?.filter(message => message.name)
+        .forEach(message => {
+          allMessages['.' + fd.package! + '.' + message.name!] = message;
+        });
+      const commentsMap = new CommentsMap([fd]);
+      const proto = new Proto({
+        fd,
+        packageName: 'google.cloud.bigquery.v2',
+        allMessages,
+        allResourceDatabase: new ResourceDatabase(),
+        resourceDatabase: new ResourceDatabase(),
+        options,
+        commentsMap,
+      });
+      assert.deepStrictEqual(
+        proto.services['CatService'].method[0].pagingFieldName,
+        'next_page_token'
+      );
+      assert.deepStrictEqual(
+        proto.services['CatService'].paging[0].name,
+        'ListCats'
+      );
+      assert.deepStrictEqual(
+        proto.services['CatService'].paging[0].inputType,
+        '.google.cloud.bigquery.v2.ListCatsRequest'
+      );
+      assert.deepStrictEqual(
+        proto.services['CatService'].paging[0].outputType,
+        '.google.cloud.bigquery.v2.CatList'
+      );
+      assert.deepStrictEqual(
+        proto.services['CatService'].paging[0].pagingResponseType,
+        '.google.cloud.bigquery.v2.Cat'
+      );
+    });
+    it('should not be page field if the api is not a google discovery api and wrappers are not allowed but it uses "max_results"', () => {
+      const fd = {} as protos.google.protobuf.FileDescriptorProto;
+      fd.name = 'google/cloud/felines/v2/cats.proto';
+      // bq is the only service where this behavior is currently allowlisted
+      fd.package = 'google.cloud.felines.v2';
+      fd.service = [{} as protos.google.protobuf.ServiceDescriptorProto];
+      fd.service[0].name = 'CatService';
+      fd.service[0].method = [
+        {} as protos.google.protobuf.MethodDescriptorProto,
+      ];
+      fd.service[0].method[0] =
+        {} as protos.google.protobuf.MethodDescriptorProto;
+      fd.service[0].method[0].name = 'ListCats';
+      fd.service[0].method[0].outputType = '.google.cloud.felines.v2.CatList';
+      fd.service[0].method[0].inputType =
+        '.google.cloud.felines.v2.ListCatsRequest';
+
+      fd.messageType = [{} as protos.google.protobuf.DescriptorProto];
+      fd.messageType[0] = {} as protos.google.protobuf.DescriptorProto;
+      fd.messageType[1] = {} as protos.google.protobuf.DescriptorProto;
+
+      fd.messageType[0].name = 'CatList';
+      fd.messageType[1].name = 'ListCatsRequest';
+
+      fd.messageType[0].field = [
+        {} as protos.google.protobuf.FieldDescriptorProto,
+      ];
+      fd.messageType[0].field[0] =
+        {} as protos.google.protobuf.FieldDescriptorProto;
+      fd.messageType[0].field[0].name = 'next_page_token';
+      fd.messageType[0].field[0].label = 3; // LABEL_REPEATED
+      fd.messageType[0].field[0].type = 11; // TYPE_MESSAGE
+      fd.messageType[0].field[0].typeName = '.google.cloud.felines.v2.Cat';
+      fd.messageType[1].field = [
+        {} as protos.google.protobuf.FieldDescriptorProto,
+      ];
+      fd.messageType[1].field[0] =
+        {} as protos.google.protobuf.FieldDescriptorProto;
+      fd.messageType[1].field[0].name = 'max_results';
+      fd.messageType[1].field[1] =
+        {} as protos.google.protobuf.FieldDescriptorProto;
+      fd.messageType[1].field[1].name = 'page_token';
+      const options: Options = {
+        grpcServiceConfig: {} as protos.grpc.service_config.ServiceConfig,
+        diregapic: false,
+      };
+      const allMessages: MessagesMap = {};
+      fd.messageType
+        ?.filter(message => message.name)
+        .forEach(message => {
+          allMessages['.' + fd.package! + '.' + message.name!] = message;
+        });
+      const commentsMap = new CommentsMap([fd]);
+      const proto = new Proto({
+        fd,
+        packageName: 'google.cloud.felines.v2',
+        allMessages,
+        allResourceDatabase: new ResourceDatabase(),
+        resourceDatabase: new ResourceDatabase(),
+        options,
+        commentsMap,
+      });
+      assert.deepStrictEqual(
+        proto.services['CatService'].method[0].pagingFieldName,
+        undefined
+      );
+      assert.deepStrictEqual(proto.services['CatService'].paging.length, 0);
+    });
+  });
 });