From 00496966ef17fc738659df32bf94326d23d82289 Mon Sep 17 00:00:00 2001
From: Nick Lee
Date: Tue, 22 Oct 2024 14:46:16 +0200
Subject: [PATCH 1/4] [SC-179551] Add initial unorchestrated support for
 multipart upload operations

---
 .codegen/__init__.py.tmpl      |   3 +-
 databricks/sdk/mixins/files.py | 167 ++++++++++++++++++++++++++++++++-
 2 files changed, 164 insertions(+), 6 deletions(-)

diff --git a/.codegen/__init__.py.tmpl b/.codegen/__init__.py.tmpl
index d54e9dfff..83bdee5e2 100644
--- a/.codegen/__init__.py.tmpl
+++ b/.codegen/__init__.py.tmpl
@@ -3,6 +3,7 @@ import databricks.sdk.dbutils as dbutils
 
 from databricks.sdk.credentials_provider import CredentialsStrategy
 from databricks.sdk.mixins.files import DbfsExt
+from databricks.sdk.mixins.files import FilesExt
 from databricks.sdk.mixins.compute import ClustersExt
 from databricks.sdk.mixins.workspace import WorkspaceExt
 from databricks.sdk.mixins.open_ai_client import ServingEndpointsExt
@@ -18,7 +19,7 @@ from typing import Optional
   "google_credentials" "google_service_account" }}
 
 {{- define "api" -}}
-    {{- $mixins := dict "ClustersAPI" "ClustersExt" "DbfsAPI" "DbfsExt" "WorkspaceAPI" "WorkspaceExt" "ServingEndpointsAPI" "ServingEndpointsExt" -}}
+    {{- $mixins := dict "ClustersAPI" "ClustersExt" "DbfsAPI" "DbfsExt" "FilesAPI" "FilesExt" "WorkspaceAPI" "WorkspaceExt" "ServingEndpointsAPI" "ServingEndpointsExt" -}}
     {{- $genApi := concat .PascalName "API" -}}
     {{- getOrDefault $mixins $genApi $genApi -}}
 {{- end -}}
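The $mixins dictionary above is how the code generator substitutes a hand-written mixin for a generated API class, so adding the "FilesAPI" "FilesExt" pair makes regenerated clients construct FilesExt wherever they previously constructed FilesAPI. A minimal sketch of the wiring this produces follows; the shape matches the autogenerated diff in PATCH 2/4 below, but the class body here is illustrative and not part of the patch:

    # Illustrative: the regenerated WorkspaceClient wiring once the mixin
    # dict maps "FilesAPI" -> "FilesExt".
    from databricks.sdk.mixins.files import FilesExt

    class WorkspaceClient:
        def __init__(self, api_client):
            # The generator resolves "FilesAPI" through $mixins and
            # instantiates the extended class instead of the plain API.
            self._files = FilesExt(api_client)

        @property
        def files(self) -> FilesExt:
            return self._files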
diff --git a/databricks/sdk/mixins/files.py b/databricks/sdk/mixins/files.py
index 1e109a1a7..e07dc449c 100644
--- a/databricks/sdk/mixins/files.py
+++ b/databricks/sdk/mixins/files.py
@@ -8,15 +8,17 @@ import sys
 from abc import ABC, abstractmethod
 from collections import deque
+from enum import Enum
 from io import BytesIO
 from types import TracebackType
-from typing import (TYPE_CHECKING, AnyStr, BinaryIO, Generator, Iterable,
-                    Iterator, Type, Union)
+from typing import (TYPE_CHECKING, AnyStr, BinaryIO, Dict, Generator,
+                    Iterable, Iterator, List, Optional, Type, Union)
 from urllib import parse
 
 from .._property import _cached_property
 from ..errors import NotFound
 from ..service import files
+from ..service._internal import _escape_multi_segment_path_parameter
 
 if TYPE_CHECKING:
     from _typeshed import Self
@@ -40,9 +42,12 @@ def __init__(self,
         self._api = api
         self._path = path
         if write and read: raise IOError(f'can open either for reading or writing')
-        if read: self._status = api.get_status(path)
-        elif write: self._created = api.create(path, overwrite=overwrite)
-        else: raise IOError(f'need to open either for reading or writing')
+        if read:
+            self._status = api.get_status(path)
+        elif write:
+            self._created = api.create(path, overwrite=overwrite)
+        else:
+            raise IOError('need to open either for reading or writing')
 
     def __enter__(self) -> Self:
         return self
@@ -636,3 +641,155 @@ def delete(self, path: str, *, recursive=False):
         if p.is_dir and not recursive:
             raise IOError('deleting directories requires recursive flag')
         p.delete(recursive=recursive)
+
+
+class FilesExt(files.FilesAPI):
+    """Extends the FilesAPI with support for complex multipart upload/download operations & more robust file I/O"""
+    __doc__ = files.FilesAPI.__doc__
+
+    class _FileTransferBackend(Enum):
+        DB_FILES_API = 1
+        PRESIGNED_URLS = 2
+
+    class _UploadSubOperation(Enum):
+        UPLOAD_SIMPLE = 1
+        UPLOAD_MULTIPART_CREATE = 2
+        UPLOAD_MULTIPART_UPLOAD_PART = 3
+        UPLOAD_MULTIPART_COMPLETE = 4
+
+    # Default backend for performing each upload sub-operation.
+    _DEFAULT_OPERATIONS = {
+        _UploadSubOperation.UPLOAD_SIMPLE: _FileTransferBackend.DB_FILES_API,
+        _UploadSubOperation.UPLOAD_MULTIPART_CREATE: _FileTransferBackend.DB_FILES_API,
+        _UploadSubOperation.UPLOAD_MULTIPART_UPLOAD_PART: _FileTransferBackend.PRESIGNED_URLS,
+        _UploadSubOperation.UPLOAD_MULTIPART_COMPLETE: _FileTransferBackend.DB_FILES_API,
+    }
+
+    def __init__(self, api_client):
+        super().__init__(api_client)
+        self._api = files.FilesAPI(api_client)
+
+    def multipart_upload_create(self,
+                                file_path: str,
+                                *,
+                                file_size_bytes: Optional[int] = None,
+                                backend: Optional[_FileTransferBackend] = None) -> 'MultipartUploadCreate':
+        """Create a multipart upload session, returning the session token and part size."""
+        if not backend:
+            backend = self._DEFAULT_OPERATIONS[FilesExt._UploadSubOperation.UPLOAD_MULTIPART_CREATE]
+
+        if backend == self._FileTransferBackend.DB_FILES_API:
+            return self._files_multipart_upload_create(file_path, file_size_bytes=file_size_bytes)
+        else:
+            raise NotImplementedError(f"Backend {backend} not yet supported for multipart upload create")
+
+    def _files_multipart_upload_create(self,
+                                       file_path: str,
+                                       *,
+                                       file_size_bytes: Optional[int] = None) -> 'MultipartUploadCreate':
+        """Create a multipart upload session."""
+
+        headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
+
+        # Send the expected size as a named query parameter instead of
+        # concatenating the bare number onto the action value.
+        size_param = f'&file_size_bytes={file_size_bytes}' if file_size_bytes is not None else ''
+        resp = self._api.do(
+            'POST',
+            f'/api/2.0/fs/files{_escape_multi_segment_path_parameter(file_path)}?action=initiate-upload{size_param}',
+            headers=headers)
+
+        return MultipartUploadCreate(resp["session_token"], resp["part_size"])
+
+    def multipart_upload_complete(self,
+                                  file_path: str,
+                                  session_token: str,
+                                  etags: List[str],
+                                  backend: Optional[_FileTransferBackend] = None):
+        """Complete a multipart upload session, writing to the path."""
+        if not backend:
+            backend = self._DEFAULT_OPERATIONS[FilesExt._UploadSubOperation.UPLOAD_MULTIPART_COMPLETE]
+
+        if backend == self._FileTransferBackend.DB_FILES_API:
+            return self._files_multipart_upload_complete(file_path, session_token, etags)
+        else:
+            raise NotImplementedError(f"Backend {backend} not yet supported for multipart upload complete")
+
+    def _files_multipart_upload_complete(self, file_path: str, session_token: str, etags: List[str]):
+        """Complete a multipart upload session, writing to the path."""
+
+        headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
+
+        self._api.do(
+            'POST',
+            f'/api/2.0/fs/files{_escape_multi_segment_path_parameter(file_path)}?action=complete-upload&session_token={session_token}',
+            headers=headers,
+            body={'etags': etags})
+        return
+
+    def _ps_multipart_upload_create_part_urls(self, session_token: str, page_token: str, page_size: int):
+        """Request a set of presigned URLs for uploading parts of a file in a multipart upload session."""
+
+        headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
+        body = {'session_token': session_token, 'page_token': page_token, 'page_size': page_size}
+        # Expected response of form {
+        #     "upload_part_urls": [
+        #         {
+        #             "method": "PUT",
+        #             "url": str,
+        #             "headers": [{"name": "Content-Range", "value": "bytes=0-1234/1235"}, ...]
+        #         },
+        #         ...
+        #     ],
+        #     "next_page_token": str
+        # }
+        resp = self._api.do('POST', '/api/2.0/fs/create-upload-part-urls', headers=headers, body=body)
+
+        presigned_urls = [
+            PresignedUrl(u["method"], u["url"], u["headers"], ["Content-Range"])
+            for u in resp["upload_part_urls"]
+        ]
+        return MultipartUploadCreatePartUrlsResponse(presigned_urls, resp["next_page_token"])
+
+    def execute_presigned_url_request(self,
+                                      presigned_url: 'PresignedUrl',
+                                      headers: Optional[Dict[str, str]] = None,
+                                      data: Optional[BinaryIO] = None):
+        """Execute a request to a presigned URL.
+
+        :param headers: Optional[Dict[str, str]]: Additional headers specified by the client
+        :param data: Optional[BinaryIO]: Data to be sent in the request body, sent as a BinaryIO stream that will be
+          read to its completion
+        """
+        headers = headers or {}
+        if not presigned_url.all_client_headers_populated(headers.keys()):
+            raise Exception(
+                "Not all client-provided headers are populated")  # TODO: Move to a dedicated exception type
+
+        request_headers = {**presigned_url.headers, **headers}
+        self._api.do(presigned_url.method, presigned_url.url, headers=request_headers, data=data)
+
+
+class PresignedUrl:
+    """Represents all information needed to execute a presigned URL request"""
+
+    def __init__(self, method: str, url: str, headers: List[Dict[str, str]],
+                 headers_populated_by_client: List[str]):
+        self.method = method
+        self.url = url
+        self.headers_populated_by_client = set(headers_populated_by_client)
+        self.headers = {h["name"]: h["value"] for h in headers}
+
+    def all_client_headers_populated(self, user_headers: Iterable[str]) -> bool:
+        return self.headers_populated_by_client.issubset(user_headers)
+
+
+class MultipartUploadCreatePartUrlsResponse:
+    """Represents the response of a request for presigned URLs for uploading parts of a file in a multipart upload session."""
+
+    def __init__(self, upload_part_urls: List[PresignedUrl], next_page_token: str):
+        self.upload_part_urls = upload_part_urls
+        self.next_page_token = next_page_token
+
+
+class MultipartUploadCreate:
+    """Represents the response to an initiated multipart upload session."""
+
+    def __init__(self, session_token: str, part_size: int):
+        self.session_token = session_token
+        self.part_size = part_size
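Taken together, the pieces above compose into an upload that the caller orchestrates end to end: open a session, PUT each part to a presigned URL, then commit the parts. The following is a minimal sketch rather than part of the patch: it assumes a configured WorkspaceClient, a hypothetical volume path, keeps the whole payload in memory, reaches into the private part-URL helper (which has no public wrapper yet), and fakes the ETag bookkeeping because execute_presigned_url_request does not return the HTTP response:

    import io
    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    target = "/Volumes/main/default/my_volume/big-file.bin"  # hypothetical path
    data = b"x" * (10 * 1024 * 1024)  # payload, held in memory for simplicity

    # 1. Open an upload session; the service dictates the part size.
    session = w.files.multipart_upload_create(target, file_size_bytes=len(data))
    parts = [data[i:i + session.part_size] for i in range(0, len(data), session.part_size)]

    # 2. Page through presigned part URLs and PUT one part to each.
    #    Assumption: an empty page_token requests the first page.
    etags, page_token = [], ""
    while len(etags) < len(parts):
        page = w.files._ps_multipart_upload_create_part_urls(session.session_token, page_token, 10)
        for url in page.upload_part_urls:
            idx = len(etags)
            if idx == len(parts):
                break
            start = idx * session.part_size
            end = start + len(parts[idx]) - 1
            # Content-Range is the header the server declares as client-populated.
            w.files.execute_presigned_url_request(
                url,
                headers={"Content-Range": f"bytes={start}-{end}/{len(data)}"},
                data=io.BytesIO(parts[idx]))
            # Assumed: the ETag arrives on the PUT response; the helper above
            # does not surface the response yet, so this is a placeholder.
            etags.append(f"etag-for-part-{idx}")
        page_token = page.next_page_token

    # 3. Commit the parts in order to materialize the file at the target path.
    w.files.multipart_upload_complete(target, session.session_token, etags)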
From 0b07020f6099738a868339694361aa34dcc9af29 Mon Sep 17 00:00:00 2001
From: Nick Lee
Date: Tue, 22 Oct 2024 15:16:28 +0200
Subject: [PATCH 2/4] Autogenerate SDK

---
 databricks/sdk/__init__.py                        |   6 +-
 databricks/sdk/service/apps.py                    |  52 +--
 databricks/sdk/service/compute.py                 |  30 +-
 databricks/sdk/service/pipelines.py               |  12 +-
 docs/account/billing/budgets.rst                  |  58 ++--
 docs/account/billing/index.rst                    |   3 +-
 docs/account/iam/workspace_assignment.rst         |  16 +-
 .../account/oauth2/custom_app_integration.rst     |  35 +-
 docs/account/oauth2/o_auth_published_apps.rst     |   2 +-
 .../oauth2/published_app_integration.rst          |  22 +-
 docs/account/settings/index.rst                   |   1 -
 docs/account/settings/settings.rst                |   9 -
 docs/dbdataclasses/billing.rst                    | 101 +-----
 docs/dbdataclasses/catalog.rst                    | 121 +------
 docs/dbdataclasses/compute.rst                    |  70 +---
 docs/dbdataclasses/dashboards.rst                 | 203 ------------
 docs/dbdataclasses/iam.rst                        |  21 +-
 docs/dbdataclasses/jobs.rst                       | 180 +----------
 docs/dbdataclasses/marketplace.rst                |  29 ++
 docs/dbdataclasses/pipelines.rst                  |  22 +-
 docs/dbdataclasses/serving.rst                    | 143 ++++++---
 docs/dbdataclasses/settings.rst                   | 124 +------
 docs/dbdataclasses/sharing.rst                    |   6 +-
 docs/dbdataclasses/sql.rst                        | 302 ++----------------
 docs/dbdataclasses/workspace.rst                  |  34 +-
 docs/workspace/apps/apps.rst                      | 109 ++-----
 docs/workspace/catalog/catalogs.rst               |  18 +-
 docs/workspace/catalog/external_locations.rst     |  14 +-
 docs/workspace/catalog/functions.rst              |   2 -
 docs/workspace/catalog/index.rst                  |   2 -
 docs/workspace/catalog/metastores.rst             |  11 +-
 docs/workspace/catalog/model_versions.rst         |  10 +-
 docs/workspace/catalog/quality_monitors.rst       |  23 --
 docs/workspace/catalog/registered_models.rst      |   4 +-
 docs/workspace/catalog/schemas.rst                |   4 +-
 .../workspace/catalog/storage_credentials.rst     |   4 +-
 docs/workspace/catalog/system_schemas.rst         |   9 +-
 docs/workspace/catalog/tables.rst                 |  12 +-
 docs/workspace/catalog/workspace_bindings.rst     |  23 +-
 docs/workspace/compute/cluster_policies.rst       |  18 +-
 docs/workspace/compute/clusters.rst               |  87 ++---
 docs/workspace/compute/command_execution.rst      |   3 +-
 docs/workspace/compute/index.rst                  |   1 -
 docs/workspace/compute/policy_families.rst        |  12 +-
 docs/workspace/dashboards/index.rst               |   1 -
 docs/workspace/dashboards/lakeview.rst            |  36 +--
 docs/workspace/iam/permission_migration.rst       |  11 +-
 docs/workspace/iam/permissions.rst                |  22 +-
 docs/workspace/jobs/index.rst                     |   3 +-
 docs/workspace/jobs/jobs.rst                      |  79 +++--
 .../marketplace/consumer_listings.rst             |   9 +-
 docs/workspace/ml/experiments.rst                 |  10 +-
 docs/workspace/pipelines/pipelines.rst            |  18 +-
 docs/workspace/serving/index.rst                  |   3 +-
 docs/workspace/serving/serving_endpoints.rst      |  46 +--
 docs/workspace/settings/index.rst                 |   3 -
 docs/workspace/settings/settings.rst              |  16 -
 docs/workspace/sharing/providers.rst              |  24 +-
 docs/workspace/sharing/recipients.rst             |  32 +-
 docs/workspace/sharing/shares.rst                 |  37 +--
 docs/workspace/sql/alerts.rst                     |  64 ++--
 docs/workspace/sql/dashboards.rst                 |   4 +-
 docs/workspace/sql/data_sources.rst               |   9 -
 docs/workspace/sql/dbsql_permissions.rst          |  19 --
 docs/workspace/sql/index.rst                      |   3 -
 docs/workspace/sql/queries.rst                    | 142 +++++---
 docs/workspace/sql/query_history.rst              |  20 +-
 docs/workspace/sql/query_visualizations.rst       |  51 +--
 docs/workspace/sql/statement_execution.rst        |  32 +-
 docs/workspace/sql/warehouses.rst                 |   3 +-
 docs/workspace/workspace/git_credentials.rst      |  26 +-
 docs/workspace/workspace/repos.rst                |  31 +-
 72 files changed, 739 insertions(+), 1983 deletions(-)

diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py
index 159946461..5cab39d27 100755
--- a/databricks/sdk/__init__.py
+++ b/databricks/sdk/__init__.py
@@ -5,7 +5,7 @@
 from databricks.sdk import azure
 from databricks.sdk.credentials_provider import CredentialsStrategy
 from databricks.sdk.mixins.compute import ClustersExt
-from databricks.sdk.mixins.files import DbfsExt
+from databricks.sdk.mixins.files import DbfsExt, FilesExt
 from databricks.sdk.mixins.open_ai_client import ServingEndpointsExt
 from databricks.sdk.mixins.workspace import WorkspaceExt
 from databricks.sdk.service.apps import AppsAPI
@@ -202,7 +202,7 @@ def __init__(self,
         self._dbsql_permissions = DbsqlPermissionsAPI(self._api_client)
         self._experiments = ExperimentsAPI(self._api_client)
         self._external_locations = ExternalLocationsAPI(self._api_client)
-        self._files = FilesAPI(self._api_client)
+        self._files = FilesExt(self._api_client)
         self._functions = FunctionsAPI(self._api_client)
         self._genie = GenieAPI(self._api_client)
         self._git_credentials = GitCredentialsAPI(self._api_client)
@@ -408,7 +408,7 @@ def external_locations(self) -> ExternalLocationsAPI:
         return self._external_locations
 
     @property
-    def files(self) -> FilesAPI:
+    def files(self) -> FilesExt:
         """The Files API is a standard HTTP API that allows you to read, write, list, and delete files and
         directories by referring to their
URI.""" return self._files diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index 52796d0e8..5f413f0be 100755 --- a/databricks/sdk/service/apps.py +++ b/databricks/sdk/service/apps.py @@ -813,29 +813,31 @@ def wait_get_app_active(self, attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_get_app_stopped(self, - name: str, - timeout=timedelta(minutes=20), - callback: Optional[Callable[[App], None]] = None) -> App: + def wait_get_deployment_app_succeeded( + self, + app_name: str, + deployment_id: str, + timeout=timedelta(minutes=20), + callback: Optional[Callable[[AppDeployment], None]] = None) -> AppDeployment: deadline = time.time() + timeout.total_seconds() - target_states = (ComputeState.STOPPED, ) - failure_states = (ComputeState.ERROR, ) + target_states = (AppDeploymentState.SUCCEEDED, ) + failure_states = (AppDeploymentState.FAILED, ) status_message = 'polling...' attempt = 1 while time.time() < deadline: - poll = self.get(name=name) - status = poll.compute_status.state + poll = self.get_deployment(app_name=app_name, deployment_id=deployment_id) + status = poll.status.state status_message = f'current status: {status}' - if poll.compute_status: - status_message = poll.compute_status.message + if poll.status: + status_message = poll.status.message if status in target_states: return poll if callback: callback(poll) if status in failure_states: - msg = f'failed to reach STOPPED, got {status}: {status_message}' + msg = f'failed to reach SUCCEEDED, got {status}: {status_message}' raise OperationFailed(msg) - prefix = f"name={name}" + prefix = f"app_name={app_name}, deployment_id={deployment_id}" sleep = attempt if sleep > 10: # sleep 10s max per attempt @@ -845,31 +847,29 @@ def wait_get_app_stopped(self, attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_get_deployment_app_succeeded( - self, - app_name: str, - deployment_id: str, - timeout=timedelta(minutes=20), - callback: Optional[Callable[[AppDeployment], None]] = None) -> AppDeployment: + def wait_get_app_stopped(self, + name: str, + timeout=timedelta(minutes=20), + callback: Optional[Callable[[App], None]] = None) -> App: deadline = time.time() + timeout.total_seconds() - target_states = (AppDeploymentState.SUCCEEDED, ) - failure_states = (AppDeploymentState.FAILED, ) + target_states = (ComputeState.STOPPED, ) + failure_states = (ComputeState.ERROR, ) status_message = 'polling...' 
attempt = 1 while time.time() < deadline: - poll = self.get_deployment(app_name=app_name, deployment_id=deployment_id) - status = poll.status.state + poll = self.get(name=name) + status = poll.compute_status.state status_message = f'current status: {status}' - if poll.status: - status_message = poll.status.message + if poll.compute_status: + status_message = poll.compute_status.message if status in target_states: return poll if callback: callback(poll) if status in failure_states: - msg = f'failed to reach SUCCEEDED, got {status}: {status_message}' + msg = f'failed to reach STOPPED, got {status}: {status_message}' raise OperationFailed(msg) - prefix = f"app_name={app_name}, deployment_id={deployment_id}" + prefix = f"name={name}" sleep = attempt if sleep > 10: # sleep 10s max per attempt diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 4a77496de..40def5df5 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -7865,20 +7865,19 @@ def wait_command_status_command_execution_cancelled( attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_command_status_command_execution_finished_or_error( + def wait_context_status_command_execution_running( self, cluster_id: str, - command_id: str, context_id: str, timeout=timedelta(minutes=20), - callback: Optional[Callable[[CommandStatusResponse], None]] = None) -> CommandStatusResponse: + callback: Optional[Callable[[ContextStatusResponse], None]] = None) -> ContextStatusResponse: deadline = time.time() + timeout.total_seconds() - target_states = (CommandStatus.FINISHED, CommandStatus.ERROR, ) - failure_states = (CommandStatus.CANCELLED, CommandStatus.CANCELLING, ) + target_states = (ContextStatus.RUNNING, ) + failure_states = (ContextStatus.ERROR, ) status_message = 'polling...' attempt = 1 while time.time() < deadline: - poll = self.command_status(cluster_id=cluster_id, command_id=command_id, context_id=context_id) + poll = self.context_status(cluster_id=cluster_id, context_id=context_id) status = poll.status status_message = f'current status: {status}' if status in target_states: @@ -7886,9 +7885,9 @@ def wait_command_status_command_execution_finished_or_error( if callback: callback(poll) if status in failure_states: - msg = f'failed to reach Finished or Error, got {status}: {status_message}' + msg = f'failed to reach Running, got {status}: {status_message}' raise OperationFailed(msg) - prefix = f"cluster_id={cluster_id}, command_id={command_id}, context_id={context_id}" + prefix = f"cluster_id={cluster_id}, context_id={context_id}" sleep = attempt if sleep > 10: # sleep 10s max per attempt @@ -7898,19 +7897,20 @@ def wait_command_status_command_execution_finished_or_error( attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_context_status_command_execution_running( + def wait_command_status_command_execution_finished_or_error( self, cluster_id: str, + command_id: str, context_id: str, timeout=timedelta(minutes=20), - callback: Optional[Callable[[ContextStatusResponse], None]] = None) -> ContextStatusResponse: + callback: Optional[Callable[[CommandStatusResponse], None]] = None) -> CommandStatusResponse: deadline = time.time() + timeout.total_seconds() - target_states = (ContextStatus.RUNNING, ) - failure_states = (ContextStatus.ERROR, ) + target_states = (CommandStatus.FINISHED, CommandStatus.ERROR, ) + failure_states = (CommandStatus.CANCELLED, CommandStatus.CANCELLING, ) status_message = 'polling...' 
attempt = 1 while time.time() < deadline: - poll = self.context_status(cluster_id=cluster_id, context_id=context_id) + poll = self.command_status(cluster_id=cluster_id, command_id=command_id, context_id=context_id) status = poll.status status_message = f'current status: {status}' if status in target_states: @@ -7918,9 +7918,9 @@ def wait_context_status_command_execution_running( if callback: callback(poll) if status in failure_states: - msg = f'failed to reach Running, got {status}: {status_message}' + msg = f'failed to reach Finished or Error, got {status}: {status_message}' raise OperationFailed(msg) - prefix = f"cluster_id={cluster_id}, context_id={context_id}" + prefix = f"cluster_id={cluster_id}, command_id={command_id}, context_id={context_id}" sleep = attempt if sleep > 10: # sleep 10s max per attempt diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index 9c12f8788..f99201fde 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -2122,13 +2122,13 @@ class PipelinesAPI: def __init__(self, api_client): self._api = api_client - def wait_get_pipeline_idle( + def wait_get_pipeline_running( self, pipeline_id: str, timeout=timedelta(minutes=20), callback: Optional[Callable[[GetPipelineResponse], None]] = None) -> GetPipelineResponse: deadline = time.time() + timeout.total_seconds() - target_states = (PipelineState.IDLE, ) + target_states = (PipelineState.RUNNING, ) failure_states = (PipelineState.FAILED, ) status_message = 'polling...' attempt = 1 @@ -2141,7 +2141,7 @@ def wait_get_pipeline_idle( if callback: callback(poll) if status in failure_states: - msg = f'failed to reach IDLE, got {status}: {status_message}' + msg = f'failed to reach RUNNING, got {status}: {status_message}' raise OperationFailed(msg) prefix = f"pipeline_id={pipeline_id}" sleep = attempt @@ -2153,13 +2153,13 @@ def wait_get_pipeline_idle( attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_get_pipeline_running( + def wait_get_pipeline_idle( self, pipeline_id: str, timeout=timedelta(minutes=20), callback: Optional[Callable[[GetPipelineResponse], None]] = None) -> GetPipelineResponse: deadline = time.time() + timeout.total_seconds() - target_states = (PipelineState.RUNNING, ) + target_states = (PipelineState.IDLE, ) failure_states = (PipelineState.FAILED, ) status_message = 'polling...' attempt = 1 @@ -2172,7 +2172,7 @@ def wait_get_pipeline_running( if callback: callback(poll) if status in failure_states: - msg = f'failed to reach RUNNING, got {status}: {status_message}' + msg = f'failed to reach IDLE, got {status}: {status_message}' raise OperationFailed(msg) prefix = f"pipeline_id={pipeline_id}" sleep = attempt diff --git a/docs/account/billing/budgets.rst b/docs/account/billing/budgets.rst index edba0a733..02ebe5ced 100644 --- a/docs/account/billing/budgets.rst +++ b/docs/account/billing/budgets.rst @@ -4,11 +4,10 @@ .. py:class:: BudgetsAPI - These APIs manage budget configurations for this account. Budgets enable you to monitor usage across your - account. You can set up budgets to either track account-wide spending, or apply filters to track the - spending of specific teams, projects, or workspaces. + These APIs manage budget configuration including notifications for exceeding a budget for a period. They + can also retrieve the status of each budget. - .. py:method:: create(budget: CreateBudgetConfigurationBudget) -> CreateBudgetConfigurationResponse + .. 
py:method:: create(budget: Budget) -> WrappedBudgetWithStatus Usage: @@ -46,31 +45,29 @@ # cleanup a.budgets.delete(budget_id=created.budget.budget_configuration_id) - Create new budget. + Create a new budget. - Create a new budget configuration for an account. For full details, see - https://docs.databricks.com/en/admin/account-settings/budgets.html. + Creates a new budget in the specified account. - :param budget: :class:`CreateBudgetConfigurationBudget` - Properties of the new budget configuration. + :param budget: :class:`Budget` + Budget configuration to be created. - :returns: :class:`CreateBudgetConfigurationResponse` + :returns: :class:`WrappedBudgetWithStatus` .. py:method:: delete(budget_id: str) Delete budget. - Deletes a budget configuration for an account. Both account and budget configuration are specified by - ID. This cannot be undone. + Deletes the budget specified by its UUID. :param budget_id: str - The Databricks budget configuration ID. + Budget ID - .. py:method:: get(budget_id: str) -> GetBudgetConfigurationResponse + .. py:method:: get(budget_id: str) -> WrappedBudgetWithStatus Usage: @@ -110,17 +107,18 @@ # cleanup a.budgets.delete(budget_id=created.budget.budget_configuration_id) - Get budget. + Get budget and its status. - Gets a budget configuration for an account. Both account and budget configuration are specified by ID. + Gets the budget specified by its UUID, including noncumulative status for each day that the budget is + configured to include. :param budget_id: str - The Databricks budget configuration ID. + Budget ID - :returns: :class:`GetBudgetConfigurationResponse` + :returns: :class:`WrappedBudgetWithStatus` - .. py:method:: list( [, page_token: Optional[str]]) -> Iterator[BudgetConfiguration] + .. py:method:: list() -> Iterator[BudgetWithStatus] Usage: @@ -136,16 +134,13 @@ Get all budgets. - Gets all budgets associated with this account. + Gets all budgets associated with this account, including noncumulative status for each day that the + budget is configured to include. - :param page_token: str (optional) - A page token received from a previous get all budget configurations call. This token can be used to - retrieve the subsequent page. Requests first page if absent. - - :returns: Iterator over :class:`BudgetConfiguration` + :returns: Iterator over :class:`BudgetWithStatus` - .. py:method:: update(budget_id: str, budget: UpdateBudgetConfigurationBudget) -> UpdateBudgetConfigurationResponse + .. py:method:: update(budget_id: str, budget: Budget) Usage: @@ -206,13 +201,12 @@ Modify budget. - Updates a budget configuration for an account. Both account and budget configuration are specified by - ID. + Modifies a budget in this account. Budget properties are completely overwritten. :param budget_id: str - The Databricks budget configuration ID. - :param budget: :class:`UpdateBudgetConfigurationBudget` - The updated budget. This will overwrite the budget specified by the budget ID. + Budget ID + :param budget: :class:`Budget` + Budget configuration to be created. + - :returns: :class:`UpdateBudgetConfigurationResponse` \ No newline at end of file diff --git a/docs/account/billing/index.rst b/docs/account/billing/index.rst index 0e07da594..522f6f5fd 100644 --- a/docs/account/billing/index.rst +++ b/docs/account/billing/index.rst @@ -9,5 +9,4 @@ Configure different aspects of Databricks billing and usage. 
billable_usage budgets - log_delivery - usage_dashboards \ No newline at end of file + log_delivery \ No newline at end of file diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 6230b8199..1ce06996e 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -15,7 +15,7 @@ principal. :param workspace_id: int - The workspace ID for the account. + The workspace ID. :param principal_id: int The ID of the user, service principal, or group. @@ -61,7 +61,7 @@ :returns: Iterator over :class:`PermissionAssignment` - .. py:method:: update(workspace_id: int, principal_id: int [, permissions: Optional[List[WorkspacePermission]]]) -> PermissionAssignment + .. py:method:: update(workspace_id: int, principal_id: int, permissions: List[WorkspacePermission]) -> PermissionAssignment Usage: @@ -92,15 +92,13 @@ specified principal. :param workspace_id: int - The workspace ID for the account. + The workspace ID. :param principal_id: int The ID of the user, service principal, or group. - :param permissions: List[:class:`WorkspacePermission`] (optional) - Array of permissions assignments to update on the workspace. Valid values are "USER" and "ADMIN" - (case-sensitive). If both "USER" and "ADMIN" are provided, "ADMIN" takes precedence. Other values - will be ignored. Note that excluding this field, or providing unsupported values, will have the same - effect as providing an empty list, which will result in the deletion of all permissions for the - principal. + :param permissions: List[:class:`WorkspacePermission`] + Array of permissions assignments to update on the workspace. Note that excluding this field will + have the same effect as providing an empty list which will result in the deletion of all permissions + for the principal. :returns: :class:`PermissionAssignment` \ No newline at end of file diff --git a/docs/account/oauth2/custom_app_integration.rst b/docs/account/oauth2/custom_app_integration.rst index 0dcc3d8e0..382ce0bd0 100644 --- a/docs/account/oauth2/custom_app_integration.rst +++ b/docs/account/oauth2/custom_app_integration.rst @@ -4,23 +4,23 @@ .. py:class:: CustomAppIntegrationAPI - These APIs enable administrators to manage custom OAuth app integrations, which is required for + These APIs enable administrators to manage custom oauth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. - .. py:method:: create( [, confidential: Optional[bool], name: Optional[str], redirect_urls: Optional[List[str]], scopes: Optional[List[str]], token_access_policy: Optional[TokenAccessPolicy]]) -> CreateCustomAppIntegrationOutput + .. py:method:: create(name: str, redirect_urls: List[str] [, confidential: Optional[bool], scopes: Optional[List[str]], token_access_policy: Optional[TokenAccessPolicy]]) -> CreateCustomAppIntegrationOutput Create Custom OAuth App Integration. Create Custom OAuth App Integration. - You can retrieve the custom OAuth app integration via :method:CustomAppIntegration/get. + You can retrieve the custom oauth app integration via :method:CustomAppIntegration/get. + :param name: str + name of the custom oauth app + :param redirect_urls: List[str] + List of oauth redirect urls :param confidential: bool (optional) - This field indicates whether an OAuth client secret is required to authenticate this client. 
- :param name: str (optional) - Name of the custom OAuth app - :param redirect_urls: List[str] (optional) - List of OAuth redirect urls + indicates if an oauth client-secret should be generated :param scopes: List[str] (optional) OAuth scopes granted to the application. Supported scopes: all-apis, sql, offline_access, openid, profile, email. @@ -34,10 +34,11 @@ Delete Custom OAuth App Integration. - Delete an existing Custom OAuth App Integration. You can retrieve the custom OAuth app integration via + Delete an existing Custom OAuth App Integration. You can retrieve the custom oauth app integration via :method:CustomAppIntegration/get. :param integration_id: str + The oauth app integration ID. @@ -49,19 +50,16 @@ Gets the Custom OAuth App Integration for the given integration id. :param integration_id: str + The oauth app integration ID. :returns: :class:`GetCustomAppIntegrationOutput` - .. py:method:: list( [, include_creator_username: Optional[bool], page_size: Optional[int], page_token: Optional[str]]) -> Iterator[GetCustomAppIntegrationOutput] + .. py:method:: list() -> Iterator[GetCustomAppIntegrationOutput] Get custom oauth app integrations. - Get the list of custom OAuth app integrations for the specified Databricks account - - :param include_creator_username: bool (optional) - :param page_size: int (optional) - :param page_token: str (optional) + Get the list of custom oauth app integrations for the specified Databricks account :returns: Iterator over :class:`GetCustomAppIntegrationOutput` @@ -70,14 +68,15 @@ Updates Custom OAuth App Integration. - Updates an existing custom OAuth App Integration. You can retrieve the custom OAuth app integration + Updates an existing custom OAuth App Integration. You can retrieve the custom oauth app integration via :method:CustomAppIntegration/get. :param integration_id: str + The oauth app integration ID. :param redirect_urls: List[str] (optional) - List of OAuth redirect urls to be updated in the custom OAuth app integration + List of oauth redirect urls to be updated in the custom oauth app integration :param token_access_policy: :class:`TokenAccessPolicy` (optional) - Token access policy to be updated in the custom OAuth app integration + Token access policy to be updated in the custom oauth app integration \ No newline at end of file diff --git a/docs/account/oauth2/o_auth_published_apps.rst b/docs/account/oauth2/o_auth_published_apps.rst index 18c07c326..69aecb8ad 100644 --- a/docs/account/oauth2/o_auth_published_apps.rst +++ b/docs/account/oauth2/o_auth_published_apps.rst @@ -15,7 +15,7 @@ Get all the available published OAuth apps in Databricks. :param page_size: int (optional) - The max number of OAuth published apps to return in one page. + The max number of OAuth published apps to return. :param page_token: str (optional) A token that can be used to get the next page of results. diff --git a/docs/account/oauth2/published_app_integration.rst b/docs/account/oauth2/published_app_integration.rst index f59f2c4aa..0488415cd 100644 --- a/docs/account/oauth2/published_app_integration.rst +++ b/docs/account/oauth2/published_app_integration.rst @@ -4,7 +4,7 @@ .. py:class:: PublishedAppIntegrationAPI - These APIs enable administrators to manage published OAuth app integrations, which is required for + These APIs enable administrators to manage published oauth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud. .. 
py:method:: create( [, app_id: Optional[str], token_access_policy: Optional[TokenAccessPolicy]]) -> CreatePublishedAppIntegrationOutput @@ -13,10 +13,10 @@ Create Published OAuth App Integration. - You can retrieve the published OAuth app integration via :method:PublishedAppIntegration/get. + You can retrieve the published oauth app integration via :method:PublishedAppIntegration/get. :param app_id: str (optional) - App id of the OAuth published app integration. For example power-bi, tableau-deskop + app_id of the oauth published app integration. For example power-bi, tableau-deskop :param token_access_policy: :class:`TokenAccessPolicy` (optional) Token access policy @@ -27,10 +27,11 @@ Delete Published OAuth App Integration. - Delete an existing Published OAuth App Integration. You can retrieve the published OAuth app + Delete an existing Published OAuth App Integration. You can retrieve the published oauth app integration via :method:PublishedAppIntegration/get. :param integration_id: str + The oauth app integration ID. @@ -42,18 +43,16 @@ Gets the Published OAuth App Integration for the given integration id. :param integration_id: str + The oauth app integration ID. :returns: :class:`GetPublishedAppIntegrationOutput` - .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[GetPublishedAppIntegrationOutput] + .. py:method:: list() -> Iterator[GetPublishedAppIntegrationOutput] Get published oauth app integrations. - Get the list of published OAuth app integrations for the specified Databricks account - - :param page_size: int (optional) - :param page_token: str (optional) + Get the list of published oauth app integrations for the specified Databricks account :returns: Iterator over :class:`GetPublishedAppIntegrationOutput` @@ -62,12 +61,13 @@ Updates Published OAuth App Integration. - Updates an existing published OAuth App Integration. You can retrieve the published OAuth app + Updates an existing published OAuth App Integration. You can retrieve the published oauth app integration via :method:PublishedAppIntegration/get. :param integration_id: str + The oauth app integration ID. :param token_access_policy: :class:`TokenAccessPolicy` (optional) - Token access policy to be updated in the published OAuth app integration + Token access policy to be updated in the published oauth app integration \ No newline at end of file diff --git a/docs/account/settings/index.rst b/docs/account/settings/index.rst index abf97c6a0..2c53b1afa 100644 --- a/docs/account/settings/index.rst +++ b/docs/account/settings/index.rst @@ -11,6 +11,5 @@ Manage security settings for Accounts and Workspaces network_connectivity settings csp_enablement_account - disable_legacy_features esm_enablement_account personal_compute \ No newline at end of file diff --git a/docs/account/settings/settings.rst b/docs/account/settings/settings.rst index 3df647279..9ef26a1ee 100644 --- a/docs/account/settings/settings.rst +++ b/docs/account/settings/settings.rst @@ -16,15 +16,6 @@ This settings can be disabled so that new workspaces do not have compliance security profile enabled by default. - .. py:property:: disable_legacy_features - :type: DisableLegacyFeaturesAPI - - Disable legacy features for new Databricks workspaces. - - For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2. Hive Metastore will not be - provisioned. 3. Disables the use of ‘No-isolation clusters’. 4. Disables Databricks Runtime versions - prior to 13.3LTS. - .. 
py:property:: esm_enablement_account :type: EsmEnablementAccountAPI diff --git a/docs/dbdataclasses/billing.rst b/docs/dbdataclasses/billing.rst index 25deb0a18..27abdd35a 100644 --- a/docs/dbdataclasses/billing.rst +++ b/docs/dbdataclasses/billing.rst @@ -4,84 +4,23 @@ Billing These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.billing`` module. .. py:currentmodule:: databricks.sdk.service.billing -.. autoclass:: ActionConfiguration +.. autoclass:: Budget :members: :undoc-members: -.. py:class:: ActionConfigurationType - - .. py:attribute:: EMAIL_NOTIFICATION - :value: "EMAIL_NOTIFICATION" - -.. autoclass:: AlertConfiguration - :members: - :undoc-members: - -.. py:class:: AlertConfigurationQuantityType - - .. py:attribute:: LIST_PRICE_DOLLARS_USD - :value: "LIST_PRICE_DOLLARS_USD" - -.. py:class:: AlertConfigurationTimePeriod - - .. py:attribute:: MONTH - :value: "MONTH" - -.. py:class:: AlertConfigurationTriggerType - - .. py:attribute:: CUMULATIVE_SPENDING_EXCEEDED - :value: "CUMULATIVE_SPENDING_EXCEEDED" - -.. autoclass:: BudgetConfiguration - :members: - :undoc-members: - -.. autoclass:: BudgetConfigurationFilter +.. autoclass:: BudgetAlert :members: :undoc-members: -.. autoclass:: BudgetConfigurationFilterClause +.. autoclass:: BudgetList :members: :undoc-members: -.. py:class:: BudgetConfigurationFilterOperator - - .. py:attribute:: IN - :value: "IN" - -.. autoclass:: BudgetConfigurationFilterTagClause +.. autoclass:: BudgetWithStatus :members: :undoc-members: -.. autoclass:: BudgetConfigurationFilterWorkspaceIdClause - :members: - :undoc-members: - -.. autoclass:: CreateBillingUsageDashboardRequest - :members: - :undoc-members: - -.. autoclass:: CreateBillingUsageDashboardResponse - :members: - :undoc-members: - -.. autoclass:: CreateBudgetConfigurationBudget - :members: - :undoc-members: - -.. autoclass:: CreateBudgetConfigurationBudgetActionConfigurations - :members: - :undoc-members: - -.. autoclass:: CreateBudgetConfigurationBudgetAlertConfigurations - :members: - :undoc-members: - -.. autoclass:: CreateBudgetConfigurationRequest - :members: - :undoc-members: - -.. autoclass:: CreateBudgetConfigurationResponse +.. autoclass:: BudgetWithStatusStatusDailyItem :members: :undoc-members: @@ -89,7 +28,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: DeleteBudgetConfigurationResponse +.. autoclass:: DeleteResponse :members: :undoc-members: @@ -116,18 +55,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: GetBillingUsageDashboardResponse - :members: - :undoc-members: - -.. autoclass:: GetBudgetConfigurationResponse - :members: - :undoc-members: - -.. autoclass:: ListBudgetConfigurationsResponse - :members: - :undoc-members: - .. py:class:: LogDeliveryConfigStatus Status of log delivery configuration. Set to `ENABLED` (enabled) or `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable the configuration](#operation/patch-log-delivery-config-status) later. Deletion of a configuration is not supported, so disable a log delivery configuration that is no longer needed. @@ -175,30 +102,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateBudgetConfigurationBudget +.. autoclass:: UpdateLogDeliveryConfigurationStatusRequest :members: :undoc-members: -.. 
autoclass:: UpdateBudgetConfigurationRequest +.. autoclass:: UpdateResponse :members: :undoc-members: -.. autoclass:: UpdateBudgetConfigurationResponse +.. autoclass:: WrappedBudget :members: :undoc-members: -.. autoclass:: UpdateLogDeliveryConfigurationStatusRequest +.. autoclass:: WrappedBudgetWithStatus :members: :undoc-members: -.. py:class:: UsageDashboardType - - .. py:attribute:: USAGE_DASHBOARD_TYPE_GLOBAL - :value: "USAGE_DASHBOARD_TYPE_GLOBAL" - - .. py:attribute:: USAGE_DASHBOARD_TYPE_WORKSPACE - :value: "USAGE_DASHBOARD_TYPE_WORKSPACE" - .. autoclass:: WrappedCreateLogDeliveryConfiguration :members: :undoc-members: diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index cb6399348..94dfa1ff5 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -65,10 +65,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: AwsCredentials - :members: - :undoc-members: - .. autoclass:: AwsIamRoleRequest :members: :undoc-members: @@ -89,10 +85,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: AzureUserDelegationSas - :members: - :undoc-members: - .. autoclass:: CancelRefreshResponse :members: :undoc-members: @@ -144,16 +136,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CATALOG_SYSTEM_DELTASHARING :value: "CATALOG_SYSTEM_DELTASHARING" -.. py:class:: CatalogIsolationMode - - Whether the current securable is accessible from all workspaces or a specific set of workspaces. - - .. py:attribute:: ISOLATED - :value: "ISOLATED" - - .. py:attribute:: OPEN - :value: "OPEN" - .. py:class:: CatalogType The type of the catalog. @@ -257,21 +239,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CONNECTION_BIGQUERY :value: "CONNECTION_BIGQUERY" - .. py:attribute:: CONNECTION_BUILTIN_HIVE_METASTORE - :value: "CONNECTION_BUILTIN_HIVE_METASTORE" - .. py:attribute:: CONNECTION_DATABRICKS :value: "CONNECTION_DATABRICKS" - .. py:attribute:: CONNECTION_EXTERNAL_HIVE_METASTORE - :value: "CONNECTION_EXTERNAL_HIVE_METASTORE" - - .. py:attribute:: CONNECTION_GLUE - :value: "CONNECTION_GLUE" - - .. py:attribute:: CONNECTION_HTTP_BEARER - :value: "CONNECTION_HTTP_BEARER" - .. py:attribute:: CONNECTION_MYSQL :value: "CONNECTION_MYSQL" @@ -303,15 +273,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DATABRICKS :value: "DATABRICKS" - .. py:attribute:: GLUE - :value: "GLUE" - - .. py:attribute:: HIVE_METASTORE - :value: "HIVE_METASTORE" - - .. py:attribute:: HTTP - :value: "HTTP" - .. py:attribute:: MYSQL :value: "MYSQL" @@ -435,9 +396,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo The type of credential. - .. py:attribute:: BEARER_TOKEN - :value: "BEARER_TOKEN" - .. py:attribute:: USERNAME_PASSWORD :value: "USERNAME_PASSWORD" @@ -679,29 +637,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: PARAM :value: "PARAM" -.. autoclass:: GcpOauthToken - :members: - :undoc-members: - -.. autoclass:: GenerateTemporaryTableCredentialRequest - :members: - :undoc-members: - -.. autoclass:: GenerateTemporaryTableCredentialResponse - :members: - :undoc-members: - -.. py:class:: GetBindingsSecurableType - - .. py:attribute:: CATALOG - :value: "CATALOG" - - .. 
py:attribute:: EXTERNAL_LOCATION - :value: "EXTERNAL_LOCATION" - - .. py:attribute:: STORAGE_CREDENTIAL - :value: "STORAGE_CREDENTIAL" - .. autoclass:: GetMetastoreSummaryResponse :members: :undoc-members: @@ -716,10 +651,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: INTERNAL_AND_EXTERNAL :value: "INTERNAL_AND_EXTERNAL" -.. autoclass:: GetQuotaResponse - :members: - :undoc-members: - .. py:class:: IsolationMode Whether the current securable is accessible from all workspaces or a specific set of workspaces. @@ -762,10 +693,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListQuotasResponse - :members: - :undoc-members: - .. autoclass:: ListRegisteredModelsResponse :members: :undoc-members: @@ -1003,6 +930,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ONLINE_PIPELINE_FAILED :value: "ONLINE_PIPELINE_FAILED" + .. py:attribute:: ONLINE_TABLE_STATE_UNSPECIFIED + :value: "ONLINE_TABLE_STATE_UNSPECIFIED" + .. py:attribute:: ONLINE_TRIGGERED_UPDATE :value: "ONLINE_TRIGGERED_UPDATE" @@ -1112,9 +1042,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: EXECUTE :value: "EXECUTE" - .. py:attribute:: MANAGE - :value: "MANAGE" - .. py:attribute:: MANAGE_ALLOWLIST :value: "MANAGE_ALLOWLIST" @@ -1139,6 +1066,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SET_SHARE_PERMISSION :value: "SET_SHARE_PERMISSION" + .. py:attribute:: SINGLE_USER_ACCESS + :value: "SINGLE_USER_ACCESS" + .. py:attribute:: USAGE :value: "USAGE" @@ -1194,29 +1124,13 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: PROVISIONING :value: "PROVISIONING" - .. py:attribute:: UPDATING - :value: "UPDATING" + .. py:attribute:: STATE_UNSPECIFIED + :value: "STATE_UNSPECIFIED" .. autoclass:: ProvisioningStatus :members: :undoc-members: -.. autoclass:: QuotaInfo - :members: - :undoc-members: - -.. autoclass:: R2Credentials - :members: - :undoc-members: - -.. autoclass:: RegenerateDashboardRequest - :members: - :undoc-members: - -.. autoclass:: RegenerateDashboardResponse - :members: - :undoc-members: - .. autoclass:: RegisteredModelAlias :members: :undoc-members: @@ -1337,14 +1251,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: TableOperation - - .. py:attribute:: READ - :value: "READ" - - .. py:attribute:: READ_WRITE - :value: "READ_WRITE" - .. autoclass:: TableRowFilter :members: :undoc-members: @@ -1391,17 +1297,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: UpdateBindingsSecurableType - - .. py:attribute:: CATALOG - :value: "CATALOG" - - .. py:attribute:: EXTERNAL_LOCATION - :value: "EXTERNAL_LOCATION" - - .. py:attribute:: STORAGE_CREDENTIAL - :value: "STORAGE_CREDENTIAL" - .. autoclass:: UpdateCatalog :members: :undoc-members: diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index 0066f0374..64ab42682 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -103,10 +103,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ClusterCompliance - :members: - :undoc-members: - .. 
autoclass:: ClusterDetails :members: :undoc-members: @@ -183,10 +179,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ClusterSettingsChange - :members: - :undoc-members: - .. autoclass:: ClusterSize :members: :undoc-members: @@ -451,14 +443,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: EnforceClusterComplianceRequest - :members: - :undoc-members: - -.. autoclass:: EnforceClusterComplianceResponse - :members: - :undoc-members: - .. autoclass:: Environment :members: :undoc-members: @@ -581,10 +565,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: GetClusterComplianceResponse - :members: - :undoc-members: - .. autoclass:: GetClusterPermissionLevelsResponse :members: :undoc-members: @@ -837,42 +817,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListClusterCompliancesResponse - :members: - :undoc-members: - -.. autoclass:: ListClustersFilterBy - :members: - :undoc-members: - .. autoclass:: ListClustersResponse :members: :undoc-members: -.. autoclass:: ListClustersSortBy - :members: - :undoc-members: - -.. py:class:: ListClustersSortByDirection - - The direction to sort by. - - .. py:attribute:: ASC - :value: "ASC" - - .. py:attribute:: DESC - :value: "DESC" - -.. py:class:: ListClustersSortByField - - The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest precedence: cluster state, pinned or unpinned, then cluster name. - - .. py:attribute:: CLUSTER_NAME - :value: "CLUSTER_NAME" - - .. py:attribute:: DEFAULT - :value: "DEFAULT" - .. autoclass:: ListGlobalInitScriptsResponse :members: :undoc-members: @@ -907,8 +855,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ListSortOrder - A generic ordering enum for list-based queries. - .. py:attribute:: ASC :value: "ASC" @@ -1022,9 +968,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: RuntimeEngine - Determines the cluster's runtime engine, either standard or Photon. - This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. - If left unspecified, the runtime engine defaults to standard unless the spark_version contains -photon-, in which case Photon will be used. + Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime engine is inferred from spark_version. .. py:attribute:: NULL :value: "NULL" @@ -1364,18 +1308,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateCluster - :members: - :undoc-members: - -.. autoclass:: UpdateClusterResource - :members: - :undoc-members: - -.. autoclass:: UpdateClusterResponse - :members: - :undoc-members: - .. autoclass:: UpdateResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index 91de6ccb2..a7d000ce9 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -24,11 +24,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: DashboardView - - .. 
py:attribute:: DASHBOARD_VIEW_BASIC - :value: "DASHBOARD_VIEW_BASIC" - .. autoclass:: DeleteScheduleResponse :members: :undoc-members: @@ -37,34 +32,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: GenieAttachment - :members: - :undoc-members: - -.. autoclass:: GenieConversation - :members: - :undoc-members: - -.. autoclass:: GenieCreateConversationMessageRequest - :members: - :undoc-members: - -.. autoclass:: GenieGetMessageQueryResultResponse - :members: - :undoc-members: - -.. autoclass:: GenieMessage - :members: - :undoc-members: - -.. autoclass:: GenieStartConversationMessageRequest - :members: - :undoc-members: - -.. autoclass:: GenieStartConversationResponse - :members: - :undoc-members: - .. py:class:: LifecycleState .. py:attribute:: ACTIVE @@ -73,10 +40,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: TRASHED :value: "TRASHED" -.. autoclass:: ListDashboardsResponse - :members: - :undoc-members: - .. autoclass:: ListSchedulesResponse :members: :undoc-members: @@ -85,160 +48,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: MessageError - :members: - :undoc-members: - -.. py:class:: MessageErrorType - - .. py:attribute:: BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION - :value: "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION" - - .. py:attribute:: CHAT_COMPLETION_CLIENT_EXCEPTION - :value: "CHAT_COMPLETION_CLIENT_EXCEPTION" - - .. py:attribute:: CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION - :value: "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION" - - .. py:attribute:: CHAT_COMPLETION_NETWORK_EXCEPTION - :value: "CHAT_COMPLETION_NETWORK_EXCEPTION" - - .. py:attribute:: CONTENT_FILTER_EXCEPTION - :value: "CONTENT_FILTER_EXCEPTION" - - .. py:attribute:: CONTEXT_EXCEEDED_EXCEPTION - :value: "CONTEXT_EXCEEDED_EXCEPTION" - - .. py:attribute:: COULD_NOT_GET_UC_SCHEMA_EXCEPTION - :value: "COULD_NOT_GET_UC_SCHEMA_EXCEPTION" - - .. py:attribute:: DEPLOYMENT_NOT_FOUND_EXCEPTION - :value: "DEPLOYMENT_NOT_FOUND_EXCEPTION" - - .. py:attribute:: FUNCTIONS_NOT_AVAILABLE_EXCEPTION - :value: "FUNCTIONS_NOT_AVAILABLE_EXCEPTION" - - .. py:attribute:: FUNCTION_ARGUMENTS_INVALID_EXCEPTION - :value: "FUNCTION_ARGUMENTS_INVALID_EXCEPTION" - - .. py:attribute:: FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION - :value: "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION" - - .. py:attribute:: FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION - :value: "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION" - - .. py:attribute:: GENERIC_CHAT_COMPLETION_EXCEPTION - :value: "GENERIC_CHAT_COMPLETION_EXCEPTION" - - .. py:attribute:: GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION - :value: "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION" - - .. py:attribute:: GENERIC_SQL_EXEC_API_CALL_EXCEPTION - :value: "GENERIC_SQL_EXEC_API_CALL_EXCEPTION" - - .. py:attribute:: ILLEGAL_PARAMETER_DEFINITION_EXCEPTION - :value: "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION" - - .. py:attribute:: INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION - :value: "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION" - - .. py:attribute:: INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION - :value: "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION" - - .. py:attribute:: INVALID_CHAT_COMPLETION_JSON_EXCEPTION - :value: "INVALID_CHAT_COMPLETION_JSON_EXCEPTION" - - .. py:attribute:: INVALID_COMPLETION_REQUEST_EXCEPTION - :value: "INVALID_COMPLETION_REQUEST_EXCEPTION" - - .. 
py:attribute:: INVALID_FUNCTION_CALL_EXCEPTION - :value: "INVALID_FUNCTION_CALL_EXCEPTION" - - .. py:attribute:: INVALID_TABLE_IDENTIFIER_EXCEPTION - :value: "INVALID_TABLE_IDENTIFIER_EXCEPTION" - - .. py:attribute:: LOCAL_CONTEXT_EXCEEDED_EXCEPTION - :value: "LOCAL_CONTEXT_EXCEEDED_EXCEPTION" - - .. py:attribute:: MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION - :value: "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION" - - .. py:attribute:: MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION - :value: "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION" - - .. py:attribute:: NO_QUERY_TO_VISUALIZE_EXCEPTION - :value: "NO_QUERY_TO_VISUALIZE_EXCEPTION" - - .. py:attribute:: NO_TABLES_TO_QUERY_EXCEPTION - :value: "NO_TABLES_TO_QUERY_EXCEPTION" - - .. py:attribute:: RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION - :value: "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION" - - .. py:attribute:: RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION - :value: "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION" - - .. py:attribute:: REPLY_PROCESS_TIMEOUT_EXCEPTION - :value: "REPLY_PROCESS_TIMEOUT_EXCEPTION" - - .. py:attribute:: RETRYABLE_PROCESSING_EXCEPTION - :value: "RETRYABLE_PROCESSING_EXCEPTION" - - .. py:attribute:: SQL_EXECUTION_EXCEPTION - :value: "SQL_EXECUTION_EXCEPTION" - - .. py:attribute:: TABLES_MISSING_EXCEPTION - :value: "TABLES_MISSING_EXCEPTION" - - .. py:attribute:: TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION - :value: "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION" - - .. py:attribute:: TOO_MANY_TABLES_EXCEPTION - :value: "TOO_MANY_TABLES_EXCEPTION" - - .. py:attribute:: UNEXPECTED_REPLY_PROCESS_EXCEPTION - :value: "UNEXPECTED_REPLY_PROCESS_EXCEPTION" - - .. py:attribute:: UNKNOWN_AI_MODEL - :value: "UNKNOWN_AI_MODEL" - - .. py:attribute:: WAREHOUSE_ACCESS_MISSING_EXCEPTION - :value: "WAREHOUSE_ACCESS_MISSING_EXCEPTION" - - .. py:attribute:: WAREHOUSE_NOT_FOUND_EXCEPTION - :value: "WAREHOUSE_NOT_FOUND_EXCEPTION" - -.. py:class:: MessageStatus - - MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching metadata from the data sources. * `FILTERING_CONTEXT`: Running smart context step to determine relevant context. * `ASKING_AI`: Waiting for the LLM to respond to the users question. * `EXECUTING_QUERY`: Executing AI provided SQL query. Get the SQL query result by calling [getMessageQueryResult](:method:genie/getMessageQueryResult) API. **Important: The message status will stay in the `EXECUTING_QUERY` until a client calls [getMessageQueryResult](:method:genie/getMessageQueryResult)**. * `FAILED`: Generating a response or the executing the query failed. Please see `error` field. * `COMPLETED`: Message processing is completed. Results are in the `attachments` field. Get the SQL query result by calling [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL result is not available anymore. The user needs to execute the query again. * `CANCELLED`: Message has been cancelled. - - .. py:attribute:: ASKING_AI - :value: "ASKING_AI" - - .. py:attribute:: CANCELLED - :value: "CANCELLED" - - .. py:attribute:: COMPLETED - :value: "COMPLETED" - - .. py:attribute:: EXECUTING_QUERY - :value: "EXECUTING_QUERY" - - .. py:attribute:: FAILED - :value: "FAILED" - - .. py:attribute:: FETCHING_METADATA - :value: "FETCHING_METADATA" - - .. py:attribute:: FILTERING_CONTEXT - :value: "FILTERING_CONTEXT" - - .. py:attribute:: QUERY_RESULT_EXPIRED - :value: "QUERY_RESULT_EXPIRED" - - .. py:attribute:: SUBMITTED - :value: "SUBMITTED" - .. 
autoclass:: MigrateDashboardRequest :members: :undoc-members: @@ -251,14 +60,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: QueryAttachment - :members: - :undoc-members: - -.. autoclass:: Result - :members: - :undoc-members: - .. autoclass:: Schedule :members: :undoc-members: @@ -287,10 +88,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: TextAttachment - :members: - :undoc-members: - .. autoclass:: TrashDashboardResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/iam.rst b/docs/dbdataclasses/iam.rst index 643da3d47..9cafb78df 100644 --- a/docs/dbdataclasses/iam.rst +++ b/docs/dbdataclasses/iam.rst @@ -20,7 +20,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: DeleteWorkspacePermissionAssignmentResponse +.. autoclass:: DeleteWorkspaceAssignments :members: :undoc-members: @@ -82,14 +82,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: MigratePermissionsRequest - :members: - :undoc-members: - -.. autoclass:: MigratePermissionsResponse - :members: - :undoc-members: - .. autoclass:: Name :members: :undoc-members: @@ -199,9 +191,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CAN_MANAGE_STAGING_VERSIONS :value: "CAN_MANAGE_STAGING_VERSIONS" - .. py:attribute:: CAN_MONITOR - :value: "CAN_MONITOR" - .. py:attribute:: CAN_QUERY :value: "CAN_QUERY" @@ -226,6 +215,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: IS_OWNER :value: "IS_OWNER" +.. autoclass:: PermissionMigrationRequest + :members: + :undoc-members: + +.. autoclass:: PermissionMigrationResponse + :members: + :undoc-members: + .. autoclass:: PermissionOutput :members: :undoc-members: diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index 3aa0db043..414432d76 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -111,18 +111,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: EnforcePolicyComplianceForJobResponseJobClusterSettingsChange - :members: - :undoc-members: - -.. autoclass:: EnforcePolicyComplianceRequest - :members: - :undoc-members: - -.. autoclass:: EnforcePolicyComplianceResponse - :members: - :undoc-members: - .. autoclass:: ExportRunOutput :members: :undoc-members: @@ -159,10 +147,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: GetPolicyComplianceResponse - :members: - :undoc-members: - .. py:class:: GitProvider .. py:attribute:: AWS_CODE_COMMIT @@ -213,10 +197,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: JobCompliance - :members: - :undoc-members: - .. autoclass:: JobDeployment :members: :undoc-members: @@ -317,23 +297,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: JobsHealthMetric Specifies the health metric that is being evaluated for a particular health rule. - * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across all streams. 
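With the streaming-backlog metrics removed from this page, `RUN_DURATION_SECONDS` is the only health metric left documented. A minimal sketch of attaching it to a new job follows; it assumes the health-rule dataclasses generated in `databricks.sdk.service.jobs`, and the job name, notebook path, and cluster id are illustrative placeholders:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()

    # Alert when any run of this job exceeds 10 minutes.
    health = jobs.JobsHealthRules(rules=[
        jobs.JobsHealthRule(
            metric=jobs.JobsHealthMetric.RUN_DURATION_SECONDS,
            op=jobs.JobsHealthOperator.GREATER_THAN,
            value=600,
        )
    ])

    job = w.jobs.create(
        name='sdk-health-demo',  # illustrative name
        health=health,
        tasks=[
            jobs.Task(
                task_key='main',
                notebook_task=jobs.NotebookTask(notebook_path='/Users/me@example.com/demo'),  # placeholder path
                existing_cluster_id='0000-000000-abcdefgh',  # placeholder cluster id
            )
        ],
    )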
This metric is in Private Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all streams. This metric is in Private Preview. * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This metric is in Private Preview. * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all streams. This metric is in Private Preview. .. py:attribute:: RUN_DURATION_SECONDS :value: "RUN_DURATION_SECONDS" - .. py:attribute:: STREAMING_BACKLOG_BYTES - :value: "STREAMING_BACKLOG_BYTES" - - .. py:attribute:: STREAMING_BACKLOG_FILES - :value: "STREAMING_BACKLOG_FILES" - - .. py:attribute:: STREAMING_BACKLOG_RECORDS - :value: "STREAMING_BACKLOG_RECORDS" - - .. py:attribute:: STREAMING_BACKLOG_SECONDS - :value: "STREAMING_BACKLOG_SECONDS" - .. py:class:: JobsHealthOperator Specifies the operator used to compare the health metric value with the specified threshold. @@ -349,10 +316,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListJobComplianceForPolicyResponse - :members: - :undoc-members: - .. autoclass:: ListJobsResponse :members: :undoc-members: @@ -389,6 +352,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: HOURS :value: "HOURS" + .. py:attribute:: TIME_UNIT_UNSPECIFIED + :value: "TIME_UNIT_UNSPECIFIED" + .. py:attribute:: WEEKS :value: "WEEKS" @@ -404,23 +370,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: QueueDetails - :members: - :undoc-members: - -.. py:class:: QueueDetailsCodeCode - - The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run was queued due to reaching the workspace limit of active task runs. * `MAX_CONCURRENT_RUNS_REACHED`: The run was queued due to reaching the per-job limit of concurrent job runs. * `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run was queued due to reaching the workspace limit of active run job tasks. - - .. py:attribute:: ACTIVE_RUNS_LIMIT_REACHED - :value: "ACTIVE_RUNS_LIMIT_REACHED" - - .. py:attribute:: ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED - :value: "ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED" - - .. py:attribute:: MAX_CONCURRENT_RUNS_REACHED - :value: "MAX_CONCURRENT_RUNS_REACHED" - .. autoclass:: QueueSettings :members: :undoc-members: @@ -561,28 +510,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: WAITING_FOR_RETRY :value: "WAITING_FOR_RETRY" -.. py:class:: RunLifecycleStateV2State - - The current state of the run. - - .. py:attribute:: BLOCKED - :value: "BLOCKED" - - .. py:attribute:: PENDING - :value: "PENDING" - - .. py:attribute:: QUEUED - :value: "QUEUED" - - .. py:attribute:: RUNNING - :value: "RUNNING" - - .. py:attribute:: TERMINATED - :value: "TERMINATED" - - .. py:attribute:: TERMINATING - :value: "TERMINATING" - .. autoclass:: RunNow :members: :undoc-members: @@ -601,14 +528,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: RunResultState - A value indicating the run's result. The possible values are: * `SUCCESS`: The task completed successfully. * `FAILED`: The task completed with an error. * `TIMEDOUT`: The run was stopped after reaching the timeout. * `CANCELED`: The run was canceled at user request. * `MAXIMUM_CONCURRENT_RUNS_REACHED`: The run was skipped because the maximum concurrent runs were reached. 
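The `RunResultState` values being rewritten here are easiest to see at the call site. A hedged sketch of triggering a run and branching on its terminal state, with a placeholder job id:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()

    # run_now returns a waiter; result() blocks until a terminal state.
    run = w.jobs.run_now(job_id=123).result()  # placeholder job id

    result = run.state.result_state
    if result == jobs.RunResultState.SUCCESS:
        print('run succeeded')
    elif result in (jobs.RunResultState.UPSTREAM_FAILED, jobs.RunResultState.UPSTREAM_CANCELED):
        print('run skipped because of an upstream task')
    else:
        print(f'run ended as {result}: {run.state.state_message}')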
* `EXCLUDED`: The run was skipped because the necessary conditions were not met. * `SUCCESS_WITH_FAILURES`: The job run completed successfully with some failures; leaf tasks were successful. * `UPSTREAM_FAILED`: The run was skipped because of an upstream failure. * `UPSTREAM_CANCELED`: The run was skipped because an upstream task was canceled. * `DISABLED`: The run was skipped because it was disabled explicitly by the user. + A value indicating the run's result. The possible values are: * `SUCCESS`: The task completed successfully. * `FAILED`: The task completed with an error. * `TIMEDOUT`: The run was stopped after reaching the timeout. * `CANCELED`: The run was canceled at user request. * `MAXIMUM_CONCURRENT_RUNS_REACHED`: The run was skipped because the maximum concurrent runs were reached. * `EXCLUDED`: The run was skipped because the necessary conditions were not met. * `SUCCESS_WITH_FAILURES`: The job run completed successfully with some failures; leaf tasks were successful. * `UPSTREAM_FAILED`: The run was skipped because of an upstream failure. * `UPSTREAM_CANCELED`: The run was skipped because an upstream task was canceled. .. py:attribute:: CANCELED :value: "CANCELED" - .. py:attribute:: DISABLED - :value: "DISABLED" - .. py:attribute:: EXCLUDED :value: "EXCLUDED" @@ -637,10 +561,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: RunStatus - :members: - :undoc-members: - .. autoclass:: RunTask :members: :undoc-members: @@ -797,98 +717,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: TerminationCodeCode - - The code indicates why the run was terminated. Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. * `USER_CANCELED`: The run was successfully canceled during execution by a user. * `CANCELED`: The run was canceled during execution by the Databricks platform; for example, if the maximum run duration was exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of concurrent active runs. Consider scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize requests have exceeded the allotted rate limit. Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more details, refer to the state message or run output. 
* `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer to the state message for further details. The causes might include, but are not limited to: The provided library is invalid, there are insufficient permissions to install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has already reached the maximum number of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size limit. - [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now - - .. py:attribute:: CANCELED - :value: "CANCELED" - - .. py:attribute:: CLOUD_FAILURE - :value: "CLOUD_FAILURE" - - .. py:attribute:: CLUSTER_ERROR - :value: "CLUSTER_ERROR" - - .. py:attribute:: CLUSTER_REQUEST_LIMIT_EXCEEDED - :value: "CLUSTER_REQUEST_LIMIT_EXCEEDED" - - .. py:attribute:: DRIVER_ERROR - :value: "DRIVER_ERROR" - - .. py:attribute:: FEATURE_DISABLED - :value: "FEATURE_DISABLED" - - .. py:attribute:: INTERNAL_ERROR - :value: "INTERNAL_ERROR" - - .. py:attribute:: INVALID_CLUSTER_REQUEST - :value: "INVALID_CLUSTER_REQUEST" - - .. py:attribute:: INVALID_RUN_CONFIGURATION - :value: "INVALID_RUN_CONFIGURATION" - - .. py:attribute:: LIBRARY_INSTALLATION_ERROR - :value: "LIBRARY_INSTALLATION_ERROR" - - .. py:attribute:: MAX_CONCURRENT_RUNS_EXCEEDED - :value: "MAX_CONCURRENT_RUNS_EXCEEDED" - - .. py:attribute:: MAX_JOB_QUEUE_SIZE_EXCEEDED - :value: "MAX_JOB_QUEUE_SIZE_EXCEEDED" - - .. py:attribute:: MAX_SPARK_CONTEXTS_EXCEEDED - :value: "MAX_SPARK_CONTEXTS_EXCEEDED" - - .. py:attribute:: REPOSITORY_CHECKOUT_FAILED - :value: "REPOSITORY_CHECKOUT_FAILED" - - .. py:attribute:: RESOURCE_NOT_FOUND - :value: "RESOURCE_NOT_FOUND" - - .. py:attribute:: RUN_EXECUTION_ERROR - :value: "RUN_EXECUTION_ERROR" - - .. py:attribute:: SKIPPED - :value: "SKIPPED" - - .. py:attribute:: STORAGE_ACCESS_ERROR - :value: "STORAGE_ACCESS_ERROR" - - .. py:attribute:: SUCCESS - :value: "SUCCESS" - - .. py:attribute:: UNAUTHORIZED_ERROR - :value: "UNAUTHORIZED_ERROR" - - .. py:attribute:: USER_CANCELED - :value: "USER_CANCELED" - - .. py:attribute:: WORKSPACE_RUN_LIMIT_EXCEEDED - :value: "WORKSPACE_RUN_LIMIT_EXCEEDED" - -.. autoclass:: TerminationDetails - :members: - :undoc-members: - -.. py:class:: TerminationTypeType - - * `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An error occurred in the Databricks platform. Please look at the [status page] or contact support if the issue persists. * `CLIENT_ERROR`: The run was terminated because of an error caused by user input or the job configuration. * `CLOUD_FAILURE`: The run was terminated because of an issue with your cloud provider. - [status page]: https://status.databricks.com/ - - .. py:attribute:: CLIENT_ERROR - :value: "CLIENT_ERROR" - - .. 
py:attribute:: CLOUD_FAILURE - :value: "CLOUD_FAILURE" - - .. py:attribute:: INTERNAL_ERROR - :value: "INTERNAL_ERROR" - - .. py:attribute:: SUCCESS - :value: "SUCCESS" - .. autoclass:: TriggerInfo :members: :undoc-members: diff --git a/docs/dbdataclasses/marketplace.rst b/docs/dbdataclasses/marketplace.rst index bb48967db..229bcf3eb 100644 --- a/docs/dbdataclasses/marketplace.rst +++ b/docs/dbdataclasses/marketplace.rst @@ -29,6 +29,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ASSET_TYPE_NOTEBOOK :value: "ASSET_TYPE_NOTEBOOK" + .. py:attribute:: ASSET_TYPE_UNSPECIFIED + :value: "ASSET_TYPE_UNSPECIFIED" + .. autoclass:: BatchGetListingsResponse :members: :undoc-members: @@ -285,6 +288,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: FILE_STATUS_STAGING :value: "FILE_STATUS_STAGING" +.. py:class:: FilterType + + .. py:attribute:: METASTORE + :value: "METASTORE" + .. py:class:: FulfillmentType .. py:attribute:: INSTALL @@ -445,6 +453,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: LISTING_TAG_TYPE_TASK :value: "LISTING_TAG_TYPE_TASK" + .. py:attribute:: LISTING_TAG_TYPE_UNSPECIFIED + :value: "LISTING_TAG_TYPE_UNSPECIFIED" + .. py:class:: ListingType .. py:attribute:: PERSONALIZED @@ -515,6 +526,20 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: SortBy + + .. py:attribute:: SORT_BY_DATE + :value: "SORT_BY_DATE" + + .. py:attribute:: SORT_BY_RELEVANCE + :value: "SORT_BY_RELEVANCE" + + .. py:attribute:: SORT_BY_TITLE + :value: "SORT_BY_TITLE" + + .. py:attribute:: SORT_BY_UNSPECIFIED + :value: "SORT_BY_UNSPECIFIED" + .. autoclass:: TokenDetail :members: :undoc-members: @@ -586,3 +611,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: PUBLIC :value: "PUBLIC" + +.. autoclass:: VisibilityFilter + :members: + :undoc-members: diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index 9f419f160..385bf2021 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -97,10 +97,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: IngestionPipelineDefinition - :members: - :undoc-members: - .. autoclass:: ListPipelineEventsResponse :members: :undoc-members: @@ -113,6 +109,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ManagedIngestionPipelineDefinition + :members: + :undoc-members: + .. autoclass:: ManualTrigger :members: :undoc-members: @@ -251,24 +251,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: PipelineStateInfoHealth - - The health of a pipeline. - - .. py:attribute:: HEALTHY - :value: "HEALTHY" - - .. py:attribute:: UNHEALTHY - :value: "UNHEALTHY" - .. autoclass:: PipelineTrigger :members: :undoc-members: -.. autoclass:: ReportSpec - :members: - :undoc-members: - .. 
autoclass:: SchemaSpec :members: :undoc-members: diff --git a/docs/dbdataclasses/serving.rst b/docs/dbdataclasses/serving.rst index 3deefc873..a3d16a162 100644 --- a/docs/dbdataclasses/serving.rst +++ b/docs/dbdataclasses/serving.rst @@ -8,82 +8,105 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: AiGatewayConfig +.. autoclass:: AmazonBedrockConfig :members: :undoc-members: -.. autoclass:: AiGatewayGuardrailParameters - :members: - :undoc-members: +.. py:class:: AmazonBedrockConfigBedrockProvider -.. autoclass:: AiGatewayGuardrailPiiBehavior - :members: - :undoc-members: + The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. -.. py:class:: AiGatewayGuardrailPiiBehaviorBehavior + .. py:attribute:: AI21LABS + :value: "AI21LABS" - Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned. + .. py:attribute:: AMAZON + :value: "AMAZON" - .. py:attribute:: BLOCK - :value: "BLOCK" + .. py:attribute:: ANTHROPIC + :value: "ANTHROPIC" - .. py:attribute:: NONE - :value: "NONE" + .. py:attribute:: COHERE + :value: "COHERE" -.. autoclass:: AiGatewayGuardrails +.. autoclass:: AnthropicConfig :members: :undoc-members: -.. autoclass:: AiGatewayInferenceTableConfig +.. autoclass:: App :members: :undoc-members: -.. autoclass:: AiGatewayRateLimit +.. autoclass:: AppDeployment :members: :undoc-members: -.. py:class:: AiGatewayRateLimitKey +.. autoclass:: AppDeploymentArtifacts + :members: + :undoc-members: - Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. +.. py:class:: AppDeploymentMode - .. py:attribute:: ENDPOINT - :value: "ENDPOINT" + .. py:attribute:: AUTO_SYNC + :value: "AUTO_SYNC" - .. py:attribute:: USER - :value: "USER" + .. py:attribute:: MODE_UNSPECIFIED + :value: "MODE_UNSPECIFIED" -.. py:class:: AiGatewayRateLimitRenewalPeriod + .. py:attribute:: SNAPSHOT + :value: "SNAPSHOT" - Renewal period field for a rate limit. Currently, only 'minute' is supported. +.. py:class:: AppDeploymentState - .. py:attribute:: MINUTE - :value: "MINUTE" + .. py:attribute:: FAILED + :value: "FAILED" + + .. py:attribute:: IN_PROGRESS + :value: "IN_PROGRESS" -.. autoclass:: AiGatewayUsageTrackingConfig + .. py:attribute:: STATE_UNSPECIFIED + :value: "STATE_UNSPECIFIED" + + .. py:attribute:: STOPPED + :value: "STOPPED" + + .. py:attribute:: SUCCEEDED + :value: "SUCCEEDED" + +.. autoclass:: AppDeploymentStatus :members: :undoc-members: -.. autoclass:: AmazonBedrockConfig +.. autoclass:: AppEnvironment :members: :undoc-members: -.. py:class:: AmazonBedrockConfigBedrockProvider +.. py:class:: AppState - The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. + .. py:attribute:: CREATING + :value: "CREATING" - .. py:attribute:: AI21LABS - :value: "AI21LABS" + .. py:attribute:: DELETED + :value: "DELETED" - .. py:attribute:: AMAZON - :value: "AMAZON" + .. py:attribute:: DELETING + :value: "DELETING" - .. py:attribute:: ANTHROPIC - :value: "ANTHROPIC" + .. py:attribute:: ERROR + :value: "ERROR" - .. 
py:attribute:: COHERE - :value: "COHERE" + .. py:attribute:: IDLE + :value: "IDLE" -.. autoclass:: AnthropicConfig + .. py:attribute:: RUNNING + :value: "RUNNING" + + .. py:attribute:: STARTING + :value: "STARTING" + + .. py:attribute:: STATE_UNSPECIFIED + :value: "STATE_UNSPECIFIED" + +.. autoclass:: AppStatus :members: :undoc-members: @@ -124,6 +147,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CreateAppDeploymentRequest + :members: + :undoc-members: + +.. autoclass:: CreateAppRequest + :members: + :undoc-members: + .. autoclass:: CreateServingEndpoint :members: :undoc-members: @@ -181,9 +212,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NOT_UPDATING :value: "NOT_UPDATING" - .. py:attribute:: UPDATE_CANCELED - :value: "UPDATE_CANCELED" - .. py:attribute:: UPDATE_FAILED :value: "UPDATE_FAILED" @@ -201,6 +229,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: EnvVariable + :members: + :undoc-members: + .. autoclass:: ExportMetricsResponse :members: :undoc-members: @@ -211,7 +243,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ExternalModelProvider - The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.", + The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.", .. py:attribute:: AI21LABS :value: "AI21LABS" @@ -228,9 +260,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DATABRICKS_MODEL_SERVING :value: "DATABRICKS_MODEL_SERVING" - .. py:attribute:: GOOGLE_CLOUD_VERTEX_AI - :value: "GOOGLE_CLOUD_VERTEX_AI" - .. py:attribute:: OPENAI :value: "OPENAI" @@ -253,7 +282,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: GoogleCloudVertexAiConfig +.. autoclass:: ListAppDeploymentsResponse + :members: + :undoc-members: + +.. autoclass:: ListAppsResponse :members: :undoc-members: @@ -281,10 +314,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: PutAiGatewayResponse - :members: - :undoc-members: - .. autoclass:: PutResponse :members: :undoc-members: @@ -477,10 +506,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: StopAppRequest + :members: + :undoc-members: + +.. autoclass:: StopAppResponse + :members: + :undoc-members: + .. autoclass:: TrafficConfig :members: :undoc-members: +.. autoclass:: UpdateAppRequest + :members: + :undoc-members: + .. autoclass:: V1ResponseChoiceElement :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index 12043e3c5..542749997 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -8,10 +8,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: BooleanMessage - :members: - :undoc-members: - .. 
autoclass:: ClusterAutoRestartMessage :members: :undoc-members: @@ -26,6 +22,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ClusterAutoRestartMessageMaintenanceWindowDayOfWeek + .. py:attribute:: DAY_OF_WEEK_UNSPECIFIED + :value: "DAY_OF_WEEK_UNSPECIFIED" + .. py:attribute:: FRIDAY :value: "FRIDAY" @@ -74,6 +73,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: THIRD_OF_MONTH :value: "THIRD_OF_MONTH" + .. py:attribute:: WEEK_DAY_FREQUENCY_UNSPECIFIED + :value: "WEEK_DAY_FREQUENCY_UNSPECIFIED" + .. autoclass:: ClusterAutoRestartMessageMaintenanceWindowWindowStartTime :members: :undoc-members: @@ -90,11 +92,8 @@ These dataclasses are used in the SDK to represent API requests and responses fo Compliance stardard for SHIELD customers - .. py:attribute:: CANADA_PROTECTED_B - :value: "CANADA_PROTECTED_B" - - .. py:attribute:: CYBER_ESSENTIAL_PLUS - :value: "CYBER_ESSENTIAL_PLUS" + .. py:attribute:: COMPLIANCE_STANDARD_UNSPECIFIED + :value: "COMPLIANCE_STANDARD_UNSPECIFIED" .. py:attribute:: FEDRAMP_HIGH :value: "FEDRAMP_HIGH" @@ -120,10 +119,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: PCI_DSS :value: "PCI_DSS" -.. autoclass:: Config - :members: - :undoc-members: - .. autoclass:: CreateIpAccessList :members: :undoc-members: @@ -136,10 +131,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateNotificationDestinationRequest - :members: - :undoc-members: - .. autoclass:: CreateOboTokenRequest :members: :undoc-members: @@ -192,18 +183,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: DeleteDisableLegacyAccessResponse - :members: - :undoc-members: - -.. autoclass:: DeleteDisableLegacyDbfsResponse - :members: - :undoc-members: - -.. autoclass:: DeleteDisableLegacyFeaturesResponse - :members: - :undoc-members: - .. autoclass:: DeleteNetworkConnectivityConfigurationResponse :members: :undoc-members: @@ -220,43 +199,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: DestinationType - - .. py:attribute:: EMAIL - :value: "EMAIL" - - .. py:attribute:: MICROSOFT_TEAMS - :value: "MICROSOFT_TEAMS" - - .. py:attribute:: PAGERDUTY - :value: "PAGERDUTY" - - .. py:attribute:: SLACK - :value: "SLACK" - - .. py:attribute:: WEBHOOK - :value: "WEBHOOK" - -.. autoclass:: DisableLegacyAccess - :members: - :undoc-members: - -.. autoclass:: DisableLegacyDbfs - :members: - :undoc-members: - -.. autoclass:: DisableLegacyFeatures - :members: - :undoc-members: - -.. autoclass:: EmailConfig - :members: - :undoc-members: - -.. autoclass:: Empty - :members: - :undoc-members: - .. autoclass:: EnhancedSecurityMonitoring :members: :undoc-members: @@ -289,10 +231,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: GenericWebhookConfig - :members: - :undoc-members: - .. autoclass:: GetIpAccessListResponse :members: :undoc-members: @@ -325,14 +263,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListNotificationDestinationsResponse - :members: - :undoc-members: - -.. autoclass:: ListNotificationDestinationsResult - :members: - :undoc-members: - .. 
autoclass:: ListPublicTokensResponse :members: :undoc-members: @@ -352,10 +282,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: BLOCK :value: "BLOCK" -.. autoclass:: MicrosoftTeamsConfig - :members: - :undoc-members: - .. autoclass:: NccAwsStableIpRule :members: :undoc-members: @@ -420,14 +346,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: NotificationDestination - :members: - :undoc-members: - -.. autoclass:: PagerdutyConfig - :members: - :undoc-members: - .. autoclass:: PartitionId :members: :undoc-members: @@ -474,6 +392,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: RESTRICT_TOKENS_AND_JOB_RUN_AS :value: "RESTRICT_TOKENS_AND_JOB_RUN_AS" + .. py:attribute:: STATUS_UNSPECIFIED + :value: "STATUS_UNSPECIFIED" + .. autoclass:: RestrictWorkspaceAdminsSetting :members: :undoc-members: @@ -490,10 +411,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: SlackConfig - :members: - :undoc-members: - .. autoclass:: StringMessage :members: :undoc-members: @@ -537,9 +454,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo The type of token request. As of now, only `AZURE_ACTIVE_DIRECTORY_TOKEN` is supported. - .. py:attribute:: ARCLIGHT_AZURE_EXCHANGE_TOKEN - :value: "ARCLIGHT_AZURE_EXCHANGE_TOKEN" - .. py:attribute:: AZURE_ACTIVE_DIRECTORY_TOKEN :value: "AZURE_ACTIVE_DIRECTORY_TOKEN" @@ -559,18 +473,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateDisableLegacyAccessRequest - :members: - :undoc-members: - -.. autoclass:: UpdateDisableLegacyDbfsRequest - :members: - :undoc-members: - -.. autoclass:: UpdateDisableLegacyFeaturesRequest - :members: - :undoc-members: - .. autoclass:: UpdateEnhancedSecurityMonitoringSettingRequest :members: :undoc-members: @@ -583,10 +485,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateNotificationDestinationRequest - :members: - :undoc-members: - .. autoclass:: UpdatePersonalComputeSettingRequest :members: :undoc-members: diff --git a/docs/dbdataclasses/sharing.rst b/docs/dbdataclasses/sharing.rst index ded587fe5..ff48c9774 100644 --- a/docs/dbdataclasses/sharing.rst +++ b/docs/dbdataclasses/sharing.rst @@ -265,9 +265,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: EXECUTE :value: "EXECUTE" - .. py:attribute:: MANAGE - :value: "MANAGE" - .. py:attribute:: MANAGE_ALLOWLIST :value: "MANAGE_ALLOWLIST" @@ -292,6 +289,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SET_SHARE_PERMISSION :value: "SET_SHARE_PERMISSION" + .. py:attribute:: SINGLE_USER_ACCESS + :value: "SINGLE_USER_ACCESS" + .. py:attribute:: USAGE :value: "USAGE" diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 1657146c3..adf3ced56 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -12,49 +12,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: AlertCondition - :members: - :undoc-members: - -.. autoclass:: AlertConditionOperand - :members: - :undoc-members: - -.. 
autoclass:: AlertConditionThreshold - :members: - :undoc-members: - -.. autoclass:: AlertOperandColumn - :members: - :undoc-members: - -.. autoclass:: AlertOperandValue - :members: - :undoc-members: - -.. py:class:: AlertOperator - - .. py:attribute:: EQUAL - :value: "EQUAL" - - .. py:attribute:: GREATER_THAN - :value: "GREATER_THAN" - - .. py:attribute:: GREATER_THAN_OR_EQUAL - :value: "GREATER_THAN_OR_EQUAL" - - .. py:attribute:: IS_NULL - :value: "IS_NULL" - - .. py:attribute:: LESS_THAN - :value: "LESS_THAN" - - .. py:attribute:: LESS_THAN_OR_EQUAL - :value: "LESS_THAN_OR_EQUAL" - - .. py:attribute:: NOT_EQUAL - :value: "NOT_EQUAL" - .. autoclass:: AlertOptions :members: :undoc-members: @@ -78,6 +35,8 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: AlertState + State of the alert. Possible values are: `unknown` (yet to be evaluated), `triggered` (evaluated and fulfilled trigger conditions), or `ok` (evaluated and did not fulfill trigger conditions). + .. py:attribute:: OK :value: "OK" @@ -114,6 +73,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CHANNEL_NAME_PREVIEW :value: "CHANNEL_NAME_PREVIEW" + .. py:attribute:: CHANNEL_NAME_PREVIOUS + :value: "CHANNEL_NAME_PREVIOUS" + .. py:attribute:: CHANNEL_NAME_UNSPECIFIED :value: "CHANNEL_NAME_UNSPECIFIED" @@ -186,30 +148,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateAlertRequest - :members: - :undoc-members: - -.. autoclass:: CreateAlertRequestAlert - :members: - :undoc-members: - -.. autoclass:: CreateQueryRequest - :members: - :undoc-members: - -.. autoclass:: CreateQueryRequestQuery - :members: - :undoc-members: - -.. autoclass:: CreateVisualizationRequest - :members: - :undoc-members: - -.. autoclass:: CreateVisualizationRequestVisualization - :members: - :undoc-members: - .. autoclass:: CreateWarehouseRequest :members: :undoc-members: @@ -255,90 +193,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: DatePrecision - - .. py:attribute:: DAY_PRECISION - :value: "DAY_PRECISION" - - .. py:attribute:: MINUTE_PRECISION - :value: "MINUTE_PRECISION" - - .. py:attribute:: SECOND_PRECISION - :value: "SECOND_PRECISION" - -.. autoclass:: DateRange - :members: - :undoc-members: - -.. autoclass:: DateRangeValue - :members: - :undoc-members: - -.. py:class:: DateRangeValueDynamicDateRange - - .. py:attribute:: LAST_12_MONTHS - :value: "LAST_12_MONTHS" - - .. py:attribute:: LAST_14_DAYS - :value: "LAST_14_DAYS" - - .. py:attribute:: LAST_24_HOURS - :value: "LAST_24_HOURS" - - .. py:attribute:: LAST_30_DAYS - :value: "LAST_30_DAYS" - - .. py:attribute:: LAST_60_DAYS - :value: "LAST_60_DAYS" - - .. py:attribute:: LAST_7_DAYS - :value: "LAST_7_DAYS" - - .. py:attribute:: LAST_8_HOURS - :value: "LAST_8_HOURS" - - .. py:attribute:: LAST_90_DAYS - :value: "LAST_90_DAYS" - - .. py:attribute:: LAST_HOUR - :value: "LAST_HOUR" - - .. py:attribute:: LAST_MONTH - :value: "LAST_MONTH" - - .. py:attribute:: LAST_WEEK - :value: "LAST_WEEK" - - .. py:attribute:: LAST_YEAR - :value: "LAST_YEAR" - - .. py:attribute:: THIS_MONTH - :value: "THIS_MONTH" - - .. py:attribute:: THIS_WEEK - :value: "THIS_WEEK" - - .. py:attribute:: THIS_YEAR - :value: "THIS_YEAR" - - .. py:attribute:: TODAY - :value: "TODAY" - - .. py:attribute:: YESTERDAY - :value: "YESTERDAY" - -.. 
autoclass:: DateValue - :members: - :undoc-members: - -.. py:class:: DateValueDynamicDate - - .. py:attribute:: NOW - :value: "NOW" - - .. py:attribute:: YESTERDAY - :value: "YESTERDAY" - .. autoclass:: DeleteResponse :members: :undoc-members: @@ -349,6 +203,13 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: Disposition + The fetch disposition provides two modes of fetching results: `INLINE` and `EXTERNAL_LINKS`. + Statements executed with `INLINE` disposition will return result data inline, in `JSON_ARRAY` format, in a series of chunks. If a given statement produces a result set with a size larger than 25 MiB, that statement execution is aborted, and no result set will be available. + **NOTE** Byte limits are computed based upon internal representations of the result set data, and might not match the sizes visible in JSON responses. + Statements executed with `EXTERNAL_LINKS` disposition will return result data as external links: URLs that point to cloud storage internal to the workspace. Using `EXTERNAL_LINKS` disposition allows statements to generate arbitrarily sized result sets for fetching up to 100 GiB. The resulting links have two important properties: + 1. They point to resources _external_ to the Databricks compute; therefore any associated authentication information (typically a personal access token, OAuth token, or similar) _must be removed_ when fetching from these links. + 2. These are presigned URLs with a specific expiration, indicated in the response. The behavior when attempting to use an expired link is cloud specific. + .. py:attribute:: EXTERNAL_LINKS :value: "EXTERNAL_LINKS" @@ -380,10 +241,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: Empty - :members: - :undoc-members: - .. autoclass:: EndpointConfPair :members: :undoc-members: @@ -417,10 +274,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: EnumValue - :members: - :undoc-members: - .. autoclass:: ExecuteStatementRequest :members: :undoc-members: @@ -435,6 +288,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CONTINUE :value: "CONTINUE" +.. autoclass:: ExecuteStatementResponse + :members: + :undoc-members: + .. autoclass:: ExternalLink :members: :undoc-members: @@ -454,6 +311,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GetStatementResponse + :members: + :undoc-members: + .. autoclass:: GetWarehousePermissionLevelsResponse :members: :undoc-members: @@ -492,47 +353,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: PASSTHROUGH :value: "PASSTHROUGH" -.. autoclass:: LegacyAlert - :members: - :undoc-members: - -.. py:class:: LegacyAlertState - - State of the alert. Possible values are: `unknown` (yet to be evaluated), `triggered` (evaluated and fulfilled trigger conditions), or `ok` (evaluated and did not fulfill trigger conditions). - - .. py:attribute:: OK - :value: "OK" - - .. py:attribute:: TRIGGERED - :value: "TRIGGERED" - - .. py:attribute:: UNKNOWN - :value: "UNKNOWN" - -.. autoclass:: LegacyQuery - :members: - :undoc-members: - -.. autoclass:: LegacyVisualization - :members: - :undoc-members: - -.. py:class:: LifecycleState - - .. py:attribute:: ACTIVE - :value: "ACTIVE" - - .. 
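The `Disposition` docstring and `ExecuteStatementResponse` entries added above pair naturally with a fetch example. A sketch of requesting presigned links for a large result set; the warehouse id and sample table are placeholders:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import sql

    w = WorkspaceClient()

    # EXTERNAL_LINKS trades inline JSON_ARRAY chunks for presigned URLs,
    # lifting the 25 MiB inline cap described above (up to 100 GiB).
    resp = w.statement_execution.execute_statement(
        warehouse_id='abcdef0123456789',  # placeholder warehouse id
        statement='SELECT * FROM samples.nyctaxi.trips LIMIT 100000',
        disposition=sql.Disposition.EXTERNAL_LINKS,
    )

    for link in resp.result.external_links or []:
        # Per the note above: never forward Databricks credentials to these URLs.
        print(link.external_link, link.expiration)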
py:attribute:: TRASHED - :value: "TRASHED" - -.. autoclass:: ListAlertsResponse - :members: - :undoc-members: - -.. autoclass:: ListAlertsResponseAlert - :members: - :undoc-members: - .. py:class:: ListOrder .. py:attribute:: CREATED_AT @@ -545,22 +365,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListQueryObjectsResponse - :members: - :undoc-members: - -.. autoclass:: ListQueryObjectsResponseQuery - :members: - :undoc-members: - .. autoclass:: ListResponse :members: :undoc-members: -.. autoclass:: ListVisualizationsForQueryResponse - :members: - :undoc-members: - .. autoclass:: ListWarehousesResponse :members: :undoc-members: @@ -569,10 +377,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: NumericValue - :members: - :undoc-members: - .. py:class:: ObjectType A singular noun object type. @@ -663,7 +467,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: PlansState - Possible Reasons for which we have not saved plans in the database + Whether plans exist for the execution, or the reason why they are missing .. py:attribute:: EMPTY :value: "EMPTY" @@ -687,10 +491,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: QueryBackedValue - :members: - :undoc-members: - .. autoclass:: QueryEditContent :members: :undoc-members: @@ -715,16 +515,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: QueryParameter - :members: - :undoc-members: - .. autoclass:: QueryPostContent :members: :undoc-members: .. py:class:: QueryStatementType + Type of statement for this query + .. py:attribute:: ALTER :value: "ALTER" @@ -793,17 +591,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: QueryStatus - Statuses which are also used by OperationStatus in runtime + Query status with one the following values: * `QUEUED`: Query has been received and queued. * `RUNNING`: Query has started. * `CANCELED`: Query has been cancelled by the user. * `FAILED`: Query has failed. * `FINISHED`: Query has completed. .. py:attribute:: CANCELED :value: "CANCELED" - .. py:attribute:: COMPILED - :value: "COMPILED" - - .. py:attribute:: COMPILING - :value: "COMPILING" - .. py:attribute:: FAILED :value: "FAILED" @@ -816,9 +608,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: RUNNING :value: "RUNNING" - .. py:attribute:: STARTED - :value: "STARTED" - .. autoclass:: RepeatedEndpointConfPairs :members: :undoc-members: @@ -839,14 +628,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: RunAsMode - - .. py:attribute:: OWNER - :value: "OWNER" - - .. py:attribute:: VIEWER - :value: "VIEWER" - .. py:class:: RunAsRole Sets the **Run as** role for the object. Must be set to one of `"viewer"` (signifying "run as viewer" behavior) or `"owner"` (signifying "run as owner" behavior) @@ -973,10 +754,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: StatementResponse - :members: - :undoc-members: - .. 
py:class:: StatementState Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running - `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution failed; reason for failure described in accomanying error message - `CANCELED`: user canceled; can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution successful, and statement closed; result no longer available for fetch @@ -1293,10 +1070,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SUCCESS :value: "SUCCESS" -.. autoclass:: TextValue - :members: - :undoc-members: - .. autoclass:: TimeRange :members: :undoc-members: @@ -1305,34 +1078,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateAlertRequest - :members: - :undoc-members: - -.. autoclass:: UpdateAlertRequestAlert - :members: - :undoc-members: - -.. autoclass:: UpdateQueryRequest - :members: - :undoc-members: - -.. autoclass:: UpdateQueryRequestQuery - :members: - :undoc-members: - .. autoclass:: UpdateResponse :members: :undoc-members: -.. autoclass:: UpdateVisualizationRequest - :members: - :undoc-members: - -.. autoclass:: UpdateVisualizationRequestVisualization - :members: - :undoc-members: - .. autoclass:: User :members: :undoc-members: @@ -1360,9 +1109,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CAN_MANAGE :value: "CAN_MANAGE" - .. py:attribute:: CAN_MONITOR - :value: "CAN_MONITOR" - .. py:attribute:: CAN_USE :value: "CAN_USE" diff --git a/docs/dbdataclasses/workspace.rst b/docs/dbdataclasses/workspace.rst index 9ff3eb66b..eaf70f9e0 100644 --- a/docs/dbdataclasses/workspace.rst +++ b/docs/dbdataclasses/workspace.rst @@ -23,7 +23,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateCredentialsRequest +.. autoclass:: CreateCredentials :members: :undoc-members: @@ -31,11 +31,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateRepoRequest - :members: - :undoc-members: - -.. autoclass:: CreateRepoResponse +.. autoclass:: CreateRepo :members: :undoc-members: @@ -63,14 +59,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: DeleteCredentialsResponse - :members: - :undoc-members: - -.. autoclass:: DeleteRepoResponse - :members: - :undoc-members: - .. autoclass:: DeleteResponse :members: :undoc-members: @@ -123,10 +111,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: GetRepoResponse - :members: - :undoc-members: - .. autoclass:: GetSecretResponse :members: :undoc-members: @@ -187,10 +171,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListCredentialsResponse - :members: - :undoc-members: - .. autoclass:: ListReposResponse :members: :undoc-members: @@ -326,19 +306,15 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateCredentialsRequest - :members: - :undoc-members: - -.. autoclass:: UpdateCredentialsResponse +.. autoclass:: UpdateCredentials :members: :undoc-members: -.. autoclass:: UpdateRepoRequest +.. 
autoclass:: UpdateRepo :members: :undoc-members: -.. autoclass:: UpdateRepoResponse +.. autoclass:: UpdateResponse :members: :undoc-members: diff --git a/docs/workspace/apps/apps.rst b/docs/workspace/apps/apps.rst index 774e75b8b..857778296 100644 --- a/docs/workspace/apps/apps.rst +++ b/docs/workspace/apps/apps.rst @@ -7,7 +7,7 @@ Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. - .. py:method:: create(name: str [, description: Optional[str], resources: Optional[List[AppResource]]]) -> Wait[App] + .. py:method:: create(name: str [, description: Optional[str]]) -> Wait[App] Create an app. @@ -18,18 +18,16 @@ must be unique within the workspace. :param description: str (optional) The description of the app. - :param resources: List[:class:`AppResource`] (optional) - Resources for the app. :returns: Long-running operation waiter for :class:`App`. - See :method:wait_get_app_active for more details. + See :method:wait_get_app_idle for more details. - .. py:method:: create_and_wait(name: str [, description: Optional[str], resources: Optional[List[AppResource]], timeout: datetime.timedelta = 0:20:00]) -> App + .. py:method:: create_and_wait(name: str [, description: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> App - .. py:method:: delete(name: str) -> App + .. py:method:: delete(name: str) Delete an app. @@ -38,10 +36,10 @@ :param name: str The name of the app. - :returns: :class:`App` + - .. py:method:: deploy(app_name: str [, deployment_id: Optional[str], mode: Optional[AppDeploymentMode], source_code_path: Optional[str]]) -> Wait[AppDeployment] + .. py:method:: deploy(app_name: str, source_code_path: str, mode: AppDeploymentMode) -> Wait[AppDeployment] Create an app deployment. @@ -49,23 +47,21 @@ :param app_name: str The name of the app. - :param deployment_id: str (optional) - The unique id of the deployment. - :param mode: :class:`AppDeploymentMode` (optional) - The mode of which the deployment will manage the source code. - :param source_code_path: str (optional) + :param source_code_path: str The workspace file system path of the source code used to create the app deployment. This is different from `deployment_artifacts.source_code_path`, which is the path used by the deployed app. The former refers to the original source code location of the app in the workspace during deployment creation, whereas the latter provides a system generated stable snapshotted source code path used by the deployment. + :param mode: :class:`AppDeploymentMode` + The mode of which the deployment will manage the source code. :returns: Long-running operation waiter for :class:`AppDeployment`. See :method:wait_get_deployment_app_succeeded for more details. - .. py:method:: deploy_and_wait(app_name: str [, deployment_id: Optional[str], mode: Optional[AppDeploymentMode], source_code_path: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> AppDeployment + .. py:method:: deploy_and_wait(app_name: str, source_code_path: str, mode: AppDeploymentMode, timeout: datetime.timedelta = 0:20:00) -> AppDeployment .. py:method:: get(name: str) -> App @@ -94,28 +90,16 @@ :returns: :class:`AppDeployment` - .. py:method:: get_permission_levels(app_name: str) -> GetAppPermissionLevelsResponse - - Get app permission levels. - - Gets the permission levels that a user can have on an object. - - :param app_name: str - The app for which to get or manage permissions. 
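The apps.rst hunks in this file make `source_code_path` and `mode` required on `deploy` and route the create waiter through `wait_get_app_idle`. A sketch of the updated flow; the app name and workspace path are illustrative, and the import path assumes the App dataclasses documented under serving above:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.serving import AppDeploymentMode

    w = WorkspaceClient()

    # Blocks until the app reaches IDLE (see wait_get_app_idle).
    app = w.apps.create_and_wait(name='my-demo-app', description='SDK example')

    # Deployments now require an explicit source path and mode.
    deployment = w.apps.deploy_and_wait(
        app_name=app.name,
        source_code_path='/Workspace/Users/me@example.com/my-demo-app',  # placeholder path
        mode=AppDeploymentMode.SNAPSHOT,
    )
    print(deployment.deployment_id)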
- - :returns: :class:`GetAppPermissionLevelsResponse` - - - .. py:method:: get_permissions(app_name: str) -> AppPermissions + .. py:method:: get_environment(name: str) -> AppEnvironment - Get app permissions. + Get app environment. - Gets the permissions of an app. Apps can inherit permissions from their root object. + Retrieves app environment. - :param app_name: str - The app for which to get or manage permissions. + :param name: str + The name of the app. - :returns: :class:`AppPermissions` + :returns: :class:`AppEnvironment` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[App] @@ -148,37 +132,7 @@ :returns: Iterator over :class:`AppDeployment` - .. py:method:: set_permissions(app_name: str [, access_control_list: Optional[List[AppAccessControlRequest]]]) -> AppPermissions - - Set app permissions. - - Sets permissions on an app. Apps can inherit permissions from their root object. - - :param app_name: str - The app for which to get or manage permissions. - :param access_control_list: List[:class:`AppAccessControlRequest`] (optional) - - :returns: :class:`AppPermissions` - - - .. py:method:: start(name: str) -> Wait[App] - - Start an app. - - Start the last active deployment of the app in the workspace. - - :param name: str - The name of the app. - - :returns: - Long-running operation waiter for :class:`App`. - See :method:wait_get_app_active for more details. - - - .. py:method:: start_and_wait(name: str, timeout: datetime.timedelta = 0:20:00) -> App - - - .. py:method:: stop(name: str) -> Wait[App] + .. py:method:: stop(name: str) Stop an app. @@ -187,15 +141,10 @@ :param name: str The name of the app. - :returns: - Long-running operation waiter for :class:`App`. - See :method:wait_get_app_stopped for more details. + - .. py:method:: stop_and_wait(name: str, timeout: datetime.timedelta = 0:20:00) -> App - - - .. py:method:: update(name: str [, description: Optional[str], resources: Optional[List[AppResource]]]) -> App + .. py:method:: update(name: str [, description: Optional[str]]) -> App Update an app. @@ -206,29 +155,11 @@ must be unique within the workspace. :param description: str (optional) The description of the app. - :param resources: List[:class:`AppResource`] (optional) - Resources for the app. :returns: :class:`App` - .. py:method:: update_permissions(app_name: str [, access_control_list: Optional[List[AppAccessControlRequest]]]) -> AppPermissions - - Update app permissions. - - Updates the permissions on an app. Apps can inherit permissions from their root object. - - :param app_name: str - The app for which to get or manage permissions. - :param access_control_list: List[:class:`AppAccessControlRequest`] (optional) - - :returns: :class:`AppPermissions` - - - .. py:method:: wait_get_app_active(name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[App], None]]) -> App - - - .. py:method:: wait_get_app_stopped(name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[App], None]]) -> App + .. py:method:: wait_get_app_idle(name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[App], None]]) -> App .. 
py:method:: wait_get_deployment_app_succeeded(app_name: str, deployment_id: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[AppDeployment], None]]) -> AppDeployment diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 200168ee6..95615e503 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -130,20 +130,20 @@ Whether to include catalogs in the response for which the principal can only access selective metadata for :param max_results: int (optional) - Maximum number of catalogs to return. - when set to 0, the page length is set to a server configured - value (recommended); - when set to a value greater than 0, the page length is the minimum of this - value and a server configured value; - when set to a value less than 0, an invalid parameter error - is returned; - If not set, all valid catalogs are returned (not recommended). - Note: The number of - returned catalogs might be less than the specified max_results size, even zero. The only definitive - indication that no further catalogs can be fetched is when the next_page_token is unset from the - response. + Maximum number of catalogs to return. - If not set, all valid catalogs are returned (not + recommended). - when set to a value greater than 0, the page length is the minimum of this value and + a server configured value; - when set to 0, the page length is set to a server configured value + (recommended); - when set to a value less than 0, an invalid parameter error is returned; - Note: + The number of returned catalogs might be less than the specified max_results size, even reaching + zero. Reaching zero does not necessarily signify reaching the end. The definitive indication that no + further catalogs can be fetched is when the next_page_token is unset from response. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. :returns: Iterator over :class:`CatalogInfo` - .. py:method:: update(name: str [, comment: Optional[str], enable_predictive_optimization: Optional[EnablePredictiveOptimization], isolation_mode: Optional[CatalogIsolationMode], new_name: Optional[str], owner: Optional[str], properties: Optional[Dict[str, str]]]) -> CatalogInfo + .. py:method:: update(name: str [, comment: Optional[str], enable_predictive_optimization: Optional[EnablePredictiveOptimization], isolation_mode: Optional[IsolationMode], new_name: Optional[str], owner: Optional[str], properties: Optional[Dict[str, str]]]) -> CatalogInfo Usage: @@ -174,7 +174,7 @@ User-provided free-form text description. :param enable_predictive_optimization: :class:`EnablePredictiveOptimization` (optional) Whether predictive optimization should be enabled for this object and objects under it. - :param isolation_mode: :class:`CatalogIsolationMode` (optional) + :param isolation_mode: :class:`IsolationMode` (optional) Whether the current securable is accessible from all workspaces or a specific set of workspaces. :param new_name: str (optional) New name for the catalog. diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 365007b09..34c0d6729 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -15,7 +15,7 @@ To create external locations, you must be a metastore admin or a user with the **CREATE_EXTERNAL_LOCATION** privilege. - .. 
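Since the catalogs.rst hunk above retypes `isolation_mode` as `IsolationMode`, the call site changes accordingly. A minimal sketch with a placeholder catalog name:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import catalog

    w = WorkspaceClient()

    updated = w.catalogs.update(
        name='demo_catalog',  # placeholder catalog
        comment='restricted to selected workspaces',
        isolation_mode=catalog.IsolationMode.ISOLATED,
    )
    print(updated.isolation_mode)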
py:method:: create(name: str, url: str, credential_name: str [, access_point: Optional[str], comment: Optional[str], encryption_details: Optional[EncryptionDetails], fallback: Optional[bool], read_only: Optional[bool], skip_validation: Optional[bool]]) -> ExternalLocationInfo + .. py:method:: create(name: str, url: str, credential_name: str [, access_point: Optional[str], comment: Optional[str], encryption_details: Optional[EncryptionDetails], read_only: Optional[bool], skip_validation: Optional[bool]]) -> ExternalLocationInfo Usage: @@ -63,10 +63,6 @@ User-provided free-form text description. :param encryption_details: :class:`EncryptionDetails` (optional) Encryption options that apply to clients connecting to cloud storage. - :param fallback: bool (optional) - Indicates whether fallback mode is enabled for this external location. When fallback mode is - enabled, the access to the location falls back to cluster credentials if UC credentials are not - sufficient. :param read_only: bool (optional) Indicates whether the external location is read-only. :param skip_validation: bool (optional) @@ -167,7 +163,7 @@ :returns: Iterator over :class:`ExternalLocationInfo` - .. py:method:: update(name: str [, access_point: Optional[str], comment: Optional[str], credential_name: Optional[str], encryption_details: Optional[EncryptionDetails], fallback: Optional[bool], force: Optional[bool], isolation_mode: Optional[IsolationMode], new_name: Optional[str], owner: Optional[str], read_only: Optional[bool], skip_validation: Optional[bool], url: Optional[str]]) -> ExternalLocationInfo + .. py:method:: update(name: str [, access_point: Optional[str], comment: Optional[str], credential_name: Optional[str], encryption_details: Optional[EncryptionDetails], force: Optional[bool], new_name: Optional[str], owner: Optional[str], read_only: Optional[bool], skip_validation: Optional[bool], url: Optional[str]]) -> ExternalLocationInfo Usage: @@ -214,14 +210,8 @@ Name of the storage credential used with this location. :param encryption_details: :class:`EncryptionDetails` (optional) Encryption options that apply to clients connecting to cloud storage. - :param fallback: bool (optional) - Indicates whether fallback mode is enabled for this external location. When fallback mode is - enabled, the access to the location falls back to cluster credentials if UC credentials are not - sufficient. :param force: bool (optional) Force update even if changing url invalidates dependent external tables or mounts. - :param isolation_mode: :class:`IsolationMode` (optional) - Whether the current securable is accessible from all workspaces or a specific set of workspaces. :param new_name: str (optional) New name for the external location. :param owner: str (optional) diff --git a/docs/workspace/catalog/functions.rst b/docs/workspace/catalog/functions.rst index 646488074..97398be87 100644 --- a/docs/workspace/catalog/functions.rst +++ b/docs/workspace/catalog/functions.rst @@ -14,8 +14,6 @@ Create a function. 
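For the external-locations hunks just above, the trimmed `create` signature (no `fallback` flag) looks like this in practice; the location name, bucket, and credential name are illustrative, and the storage credential must already exist:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    location = w.external_locations.create(
        name='demo_landing_zone',                    # illustrative
        url='s3://demo-bucket/landing',              # illustrative bucket
        credential_name='demo_storage_credential',   # existing storage credential
        comment='created via the Python SDK',
        read_only=False,
        skip_validation=False,
    )
    print(location.url)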
- **WARNING: This API is experimental and will change in future versions** - Creates a new function The user must have the following permissions in order for the function to be created: - diff --git a/docs/workspace/catalog/index.rst b/docs/workspace/catalog/index.rst index 1372ca5a1..935804016 100644 --- a/docs/workspace/catalog/index.rst +++ b/docs/workspace/catalog/index.rst @@ -18,12 +18,10 @@ Configure data governance with Unity Catalog for metastores, catalogs, schemas, online_tables quality_monitors registered_models - resource_quotas schemas storage_credentials system_schemas table_constraints tables - temporary_table_credentials volumes workspace_bindings \ No newline at end of file diff --git a/docs/workspace/catalog/metastores.rst b/docs/workspace/catalog/metastores.rst index 01a936e0b..6fb939894 100644 --- a/docs/workspace/catalog/metastores.rst +++ b/docs/workspace/catalog/metastores.rst @@ -52,8 +52,7 @@ :param metastore_id: str The unique ID of the metastore. :param default_catalog_name: str - The name of the default catalog in the metastore. This field is depracted. Please use "Default - Namespace API" to configure the default catalog for a Databricks workspace. + The name of the default catalog in the metastore. @@ -89,9 +88,8 @@ :param name: str The user-specified name of the metastore. :param region: str (optional) - Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). The field can be omitted in - the __workspace-level__ __API__ but not in the __account-level__ __API__. If this field is omitted, - the region of the workspace receiving the request will be used. + Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). If this field is omitted, the + region of the workspace receiving the request will be used. :param storage_root: str (optional) The storage root URL for metastore @@ -306,8 +304,7 @@ :param workspace_id: int A workspace ID. :param default_catalog_name: str (optional) - The name of the default catalog in the metastore. This field is depracted. Please use "Default - Namespace API" to configure the default catalog for a Databricks workspace. + The name of the default catalog for the metastore. :param metastore_id: str (optional) The unique ID of the metastore. diff --git a/docs/workspace/catalog/model_versions.rst b/docs/workspace/catalog/model_versions.rst index bae6f25f8..017a6aa15 100644 --- a/docs/workspace/catalog/model_versions.rst +++ b/docs/workspace/catalog/model_versions.rst @@ -30,7 +30,7 @@ - .. py:method:: get(full_name: str, version: int [, include_aliases: Optional[bool], include_browse: Optional[bool]]) -> ModelVersionInfo + .. py:method:: get(full_name: str, version: int [, include_browse: Optional[bool]]) -> RegisteredModelInfo Get a Model Version. @@ -44,16 +44,14 @@ The three-level (fully qualified) name of the model version :param version: int The integer version number of the model version - :param include_aliases: bool (optional) - Whether to include aliases associated with the model version in the response :param include_browse: bool (optional) Whether to include model versions in the response for which the principal can only access selective metadata for - :returns: :class:`ModelVersionInfo` + :returns: :class:`RegisteredModelInfo` - .. py:method:: get_by_alias(full_name: str, alias: str [, include_aliases: Optional[bool]]) -> ModelVersionInfo + .. py:method:: get_by_alias(full_name: str, alias: str) -> ModelVersionInfo Get Model Version By Alias. 
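The alias lookup above avoids hard-coding version numbers. A minimal sketch of calling it through the SDK, assuming a configured WorkspaceClient; the model name `main.default.my_model` and the `champion` alias are hypothetical:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Resolve whichever model version the "champion" alias currently points to.
    mv = w.model_versions.get_by_alias("main.default.my_model", "champion")
    print(mv.version)
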
@@ -67,8 +65,6 @@ The three-level (fully qualified) name of the registered model :param alias: str The name of the alias - :param include_aliases: bool (optional) - Whether to include aliases associated with the model version in the response :returns: :class:`ModelVersionInfo` diff --git a/docs/workspace/catalog/quality_monitors.rst b/docs/workspace/catalog/quality_monitors.rst index 93f05b69a..030094049 100644 --- a/docs/workspace/catalog/quality_monitors.rst +++ b/docs/workspace/catalog/quality_monitors.rst @@ -166,29 +166,6 @@ :returns: :class:`MonitorRefreshListResponse` - .. py:method:: regenerate_dashboard(table_name: str [, warehouse_id: Optional[str]]) -> RegenerateDashboardResponse - - Regenerate a monitoring dashboard. - - Regenerates the monitoring dashboard for the specified table. - - The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the - table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: - - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an - owner of the table - - The call must be made from the workspace where the monitor was created. The dashboard will be - regenerated in the assets directory that was specified when the monitor was created. - - :param table_name: str - Full name of the table. - :param warehouse_id: str (optional) - Optional argument to specify the warehouse for dashboard regeneration. If not specified, the first - running warehouse will be used. - - :returns: :class:`RegenerateDashboardResponse` - - .. py:method:: run_refresh(table_name: str) -> MonitorRefreshInfo Queue a metric refresh for a monitor. diff --git a/docs/workspace/catalog/registered_models.rst b/docs/workspace/catalog/registered_models.rst index b05a702b5..6a60c4f6d 100644 --- a/docs/workspace/catalog/registered_models.rst +++ b/docs/workspace/catalog/registered_models.rst @@ -91,7 +91,7 @@ - .. py:method:: get(full_name: str [, include_aliases: Optional[bool], include_browse: Optional[bool]]) -> RegisteredModelInfo + .. py:method:: get(full_name: str [, include_browse: Optional[bool]]) -> RegisteredModelInfo Get a Registered Model. @@ -103,8 +103,6 @@ :param full_name: str The three-level (fully qualified) name of the registered model - :param include_aliases: bool (optional) - Whether to include registered model aliases in the response :param include_browse: bool (optional) Whether to include registered models in the response for which the principal can only access selective metadata for diff --git a/docs/workspace/catalog/schemas.rst b/docs/workspace/catalog/schemas.rst index feaf7c7a0..1c9fcbbd0 100644 --- a/docs/workspace/catalog/schemas.rst +++ b/docs/workspace/catalog/schemas.rst @@ -49,7 +49,7 @@ :returns: :class:`SchemaInfo` - .. py:method:: delete(full_name: str [, force: Optional[bool]]) + .. py:method:: delete(full_name: str) Delete a schema. @@ -58,8 +58,6 @@ :param full_name: str Full name of the schema. - :param force: bool (optional) - Force deletion even if the schema is not empty. diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index 30b04654c..2c9c479ff 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -145,7 +145,7 @@ :returns: Iterator over :class:`StorageCredentialInfo` - .. 
py:method:: update(name: str [, aws_iam_role: Optional[AwsIamRoleRequest], azure_managed_identity: Optional[AzureManagedIdentityResponse], azure_service_principal: Optional[AzureServicePrincipal], cloudflare_api_token: Optional[CloudflareApiToken], comment: Optional[str], databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountRequest], force: Optional[bool], isolation_mode: Optional[IsolationMode], new_name: Optional[str], owner: Optional[str], read_only: Optional[bool], skip_validation: Optional[bool]]) -> StorageCredentialInfo + .. py:method:: update(name: str [, aws_iam_role: Optional[AwsIamRoleRequest], azure_managed_identity: Optional[AzureManagedIdentityResponse], azure_service_principal: Optional[AzureServicePrincipal], cloudflare_api_token: Optional[CloudflareApiToken], comment: Optional[str], databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountRequest], force: Optional[bool], new_name: Optional[str], owner: Optional[str], read_only: Optional[bool], skip_validation: Optional[bool]]) -> StorageCredentialInfo Usage: @@ -192,8 +192,6 @@ The Databricks managed GCP service account configuration. :param force: bool (optional) Force update even if there are dependent external locations or external tables. - :param isolation_mode: :class:`IsolationMode` (optional) - Whether the current securable is accessible from all workspaces or a specific set of workspaces. :param new_name: str (optional) New name for the storage credential. :param owner: str (optional) diff --git a/docs/workspace/catalog/system_schemas.rst b/docs/workspace/catalog/system_schemas.rst index 2028a3623..b9ab3b0f9 100644 --- a/docs/workspace/catalog/system_schemas.rst +++ b/docs/workspace/catalog/system_schemas.rst @@ -37,7 +37,7 @@ - .. py:method:: list(metastore_id: str [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[SystemSchemaInfo] + .. py:method:: list(metastore_id: str) -> Iterator[SystemSchemaInfo] List system schemas. @@ -46,13 +46,6 @@ :param metastore_id: str The ID for the metastore in which the system schema resides. - :param max_results: int (optional) - Maximum number of schemas to return. - When set to 0, the page length is set to a server configured - value (recommended); - When set to a value greater than 0, the page length is the minimum of this - value and a server configured value; - When set to a value less than 0, an invalid parameter error - is returned; - If not set, all the schemas are returned (not recommended). - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. :returns: Iterator over :class:`SystemSchemaInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index 4cb458b46..6249f0da1 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -45,7 +45,7 @@ :returns: :class:`TableExistsResponse` - .. py:method:: get(full_name: str [, include_browse: Optional[bool], include_delta_metadata: Optional[bool], include_manifest_capabilities: Optional[bool]]) -> TableInfo + .. py:method:: get(full_name: str [, include_browse: Optional[bool], include_delta_metadata: Optional[bool]]) -> TableInfo Usage: @@ -94,13 +94,11 @@ for :param include_delta_metadata: bool (optional) Whether delta metadata should be included in the response. - :param include_manifest_capabilities: bool (optional) - Whether to include a manifest containing capabilities the table has. :returns: :class:`TableInfo` - .. 
py:method:: list(catalog_name: str, schema_name: str [, include_browse: Optional[bool], include_delta_metadata: Optional[bool], include_manifest_capabilities: Optional[bool], max_results: Optional[int], omit_columns: Optional[bool], omit_properties: Optional[bool], page_token: Optional[str]]) -> Iterator[TableInfo] + .. py:method:: list(catalog_name: str, schema_name: str [, include_browse: Optional[bool], include_delta_metadata: Optional[bool], max_results: Optional[int], omit_columns: Optional[bool], omit_properties: Optional[bool], page_token: Optional[str]]) -> Iterator[TableInfo] Usage: @@ -140,8 +138,6 @@ for :param include_delta_metadata: bool (optional) Whether delta metadata should be included in the response. - :param include_manifest_capabilities: bool (optional) - Whether to include a manifest containing capabilities the table has. :param max_results: int (optional) Maximum number of tables to return. If not set, all the tables are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of this value and a server @@ -157,7 +153,7 @@ :returns: Iterator over :class:`TableInfo` - .. py:method:: list_summaries(catalog_name: str [, include_manifest_capabilities: Optional[bool], max_results: Optional[int], page_token: Optional[str], schema_name_pattern: Optional[str], table_name_pattern: Optional[str]]) -> Iterator[TableSummary] + .. py:method:: list_summaries(catalog_name: str [, max_results: Optional[int], page_token: Optional[str], schema_name_pattern: Optional[str], table_name_pattern: Optional[str]]) -> Iterator[TableSummary] Usage: @@ -196,8 +192,6 @@ :param catalog_name: str Name of parent catalog for tables of interest. - :param include_manifest_capabilities: bool (optional) - Whether to include a manifest containing capabilities the table has. :param max_results: int (optional) Maximum number of summaries for tables to return. If not set, the page length is set to a server configured value (10000, as of 1/5/2024). - when set to a value greater than 0, the page length is diff --git a/docs/workspace/catalog/workspace_bindings.rst b/docs/workspace/catalog/workspace_bindings.rst index 08a74b29e..e1ec753d4 100644 --- a/docs/workspace/catalog/workspace_bindings.rst +++ b/docs/workspace/catalog/workspace_bindings.rst @@ -17,7 +17,7 @@ the new path (/api/2.1/unity-catalog/bindings/{securable_type}/{securable_name}) which introduces the ability to bind a securable in READ_ONLY mode (catalogs only). - Securable types that support binding: - catalog - storage_credential - external_location + Securables that support binding: - catalog .. py:method:: get(name: str) -> CurrentWorkspaceBindings @@ -50,26 +50,19 @@ :returns: :class:`CurrentWorkspaceBindings` - .. py:method:: get_bindings(securable_type: GetBindingsSecurableType, securable_name: str [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[WorkspaceBinding] + .. py:method:: get_bindings(securable_type: str, securable_name: str) -> WorkspaceBindingsResponse Get securable workspace bindings. Gets workspace bindings of the securable. The caller must be a metastore admin or an owner of the securable. - :param securable_type: :class:`GetBindingsSecurableType` - The type of the securable to bind to a workspace. + :param securable_type: str + The type of the securable. :param securable_name: str The name of the securable. - :param max_results: int (optional) - Maximum number of workspace bindings to return. 
- When set to 0, the page length is set to a server - configured value (recommended); - When set to a value greater than 0, the page length is the minimum - of this value and a server configured value; - When set to a value less than 0, an invalid parameter - error is returned; - If not set, all the workspace bindings are returned (not recommended). - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. - :returns: Iterator over :class:`WorkspaceBinding` + :returns: :class:`WorkspaceBindingsResponse` .. py:method:: update(name: str [, assign_workspaces: Optional[List[int]], unassign_workspaces: Optional[List[int]]]) -> CurrentWorkspaceBindings @@ -110,15 +103,15 @@ :returns: :class:`CurrentWorkspaceBindings` - .. py:method:: update_bindings(securable_type: UpdateBindingsSecurableType, securable_name: str [, add: Optional[List[WorkspaceBinding]], remove: Optional[List[WorkspaceBinding]]]) -> WorkspaceBindingsResponse + .. py:method:: update_bindings(securable_type: str, securable_name: str [, add: Optional[List[WorkspaceBinding]], remove: Optional[List[WorkspaceBinding]]]) -> WorkspaceBindingsResponse Update securable workspace bindings. Updates workspace bindings of the securable. The caller must be a metastore admin or an owner of the securable. - :param securable_type: :class:`UpdateBindingsSecurableType` - The type of the securable to bind to a workspace. + :param securable_type: str + The type of the securable. :param securable_name: str The name of the securable. :param add: List[:class:`WorkspaceBinding`] (optional) diff --git a/docs/workspace/compute/cluster_policies.rst b/docs/workspace/compute/cluster_policies.rst index 1cefc8ca6..b6e67acff 100644 --- a/docs/workspace/compute/cluster_policies.rst +++ b/docs/workspace/compute/cluster_policies.rst @@ -22,7 +22,7 @@ If no policies exist in the workspace, the Policy drop-down doesn't appear. Only admin users can create, edit, and delete policies. Admin users also have access to all policies. - .. py:method:: create( [, definition: Optional[str], description: Optional[str], libraries: Optional[List[Library]], max_clusters_per_user: Optional[int], name: Optional[str], policy_family_definition_overrides: Optional[str], policy_family_id: Optional[str]]) -> CreatePolicyResponse + .. py:method:: create(name: str [, definition: Optional[str], description: Optional[str], libraries: Optional[List[Library]], max_clusters_per_user: Optional[int], policy_family_definition_overrides: Optional[str], policy_family_id: Optional[str]]) -> CreatePolicyResponse Usage: @@ -51,6 +51,9 @@ Creates a new policy with prescribed settings. + :param name: str + Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100 + characters. :param definition: str (optional) Policy definition document expressed in [Databricks Cluster Policy Definition Language]. @@ -63,9 +66,6 @@ :param max_clusters_per_user: int (optional) Max number of clusters per user that can be active using this policy. If not present, there is no max limit. - :param name: str (optional) - Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100 - characters. :param policy_family_definition_overrides: str (optional) Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON document must be passed as a string and cannot be embedded in the requests. @@ -96,7 +96,7 @@ - .. 
py:method:: edit(policy_id: str [, definition: Optional[str], description: Optional[str], libraries: Optional[List[Library]], max_clusters_per_user: Optional[int], name: Optional[str], policy_family_definition_overrides: Optional[str], policy_family_id: Optional[str]]) + .. py:method:: edit(policy_id: str, name: str [, definition: Optional[str], description: Optional[str], libraries: Optional[List[Library]], max_clusters_per_user: Optional[int], policy_family_definition_overrides: Optional[str], policy_family_id: Optional[str]]) Usage: @@ -140,6 +140,9 @@ :param policy_id: str The ID of the policy to update. + :param name: str + Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100 + characters. :param definition: str (optional) Policy definition document expressed in [Databricks Cluster Policy Definition Language]. @@ -152,9 +155,6 @@ :param max_clusters_per_user: int (optional) Max number of clusters per user that can be active using this policy. If not present, there is no max limit. - :param name: str (optional) - Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100 - characters. :param policy_family_definition_overrides: str (optional) Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON document must be passed as a string and cannot be embedded in the requests. @@ -205,7 +205,7 @@ Get a cluster policy entity. Creation and editing is available to admins only. :param policy_id: str - Canonical unique identifier for the Cluster Policy. + Canonical unique identifier for the cluster policy. :returns: :class:`Policy` diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index ac52edecb..58362d05e 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -21,8 +21,9 @@ restart an all-purpose cluster. Multiple users can share such clusters to do collaborative interactive analysis. - IMPORTANT: Databricks retains cluster configuration information for terminated clusters for 30 days. To - keep an all-purpose cluster configuration even after it has been terminated for more than 30 days, an + IMPORTANT: Databricks retains cluster configuration information for up to 200 all-purpose clusters + terminated in the last 30 days and up to 30 job clusters recently terminated by the job scheduler. To keep + an all-purpose cluster configuration even after it has been terminated for more than 30 days, an administrator can pin a cluster to the cluster list. .. py:method:: change_owner(cluster_id: str, owner_username: str) @@ -107,11 +108,6 @@ If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed. Otherwise the cluster will terminate with an informative error message. - Rather than authoring the cluster's JSON definition from scratch, Databricks recommends filling out - the [create compute UI] and then copying the generated JSON definition from the UI. - - [create compute UI]: https://docs.databricks.com/compute/configure.html - :param spark_version: str The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can be retrieved by using the :method:clusters/sparkVersions API call. @@ -207,13 +203,8 @@ :param policy_id: str (optional) The ID of the cluster policy used to create the cluster if applicable. :param runtime_engine: :class:`RuntimeEngine` (optional) - Determines the cluster's runtime engine, either standard or Photon. 
- - This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove - `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. - - If left unspecified, the runtime engine defaults to standard unless the spark_version contains - -photon-, in which case Photon will be used. + Decides which runtime engine to use, e.g. Standard vs. Photon. If unspecified, the runtime engine + is inferred from spark_version. :param single_user_name: str (optional) Single user name if data_security_mode is `SINGLE_USER` :param spark_conf: Dict[str,str] (optional) @@ -435,13 +426,8 @@ :param policy_id: str (optional) The ID of the cluster policy used to create the cluster if applicable. :param runtime_engine: :class:`RuntimeEngine` (optional) - Determines the cluster's runtime engine, either standard or Photon. - - This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove - `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. - - If left unspecified, the runtime engine defaults to standard unless the spark_version contains - -photon-, in which case Photon will be used. + Decides which runtime engine to use, e.g. Standard vs. Photon. If unspecified, the runtime engine + is inferred from spark_version. :param single_user_name: str (optional) Single user name if data_security_mode is `SINGLE_USER` :param spark_conf: Dict[str,str] (optional) @@ -618,7 +604,7 @@ :returns: :class:`ClusterPermissions` - .. py:method:: list( [, filter_by: Optional[ListClustersFilterBy], page_size: Optional[int], page_token: Optional[str], sort_by: Optional[ListClustersSortBy]]) -> Iterator[ClusterDetails] + .. py:method:: list( [, can_use_client: Optional[str]]) -> Iterator[ClusterDetails] Usage: @@ -632,21 +618,21 @@ all = w.clusters.list(compute.ListClustersRequest()) - List clusters. + List all clusters. + + Return information about all pinned clusters, active clusters, up to 200 of the most recently + terminated all-purpose clusters in the past 30 days, and up to 30 of the most recently terminated job + clusters in the past 30 days. - Return information about all pinned and active clusters, and all clusters terminated within the last - 30 days. Clusters terminated prior to this period are not included. + For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated all-purpose clusters in + the past 30 days, and 50 terminated job clusters in the past 30 days, then this API returns the 1 + pinned cluster, 4 active clusters, all 45 terminated all-purpose clusters, and the 30 most recently + terminated job clusters. - :param filter_by: :class:`ListClustersFilterBy` (optional) - Filters to apply to the list of clusters. - :param page_size: int (optional) - Use this field to specify the maximum number of results to be returned by the server. The server may - further constrain the maximum number of results returned in a single page. - :param page_token: str (optional) - Use next_page_token or prev_page_token returned from the previous request to list the next or - previous page of clusters respectively. - :param sort_by: :class:`ListClustersSortBy` (optional) - Sort the list of clusters by a specific criteria. + :param can_use_client: str (optional) + Filter clusters based on the type of client they can be used for. Could be either NOTEBOOKS or JOBS.
+ If no value is provided, all clusters in the workspace are returned without filtering by supported + client. :returns: Iterator over :class:`ClusterDetails` @@ -1014,37 +1000,6 @@ - .. py:method:: update(cluster_id: str, update_mask: str [, cluster: Optional[UpdateClusterResource]]) -> Wait[ClusterDetails] - - Update cluster configuration (partial). - - Updates the configuration of a cluster to match the partial set of attributes and size. Denote which - fields to update using the `update_mask` field in the request body. A cluster can be updated if it is - in a `RUNNING` or `TERMINATED` state. If a cluster is updated while in a `RUNNING` state, it will be - restarted so that the new attributes can take effect. If a cluster is updated while in a `TERMINATED` - state, it will remain `TERMINATED`. The updated attributes will take effect the next time the cluster - is started using the `clusters/start` API. Attempts to update a cluster in any other state will be - rejected with an `INVALID_STATE` error code. Clusters created by the Databricks Jobs service cannot be - updated. - - :param cluster_id: str - ID of the cluster. - :param update_mask: str - Specifies which fields of the cluster will be updated. This is required in the POST request. The - update mask should be supplied as a single string. To specify multiple fields, separate them with - commas (no spaces). To delete a field from a cluster configuration, add it to the `update_mask` - string but omit it from the `cluster` object. - :param cluster: :class:`UpdateClusterResource` (optional) - The cluster to be updated. - - :returns: - Long-running operation waiter for :class:`ClusterDetails`. - See :method:wait_get_cluster_running for more details. - - - .. py:method:: update_and_wait(cluster_id: str, update_mask: str [, cluster: Optional[UpdateClusterResource], timeout: datetime.timedelta = 0:20:00]) -> ClusterDetails - - .. py:method:: update_permissions(cluster_id: str [, access_control_list: Optional[List[ClusterAccessControlRequest]]]) -> ClusterPermissions Update cluster permissions. diff --git a/docs/workspace/compute/command_execution.rst b/docs/workspace/compute/command_execution.rst index 916a48ba5..a5b94b5a5 100644 --- a/docs/workspace/compute/command_execution.rst +++ b/docs/workspace/compute/command_execution.rst @@ -4,8 +4,7 @@ .. py:class:: CommandExecutionAPI - This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters. This API - only supports (classic) all-purpose clusters. Serverless compute is not supported. + This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters. .. py:method:: cancel( [, cluster_id: Optional[str], command_id: Optional[str], context_id: Optional[str]]) -> Wait[CommandStatusResponse] diff --git a/docs/workspace/compute/index.rst b/docs/workspace/compute/index.rst index 858cf70ff..b13a21610 100644 --- a/docs/workspace/compute/index.rst +++ b/docs/workspace/compute/index.rst @@ -14,5 +14,4 @@ Use and configure compute for Databricks instance_pools instance_profiles libraries - policy_compliance_for_clusters policy_families \ No newline at end of file diff --git a/docs/workspace/compute/policy_families.rst b/docs/workspace/compute/policy_families.rst index 56e4f4275..43194ef01 100644 --- a/docs/workspace/compute/policy_families.rst +++ b/docs/workspace/compute/policy_families.rst @@ -14,7 +14,7 @@ policy family. Cluster policies created using a policy family inherit the policy family's policy definition. - ..
py:method:: get(policy_family_id: str [, version: Optional[int]]) -> PolicyFamily + .. py:method:: get(policy_family_id: str) -> PolicyFamily Usage: @@ -32,12 +32,9 @@ Get policy family information. - Retrieve the information for an policy family based on its identifier and version + Retrieve the information for a policy family based on its identifier. :param policy_family_id: str - The family ID about which to retrieve information. - :param version: int (optional) - The version number for the family to fetch. Defaults to the latest version. :returns: :class:`PolicyFamily` @@ -58,11 +55,10 @@ List policy families. - Returns the list of policy definition types available to use at their latest version. This API is - paginated. + Retrieve a list of policy families. This API is paginated. :param max_results: int (optional) - Maximum number of policy families to return. + The max number of policy families to return. :param page_token: str (optional) A token that can be used to get the next page of results. diff --git a/docs/workspace/dashboards/index.rst b/docs/workspace/dashboards/index.rst index 6d1565bb6..756c9b549 100644 --- a/docs/workspace/dashboards/index.rst +++ b/docs/workspace/dashboards/index.rst @@ -7,5 +7,4 @@ Manage Lakeview dashboards .. toctree:: :maxdepth: 1 - genie lakeview \ No newline at end of file diff --git a/docs/workspace/dashboards/lakeview.rst b/docs/workspace/dashboards/lakeview.rst index fe358063c..70df23c18 100644 --- a/docs/workspace/dashboards/lakeview.rst +++ b/docs/workspace/dashboards/lakeview.rst @@ -17,14 +17,9 @@ The display name of the dashboard. :param parent_path: str (optional) The workspace path of the folder containing the dashboard. Includes leading slash and no trailing - slash. This field is excluded in List Dashboards responses. + slash. :param serialized_dashboard: str (optional) - The contents of the dashboard in serialized string form. This field is excluded in List Dashboards - responses. Use the [get dashboard API] to retrieve an example response, which includes the - `serialized_dashboard` field. This field provides the structure of the JSON string that represents - the dashboard's layout and components. - - [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get + The contents of the dashboard in serialized string form. :param warehouse_id: str (optional) The warehouse ID used to run the dashboard. @@ -143,24 +138,6 @@ :returns: :class:`Subscription` - .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str], show_trashed: Optional[bool], view: Optional[DashboardView]]) -> Iterator[Dashboard] - - List dashboards. - - :param page_size: int (optional) - The number of dashboards to return per page. - :param page_token: str (optional) - A page token, received from a previous `ListDashboards` call. This token can be used to retrieve the - subsequent page. - :param show_trashed: bool (optional) - The flag to include dashboards located in the trash. If unspecified, only active dashboards will be - returned. - :param view: :class:`DashboardView` (optional) - `DASHBOARD_VIEW_BASIC`only includes summary metadata from the dashboard. - - :returns: Iterator over :class:`Dashboard` - - .. py:method:: list_schedules(dashboard_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Schedule] List dashboard schedules. @@ -262,14 +239,9 @@ The display name of the dashboard. :param etag: str (optional) The etag for the dashboard.
Can be optionally provided on updates to ensure that the dashboard has - not been modified since the last read. This field is excluded in List Dashboards responses. + not been modified since the last read. :param serialized_dashboard: str (optional) - The contents of the dashboard in serialized string form. This field is excluded in List Dashboards - responses. Use the [get dashboard API] to retrieve an example response, which includes the - `serialized_dashboard` field. This field provides the structure of the JSON string that represents - the dashboard's layout and components. - - [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get + The contents of the dashboard in serialized string form. :param warehouse_id: str (optional) The warehouse ID used to run the dashboard. diff --git a/docs/workspace/iam/permission_migration.rst b/docs/workspace/iam/permission_migration.rst index 8eef6e0e1..1ffd3ad56 100644 --- a/docs/workspace/iam/permission_migration.rst +++ b/docs/workspace/iam/permission_migration.rst @@ -4,14 +4,17 @@ .. py:class:: PermissionMigrationAPI - APIs for migrating acl permissions, used only by the ucx tool: https://github.com/databrickslabs/ucx + This spec contains undocumented permission migration APIs used in https://github.com/databrickslabs/ucx. - .. py:method:: migrate_permissions(workspace_id: int, from_workspace_group_name: str, to_account_group_name: str [, size: Optional[int]]) -> MigratePermissionsResponse + .. py:method:: migrate_permissions(workspace_id: int, from_workspace_group_name: str, to_account_group_name: str [, size: Optional[int]]) -> PermissionMigrationResponse Migrate Permissions. + Migrate a batch of permissions from a workspace local group to an account group. + :param workspace_id: int - WorkspaceId of the associated workspace where the permission migration will occur. + WorkspaceId of the associated workspace where the permission migration will occur. Both workspace + group and account group must be in this workspace. :param from_workspace_group_name: str The name of the workspace group that permissions will be migrated from. :param to_account_group_name: str @@ -19,5 +22,5 @@ :param size: int (optional) The maximum number of permissions that will be migrated. - :returns: :class:`MigratePermissionsResponse` + :returns: :class:`PermissionMigrationResponse` \ No newline at end of file diff --git a/docs/workspace/iam/permissions.rst b/docs/workspace/iam/permissions.rst index 1f2fd2851..47ff4f37f 100644 --- a/docs/workspace/iam/permissions.rst +++ b/docs/workspace/iam/permissions.rst @@ -7,8 +7,6 @@ Permissions API are used to create read, write, edit, update and manage access for various users on different objects and endpoints. - * **[Apps permissions](:service:apps)** — Manage which users can manage or use apps. - * **[Cluster permissions](:service:clusters)** — Manage which users can manage, restart, or attach to clusters. @@ -44,7 +42,7 @@ * **[Token permissions](:service:tokenmanagement)** — Manage which users can create or use tokens. * **[Workspace object permissions](:service:workspace)** — Manage which users can read, run, edit, or - manage alerts, dbsql-dashboards, directories, files, notebooks and queries. + manage directories, files, and notebooks. For the mapping of the required permissions for specific actions or abilities and other important information, see [Access Control]. @@ -80,9 +78,9 @@ object. :param request_object_type: str - The type of the request object. 
Can be one of the following: alerts, authorization, clusters, - cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, instance-pools, - jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. + The type of the request object. Can be one of the following: authorization, clusters, + cluster-policies, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, + registered-models, repos, serving-endpoints, or warehouses. :param request_object_id: str The id of the request object. @@ -157,9 +155,9 @@ object. :param request_object_type: str - The type of the request object. Can be one of the following: alerts, authorization, clusters, - cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, instance-pools, - jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. + The type of the request object. Can be one of the following: authorization, clusters, + cluster-policies, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, + registered-models, repos, serving-endpoints, or warehouses. :param request_object_id: str The id of the request object. :param access_control_list: List[:class:`AccessControlRequest`] (optional) @@ -175,9 +173,9 @@ root object. :param request_object_type: str - The type of the request object. Can be one of the following: alerts, authorization, clusters, - cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, instance-pools, - jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. + The type of the request object. Can be one of the following: authorization, clusters, + cluster-policies, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, + registered-models, repos, serving-endpoints, or warehouses. :param request_object_id: str The id of the request object. :param access_control_list: List[:class:`AccessControlRequest`] (optional) diff --git a/docs/workspace/jobs/index.rst b/docs/workspace/jobs/index.rst index 0729f8dce..a8f242ea2 100644 --- a/docs/workspace/jobs/index.rst +++ b/docs/workspace/jobs/index.rst @@ -7,5 +7,4 @@ Schedule automated jobs on Databricks Workspaces .. toctree:: :maxdepth: 1 - jobs - policy_compliance_for_jobs \ No newline at end of file + jobs \ No newline at end of file diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index b097c94c8..32cfd55c4 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -120,7 +120,7 @@ .. py:method:: cancel_run_and_wait(run_id: int, timeout: datetime.timedelta = 0:20:00) -> Run - .. 
py:method:: create( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], continuous: Optional[Continuous], deployment: Optional[JobDeployment], description: Optional[str], edit_mode: Optional[JobEditMode], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], format: Optional[Format], git_source: Optional[GitSource], health: Optional[JobsHealthRules], job_clusters: Optional[List[JobCluster]], max_concurrent_runs: Optional[int], name: Optional[str], notification_settings: Optional[JobNotificationSettings], parameters: Optional[List[JobParameterDefinition]], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], schedule: Optional[CronSchedule], tags: Optional[Dict[str, str]], tasks: Optional[List[Task]], timeout_seconds: Optional[int], trigger: Optional[TriggerSettings], webhook_notifications: Optional[WebhookNotifications]]) -> CreateResponse + .. py:method:: create( [, access_control_list: Optional[List[iam.AccessControlRequest]], continuous: Optional[Continuous], deployment: Optional[JobDeployment], description: Optional[str], edit_mode: Optional[JobEditMode], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], format: Optional[Format], git_source: Optional[GitSource], health: Optional[JobsHealthRules], job_clusters: Optional[List[JobCluster]], max_concurrent_runs: Optional[int], name: Optional[str], notification_settings: Optional[JobNotificationSettings], parameters: Optional[List[JobParameterDefinition]], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], schedule: Optional[CronSchedule], tags: Optional[Dict[str, str]], tasks: Optional[List[Task]], timeout_seconds: Optional[int], trigger: Optional[TriggerSettings], webhook_notifications: Optional[WebhookNotifications]]) -> CreateResponse Usage: @@ -156,19 +156,15 @@ Create a new job. - :param access_control_list: List[:class:`JobAccessControlRequest`] (optional) + :param access_control_list: List[:class:`AccessControlRequest`] (optional) List of permissions to set on the job. - :param budget_policy_id: str (optional) - The id of the user specified budget policy to use for this job. If not specified, a default budget - policy may be applied when creating or modifying the job. See `effective_budget_policy_id` for the - budget policy used by this workload. :param continuous: :class:`Continuous` (optional) An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used. :param deployment: :class:`JobDeployment` (optional) Deployment information for jobs managed by external sources. :param description: str (optional) - An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding. + An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding. :param edit_mode: :class:`JobEditMode` (optional) Edit mode of the job. @@ -178,10 +174,7 @@ An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. :param environments: List[:class:`JobEnvironment`] (optional) - A list of task execution environment specifications that can be referenced by serverless tasks of - this job. An environment is required to be present for serverless tasks. For serverless notebook - tasks, the environment is accessible in the notebook environment panel. 
For other serverless tasks, - the task environment is required to be specified using environment_key in the task settings. + A list of task execution environment specifications that can be referenced by tasks of this job. :param format: :class:`Format` (optional) Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. @@ -218,11 +211,12 @@ :param queue: :class:`QueueSettings` (optional) The queue settings of the job. :param run_as: :class:`JobRunAs` (optional) - Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If - not specified, the job/pipeline runs as the user who created the job/pipeline. + Write-only setting, available only in Create/Update/Reset and Submit calls. Specifies the user or + service principal that the job runs as. If not specified, the job runs as the user who created the + job. - Exactly one of `user_name`, `service_principal_name`, `group_name` should be specified. If not, an - error is thrown. + Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is + thrown. :param schedule: :class:`CronSchedule` (optional) An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`. @@ -382,7 +376,7 @@ :returns: :class:`JobPermissions` - .. py:method:: get_run(run_id: int [, include_history: Optional[bool], include_resolved_values: Optional[bool], page_token: Optional[str]]) -> Run + .. py:method:: get_run(run_id: int [, include_history: Optional[bool], include_resolved_values: Optional[bool]]) -> Run Usage: @@ -424,9 +418,6 @@ Whether to include the repair history in the response. :param include_resolved_values: bool (optional) Whether to include resolved parameter values in the response. - :param page_token: str (optional) - To list the next page or the previous page of job tasks, set this field to the value of the - `next_page_token` or `prev_page_token` returned in the GetJob response. :returns: :class:`Run` @@ -685,7 +676,6 @@ [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html :param pipeline_params: :class:`PipelineParams` (optional) - Controls whether the pipeline should perform a full refresh :param python_named_params: Dict[str,str] (optional) :param python_params: List[str] (optional) A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`. @@ -875,7 +865,6 @@ [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html :param pipeline_params: :class:`PipelineParams` (optional) - Controls whether the pipeline should perform a full refresh :param python_named_params: Dict[str,str] (optional) :param python_params: List[str] (optional) A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`. @@ -935,7 +924,7 @@ :returns: :class:`JobPermissions` - .. 
py:method:: submit( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], git_source: Optional[GitSource], health: Optional[JobsHealthRules], idempotency_token: Optional[str], notification_settings: Optional[JobNotificationSettings], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], run_name: Optional[str], tasks: Optional[List[SubmitTask]], timeout_seconds: Optional[int], webhook_notifications: Optional[WebhookNotifications]]) -> Wait[Run] + .. py:method:: submit( [, access_control_list: Optional[List[iam.AccessControlRequest]], condition_task: Optional[ConditionTask], dbt_task: Optional[DbtTask], email_notifications: Optional[JobEmailNotifications], git_source: Optional[GitSource], health: Optional[JobsHealthRules], idempotency_token: Optional[str], notebook_task: Optional[NotebookTask], notification_settings: Optional[JobNotificationSettings], pipeline_task: Optional[PipelineTask], python_wheel_task: Optional[PythonWheelTask], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], run_job_task: Optional[RunJobTask], run_name: Optional[str], spark_jar_task: Optional[SparkJarTask], spark_python_task: Optional[SparkPythonTask], spark_submit_task: Optional[SparkSubmitTask], sql_task: Optional[SqlTask], tasks: Optional[List[SubmitTask]], timeout_seconds: Optional[int], webhook_notifications: Optional[WebhookNotifications]]) -> Wait[Run] Usage: @@ -971,15 +960,16 @@ Runs submitted using this endpoint don’t display in the UI. Use the `jobs/runs/get` API to check the run state after the job is submitted. - :param access_control_list: List[:class:`JobAccessControlRequest`] (optional) + :param access_control_list: List[:class:`AccessControlRequest`] (optional) List of permissions to set on the job. - :param budget_policy_id: str (optional) - The user specified id of the budget policy to use for this one-time run. If not specified, the run - will be not be attributed to any budget policy. + :param condition_task: :class:`ConditionTask` (optional) + If condition_task, specifies a condition with an outcome that can be used to control the execution + of other tasks. Does not require a cluster to execute and does not support retries or notifications. + :param dbt_task: :class:`DbtTask` (optional) + If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the + ability to use a serverless or a pro SQL warehouse. :param email_notifications: :class:`JobEmailNotifications` (optional) An optional set of email addresses notified when the run begins or completes. - :param environments: List[:class:`JobEnvironment`] (optional) - A list of task execution environment specifications that can be referenced by tasks of this run. :param git_source: :class:`GitSource` (optional) An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. @@ -1004,16 +994,47 @@ For more information, see [How to ensure idempotency for jobs]. [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html + :param notebook_task: :class:`NotebookTask` (optional) + If notebook_task, indicates that this task must run a notebook. This field may not be specified in + conjunction with spark_jar_task. 
:param notification_settings: :class:`JobNotificationSettings` (optional) Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this run. + :param pipeline_task: :class:`PipelineTask` (optional) + If pipeline_task, indicates that this task must execute a Pipeline. + :param python_wheel_task: :class:`PythonWheelTask` (optional) + If python_wheel_task, indicates that this job must execute a PythonWheel. :param queue: :class:`QueueSettings` (optional) The queue settings of the one-time run. :param run_as: :class:`JobRunAs` (optional) Specifies the user or service principal that the job runs as. If not specified, the job runs as the user who submits the request. + :param run_job_task: :class:`RunJobTask` (optional) + If run_job_task, indicates that this task must execute another job. :param run_name: str (optional) An optional name for the run. The default value is `Untitled`. + :param spark_jar_task: :class:`SparkJarTask` (optional) + If spark_jar_task, indicates that this task must run a JAR. + :param spark_python_task: :class:`SparkPythonTask` (optional) + If spark_python_task, indicates that this task must run a Python file. + :param spark_submit_task: :class:`SparkSubmitTask` (optional) + If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This + task can run only on new clusters. + + In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use + `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark + configurations. + + `master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you + _cannot_ specify them in parameters. + + By default, the Spark submit job uses all available memory (excluding reserved memory for Databricks + services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some + room for off-heap usage. + + The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths. + :param sql_task: :class:`SqlTask` (optional) + If sql_task, indicates that this job must execute a SQL task. :param tasks: List[:class:`SubmitTask`] (optional) :param timeout_seconds: int (optional) An optional timeout applied to each run of this job. A value of `0` means no timeout. @@ -1025,7 +1046,7 @@ See :method:wait_get_run_job_terminated_or_skipped for more details. - .. py:method:: submit_and_wait( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], git_source: Optional[GitSource], health: Optional[JobsHealthRules], idempotency_token: Optional[str], notification_settings: Optional[JobNotificationSettings], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], run_name: Optional[str], tasks: Optional[List[SubmitTask]], timeout_seconds: Optional[int], webhook_notifications: Optional[WebhookNotifications], timeout: datetime.timedelta = 0:20:00]) -> Run + .. 
py:method:: submit_and_wait( [, access_control_list: Optional[List[iam.AccessControlRequest]], condition_task: Optional[ConditionTask], dbt_task: Optional[DbtTask], email_notifications: Optional[JobEmailNotifications], git_source: Optional[GitSource], health: Optional[JobsHealthRules], idempotency_token: Optional[str], notebook_task: Optional[NotebookTask], notification_settings: Optional[JobNotificationSettings], pipeline_task: Optional[PipelineTask], python_wheel_task: Optional[PythonWheelTask], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], run_job_task: Optional[RunJobTask], run_name: Optional[str], spark_jar_task: Optional[SparkJarTask], spark_python_task: Optional[SparkPythonTask], spark_submit_task: Optional[SparkSubmitTask], sql_task: Optional[SqlTask], tasks: Optional[List[SubmitTask]], timeout_seconds: Optional[int], webhook_notifications: Optional[WebhookNotifications], timeout: datetime.timedelta = 0:20:00]) -> Run .. py:method:: update(job_id: int [, fields_to_remove: Optional[List[str]], new_settings: Optional[JobSettings]]) diff --git a/docs/workspace/marketplace/consumer_listings.rst b/docs/workspace/marketplace/consumer_listings.rst index 242a8fce7..654fe82d4 100644 --- a/docs/workspace/marketplace/consumer_listings.rst +++ b/docs/workspace/marketplace/consumer_listings.rst @@ -29,7 +29,7 @@ :returns: :class:`GetListingResponse` - .. py:method:: list( [, assets: Optional[List[AssetType]], categories: Optional[List[Category]], is_free: Optional[bool], is_private_exchange: Optional[bool], is_staff_pick: Optional[bool], page_size: Optional[int], page_token: Optional[str], provider_ids: Optional[List[str]], tags: Optional[List[ListingTag]]]) -> Iterator[Listing] + .. py:method:: list( [, assets: Optional[List[AssetType]], categories: Optional[List[Category]], is_ascending: Optional[bool], is_free: Optional[bool], is_private_exchange: Optional[bool], is_staff_pick: Optional[bool], page_size: Optional[int], page_token: Optional[str], provider_ids: Optional[List[str]], sort_by: Optional[SortBy], tags: Optional[List[ListingTag]]]) -> Iterator[Listing] List listings. @@ -39,6 +39,7 @@ Matches any of the following asset types :param categories: List[:class:`Category`] (optional) Matches any of the following categories + :param is_ascending: bool (optional) :param is_free: bool (optional) Filters each listing based on if it is free. :param is_private_exchange: bool (optional) @@ -49,13 +50,15 @@ :param page_token: str (optional) :param provider_ids: List[str] (optional) Matches any of the following provider ids + :param sort_by: :class:`SortBy` (optional) + Criteria for sorting the resulting set of listings. :param tags: List[:class:`ListingTag`] (optional) Matches any of the following tags :returns: Iterator over :class:`Listing` - .. py:method:: search(query: str [, assets: Optional[List[AssetType]], categories: Optional[List[Category]], is_free: Optional[bool], is_private_exchange: Optional[bool], page_size: Optional[int], page_token: Optional[str], provider_ids: Optional[List[str]]]) -> Iterator[Listing] + .. py:method:: search(query: str [, assets: Optional[List[AssetType]], categories: Optional[List[Category]], is_ascending: Optional[bool], is_free: Optional[bool], is_private_exchange: Optional[bool], page_size: Optional[int], page_token: Optional[str], provider_ids: Optional[List[str]], sort_by: Optional[SortBy]]) -> Iterator[Listing] Search listings. 
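The search endpoint introduced above takes the same filters as list plus a free-text query, and the SDK iterator follows page_token continuation on its own. A minimal sketch, assuming a configured WorkspaceClient; the query string is hypothetical:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Search marketplace listings matching a free-text query, restricted to
    # free listings; pagination is handled by the returned iterator.
    for listing in w.consumer_listings.search(query="forecasting", is_free=True):
        print(listing.id)
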
@@ -68,12 +71,14 @@ Matches any of the following asset types :param categories: List[:class:`Category`] (optional) Matches any of the following categories + :param is_ascending: bool (optional) :param is_free: bool (optional) :param is_private_exchange: bool (optional) :param page_size: int (optional) :param page_token: str (optional) :param provider_ids: List[str] (optional) Matches any of the following provider ids + :param sort_by: :class:`SortBy` (optional) :returns: Iterator over :class:`Listing` \ No newline at end of file diff --git a/docs/workspace/ml/experiments.rst b/docs/workspace/ml/experiments.rst index c09cfe353..1ada6b1e5 100644 --- a/docs/workspace/ml/experiments.rst +++ b/docs/workspace/ml/experiments.rst @@ -270,16 +270,10 @@ Get all artifacts. List artifacts for a run. Takes an optional `artifact_path` prefix. If it is specified, the response - contains only artifacts with the specified prefix. This API does not support pagination when listing - artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. Please call - `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which supports - pagination. See [List directory contents | Files API](/api/workspace/files/listdirectorycontents). + contains only artifacts with the specified prefix. :param page_token: str (optional) - Token indicating the page of artifact results to fetch. `page_token` is not supported when listing - artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. Please call - `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which supports - pagination. See [List directory contents | Files API](/api/workspace/files/listdirectorycontents). + Token indicating the page of artifact results to fetch. :param path: str (optional) Filter artifacts matching this path (a relative path from the root artifact directory). :param run_id: str (optional) diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index 9801a200e..a80e7c799 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -15,7 +15,7 @@ also enforce data quality with Delta Live Tables expectations. Expectations allow you to define expected data quality and specify how to handle records that fail those expectations. - ..
py:method:: create( [, allow_duplicate_names: Optional[bool], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], dry_run: Optional[bool], edition: Optional[str], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[ManagedIngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) -> CreatePipelineResponse Usage: @@ -55,8 +55,6 @@ :param allow_duplicate_names: bool (optional) If false, deployment will fail if name conflicts with that of another pipeline. - :param budget_policy_id: str (optional) - Budget policy of this pipeline. :param catalog: str (optional) A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, @@ -82,7 +80,7 @@ The definition of a gateway pipeline to support CDC. :param id: str (optional) Unique identifier for this pipeline. - :param ingestion_definition: :class:`IngestionPipelineDefinition` (optional) + :param ingestion_definition: :class:`ManagedIngestionPipelineDefinition` (optional) The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings. :param libraries: List[:class:`PipelineLibrary`] (optional) @@ -93,9 +91,6 @@ List of notification settings for this pipeline. :param photon: bool (optional) Whether Photon is enabled for this pipeline. - :param schema: str (optional) - The default schema (database) where tables are read from or published to. The presence of this field - implies that the pipeline is in direct publishing mode. :param serverless: bool (optional) Whether serverless compute is enabled for this pipeline. :param storage: str (optional) @@ -376,7 +371,7 @@ .. py:method:: stop_and_wait(pipeline_id: str, timeout: datetime.timedelta = 0:20:00) -> GetPipelineResponse - .. py:method:: update(pipeline_id: str [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], edition: Optional[str], expected_last_modified: Optional[int], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) + .. 
py:method:: update(pipeline_id: str [, allow_duplicate_names: Optional[bool], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], edition: Optional[str], expected_last_modified: Optional[int], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[ManagedIngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) Usage: @@ -430,8 +425,6 @@ Unique identifier for this pipeline. :param allow_duplicate_names: bool (optional) If false, deployment will fail if name has changed and conflicts the name of another pipeline. - :param budget_policy_id: str (optional) - Budget policy of this pipeline. :param catalog: str (optional) A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, @@ -459,7 +452,7 @@ The definition of a gateway pipeline to support CDC. :param id: str (optional) Unique identifier for this pipeline. - :param ingestion_definition: :class:`IngestionPipelineDefinition` (optional) + :param ingestion_definition: :class:`ManagedIngestionPipelineDefinition` (optional) The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings. :param libraries: List[:class:`PipelineLibrary`] (optional) @@ -470,9 +463,6 @@ List of notification settings for this pipeline. :param photon: bool (optional) Whether Photon is enabled for this pipeline. - :param schema: str (optional) - The default schema (database) where tables are read from or published to. The presence of this field - implies that the pipeline is in direct publishing mode. :param serverless: bool (optional) Whether serverless compute is enabled for this pipeline. :param storage: str (optional) diff --git a/docs/workspace/serving/index.rst b/docs/workspace/serving/index.rst index 7a39a4043..76733d63d 100644 --- a/docs/workspace/serving/index.rst +++ b/docs/workspace/serving/index.rst @@ -7,5 +7,4 @@ Use real-time inference for machine learning .. toctree:: :maxdepth: 1 - serving_endpoints - serving_endpoints_data_plane \ No newline at end of file + serving_endpoints \ No newline at end of file diff --git a/docs/workspace/serving/serving_endpoints.rst b/docs/workspace/serving/serving_endpoints.rst index cbcbca964..9244f333a 100644 --- a/docs/workspace/serving/serving_endpoints.rst +++ b/docs/workspace/serving/serving_endpoints.rst @@ -2,7 +2,7 @@ ========================================== .. currentmodule:: databricks.sdk.service.serving -.. py:class:: ServingEndpointsExt +.. py:class:: ServingEndpointsAPI The Serving Endpoints API allows you to create, update, and delete model serving endpoints. @@ -29,7 +29,7 @@ :returns: :class:`BuildLogsResponse` - .. py:method:: create(name: str, config: EndpointCoreConfigInput [, ai_gateway: Optional[AiGatewayConfig], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] + .. 
py:method:: create(name: str, config: EndpointCoreConfigInput [, rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] Create a new serving endpoint. @@ -38,12 +38,9 @@ workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores. :param config: :class:`EndpointCoreConfigInput` The core config of the serving endpoint. - :param ai_gateway: :class:`AiGatewayConfig` (optional) - The AI Gateway configuration for the serving endpoint. NOTE: only external model endpoints are - supported as of now. :param rate_limits: List[:class:`RateLimit`] (optional) - Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI - Gateway to manage rate limits. + Rate limits to be applied to the serving endpoint. NOTE: only external and foundation model + endpoints are supported as of now. :param route_optimized: bool (optional) Enable route optimization for the serving endpoint. :param tags: List[:class:`EndpointTag`] (optional) @@ -54,7 +51,7 @@ See :method:wait_get_serving_endpoint_not_updating for more details. - .. py:method:: create_and_wait(name: str, config: EndpointCoreConfigInput [, ai_gateway: Optional[AiGatewayConfig], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed + .. py:method:: create_and_wait(name: str, config: EndpointCoreConfigInput [, rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed .. py:method:: delete(name: str) @@ -92,12 +89,6 @@ :returns: :class:`ServingEndpointDetailed` - .. py:method:: get_langchain_chat_open_ai_client(model) - - - .. py:method:: get_open_ai_client() - - .. py:method:: get_open_api(name: str) Get the schema for a serving endpoint. @@ -177,8 +168,8 @@ Update rate limits of a serving endpoint. - Used to update the rate limits of a serving endpoint. NOTE: Only foundation model endpoints are - currently supported. For external models, use AI Gateway to manage rate limits. + Used to update the rate limits of a serving endpoint. NOTE: only external and foundation model + endpoints are supported as of now. :param name: str The name of the serving endpoint whose rate limits are being updated. This field is required. @@ -188,29 +179,6 @@ :returns: :class:`PutResponse` - .. py:method:: put_ai_gateway(name: str [, guardrails: Optional[AiGatewayGuardrails], inference_table_config: Optional[AiGatewayInferenceTableConfig], rate_limits: Optional[List[AiGatewayRateLimit]], usage_tracking_config: Optional[AiGatewayUsageTrackingConfig]]) -> PutAiGatewayResponse - - Update AI Gateway of a serving endpoint. - - Used to update the AI Gateway of a serving endpoint. NOTE: Only external model endpoints are currently - supported. - - :param name: str - The name of the serving endpoint whose AI Gateway is being updated. This field is required. - :param guardrails: :class:`AiGatewayGuardrails` (optional) - Configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses. - :param inference_table_config: :class:`AiGatewayInferenceTableConfig` (optional) - Configuration for payload logging using inference tables. Use these tables to monitor and audit data - being sent to and received from model APIs and to improve model quality. 
- :param rate_limits: List[:class:`AiGatewayRateLimit`] (optional) - Configuration for rate limits which can be set to limit endpoint traffic. - :param usage_tracking_config: :class:`AiGatewayUsageTrackingConfig` (optional) - Configuration to enable usage tracking using system tables. These tables allow you to monitor - operational usage on endpoints and their associated costs. - - :returns: :class:`PutAiGatewayResponse` - - .. py:method:: query(name: str [, dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float]]) -> QueryEndpointResponse Query a serving endpoint. diff --git a/docs/workspace/settings/index.rst b/docs/workspace/settings/index.rst index 22655853b..5b56652ec 100644 --- a/docs/workspace/settings/index.rst +++ b/docs/workspace/settings/index.rst @@ -9,13 +9,10 @@ Manage security settings for Accounts and Workspaces credentials_manager ip_access_lists - notification_destinations settings automatic_cluster_update compliance_security_profile default_namespace - disable_legacy_access - disable_legacy_dbfs enhanced_security_monitoring restrict_workspace_admins token_management diff --git a/docs/workspace/settings/settings.rst b/docs/workspace/settings/settings.rst index 588031926..55f47dae0 100644 --- a/docs/workspace/settings/settings.rst +++ b/docs/workspace/settings/settings.rst @@ -34,22 +34,6 @@ This setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute. - .. py:property:: disable_legacy_access - :type: DisableLegacyAccessAPI - - 'Disabling legacy access' has the following impacts: - - 1. Disables direct access to the Hive Metastore. However, you can still access Hive Metastore through HMS - Federation. 2. Disables Fallback Mode (docs link) on any External Location access from the workspace. 3. - Alters DBFS path access to use External Location permissions in place of legacy credentials. 4. Enforces - Unity Catalog access on all path based access. - - .. py:property:: disable_legacy_dbfs - :type: DisableLegacyDbfsAPI - - When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new - mounts). When the setting is off, all DBFS functionality is enabled - .. py:property:: enhanced_security_monitoring :type: EnhancedSecurityMonitoringAPI diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst index 7cf398ac0..1382b5a92 100644 --- a/docs/workspace/sharing/providers.rst +++ b/docs/workspace/sharing/providers.rst @@ -100,7 +100,7 @@ :returns: :class:`ProviderInfo` - .. py:method:: list( [, data_provider_global_metastore_id: Optional[str], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[ProviderInfo] + .. py:method:: list( [, data_provider_global_metastore_id: Optional[str]]) -> Iterator[ProviderInfo] Usage: @@ -123,21 +123,11 @@ :param data_provider_global_metastore_id: str (optional) If not provided, all providers will be returned. If no providers exist with this ID, no results will be returned. - :param max_results: int (optional) - Maximum number of providers to return. 
- when set to 0, the page length is set to a server - configured value (recommended); - when set to a value greater than 0, the page length is the minimum - of this value and a server configured value; - when set to a value less than 0, an invalid parameter - error is returned; - If not set, all valid providers are returned (not recommended). - Note: The - number of returned providers might be less than the specified max_results size, even zero. The only - definitive indication that no further providers can be fetched is when the next_page_token is unset - from the response. - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. :returns: Iterator over :class:`ProviderInfo` - .. py:method:: list_shares(name: str [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[ProviderShare] + .. py:method:: list_shares(name: str) -> Iterator[ProviderShare] Usage: @@ -172,16 +162,6 @@ :param name: str Name of the provider in which to list shares. - :param max_results: int (optional) - Maximum number of shares to return. - when set to 0, the page length is set to a server configured - value (recommended); - when set to a value greater than 0, the page length is the minimum of this - value and a server configured value; - when set to a value less than 0, an invalid parameter error - is returned; - If not set, all valid shares are returned (not recommended). - Note: The number of - returned shares might be less than the specified max_results size, even zero. The only definitive - indication that no further shares can be fetched is when the next_page_token is unset from the - response. - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. :returns: Iterator over :class:`ProviderShare` diff --git a/docs/workspace/sharing/recipients.rst b/docs/workspace/sharing/recipients.rst index 44f2042bb..86a004d36 100644 --- a/docs/workspace/sharing/recipients.rst +++ b/docs/workspace/sharing/recipients.rst @@ -18,7 +18,7 @@ recipient follows the activation link to download the credential file, and then uses the credential file to establish a secure connection to receive the shared data. This sharing mode is called **open sharing**. - .. py:method:: create(name: str, authentication_type: AuthenticationType [, comment: Optional[str], data_recipient_global_metastore_id: Optional[str], expiration_time: Optional[int], ip_access_list: Optional[IpAccessList], owner: Optional[str], properties_kvpairs: Optional[SecurablePropertiesKvPairs], sharing_code: Optional[str]]) -> RecipientInfo + .. py:method:: create(name: str, authentication_type: AuthenticationType [, comment: Optional[str], data_recipient_global_metastore_id: Optional[str], ip_access_list: Optional[IpAccessList], owner: Optional[str], properties_kvpairs: Optional[SecurablePropertiesKvPairs], sharing_code: Optional[str]]) -> RecipientInfo Usage: @@ -51,8 +51,6 @@ The global Unity Catalog metastore id provided by the data recipient. This field is required when the __authentication_type__ is **DATABRICKS**. The identifier is of format __cloud__:__region__:__metastore-uuid__. - :param expiration_time: int (optional) - Expiration timestamp of the token, in epoch milliseconds. :param ip_access_list: :class:`IpAccessList` (optional) IP Access List :param owner: str (optional) @@ -110,7 +108,7 @@ :returns: :class:`RecipientInfo` - .. 
py:method:: list( [, data_recipient_global_metastore_id: Optional[str], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[RecipientInfo] + .. py:method:: list( [, data_recipient_global_metastore_id: Optional[str]]) -> Iterator[RecipientInfo] Usage: @@ -134,16 +132,6 @@ :param data_recipient_global_metastore_id: str (optional) If not provided, all recipients will be returned. If no recipients exist with this ID, no results will be returned. - :param max_results: int (optional) - Maximum number of recipients to return. - when set to 0, the page length is set to a server - configured value (recommended); - when set to a value greater than 0, the page length is the minimum - of this value and a server configured value; - when set to a value less than 0, an invalid parameter - error is returned; - If not set, all valid recipients are returned (not recommended). - Note: The - number of returned recipients might be less than the specified max_results size, even zero. The only - definitive indication that no further recipients can be fetched is when the next_page_token is unset - from the response. - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. :returns: Iterator over :class:`RecipientInfo` @@ -183,7 +171,7 @@ :returns: :class:`RecipientInfo` - .. py:method:: share_permissions(name: str [, max_results: Optional[int], page_token: Optional[str]]) -> GetRecipientSharePermissionsResponse + .. py:method:: share_permissions(name: str) -> GetRecipientSharePermissionsResponse Usage: @@ -210,21 +198,11 @@ :param name: str The name of the Recipient. - :param max_results: int (optional) - Maximum number of permissions to return. - when set to 0, the page length is set to a server - configured value (recommended); - when set to a value greater than 0, the page length is the minimum - of this value and a server configured value; - when set to a value less than 0, an invalid parameter - error is returned; - If not set, all valid permissions are returned (not recommended). - Note: The - number of returned permissions might be less than the specified max_results size, even zero. The - only definitive indication that no further permissions can be fetched is when the next_page_token is - unset from the response. - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. :returns: :class:`GetRecipientSharePermissionsResponse` - .. py:method:: update(name: str [, comment: Optional[str], expiration_time: Optional[int], ip_access_list: Optional[IpAccessList], new_name: Optional[str], owner: Optional[str], properties_kvpairs: Optional[SecurablePropertiesKvPairs]]) + .. py:method:: update(name: str [, comment: Optional[str], ip_access_list: Optional[IpAccessList], new_name: Optional[str], owner: Optional[str], properties_kvpairs: Optional[SecurablePropertiesKvPairs]]) Usage: @@ -254,8 +232,6 @@ Name of the recipient. :param comment: str (optional) Description about the recipient. - :param expiration_time: int (optional) - Expiration timestamp of the token, in epoch milliseconds. :param ip_access_list: :class:`IpAccessList` (optional) IP Access List :param new_name: str (optional) diff --git a/docs/workspace/sharing/shares.rst b/docs/workspace/sharing/shares.rst index 4d14b811d..dd917f541 100644 --- a/docs/workspace/sharing/shares.rst +++ b/docs/workspace/sharing/shares.rst @@ -87,7 +87,7 @@ :returns: :class:`ShareInfo` - .. 
py:method:: list( [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[ShareInfo] + .. py:method:: list() -> Iterator[ShareInfo] Usage: @@ -106,21 +106,10 @@ Gets an array of data object shares from the metastore. The caller must be a metastore admin or the owner of the share. There is no guarantee of a specific ordering of the elements in the array. - :param max_results: int (optional) - Maximum number of shares to return. - when set to 0, the page length is set to a server configured - value (recommended); - when set to a value greater than 0, the page length is the minimum of this - value and a server configured value; - when set to a value less than 0, an invalid parameter error - is returned; - If not set, all valid shares are returned (not recommended). - Note: The number of - returned shares might be less than the specified max_results size, even zero. The only definitive - indication that no further shares can be fetched is when the next_page_token is unset from the - response. - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. - :returns: Iterator over :class:`ShareInfo` - .. py:method:: share_permissions(name: str [, max_results: Optional[int], page_token: Optional[str]]) -> catalog.PermissionsList + .. py:method:: share_permissions(name: str) -> catalog.PermissionsList Get permissions. @@ -129,16 +118,6 @@ :param name: str The name of the share. - :param max_results: int (optional) - Maximum number of permissions to return. - when set to 0, the page length is set to a server - configured value (recommended); - when set to a value greater than 0, the page length is the minimum - of this value and a server configured value; - when set to a value less than 0, an invalid parameter - error is returned; - If not set, all valid permissions are returned (not recommended). - Note: The - number of returned permissions might be less than the specified max_results size, even zero. The - only definitive indication that no further permissions can be fetched is when the next_page_token is - unset from the response. - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. :returns: :class:`PermissionsList` @@ -222,7 +201,7 @@ :returns: :class:`ShareInfo` - .. py:method:: update_permissions(name: str [, changes: Optional[List[catalog.PermissionsChange]], max_results: Optional[int], page_token: Optional[str]]) + .. py:method:: update_permissions(name: str [, changes: Optional[List[catalog.PermissionsChange]]]) Update permissions. @@ -236,16 +215,6 @@ The name of the share. :param changes: List[:class:`PermissionsChange`] (optional) Array of permission changes. - :param max_results: int (optional) - Maximum number of permissions to return. - when set to 0, the page length is set to a server - configured value (recommended); - when set to a value greater than 0, the page length is the minimum - of this value and a server configured value; - when set to a value less than 0, an invalid parameter - error is returned; - If not set, all valid permissions are returned (not recommended). - Note: The - number of returned permissions might be less than the specified max_results size, even zero. The - only definitive indication that no further permissions can be fetched is when the next_page_token is - unset from the response. - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. 
\ No newline at end of file diff --git a/docs/workspace/sql/alerts.rst b/docs/workspace/sql/alerts.rst index c552d5f80..95f846b24 100644 --- a/docs/workspace/sql/alerts.rst +++ b/docs/workspace/sql/alerts.rst @@ -9,7 +9,7 @@ notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create. - .. py:method:: create( [, alert: Optional[CreateAlertRequestAlert]]) -> Alert + .. py:method:: create(name: str, options: AlertOptions, query_id: str [, parent: Optional[str], rearm: Optional[int]]) -> Alert Usage: @@ -46,27 +46,37 @@ Create an alert. - Creates an alert. - - :param alert: :class:`CreateAlertRequestAlert` (optional) + Creates an alert. An alert is a Databricks SQL object that periodically runs a query, evaluates a + condition of its result, and notifies users or notification destinations if the condition was met. + + :param name: str + Name of the alert. + :param options: :class:`AlertOptions` + Alert configuration options. + :param query_id: str + Query ID. + :param parent: str (optional) + The identifier of the workspace folder containing the object. + :param rearm: int (optional) + Number of seconds after being triggered before the alert rearms itself and can be triggered again. + If `null`, alert will never be triggered again. :returns: :class:`Alert` - .. py:method:: delete(id: str) + .. py:method:: delete(alert_id: str) Delete an alert. - Moves an alert to the trash. Trashed alerts immediately disappear from searches and list views, and - can no longer trigger. You can restore a trashed alert through the UI. A trashed alert is permanently - deleted after 30 days. + Deletes an alert. Deleted alerts are no longer accessible and cannot be restored. **Note:** Unlike + queries and dashboards, alerts cannot be moved to the trash. - :param id: str + :param alert_id: str - .. py:method:: get(id: str) -> Alert + .. py:method:: get(alert_id: str) -> Alert Usage: @@ -107,12 +117,12 @@ Gets an alert. - :param id: str + :param alert_id: str :returns: :class:`Alert` - .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ListAlertsResponseAlert] + .. py:method:: list() -> Iterator[Alert] Usage: @@ -126,18 +136,14 @@ all = w.alerts.list(sql.ListAlertsRequest()) - List alerts. - - Gets a list of alerts accessible to the user, ordered by creation time. **Warning:** Calling this API - concurrently 10 or more times could result in throttling, service degradation, or a temporary ban. + Get alerts. - :param page_size: int (optional) - :param page_token: str (optional) + Gets a list of alerts. - :returns: Iterator over :class:`ListAlertsResponseAlert` + :returns: Iterator over :class:`Alert` - .. py:method:: update(id: str, update_mask: str [, alert: Optional[UpdateAlertRequestAlert]]) -> Alert + .. py:method:: update(alert_id: str, name: str, options: AlertOptions, query_id: str [, rearm: Optional[int]]) Usage: @@ -180,12 +186,16 @@ Updates an alert. - :param id: str - :param update_mask: str - Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the - setting payload will be updated. The field mask needs to be supplied as single string. To specify - multiple fields in the field mask, use comma as the separator (no space). - :param alert: :class:`UpdateAlertRequestAlert` (optional) + :param alert_id: str + :param name: str + Name of the alert. + :param options: :class:`AlertOptions` + Alert configuration options. 
+ :param query_id: str + Query ID. + :param rearm: int (optional) + Number of seconds after being triggered before the alert rearms itself and can be triggered again. + If `null`, alert will never be triggered again. + - :returns: :class:`Alert` \ No newline at end of file diff --git a/docs/workspace/sql/dashboards.rst b/docs/workspace/sql/dashboards.rst index 97ea1014d..a59e625f1 100644 --- a/docs/workspace/sql/dashboards.rst +++ b/docs/workspace/sql/dashboards.rst @@ -123,8 +123,8 @@ Fetch a paginated list of dashboard objects. - **Warning**: Calling this API concurrently 10 or more times could result in throttling, service - degradation, or a temporary ban. + ### **Warning: Calling this API concurrently 10 or more times could result in throttling, service + degradation, or a temporary ban.** :param order: :class:`ListOrder` (optional) Name of dashboard attribute to order by. diff --git a/docs/workspace/sql/data_sources.rst b/docs/workspace/sql/data_sources.rst index 8f7321fa0..3006e5e0b 100644 --- a/docs/workspace/sql/data_sources.rst +++ b/docs/workspace/sql/data_sources.rst @@ -11,10 +11,6 @@ This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or `grep` to search the response from this API for the name of your SQL warehouse as it appears in Databricks SQL. - - **Note**: A new version of the Databricks SQL API is now available. [Learn more] - - [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html .. py:method:: list() -> Iterator[DataSource] @@ -35,10 +31,5 @@ API response are enumerated for clarity. However, you need only a SQL warehouse's `id` to create new queries against it. - **Note**: A new version of the Databricks SQL API is now available. Please use :method:warehouses/list - instead. [Learn more] - - [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - :returns: Iterator over :class:`DataSource` \ No newline at end of file diff --git a/docs/workspace/sql/dbsql_permissions.rst b/docs/workspace/sql/dbsql_permissions.rst index 7f9e5d19c..07aa4f00f 100644 --- a/docs/workspace/sql/dbsql_permissions.rst +++ b/docs/workspace/sql/dbsql_permissions.rst @@ -15,10 +15,6 @@ - `CAN_RUN`: Allows read access and run access (superset of `CAN_VIEW`) - `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify permissions (superset of `CAN_RUN`) - - **Note**: A new version of the Databricks SQL API is now available. [Learn more] - - [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html .. py:method:: get(object_type: ObjectTypePlural, object_id: str) -> GetResponse @@ -26,11 +22,6 @@ Gets a JSON representation of the access control list (ACL) for a specified object. - **Note**: A new version of the Databricks SQL API is now available. Please use - :method:workspace/getpermissions instead. [Learn more] - - [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - :param object_type: :class:`ObjectTypePlural` The type of object permissions to check. :param object_id: str @@ -46,11 +37,6 @@ Sets the access control list (ACL) for a specified object. This operation will complete rewrite the ACL. - **Note**: A new version of the Databricks SQL API is now available. Please use - :method:workspace/setpermissions instead. [Learn more] - - [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - :param object_type: :class:`ObjectTypePlural` The type of object permission to set. 
:param object_id: str @@ -66,11 +52,6 @@ Transfers ownership of a dashboard, query, or alert to an active user. Requires an admin API key. - **Note**: A new version of the Databricks SQL API is now available. For queries and alerts, please use - :method:queries/update and :method:alerts/update respectively instead. [Learn more] - - [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - :param object_type: :class:`OwnableObjectType` The type of object on which to change ownership. :param object_id: :class:`TransferOwnershipObjectId` diff --git a/docs/workspace/sql/index.rst b/docs/workspace/sql/index.rst index 728730209..397de5c72 100644 --- a/docs/workspace/sql/index.rst +++ b/docs/workspace/sql/index.rst @@ -8,15 +8,12 @@ Manage Databricks SQL assets, including warehouses, dashboards, queries and quer :maxdepth: 1 alerts - alerts_legacy dashboard_widgets dashboards data_sources dbsql_permissions queries - queries_legacy query_history query_visualizations - query_visualizations_legacy statement_execution warehouses \ No newline at end of file diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index 1f01c2f1d..45a8ed4e2 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -4,11 +4,11 @@ .. py:class:: QueriesAPI - The queries API can be used to perform CRUD operations on queries. A query is a Databricks SQL object that - includes the target SQL warehouse, query text, name, description, tags, and parameters. Queries can be + These endpoints are used for CRUD operations on query definitions. Query definitions include the target + SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create. - .. py:method:: create( [, query: Optional[CreateQueryRequestQuery]]) -> Query + .. py:method:: create( [, data_source_id: Optional[str], description: Optional[str], name: Optional[str], options: Optional[Any], parent: Optional[str], query: Optional[str], run_as_role: Optional[RunAsRole], tags: Optional[List[str]]]) -> Query Usage: @@ -32,29 +32,55 @@ # cleanup w.queries.delete(id=query.id) - Create a query. - - Creates a query. - - :param query: :class:`CreateQueryRequestQuery` (optional) + Create a new query definition. + + Creates a new query definition. Queries created with this endpoint belong to the authenticated user + making the request. + + The `data_source_id` field specifies the ID of the SQL warehouse to run this query against. You can + use the Data Sources API to see a complete list of available SQL warehouses. Or you can copy the + `data_source_id` from an existing query. + + **Note**: You cannot add a visualization until you create the query. + + :param data_source_id: str (optional) + Data source ID maps to the ID of the data source used by the resource and is distinct from the + warehouse ID. [Learn more]. + + [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + :param description: str (optional) + General description that conveys additional information about this query such as usage notes. + :param name: str (optional) + The title of this query that appears in list views, widget headings, and on the query page. + :param options: Any (optional) + Exclusively used for storing a list parameter definitions. A parameter is an object with `title`, + `name`, `type`, and `value` properties. The `value` field here is the default value. It can be + overridden at runtime. 
+ :param parent: str (optional) + The identifier of the workspace folder containing the object. + :param query: str (optional) + The text of the query to be run. + :param run_as_role: :class:`RunAsRole` (optional) + Sets the **Run as** role for the object. Must be set to one of `"viewer"` (signifying "run as + viewer" behavior) or `"owner"` (signifying "run as owner" behavior) + :param tags: List[str] (optional) :returns: :class:`Query` - .. py:method:: delete(id: str) + .. py:method:: delete(query_id: str) Delete a query. Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and - cannot be used for alerts. You can restore a trashed query through the UI. A trashed query is - permanently deleted after 30 days. + they cannot be used for alerts. The trash is deleted after 30 days. - :param id: str + :param query_id: str - .. py:method:: get(id: str) -> Query + .. py:method:: get(query_id: str) -> Query Usage: @@ -80,42 +106,62 @@ # cleanup w.queries.delete(id=query.id) - Get a query. + Get a query definition. - Gets a query. + Retrieve a query object definition along with contextual permissions information about the currently + authenticated user. - :param id: str + :param query_id: str :returns: :class:`Query` - .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ListQueryObjectsResponseQuery] + .. py:method:: list( [, order: Optional[str], page: Optional[int], page_size: Optional[int], q: Optional[str]]) -> Iterator[Query] - List queries. + Get a list of queries. - Gets a list of queries accessible to the user, ordered by creation time. **Warning:** Calling this API - concurrently 10 or more times could result in throttling, service degradation, or a temporary ban. + Gets a list of queries. Optionally, this list can be filtered by a search term. + ### **Warning: Calling this API concurrently 10 or more times could result in throttling, service + degradation, or a temporary ban.** + + :param order: str (optional) + Name of query attribute to order by. Default sort order is ascending. Append a dash (`-`) to order + descending instead. + + - `name`: The name of the query. + + - `created_at`: The timestamp the query was created. + + - `runtime`: The time it took to run this query. This is blank for parameterized queries. A blank + value is treated as the highest value for sorting. + + - `executed_at`: The timestamp when the query was last run. + + - `created_by`: The user name of the user that created the query. + :param page: int (optional) + Page number to retrieve. :param page_size: int (optional) - :param page_token: str (optional) + Number of queries to return per page. + :param q: str (optional) + Full text search term - :returns: Iterator over :class:`ListQueryObjectsResponseQuery` + :returns: Iterator over :class:`Query` - .. py:method:: list_visualizations(id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Visualization] + .. py:method:: restore(query_id: str) - List visualizations on a query. + Restore a query. - Gets a list of visualizations on a query. + Restore a query that has been moved to the trash. A restored query appears in list views and searches. + You can use restored queries for alerts. + + :param query_id: str - :param id: str - :param page_size: int (optional) - :param page_token: str (optional) - :returns: Iterator over :class:`Visualization` - .. py:method:: update(id: str, update_mask: str [, query: Optional[UpdateQueryRequestQuery]]) -> Query + .. 
py:method:: update(query_id: str [, data_source_id: Optional[str], description: Optional[str], name: Optional[str], options: Optional[Any], query: Optional[str], run_as_role: Optional[RunAsRole], tags: Optional[List[str]]]) -> Query Usage: @@ -145,16 +191,32 @@ # cleanup w.queries.delete(id=query.id) - Update a query. - - Updates a query. - - :param id: str - :param update_mask: str - Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the - setting payload will be updated. The field mask needs to be supplied as single string. To specify - multiple fields in the field mask, use comma as the separator (no space). - :param query: :class:`UpdateQueryRequestQuery` (optional) + Change a query definition. + + Modify this query definition. + + **Note**: You cannot undo this operation. + + :param query_id: str + :param data_source_id: str (optional) + Data source ID maps to the ID of the data source used by the resource and is distinct from the + warehouse ID. [Learn more]. + + [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + :param description: str (optional) + General description that conveys additional information about this query such as usage notes. + :param name: str (optional) + The title of this query that appears in list views, widget headings, and on the query page. + :param options: Any (optional) + Exclusively used for storing a list parameter definitions. A parameter is an object with `title`, + `name`, `type`, and `value` properties. The `value` field here is the default value. It can be + overridden at runtime. + :param query: str (optional) + The text of the query to be run. + :param run_as_role: :class:`RunAsRole` (optional) + Sets the **Run as** role for the object. Must be set to one of `"viewer"` (signifying "run as + viewer" behavior) or `"owner"` (signifying "run as owner" behavior) + :param tags: List[str] (optional) :returns: :class:`Query` \ No newline at end of file diff --git a/docs/workspace/sql/query_history.rst b/docs/workspace/sql/query_history.rst index 2f5520cdf..6aacd3c78 100644 --- a/docs/workspace/sql/query_history.rst +++ b/docs/workspace/sql/query_history.rst @@ -4,10 +4,9 @@ .. py:class:: QueryHistoryAPI - A service responsible for storing and retrieving the list of queries run against SQL endpoints and - serverless compute. + Access the history of queries through SQL warehouses. - .. py:method:: list( [, filter_by: Optional[QueryFilter], include_metrics: Optional[bool], max_results: Optional[int], page_token: Optional[str]]) -> ListQueriesResponse + .. py:method:: list( [, filter_by: Optional[QueryFilter], include_metrics: Optional[bool], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[QueryInfo] Usage: @@ -24,23 +23,20 @@ List Queries. - List the history of queries through SQL warehouses, and serverless compute. + List the history of queries through SQL warehouses. - You can filter by user ID, warehouse ID, status, and time range. Most recently started queries are - returned first (up to max_results in request). The pagination token returned in response can be used - to list subsequent query statuses. + You can filter by user ID, warehouse ID, status, and time range. :param filter_by: :class:`QueryFilter` (optional) A filter to limit query history results. This field is optional. :param include_metrics: bool (optional) - Whether to include the query metrics with each query. Only use this for a small subset of queries - (max_results). Defaults to false. 
+ Whether to include metrics about the query.
 :param max_results: int (optional)
- Limit the number of results returned in one page. Must be less than 1000 and the default is 100.
+ Limit the number of results returned in one page. The default is 100.
 :param page_token: str (optional)
 A token that can be used to get the next page of results. The token can contain characters that
 need to be encoded before using it in a URL. For example, the character '+' needs to be replaced by
- %2B. This field is optional.
+ %2B.
- :returns: :class:`ListQueriesResponse`
+ :returns: Iterator over :class:`QueryInfo`
\ No newline at end of file
diff --git a/docs/workspace/sql/query_visualizations.rst b/docs/workspace/sql/query_visualizations.rst
index 95095fb20..53888cee7 100644
--- a/docs/workspace/sql/query_visualizations.rst
+++ b/docs/workspace/sql/query_visualizations.rst
@@ -4,43 +4,56 @@
 .. py:class:: QueryVisualizationsAPI
- This is an evolving API that facilitates the addition and removal of visualizations from existing queries
- in the Databricks Workspace. Data structures can change over time.
+ This is an evolving API that facilitates the addition and removal of visualizations from existing queries
+ within the Databricks Workspace. Data structures may change over time.
- .. py:method:: create( [, visualization: Optional[CreateVisualizationRequestVisualization]]) -> Visualization
+ .. py:method:: create(query_id: str, type: str, options: Any [, description: Optional[str], name: Optional[str]]) -> Visualization
- Add a visualization to a query.
+ Add visualization to a query.
- Adds a visualization to a query.
-
- :param visualization: :class:`CreateVisualizationRequestVisualization` (optional)
+ :param query_id: str
+ The identifier returned by :method:queries/create
+ :param type: str
+ The type of visualization: chart, table, pivot table, and so on.
+ :param options: Any
+ The options object varies widely from one visualization type to the next and is unsupported.
+ Databricks does not recommend modifying visualization settings in JSON.
+ :param description: str (optional)
+ A short description of this visualization. This is not displayed in the UI.
+ :param name: str (optional)
+ The name of the visualization that appears on dashboards and the query screen.
 :returns: :class:`Visualization`
 .. py:method:: delete(id: str)
- Remove a visualization.
-
- Removes a visualization.
+ Remove visualization.
 :param id: str
+ Widget ID returned by :method:queryvisualizations/create
- .. py:method:: update(id: str, update_mask: str [, visualization: Optional[UpdateVisualizationRequestVisualization]]) -> Visualization
+ .. py:method:: update(id: str [, created_at: Optional[str], description: Optional[str], name: Optional[str], options: Optional[Any], query: Optional[Query], type: Optional[str], updated_at: Optional[str]]) -> Visualization
- Update a visualization.
-
- Updates a visualization.
+ Edit existing visualization.
 :param id: str
- :param update_mask: str
- Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the
- setting payload will be updated. The field mask needs to be supplied as single string. To specify
- multiple fields in the field mask, use comma as the separator (no space).
- :param visualization: :class:`UpdateVisualizationRequestVisualization` (optional)
+ The UUID for this visualization.
+ :param created_at: str (optional)
+ :param description: str (optional)
+ A short description of this visualization. This is not displayed in the UI.
+ :param name: str (optional) + The name of the visualization that appears on dashboards and the query screen. + :param options: Any (optional) + The options object varies widely from one visualization type to the next and is unsupported. + Databricks does not recommend modifying visualization settings in JSON. + :param query: :class:`Query` (optional) + :param type: str (optional) + The type of visualization: chart, table, pivot table, and so on. + :param updated_at: str (optional) :returns: :class:`Visualization` \ No newline at end of file diff --git a/docs/workspace/sql/statement_execution.rst b/docs/workspace/sql/statement_execution.rst index 716fa4fdc..7914977c2 100644 --- a/docs/workspace/sql/statement_execution.rst +++ b/docs/workspace/sql/statement_execution.rst @@ -82,9 +82,7 @@ are approximate, occur server-side, and cannot account for things such as caller delays and network latency from caller to service. - The system will auto-close a statement after one hour if the client stops polling and thus you must poll at least once an hour. - The results are only available for one hour - after success; polling does not extend this. - The SQL Execution API must be used for the entire lifecycle - of the statement. For example, you cannot use the Jobs API to execute the command, and then the SQL - Execution API to cancel it. + after success; polling does not extend this. [Apache Arrow Columnar]: https://arrow.apache.org/overview/ [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html @@ -103,7 +101,7 @@ - .. py:method:: execute_statement(statement: str, warehouse_id: str [, byte_limit: Optional[int], catalog: Optional[str], disposition: Optional[Disposition], format: Optional[Format], on_wait_timeout: Optional[ExecuteStatementRequestOnWaitTimeout], parameters: Optional[List[StatementParameterListItem]], row_limit: Optional[int], schema: Optional[str], wait_timeout: Optional[str]]) -> StatementResponse + .. py:method:: execute_statement(statement: str, warehouse_id: str [, byte_limit: Optional[int], catalog: Optional[str], disposition: Optional[Disposition], format: Optional[Format], on_wait_timeout: Optional[ExecuteStatementRequestOnWaitTimeout], parameters: Optional[List[StatementParameterListItem]], row_limit: Optional[int], schema: Optional[str], wait_timeout: Optional[str]]) -> ExecuteStatementResponse Execute a SQL statement. @@ -124,6 +122,26 @@ [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html :param disposition: :class:`Disposition` (optional) + The fetch disposition provides two modes of fetching results: `INLINE` and `EXTERNAL_LINKS`. + + Statements executed with `INLINE` disposition will return result data inline, in `JSON_ARRAY` + format, in a series of chunks. If a given statement produces a result set with a size larger than 25 + MiB, that statement execution is aborted, and no result set will be available. + + **NOTE** Byte limits are computed based upon internal representations of the result set data, and + might not match the sizes visible in JSON responses. + + Statements executed with `EXTERNAL_LINKS` disposition will return result data as external links: + URLs that point to cloud storage internal to the workspace. Using `EXTERNAL_LINKS` disposition + allows statements to generate arbitrarily sized result sets for fetching up to 100 GiB. The + resulting links have two important properties: + + 1. 
They point to resources _external_ to the Databricks compute; therefore any associated
+ authentication information (typically a personal access token, OAuth token, or similar) _must be
+ removed_ when fetching from these links.
+
+ 2. These are presigned URLs with a specific expiration, indicated in the response. The behavior when
+ attempting to use an expired link is cloud specific.
 :param format: :class:`Format` (optional)
 Statement execution supports three result formats: `JSON_ARRAY` (default), `ARROW_STREAM`, and
 `CSV`.
@@ -211,10 +229,10 @@
 the statement takes longer to execute, `on_wait_timeout` determines what should happen after the
 timeout is reached.
- :returns: :class:`StatementResponse`
+ :returns: :class:`ExecuteStatementResponse`
- .. py:method:: get_statement(statement_id: str) -> StatementResponse
+ .. py:method:: get_statement(statement_id: str) -> GetStatementResponse
 Get status, manifest, and result first chunk.
@@ -230,7 +248,7 @@
 The statement ID is returned upon successfully submitting a SQL statement, and is a required
 reference for all subsequent calls.
- :returns: :class:`StatementResponse`
+ :returns: :class:`GetStatementResponse`
 .. py:method:: get_statement_result_chunk_n(statement_id: str, chunk_index: int) -> ResultData
diff --git a/docs/workspace/sql/warehouses.rst b/docs/workspace/sql/warehouses.rst
index 58b8a3fc0..8a5da4302 100644
--- a/docs/workspace/sql/warehouses.rst
+++ b/docs/workspace/sql/warehouses.rst
@@ -41,8 +41,7 @@
 The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it
 is automatically stopped.
- Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins for
- non-serverless warehouses - 0 indicates no autostop.
+ Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
 Defaults to 120 mins
 :param channel: :class:`Channel` (optional)
diff --git a/docs/workspace/workspace/git_credentials.rst b/docs/workspace/workspace/git_credentials.rst
index 34851e84a..490cb16ea 100644
--- a/docs/workspace/workspace/git_credentials.rst
+++ b/docs/workspace/workspace/git_credentials.rst
@@ -33,9 +33,9 @@
 existing credentials, or the DELETE endpoint to delete existing credentials.
 :param git_provider: str
- Git provider. This field is case-insensitive. The available Git providers are `gitHub`,
- `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`,
- `gitLabEnterpriseEdition` and `awsCodeCommit`.
+ Git provider. This field is case-insensitive. The available Git providers are gitHub,
+ bitbucketCloud, gitLab, azureDevOpsServices, gitHubEnterprise, bitbucketServer,
+ gitLabEnterpriseEdition and awsCodeCommit.
 :param git_username: str (optional)
 The username or email provided with your Git provider account, depending on which provider you are
 using. For GitHub, GitHub Enterprise Server, or Azure DevOps Services, either email or username may
@@ -44,7 +44,8 @@
 Access Token authentication documentation to see what is supported.
 :param personal_access_token: str (optional)
 The personal access token used to authenticate to the corresponding Git provider. For certain
- providers, support may exist for other types of scoped access tokens. [Learn more].
+ providers, support may exist for other types of scoped access tokens. [Learn more].
 [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html
@@ -63,7 +64,7 @@
- ..
py:method:: get(credential_id: int) -> GetCredentialsResponse
+ .. py:method:: get(credential_id: int) -> CredentialInfo
 Usage:
@@ -88,7 +89,7 @@
 :param credential_id: int
 The ID for the corresponding credential to access.
- :returns: :class:`GetCredentialsResponse`
+ :returns: :class:`CredentialInfo`
 .. py:method:: list() -> Iterator[CredentialInfo]
@@ -111,7 +112,7 @@
 :returns: Iterator over :class:`CredentialInfo`
- .. py:method:: update(credential_id: int, git_provider: str [, git_username: Optional[str], personal_access_token: Optional[str]])
+ .. py:method:: update(credential_id: int [, git_provider: Optional[str], git_username: Optional[str], personal_access_token: Optional[str]])
 Usage:
@@ -140,10 +141,10 @@
 :param credential_id: int
 The ID for the corresponding credential to access.
- :param git_provider: str
- Git provider. This field is case-insensitive. The available Git providers are `gitHub`,
- `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`,
- `gitLabEnterpriseEdition` and `awsCodeCommit`.
+ :param git_provider: str (optional)
+ Git provider. This field is case-insensitive. The available Git providers are gitHub,
+ bitbucketCloud, gitLab, azureDevOpsServices, gitHubEnterprise, bitbucketServer,
+ gitLabEnterpriseEdition and awsCodeCommit.
 :param git_username: str (optional)
 The username or email provided with your Git provider account, depending on which provider you are
 using. For GitHub, GitHub Enterprise Server, or Azure DevOps Services, either email or username may
@@ -152,7 +153,8 @@
 Access Token authentication documentation to see what is supported.
 :param personal_access_token: str (optional)
 The personal access token used to authenticate to the corresponding Git provider. For certain
- providers, support may exist for other types of scoped access tokens. [Learn more].
+ providers, support may exist for other types of scoped access tokens. [Learn more].
 [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html
diff --git a/docs/workspace/workspace/repos.rst b/docs/workspace/workspace/repos.rst
index 01b1c875f..a5c602a3a 100644
--- a/docs/workspace/workspace/repos.rst
+++ b/docs/workspace/workspace/repos.rst
@@ -14,7 +14,7 @@
 Within Repos you can develop code in notebooks or other files and follow data science and engineering
 code development best practices using Git for version control, collaboration, and CI/CD.
- .. py:method:: create(url: str, provider: str [, path: Optional[str], sparse_checkout: Optional[SparseCheckout]]) -> CreateRepoResponse
+ .. py:method:: create(url: str, provider: str [, path: Optional[str], sparse_checkout: Optional[SparseCheckout]]) -> RepoInfo
 Usage:
@@ -42,17 +42,17 @@
 :param url: str
 URL of the Git repository to be linked.
 :param provider: str
- Git provider. This field is case-insensitive. The available Git providers are `gitHub`,
- `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`,
- `gitLabEnterpriseEdition` and `awsCodeCommit`.
+ Git provider. This field is case-insensitive. The available Git providers are gitHub,
+ bitbucketCloud, gitLab, azureDevOpsServices, gitHubEnterprise, bitbucketServer,
+ gitLabEnterpriseEdition and awsCodeCommit.
 :param path: str (optional)
 Desired path for the repo in the workspace. Almost any path in the workspace can be chosen. If repo
- is created in `/Repos`, path must be in the format `/Repos/{folder}/{repo-name}`.
+ is created in /Repos, path must be in the format /Repos/{folder}/{repo-name}. :param sparse_checkout: :class:`SparseCheckout` (optional) If specified, the repo will be created with sparse checkout enabled. You cannot enable/disable sparse checkout after the repo is created. - :returns: :class:`CreateRepoResponse` + :returns: :class:`RepoInfo` .. py:method:: delete(repo_id: int) @@ -62,12 +62,12 @@ Deletes the specified repo. :param repo_id: int - ID of the Git folder (repo) object in the workspace. + The ID for the corresponding repo to access. - .. py:method:: get(repo_id: int) -> GetRepoResponse + .. py:method:: get(repo_id: int) -> RepoInfo Usage: @@ -94,9 +94,9 @@ Returns the repo with the given repo ID. :param repo_id: int - ID of the Git folder (repo) object in the workspace. + The ID for the corresponding repo to access. - :returns: :class:`GetRepoResponse` + :returns: :class:`RepoInfo` .. py:method:: get_permission_levels(repo_id: str) -> GetRepoPermissionLevelsResponse @@ -139,16 +139,15 @@ Get repos. - Returns repos that the calling user has Manage permissions on. Use `next_page_token` to iterate - through additional pages. + Returns repos that the calling user has Manage permissions on. Results are paginated with each page + containing twenty repos. :param next_page_token: str (optional) Token used to get the next page of results. If not specified, returns the first page of results as well as a next page token if there are more results. :param path_prefix: str (optional) - Filters repos that have paths starting with the given path prefix. If not provided or when provided - an effectively empty prefix (`/` or `/Workspace`) Git folders (repos) from `/Workspace/Repos` will - be served. + Filters repos that have paths starting with the given path prefix. If not provided repos from /Repos + will be served. :returns: Iterator over :class:`RepoInfo` @@ -194,7 +193,7 @@ branch. :param repo_id: int - ID of the Git folder (repo) object in the workspace. + The ID for the corresponding repo to access. :param branch: str (optional) Branch that the local version of the repo is checked out to. 
From 5a4de7afdb66b0e1317e4a3ebe9fac053043c047 Mon Sep 17 00:00:00 2001
From: Nick Lee
Date: Tue, 22 Oct 2024 15:47:32 +0200
Subject: [PATCH 3/4] Add integration test for baremetal API

---
 databricks/sdk/mixins/files.py  |  6 ++++--
 tests/integration/test_files.py | 30 ++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/databricks/sdk/mixins/files.py b/databricks/sdk/mixins/files.py
index e07dc449c..39cab09b3 100644
--- a/databricks/sdk/mixins/files.py
+++ b/databricks/sdk/mixins/files.py
@@ -724,7 +724,7 @@ def _files_multipart_upload_complete(self, file_path: str, session_token: str, e
             body={'etags': etags})
         return

-    def _ps_multipart_upload_create_part_urls(self, session_token: str, page_token: str, page_size: int):
+    def multipart_upload_create_part_urls(self, session_token: str, *, page_token: Optional[str] = None, page_size: int) -> MultipartUploadCreatePartUrlsResponse:
         """Request a set of presigned URLs for uploading parts of a file in a multipart upload session."""

         headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
@@ -762,7 +762,9 @@ def execute_presigned_url_request(self,
                 "Not all client-provided headers are populated") # TODO: Move to a dedicated exception type

         request_headers = {**presigned_url.headers, **headers}
-        self._api.do(presigned_url.method, presigned_url.url, headers=request_headers, data=data)
+        resp_headers = {}
+        resp = self._api.do(presigned_url.method, presigned_url.url, headers=request_headers, data=data, response_headers = resp_headers)
+        return (resp, resp_headers)


 class PresignedUrl:
diff --git a/tests/integration/test_files.py b/tests/integration/test_files.py
index 7b9ede556..fff84e39b 100644
--- a/tests/integration/test_files.py
+++ b/tests/integration/test_files.py
@@ -8,6 +8,7 @@
 import pytest

 from databricks.sdk.core import DatabricksError
+from databricks.sdk.mixins.files import MultipartUploadCreate, MultipartUploadCreatePartUrlsResponse
 from databricks.sdk.service.catalog import VolumeType


@@ -228,6 +229,35 @@ def test_files_api_upload_download(ucws, random):
         with w.files.download(target_file).contents as f:
             assert f.read() == b"some text data"

+def test_files_multipart_upload_download_baremetal(ucws, random):
+    w = ucws
+    schema = 'filesit-' + random()
+    volume = 'filesit-' + random()
+    with ResourceWithCleanup.create_schema(w, 'main', schema):
+        with ResourceWithCleanup.create_volume(w, 'main', schema, volume):
+            f = io.BytesIO(b"some text data")
+            target_file = f'/Volumes/main/{schema}/{volume}/filesit-with-?-and-#-{random()}.txt'
+
+            ## Create a session, upload the file in a single part, and then finalize the session
+            session: MultipartUploadCreate = w.files.multipart_upload_create(target_file)
+
+            url_page_one: MultipartUploadCreatePartUrlsResponse = w.files.multipart_upload_create_part_urls(session.session_token, page_size=1)
+
+            # Verify that a second page of URLs may be requested
+            w.files.multipart_upload_create_part_urls(session.session_token, page_token=url_page_one.next_page_token, page_size=1)
+
+
+            (part_upload_resp, part_upload_resp_headers) = w.files.execute_presigned_url_request(url_page_one.upload_part_urls[0], data=f)
+            print(part_upload_resp)
+            print(part_upload_resp_headers)
+
+            w.files.multipart_upload_complete(target_file, session.session_token, [part_upload_resp_headers["etag"]])
+
+            ## Download the file & assert that it's what we expect
+            with w.files.download(target_file).contents as f:
+                assert f.read() == b"some text data"
+
+
 def test_files_api_read_twice_from_one_download(ucws, random):
     w = ucws
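Stripped of the pytest fixtures, the flow this test exercises reduces to four SDK calls. A minimal single-part sketch, assuming a configured WorkspaceClient and an existing UC volume (the target path is illustrative):

    import io

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    target_file = '/Volumes/main/my_schema/my_volume/example.txt'  # hypothetical volume path

    # 1. Open a multipart upload session for the target path.
    session = w.files.multipart_upload_create(target_file)

    # 2. Fetch a page of presigned part-upload URLs for the session.
    page = w.files.multipart_upload_create_part_urls(session.session_token, page_size=1)

    # 3. Issue the upload against the first presigned URL; the part's etag
    #    comes back through the response headers.
    resp, resp_headers = w.files.execute_presigned_url_request(page.upload_part_urls[0],
                                                               data=io.BytesIO(b"some text data"))

    # 4. Commit the uploaded part, materializing the file at target_file.
    w.files.multipart_upload_complete(target_file, session.session_token, [resp_headers["etag"]])

Note that nothing is orchestrated for the caller yet: collecting etags in part order and pairing each part with its URL is the caller's job, which is what makes this the "baremetal" API.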
b"some text data" + + def test_files_api_read_twice_from_one_download(ucws, random): w = ucws From 9aab55aa2bcbf7de8daf0515458418a3bcb43a4a Mon Sep 17 00:00:00 2001 From: Nick Lee Date: Wed, 23 Oct 2024 16:40:51 +0200 Subject: [PATCH 4/4] Format --- databricks/sdk/mixins/files.py | 13 +++++++++---- tests/integration/test_files.py | 19 ++++++++++++------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/databricks/sdk/mixins/files.py b/databricks/sdk/mixins/files.py index 39cab09b3..a7314a32f 100644 --- a/databricks/sdk/mixins/files.py +++ b/databricks/sdk/mixins/files.py @@ -11,8 +11,8 @@ from enum import Enum from io import BytesIO from types import TracebackType -from typing import (TYPE_CHECKING, AnyStr, BinaryIO, Generator, Iterable, Iterator, Type, Union) -from typing import Dict, List, Optional +from typing import (TYPE_CHECKING, AnyStr, BinaryIO, Dict, Generator, Iterable, + Iterator, List, Optional, Type, Union) from urllib import parse from .._property import _cached_property @@ -724,7 +724,8 @@ def _files_multipart_upload_complete(self, file_path: str, session_token: str, e body={'etags': etags}) return - def multipart_upload_create_part_urls(self, session_token: str, *, page_token: str, page_size: int) -> MultipartUploadCreatePartUrlsResponse: + def multipart_upload_create_part_urls(self, session_token: str, *, page_token: str, + page_size: int) -> MultipartUploadCreatePartUrlsResponse: """Request a set of presigned URLs for uploading parts of a file in a multipart upload session.""" headers = {'Accept': 'application/json', 'Content-Type': 'application/json'} @@ -763,7 +764,11 @@ def execute_presigned_url_request(self, request_headers = {**presigned_url.headers, **headers} resp_headers = {} - resp = self._api.do(presigned_url.method, presigned_url.url, headers=request_headers, data=data, response_headers = resp_headers) + resp = self._api.do(presigned_url.method, + presigned_url.url, + headers=request_headers, + data=data, + response_headers=resp_headers) return (resp, resp_headers) diff --git a/tests/integration/test_files.py b/tests/integration/test_files.py index fff84e39b..f6da18022 100644 --- a/tests/integration/test_files.py +++ b/tests/integration/test_files.py @@ -8,7 +8,8 @@ import pytest from databricks.sdk.core import DatabricksError -from databricks.sdk.mixins.files import MultipartUploadCreate, MultipartUploadCreatePartUrlsResponse +from databricks.sdk.mixins.files import (MultipartUploadCreate, + MultipartUploadCreatePartUrlsResponse) from databricks.sdk.service.catalog import VolumeType @@ -229,6 +230,7 @@ def test_files_api_upload_download(ucws, random): with w.files.download(target_file).contents as f: assert f.read() == b"some text data" + def test_files_multipart_upload_download_baremetal(ucws, random): w = ucws schema = 'filesit-' + random() @@ -241,24 +243,27 @@ def test_files_multipart_upload_download_baremetal(ucws, random): ## Create a session, upload the file in a single part, and then finalize the session session: MultipartUploadCreate = w.files.multipart_upload_create(target_file) - url_page_one: MultipartUploadCreatePartUrlsResponse = w.files.multipart_upload_create_part_urls(session.session_token, page_size=1) + url_page_one: MultipartUploadCreatePartUrlsResponse = w.files.multipart_upload_create_part_urls( + session.session_token, page_size=1) # Verify that a second page of URLs may be requested - w.files.multipart_upload_create_part_urls(session.session_token, page_token=url_page_one.next_page_token, page_size=1) - + 
w.files.multipart_upload_create_part_urls(session.session_token, + page_token=url_page_one.next_page_token, + page_size=1) - (part_upload_resp, part_upload_resp_headers) = w.files.execute_presigned_url_request(url_page_one.upload_part_urls[0], data=f) + (part_upload_resp, part_upload_resp_headers) = w.files.execute_presigned_url_request( + url_page_one.upload_part_urls[0], data=f) print(part_upload_resp) print(part_upload_resp_headers) - w.files.multipart_upload_complete(target_file, session.session_token, [part_upload_resp_headers["etag"]]) + w.files.multipart_upload_complete(target_file, session.session_token, + [part_upload_resp_headers["etag"]]) ## Download the file & assert that it's what we expect with w.files.download(target_file).contents as f: assert f.read() == b"some text data" - def test_files_api_read_twice_from_one_download(ucws, random): w = ucws schema = 'filesit-' + random()
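The test drives a single part through a single page of URLs. For a payload larger than one part, the same calls compose into a paginated loop. The sketch below is illustrative rather than part of the patches: it assumes MultipartUploadCreate exposes the server-chosen part size as part_size, and that the service keeps issuing URL pages while parts remain outstanding.

    import io

    from databricks.sdk import WorkspaceClient

    def multipart_upload(w: WorkspaceClient, target_file: str, payload: bytes):
        # Open a session; the server dictates how large each part must be.
        session = w.files.multipart_upload_create(target_file)
        buf = io.BytesIO(payload)
        etags = []
        page_token = None
        exhausted = False
        while not exhausted:
            # Fetch the next page of presigned part-upload URLs.
            page = w.files.multipart_upload_create_part_urls(session.session_token,
                                                             page_token=page_token,
                                                             page_size=10)
            for part_url in page.upload_part_urls:
                chunk = buf.read(session.part_size)
                if not chunk:
                    exhausted = True
                    break
                # Upload one part; its etag arrives via the response headers.
                _, headers = w.files.execute_presigned_url_request(part_url, data=io.BytesIO(chunk))
                etags.append(headers["etag"])
            page_token = page.next_page_token
        # Commit the parts in upload order, materializing the file.
        w.files.multipart_upload_complete(target_file, session.session_token, etags)

    multipart_upload(WorkspaceClient(), '/Volumes/main/my_schema/my_volume/big.bin', b'\x00' * (64 * 1024 * 1024))

Keeping etags in upload order matters: multipart_upload_complete takes a flat list, so the index of each etag implicitly identifies its part.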