diff --git a/.github/workflows/sync_mult_prs.yml b/.github/workflows/sync_mult_prs.yml new file mode 100644 index 00000000..73f62400 --- /dev/null +++ b/.github/workflows/sync_mult_prs.yml @@ -0,0 +1,28 @@ +name: Sync doc changes from multiple PRs + +on: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + + sync_mult_prs: + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Checkout current repo + uses: actions/checkout@v3 + + - name: Run sync_mult_prs script + run: | + git config user.name "Docsite Preview Bot" + git config user.email "" + ./sync_mult_prs.sh + ./.github/git_push.sh ${{ github.ref_name }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/sync_pr.yml b/.github/workflows/sync_pr.yml index 570fea8a..3937dfbc 100644 --- a/.github/workflows/sync_pr.yml +++ b/.github/workflows/sync_pr.yml @@ -1,4 +1,4 @@ -name: Sync documentation changes from a PR to the preview branch +name: Sync doc changes from a PR on: push: diff --git a/sync_mult_prs.sh b/sync_mult_prs.sh new file mode 100755 index 00000000..e97b3e60 --- /dev/null +++ b/sync_mult_prs.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# Synchronize the content of multiple PRs to the markdown-pages folder to deploy a preview website. + +# Usage: ./sync_mult_prs.sh + +set -ex + +# Get the directory of this script. +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +cd "$SCRIPT_DIR" + + +# Define the PRs to sync. +# The PRs will be synced in the order of the following statements. +./sync_pr.sh preview/pingcap/docs/"$DOCS_PR" +./sync_pr.sh preview/pingcap/docs-cn/"$DOCS_CN_PR" +./sync_pr.sh preview-cloud/pingcap/docs/"$CLOUD_DOCS_PR" +./sync_pr.sh preview-operator/pingcap/docs-tidb-operator/"$OPERATOR_DOCS_PR" + +# Synchronize the content from master to release-x.y directories. 
+rsync -av markdown-pages/zh/tidb/master/ markdown-pages/zh/tidb/"$RELEASE_DIR"/ +rsync -av markdown-pages/en/tidb/master/ markdown-pages/en/tidb/"$RELEASE_DIR"/ +rsync -av markdown-pages/en/tidb-in-kubernetes/master/ markdown-pages/en/tidb-in-kubernetes/"$RELEASE_DIR"/ +rsync -av markdown-pages/zh/tidb-in-kubernetes/master/ markdown-pages/zh/tidb-in-kubernetes/"$RELEASE_DIR"/ + +commit_changes() { + # Exit if TEST is set and not empty. + test -n "$TEST" && echo "Test mode, exiting..." && exit 0 + # Handle untracked files. + git add . + # Commit changes, if any. + git commit -m "Update the {release-x.y} directory" || echo "No changes to commit" +} + +commit_changes diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/TOC.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/TOC.md new file mode 100644 index 00000000..dfed473a --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/TOC.md @@ -0,0 +1,214 @@ + + + +- [TiDB on Kubernetes Docs](https://docs.pingcap.com/tidb-in-kubernetes/dev) +- Introduction + - [Overview](tidb-operator-overview.md) + - [What's New in v1.5](whats-new-in-v1.5.md) +- [Get Started](get-started.md) +- Deploy + - On Self-Managed Kubernetes + - [Prerequisites](prerequisites.md) + - [Configure Storage Class](configure-storage-class.md) + - [Deploy TiDB Operator](deploy-tidb-operator.md) + - [Configure a TiDB Cluster](configure-a-tidb-cluster.md) + - [Deploy a TiDB Cluster](deploy-on-general-kubernetes.md) + - [Initialize a TiDB Cluster](initialize-a-cluster.md) + - [Access a TiDB Cluster](access-tidb.md) + - On Public Cloud Kubernetes + - [Amazon EKS](deploy-on-aws-eks.md) + - [Google Cloud GKE](deploy-on-gcp-gke.md) + - [Azure AKS](deploy-on-azure-aks.md) + - [Alibaba Cloud ACK](deploy-on-alibaba-cloud.md) + - [Deploy TiDB on ARM64 Machines](deploy-cluster-on-arm64.md) + - [Deploy TiFlash to Explore TiDB HTAP](deploy-tiflash.md) + - Deploy TiDB Across Multiple 
Kubernetes Clusters + - [Build Multiple Interconnected AWS EKS Clusters](build-multi-aws-eks.md) + - [Build Multiple Interconnected GKE Clusters](build-multi-gcp-gke.md) + - [Deploy TiDB Across Multiple Kubernetes Clusters](deploy-tidb-cluster-across-multiple-kubernetes.md) + - [Deploy a Heterogeneous TiDB Cluster](deploy-heterogeneous-tidb-cluster.md) + - [Deploy TiCDC](deploy-ticdc.md) + - [Deploy TiDB Binlog](deploy-tidb-binlog.md) +- Monitor and Alert + - [Deploy Monitoring and Alerts for TiDB](monitor-a-tidb-cluster.md) + - [Monitor and Diagnose TiDB Using TiDB Dashboard](access-dashboard.md) + - [Aggregate Monitoring Data of Multiple TiDB Clusters](aggregate-multiple-cluster-monitor-data.md) + - [Monitor a TiDB Cluster across Multiple Kubernetes Clusters](deploy-tidb-monitor-across-multiple-kubernetes.md) + - [Enable Dynamic Configuration for TidbMonitor](enable-monitor-dynamic-configuration.md) + - [Enable Shards for TidbMonitor](enable-monitor-shards.md) +- Migrate + - [Import Data](restore-data-using-tidb-lightning.md) + - Migrate from MySQL + - [Deploy DM](deploy-tidb-dm.md) + - [Migrate to TiDB Using DM](use-tidb-dm.md) + - [Migrate TiDB to Kubernetes](migrate-tidb-to-kubernetes.md) +- Manage + - Secure + - [Enable TLS for the MySQL Client](enable-tls-for-mysql-client.md) + - [Enable TLS between TiDB Components](enable-tls-between-components.md) + - [Enable TLS for DM](enable-tls-for-dm.md) + - [Replicate Data to TLS-enabled Downstream Services](enable-tls-for-ticdc-sink.md) + - [Renew and Replace the TLS Certificate](renew-tls-certificate.md) + - [Run Containers as a Non-root User](containers-run-as-non-root-user.md) + - [Scale](scale-a-tidb-cluster.md) + - Upgrade + - [Upgrade a TiDB Cluster](upgrade-a-tidb-cluster.md) + - Upgrade TiDB Operator + - [Normal Upgrade](upgrade-tidb-operator.md) + - [Canary Upgrade](canary-upgrade-tidb-operator.md) + - Backup and Restore + - [Overview](backup-restore-overview.md) + - [Backup and Restore Custom 
Resources](backup-restore-cr.md) + - [Grant Permissions to Remote Storage](grant-permissions-to-remote-storage.md) + - Amazon S3 Compatible Storage + - [Back Up Data Using BR](backup-to-aws-s3-using-br.md) + - [Restore Data Using BR](restore-from-aws-s3-using-br.md) + - [Back Up Data Using Dumpling](backup-to-s3.md) + - [Restore Data Using TiDB Lightning](restore-from-s3.md) + - Google Cloud Storage + - [Back Up Data Using BR](backup-to-gcs-using-br.md) + - [Restore Data Using BR](restore-from-gcs-using-br.md) + - [Back Up Data Using Dumpling](backup-to-gcs.md) + - [Restore Data Using TiDB Lightning](restore-from-gcs.md) + - Azure Blob Storage + - [Back Up Data Using BR](backup-to-azblob-using-br.md) + - [Restore Data Using BR](restore-from-azblob-using-br.md) + - Persistent Volumes + - [Back Up Data](backup-to-pv-using-br.md) + - [Restore Data](restore-from-pv-using-br.md) + - Snapshot Backup and Restore + - [Architecture](volume-snapshot-backup-restore.md) + - [Back Up Data Using EBS Snapshots](backup-to-aws-s3-by-snapshot.md) + - [Restore Data from EBS Snapshots](restore-from-aws-s3-by-snapshot.md) + - [Backup and Restore Performance](backup-restore-snapshot-perf.md) + - [FAQs](backup-restore-faq.md) + - Maintain + - [Restart a TiDB Cluster](restart-a-tidb-cluster.md) + - [Destroy a TiDB Cluster](destroy-a-tidb-cluster.md) + - [View TiDB Logs](view-logs.md) + - [Modify TiDB Cluster Configuration](modify-tidb-configuration.md) + - [Configure Automatic Failover](use-auto-failover.md) + - [Pause Sync of a TiDB Cluster](pause-sync-of-tidb-cluster.md) + - [Suspend a TiDB Cluster](suspend-tidb-cluster.md) + - [Maintain Different TiDB Clusters Separately Using Multiple TiDB Operator](deploy-multiple-tidb-operator.md) + - [Maintain Kubernetes Nodes](maintain-a-kubernetes-node.md) + - [Migrate from Helm 2 to Helm 3](migrate-to-helm3.md) + - Replace Nodes for a TiDB Cluster + - [Replace Nodes on Cloud Disks](replace-nodes-for-cloud-disk.md) + - [Replace Nodes on Local 
Disks](replace-nodes-for-local-disk.md) + - Disaster Recovery + - [Recover a Deleted TiDB Cluster](recover-deleted-cluster.md) + - [Recover a PD Cluster](pd-recover.md) +- Troubleshoot + - [Troubleshooting Tips](tips.md) + - [Deployment Failures](deploy-failures.md) + - [Cluster Exceptions](exceptions.md) + - [Network Issues](network-issues.md) + - [Troubleshoot TiDB Cluster Using PingCAP Clinic](clinic-user-guide.md) +- [FAQs](faq.md) +- Reference + - Architecture + - [TiDB Operator](architecture.md) + - [TiDB Scheduler](tidb-scheduler.md) + - [Advanced StatefulSet Controller](advanced-statefulset.md) + - [Admission Controller](enable-admission-webhook.md) + - [Sysbench Performance Test](benchmark-sysbench.md) + - [API References](https://github.com/pingcap/tidb-operator/blob/master/docs/api-references/docs.md) + - [Cheat Sheet](cheat-sheet.md) + - [Required RBAC Rules](tidb-operator-rbac.md) + - Tools + - [tkctl](use-tkctl.md) + - [TiDB Toolkit](tidb-toolkit.md) + - Configure + - [Configure tidb-drainer Chart](configure-tidb-binlog-drainer.md) + - [Log Collection](logs-collection.md) + - [Monitoring and Alert on Kubernetes](monitor-kubernetes.md) + - [PingCAP Clinic Diagnostic Data](clinic-data-collection.md) +- Release Notes + - v1.5 + - [1.5 GA](releases/release-1.5.0.md) + - [1.5.0-beta.1](releases/release-1.5.0-beta.1.md) + - v1.4 + - [1.4.5](releases/release-1.4.5.md) + - [1.4.4](releases/release-1.4.4.md) + - [1.4.3](releases/release-1.4.3.md) + - [1.4.2](releases/release-1.4.2.md) + - [1.4.1](releases/release-1.4.1.md) + - [1.4 GA](releases/release-1.4.0.md) + - [1.4.0-beta.3](releases/release-1.4.0-beta.3.md) + - [1.4.0-beta.2](releases/release-1.4.0-beta.2.md) + - [1.4.0-beta.1](releases/release-1.4.0-beta.1.md) + - [1.4.0-alpha.1](releases/release-1.4.0-alpha.1.md) + - v1.3 + - [1.3.10](releases/release-1.3.10.md) + - [1.3.9](releases/release-1.3.9.md) + - [1.3.8](releases/release-1.3.8.md) + - [1.3.7](releases/release-1.3.7.md) + - 
[1.3.6](releases/release-1.3.6.md) + - [1.3.5](releases/release-1.3.5.md) + - [1.3.4](releases/release-1.3.4.md) + - [1.3.3](releases/release-1.3.3.md) + - [1.3.2](releases/release-1.3.2.md) + - [1.3.1](releases/release-1.3.1.md) + - [1.3 GA](releases/release-1.3.0.md) + - [1.3.0-beta.1](releases/release-1.3.0-beta.1.md) + - v1.2 + - [1.2.7](releases/release-1.2.7.md) + - [1.2.6](releases/release-1.2.6.md) + - [1.2.5](releases/release-1.2.5.md) + - [1.2.4](releases/release-1.2.4.md) + - [1.2.3](releases/release-1.2.3.md) + - [1.2.2](releases/release-1.2.2.md) + - [1.2.1](releases/release-1.2.1.md) + - [1.2 GA](releases/release-1.2.0.md) + - [1.2.0-rc.2](releases/release-1.2.0-rc.2.md) + - [1.2.0-rc.1](releases/release-1.2.0-rc.1.md) + - [1.2.0-beta.2](releases/release-1.2.0-beta.2.md) + - [1.2.0-beta.1](releases/release-1.2.0-beta.1.md) + - [1.2.0-alpha.1](releases/release-1.2.0-alpha.1.md) + - v1.1 + - [1.1.15](releases/release-1.1.15.md) + - [1.1.14](releases/release-1.1.14.md) + - [1.1.13](releases/release-1.1.13.md) + - [1.1.12](releases/release-1.1.12.md) + - [1.1.11](releases/release-1.1.11.md) + - [1.1.10](releases/release-1.1.10.md) + - [1.1.9](releases/release-1.1.9.md) + - [1.1.8](releases/release-1.1.8.md) + - [1.1.7](releases/release-1.1.7.md) + - [1.1.6](releases/release-1.1.6.md) + - [1.1.5](releases/release-1.1.5.md) + - [1.1.4](releases/release-1.1.4.md) + - [1.1.3](releases/release-1.1.3.md) + - [1.1.2](releases/release-1.1.2.md) + - [1.1.1](releases/release-1.1.1.md) + - [1.1 GA](releases/release-1.1-ga.md) + - [1.1.0-rc.4](releases/release-1.1.0-rc.4.md) + - [1.1.0-rc.3](releases/release-1.1.0-rc.3.md) + - [1.1.0-rc.2](releases/release-1.1.0-rc.2.md) + - [1.1.0-rc.1](releases/release-1.1.0-rc.1.md) + - [1.1.0-beta.2](releases/release-1.1.0-beta.2.md) + - [1.1.0-beta.1](releases/release-1.1.0-beta.1.md) + - v1.0 + - [1.0.7](releases/release-1.0.7.md) + - [1.0.6](releases/release-1.0.6.md) + - [1.0.5](releases/release-1.0.5.md) + - 
[1.0.4](releases/release-1.0.4.md) + - [1.0.3](releases/release-1.0.3.md) + - [1.0.2](releases/release-1.0.2.md) + - [1.0.1](releases/release-1.0.1.md) + - [1.0 GA](releases/release-1.0-ga.md) + - [1.0.0-rc.1](releases/release-1.0.0-rc.1.md) + - [1.0.0-beta.3](releases/release-1.0.0-beta.3.md) + - [1.0.0-beta.2](releases/release-1.0.0-beta.2.md) + - [1.0.0-beta.1-p2](releases/release-1.0.0-beta.1-p2.md) + - [1.0.0-beta.1-p1](releases/release-1.0.0-beta.1-p1.md) + - [1.0.0-beta.1](releases/release-1.0.0-beta.1.md) + - [1.0.0-beta.0](releases/release-1.0.0-beta.0.md) + - v0 + - [0.4.0](releases/release-0.4.0.md) + - [0.3.1](releases/release-0.3.1.md) + - [0.3.0](releases/release-0.3.0.md) + - [0.2.1](releases/release-0.2.1.md) + - [0.2.0](releases/release-0.2.0.md) + - [0.1.0](releases/release-0.1.0.md) diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md new file mode 100644 index 00000000..2e660799 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md @@ -0,0 +1,207 @@ +--- +title: Grant Permissions to Remote Storage +summary: Learn how to grant permissions to access remote storage for backup and restore. +--- + +# Grant Permissions to Remote Storage + +This document describes how to grant permissions to access remote storage for backup and restore. During the backup process, TiDB cluster data is backed up to the remote storage. During the restore process, the backup data is restored from the remote storage to the TiDB cluster. + +## AWS account permissions + +Amazon Web Service (AWS) provides different methods to grant permissions for different types of Kubernetes clusters. This document describes the following three methods. 
+ +### Grant permissions by AccessKey and SecretKey + +The AWS client can read `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` from the process environment variables to obtain the associated user or role permissions. + +Create the `s3-secret` secret by running the following command. Use the AWS account's AccessKey and SecretKey. The secret stores the credential used for accessing S3-compatible storage. + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic s3-secret --from-literal=access_key=xxx --from-literal=secret_key=yyy --namespace=test1 +``` + +### Grant permissions by associating IAM with Pod + +If you associate the user's [IAM](https://aws.amazon.com/cn/iam/) role with the resources of the running Pods, the processes running in the Pods can have the permissions of the role. This method is provided by [`kube2iam`](https://github.com/jtblin/kube2iam). + +> **Note:** +> +> - When you use this method to grant permissions, you can [create the `kube2iam` environment](https://github.com/jtblin/kube2iam#usage) in the Kubernetes cluster and deploy TiDB Operator and the TiDB cluster. +> - This method is not applicable to the [`hostNetwork`](https://kubernetes.io/docs/concepts/policy/pod-security-policy) mode. Make sure the value of `spec.tikv.hostNetwork` is set to `false`. + +1. Create an IAM role. + + First, [create an IAM User](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) for your account. + + Then, Give the required permission to the IAM role you have created. Refer to [Adding and Removing IAM Identity Permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) for details. + + Because the `Backup` CR needs to access the Amazon S3 storage, the IAM role is granted the `AmazonS3FullAccess` permission. 
+ + When backing up a TiDB cluster using EBS volume snapshots, besides the `AmazonS3FullAccess` permission, the following permissions are also required: + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + +2. Associate IAM with the TiKV Pod: + + When you use BR to back up TiDB data, the TiKV Pod also needs to perform read and write operations on S3-compatible storage as the BR Pod does. Therefore, you need to add annotations to the TiKV Pod to associate it with the IAM role. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"annotations":{"iam.amazonaws.com/role":"arn:aws:iam::123456789012:role/user"}}}}' + ``` + + After the TiKV Pod is restarted, check whether the Pod has the annotation. + +> **Note:** +> +> `arn:aws:iam::123456789012:role/user` is the IAM role created in Step 1. + +### Grant permissions by associating IAM with ServiceAccount + +If you associate the user's [IAM](https://aws.amazon.com/cn/iam/) role with [`serviceAccount`](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#serviceaccount) of Kubernetes, the Pods using the `serviceAccount` can have the permissions of the role. This method is provided by [`EKS Pod Identity Webhook`](https://github.com/aws/amazon-eks-pod-identity-webhook). + +When you use this method to grant permissions, you can [create the EKS cluster](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/create-cluster.html) and deploy TiDB Operator and the TiDB cluster. + +1. 
Enable the IAM role for the `serviceAccount` in the cluster: + + Refer to [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). + +2. Create the IAM role: + + [Create an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) and grant the `AmazonS3FullAccess` permissions to the role. Edit the role's `Trust relationships` to grant tidb-backup-manager the access to this IAM role. + + When backing up a TiDB cluster using EBS volume snapshots, besides the `AmazonS3FullAccess` permission, the following permissions are also required: + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + + At the same time, edit the role's `Trust relationships` to grant tidb-controller-manager the access to this IAM role. + +3. Associate the IAM role with the `ServiceAccount` resources. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl annotate sa tidb-backup-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=test1 + ``` + + When backing up or restoring a TiDB cluster using EBS volume snapshots, you need to associate the IAM role with the `ServiceAccount` resources of tidb-controller-manager. + + ```shell + kubectl annotate sa tidb-controller-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=tidb-admin + ``` + + Restart the tidb-controller-manager Pod of TiDB Operator to make the configured `ServiceAccount` take effect. + +4. 
Associate the `ServiceAccount` with the TiKV Pod: + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"serviceAccount": "tidb-backup-manager"}}}' + ``` + + Modify the value of `spec.tikv.serviceAccount` to `tidb-backup-manager`. After the TiKV Pod is restarted, check whether the Pod's `serviceAccountName` is changed. + +> **Note:** +> +> `arn:aws:iam::123456789012:role/user` is the IAM role created in Step 2. + +## GCS account permissions + +### Grant permissions by the service account + +Create the `gcs-secret` secret which stores the credential used to access GCS. The `google-credentials.json` file stores the service account key that you have downloaded from the Google Cloud console. Refer to [Google Cloud documentation](https://cloud.google.com/docs/authentication/getting-started) for details. + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic gcs-secret --from-file=credentials=./google-credentials.json -n test1 +``` + +## Azure account permissions + +Azure provides different methods to grant permissions for different types of Kubernetes clusters. This document describes the following two methods. + +### Grant permissions by access key + +The Azure client can read `AZURE_STORAGE_ACCOUNT` and `AZURE_STORAGE_KEY` from the process environment variables to obtain the associated user or role permissions. + +Run the following command to create the `azblob-secret` secret and use your Azure account access key to grant permissions. The secret stores the credential used for accessing Azure Blob Storage. 
+ +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic azblob-secret --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_STORAGE_KEY=yyy --namespace=test1 +``` + +### Grant permissions by Azure AD + +The Azure client can read `AZURE_STORAGE_ACCOUNT`, `AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, and `AZURE_CLIENT_SECRET` to obtain the associated user or role permissions. + +1. Create the `azblob-secret-ad` secret by running the following command. Use the Active Directory (AD) of your Azure account. The secret stores the credential used for accessing Azure Blob Storage. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl create secret generic azblob-secret-ad --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_CLIENT_ID=yyy --from-literal=AZURE_TENANT_ID=zzz --from-literal=AZURE_CLIENT_SECRET=aaa --namespace=test1 + ``` + +2. Associate the secret with the TiKV Pod: + + When you use BR to back up TiDB data, the TiKV Pod also needs to perform read and write operations on Azure Blob Storage as the BR Pod does. Therefore, you need to associate the TiKV Pod with the secret. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"envFrom":[{"secretRef":{"name":"azblob-secret-ad"}}]}}}' + ``` + + After the TiKV Pod is restarted, check whether the Pod has the environment variables. diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/releases/release-1.5.0.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/releases/release-1.5.0.md new file mode 100644 index 00000000..5034c822 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/releases/release-1.5.0.md @@ -0,0 +1,41 @@ +--- +title: TiDB Operator 1.5.0 Release Notes +summary: Learn about new features, improvements, and bug fixes in TiDB Operator 1.5.0. 
+--- + +# TiDB Operator 1.5.0 Release Notes + +Release date: August 4, 2023 + +TiDB Operator version: 1.5.0 + +## Rolling update changes + +If TiFlash is deployed in a TiDB cluster that is v7.1.0 or later, the TiFlash component will be rolling updated after TiDB Operator is upgraded to v1.5.0 due to [#5075](https://github.com/pingcap/tidb-operator/pull/5075). + +## New features + +- Add the BR Federation Manager component to orchestrate `Backup` and `Restore` custom resources (CR) across multiple Kubernetes clusters ([#4996](https://github.com/pingcap/tidb-operator/pull/4996), [@csuzhangxc](https://github.com/csuzhangxc)) +- Support using the `VolumeBackup` CR to back up a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots ([#5013](https://github.com/pingcap/tidb-operator/pull/5013), [@WangLe1321](https://github.com/WangLe1321)) +- Support using the `VolumeRestore` CR to restore a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots ([#5039](https://github.com/pingcap/tidb-operator/pull/5039), [@WangLe1321](https://github.com/WangLe1321)) +- Support using the `VolumeBackupSchedule` CR to automatically back up a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots ([#5036](https://github.com/pingcap/tidb-operator/pull/5036), [@BornChanger](https://github.com/BornChanger)) +- Support backing up CRs related to `TidbCluster` when backing up a TiDB cluster deployed across multiple Kubernetes based on EBS snapshots ([#5207](https://github.com/pingcap/tidb-operator/pull/5207), [@WangLe1321](https://github.com/WangLe1321)) + +## Improvements + +- Add the `startUpScriptVersion` field for DM master to specify the version of the startup script ([#4971](https://github.com/pingcap/tidb-operator/pull/4971), [@hanlins](https://github.com/hanlins)) +- Support `spec.preferIPv6` for DmCluster, TidbDashboard, TidbMonitor, and TidbNGMonitoring 
([#4977](https://github.com/pingcap/tidb-operator/pull/4977), [@KanShiori](https://github.com/KanShiori)) +- Support setting expiration time for TiKV leader eviction and PD leader transfer ([#4997](https://github.com/pingcap/tidb-operator/pull/4997), [@Tema](https://github.com/Tema)) +- Support setting toleration for `TidbInitializer` ([#5047](https://github.com/pingcap/tidb-operator/pull/5047), [@csuzhangxc](https://github.com/csuzhangxc)) +- Support configuring the timeout for PD start ([#5071](https://github.com/pingcap/tidb-operator/pull/5071), [@oliviachenairbnb](https://github.com/oliviachenairbnb)) +- Skip evicting leaders for TiKV when changing PVC size to avoid leader eviction blocked caused by low disk space ([#5101](https://github.com/pingcap/tidb-operator/pull/5101), [@csuzhangxc](https://github.com/csuzhangxc)) +- Support updating annotations and labels in services for PD, TiKV, TiFlash, TiProxy, DM-master, and DM-worker ([#4973](https://github.com/pingcap/tidb-operator/pull/4973), [@wxiaomou](https://github.com/wxiaomou)) +- Enable volume resizing by default for PV expansion ([#5167](https://github.com/pingcap/tidb-operator/pull/5167), [@liubog2008](https://github.com/liubog2008)) + +## Bug fixes + +- Fix the quorum loss issue during TiKV upgrade due to some TiKV stores going down ([#4979](https://github.com/pingcap/tidb-operator/pull/4979), [@Tema](https://github.com/Tema)) +- Fix the quorum loss issue during PD upgrade due to some members going down ([#4995](https://github.com/pingcap/tidb-operator/pull/4995), [@Tema](https://github.com/Tema)) +- Fix the issue that TiDB Operator panics when no Kubernetes cluster-level permission is configured ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), [@liubog2008](https://github.com/liubog2008)) +- Fix the issue that TiDB Operator might panic when `AdditionalVolumeMounts` is set for the `TidbCluster` CR ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), 
[@liubog2008](https://github.com/liubog2008)) +- Fix the issue that `baseImage` for the `TidbDashboard` CR is parsed incorrectly when custom image registry is used ([#5014](https://github.com/pingcap/tidb-operator/pull/5014), [@linkinghack](https://github.com/linkinghack)) diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/tidb-operator-overview.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/tidb-operator-overview.md new file mode 100644 index 00000000..35b2d76e --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/tidb-operator-overview.md @@ -0,0 +1,69 @@ +--- +title: TiDB Operator Overview +summary: Learn the overview of TiDB Operator. +aliases: ['/docs/tidb-in-kubernetes/dev/tidb-operator-overview/'] +--- + +# TiDB Operator Overview + +[TiDB Operator](https://github.com/pingcap/tidb-operator) is an automatic operation system for TiDB clusters on Kubernetes. It provides a full management life-cycle for TiDB including deployment, upgrades, scaling, backup, fail-over, and configuration changes. With TiDB Operator, TiDB can run seamlessly in the Kubernetes clusters deployed on a public cloud or in a self-hosted environment. 
+ +The corresponding relationship between TiDB Operator and TiDB versions is as follows: + +| TiDB versions | Compatible TiDB Operator versions | +|:---|:---| +| dev | dev | +| TiDB >= 7.1 | 1.5 (Recommended), 1.4 | +| 6.5 <= TiDB < 7.1 | 1.5, 1.4 (Recommended), 1.3 | +| 5.4 <= TiDB < 6.5 | 1.4, 1.3 (Recommended) | +| 5.1 <= TiDB < 5.4 | 1.4, 1.3 (Recommended), 1.2 | +| 3.0 <= TiDB < 5.1 | 1.4, 1.3 (Recommended), 1.2, 1.1 | +| 2.1 <= TiDB < v3.0| 1.0 (End of support) | + +## Manage TiDB clusters using TiDB Operator + +TiDB Operator provides several ways to deploy TiDB clusters on Kubernetes: + ++ For test environment: + + - [Get Started](get-started.md) using kind, Minikube, or the Google Cloud Shell + ++ For production environment: + + + On public cloud: + - [Deploy TiDB on AWS EKS](deploy-on-aws-eks.md) + - [Deploy TiDB on Google Cloud GKE](deploy-on-gcp-gke.md) + - [Deploy TiDB on Azure AKS](deploy-on-azure-aks.md) + - [Deploy TiDB on Alibaba Cloud ACK](deploy-on-alibaba-cloud.md) + + - In an existing Kubernetes cluster: + + First install TiDB Operator on a Kubernetes cluster according to [Deploy TiDB Operator on Kubernetes](deploy-tidb-operator.md), then deploy your TiDB clusters according to [Deploy TiDB on General Kubernetes](deploy-on-general-kubernetes.md). + + You also need to adjust the configuration of the Kubernetes cluster based on [Prerequisites for TiDB on Kubernetes](prerequisites.md) and configure the local PV for your Kubernetes cluster to achieve low latency of local storage for TiKV according to [Local PV Configuration](configure-storage-class.md#local-pv-configuration). + +Before deploying TiDB on any of the above two environments, you can always refer to [TiDB Cluster Configuration Document](configure-a-tidb-cluster.md) to customize TiDB configurations. 
+ +After the deployment is complete, see the following documents to use, operate, and maintain TiDB clusters on Kubernetes: + ++ [Access the TiDB Cluster](access-tidb.md) ++ [Scale TiDB Cluster](scale-a-tidb-cluster.md) ++ [Upgrade a TiDB Cluster](upgrade-a-tidb-cluster.md) ++ [Change the Configuration of TiDB Cluster](configure-a-tidb-cluster.md) ++ [Back up and Restore a TiDB Cluster](backup-restore-overview.md) ++ [Automatic Failover](use-auto-failover.md) ++ [Monitor a TiDB Cluster on Kubernetes](monitor-a-tidb-cluster.md) ++ [View TiDB Logs on Kubernetes](view-logs.md) ++ [Maintain Kubernetes Nodes that Hold the TiDB Cluster](maintain-a-kubernetes-node.md) + +When a problem occurs and the cluster needs diagnosis, you can: + ++ See [TiDB FAQs on Kubernetes](faq.md) for any available solution; ++ See [Troubleshoot TiDB on Kubernetes](tips.md) to shoot troubles. + +TiDB on Kubernetes provides a dedicated command-line tool `tkctl` for cluster management and auxiliary diagnostics. Meanwhile, some of TiDB's tools are used differently on Kubernetes. You can: + ++ Use `tkctl` according to [`tkctl` Guide](use-tkctl.md ); ++ See [Tools on Kubernetes](tidb-toolkit.md) to understand how TiDB tools are used on Kubernetes. + +Finally, when a new version of TiDB Operator is released, you can refer to [Upgrade TiDB Operator](upgrade-tidb-operator.md) to upgrade to the latest version. diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/whats-new-in-v1.5.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/whats-new-in-v1.5.md new file mode 100644 index 00000000..e0e4ddce --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/master/whats-new-in-v1.5.md @@ -0,0 +1,31 @@ +--- +title: What's New in TiDB Operator 1.5 +summary: Learn about new features in TiDB Operator 1.5.0. 
+--- + +# What's New in TiDB Operator 1.5 + +TiDB Operator 1.5 introduces the following key features, which help you manage TiDB clusters and the tools more easily in terms of extensibility and usability. + +## Compatibility changes + +To use the `PreferDualStack` feature (enabled with `spec.preferIPv6: true`) introduced in [#4959](https://github.com/pingcap/tidb-operator/pull/4959), Kubernetes version >= v1.20 is required. + +## Rolling update changes + +If TiFlash is deployed in a TiDB cluster that is v7.1.0 or later, the TiFlash component will be rolling updated after TiDB Operator is upgraded to v1.5.0 due to [#5075](https://github.com/pingcap/tidb-operator/pull/5075). + +## Extensibility + +- Support specifying an initialization SQL file to be executed during the first bootstrap of TiDB with the `bootstrapSQLConfigMapName` field. +- Support setting `PreferDualStack` for all Service's `ipFamilyPolicy` with `spec.preferIPv6: true`. +- Support managing TiCDC and TiProxy with [Advanced StatefulSet](advanced-statefulset.md). +- Add the BR Federation Manager component to support the backup and restore of a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots. + +## Usability + +- Support using the `tidb.pingcap.com/pd-transfer-leader` annotation to restart PD Pods gracefully. +- Support using the `tidb.pingcap.com/tidb-graceful-shutdown` annotation to restart TiDB Pods gracefully. +- Allow users to define a strategy to restart failed backup jobs, enhancing backup stability. +- Add metrics for the reconciler and worker queue to improve observability. +- Add metrics for counting errors that occur during the reconciliation to improve observability. 
diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/TOC.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/TOC.md new file mode 100644 index 00000000..dfed473a --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/TOC.md @@ -0,0 +1,214 @@ + + + +- [TiDB on Kubernetes Docs](https://docs.pingcap.com/tidb-in-kubernetes/dev) +- Introduction + - [Overview](tidb-operator-overview.md) + - [What's New in v1.5](whats-new-in-v1.5.md) +- [Get Started](get-started.md) +- Deploy + - On Self-Managed Kubernetes + - [Prerequisites](prerequisites.md) + - [Configure Storage Class](configure-storage-class.md) + - [Deploy TiDB Operator](deploy-tidb-operator.md) + - [Configure a TiDB Cluster](configure-a-tidb-cluster.md) + - [Deploy a TiDB Cluster](deploy-on-general-kubernetes.md) + - [Initialize a TiDB Cluster](initialize-a-cluster.md) + - [Access a TiDB Cluster](access-tidb.md) + - On Public Cloud Kubernetes + - [Amazon EKS](deploy-on-aws-eks.md) + - [Google Cloud GKE](deploy-on-gcp-gke.md) + - [Azure AKS](deploy-on-azure-aks.md) + - [Alibaba Cloud ACK](deploy-on-alibaba-cloud.md) + - [Deploy TiDB on ARM64 Machines](deploy-cluster-on-arm64.md) + - [Deploy TiFlash to Explore TiDB HTAP](deploy-tiflash.md) + - Deploy TiDB Across Multiple Kubernetes Clusters + - [Build Multiple Interconnected AWS EKS Clusters](build-multi-aws-eks.md) + - [Build Multiple Interconnected GKE Clusters](build-multi-gcp-gke.md) + - [Deploy TiDB Across Multiple Kubernetes Clusters](deploy-tidb-cluster-across-multiple-kubernetes.md) + - [Deploy a Heterogeneous TiDB Cluster](deploy-heterogeneous-tidb-cluster.md) + - [Deploy TiCDC](deploy-ticdc.md) + - [Deploy TiDB Binlog](deploy-tidb-binlog.md) +- Monitor and Alert + - [Deploy Monitoring and Alerts for TiDB](monitor-a-tidb-cluster.md) + - [Monitor and Diagnose TiDB Using TiDB Dashboard](access-dashboard.md) + - [Aggregate Monitoring Data of Multiple TiDB 
Clusters](aggregate-multiple-cluster-monitor-data.md) + - [Monitor a TiDB Cluster across Multiple Kubernetes Clusters](deploy-tidb-monitor-across-multiple-kubernetes.md) + - [Enable Dynamic Configuration for TidbMonitor](enable-monitor-dynamic-configuration.md) + - [Enable Shards for TidbMonitor](enable-monitor-shards.md) +- Migrate + - [Import Data](restore-data-using-tidb-lightning.md) + - Migrate from MySQL + - [Deploy DM](deploy-tidb-dm.md) + - [Migrate to TiDB Using DM](use-tidb-dm.md) + - [Migrate TiDB to Kubernetes](migrate-tidb-to-kubernetes.md) +- Manage + - Secure + - [Enable TLS for the MySQL Client](enable-tls-for-mysql-client.md) + - [Enable TLS between TiDB Components](enable-tls-between-components.md) + - [Enable TLS for DM](enable-tls-for-dm.md) + - [Replicate Data to TLS-enabled Downstream Services](enable-tls-for-ticdc-sink.md) + - [Renew and Replace the TLS Certificate](renew-tls-certificate.md) + - [Run Containers as a Non-root User](containers-run-as-non-root-user.md) + - [Scale](scale-a-tidb-cluster.md) + - Upgrade + - [Upgrade a TiDB Cluster](upgrade-a-tidb-cluster.md) + - Upgrade TiDB Operator + - [Normal Upgrade](upgrade-tidb-operator.md) + - [Canary Upgrade](canary-upgrade-tidb-operator.md) + - Backup and Restore + - [Overview](backup-restore-overview.md) + - [Backup and Restore Custom Resources](backup-restore-cr.md) + - [Grant Permissions to Remote Storage](grant-permissions-to-remote-storage.md) + - Amazon S3 Compatible Storage + - [Back Up Data Using BR](backup-to-aws-s3-using-br.md) + - [Restore Data Using BR](restore-from-aws-s3-using-br.md) + - [Back Up Data Using Dumpling](backup-to-s3.md) + - [Restore Data Using TiDB Lightning](restore-from-s3.md) + - Google Cloud Storage + - [Back Up Data Using BR](backup-to-gcs-using-br.md) + - [Restore Data Using BR](restore-from-gcs-using-br.md) + - [Back Up Data Using Dumpling](backup-to-gcs.md) + - [Restore Data Using TiDB Lightning](restore-from-gcs.md) + - Azure Blob Storage + - [Back Up 
Data Using BR](backup-to-azblob-using-br.md) + - [Restore Data Using BR](restore-from-azblob-using-br.md) + - Persistent Volumes + - [Back Up Data](backup-to-pv-using-br.md) + - [Restore Data](restore-from-pv-using-br.md) + - Snapshot Backup and Restore + - [Architecture](volume-snapshot-backup-restore.md) + - [Back Up Data Using EBS Snapshots](backup-to-aws-s3-by-snapshot.md) + - [Restore Data from EBS Snapshots](restore-from-aws-s3-by-snapshot.md) + - [Backup and Restore Performance](backup-restore-snapshot-perf.md) + - [FAQs](backup-restore-faq.md) + - Maintain + - [Restart a TiDB Cluster](restart-a-tidb-cluster.md) + - [Destroy a TiDB Cluster](destroy-a-tidb-cluster.md) + - [View TiDB Logs](view-logs.md) + - [Modify TiDB Cluster Configuration](modify-tidb-configuration.md) + - [Configure Automatic Failover](use-auto-failover.md) + - [Pause Sync of a TiDB Cluster](pause-sync-of-tidb-cluster.md) + - [Suspend a TiDB Cluster](suspend-tidb-cluster.md) + - [Maintain Different TiDB Clusters Separately Using Multiple TiDB Operator](deploy-multiple-tidb-operator.md) + - [Maintain Kubernetes Nodes](maintain-a-kubernetes-node.md) + - [Migrate from Helm 2 to Helm 3](migrate-to-helm3.md) + - Replace Nodes for a TiDB Cluster + - [Replace Nodes on Cloud Disks](replace-nodes-for-cloud-disk.md) + - [Replace Nodes on Local Disks](replace-nodes-for-local-disk.md) + - Disaster Recovery + - [Recover a Deleted TiDB Cluster](recover-deleted-cluster.md) + - [Recover a PD Cluster](pd-recover.md) +- Troubleshoot + - [Troubleshooting Tips](tips.md) + - [Deployment Failures](deploy-failures.md) + - [Cluster Exceptions](exceptions.md) + - [Network Issues](network-issues.md) + - [Troubleshoot TiDB Cluster Using PingCAP Clinic](clinic-user-guide.md) +- [FAQs](faq.md) +- Reference + - Architecture + - [TiDB Operator](architecture.md) + - [TiDB Scheduler](tidb-scheduler.md) + - [Advanced StatefulSet Controller](advanced-statefulset.md) + - [Admission Controller](enable-admission-webhook.md) + 
- [Sysbench Performance Test](benchmark-sysbench.md) + - [API References](https://github.com/pingcap/tidb-operator/blob/master/docs/api-references/docs.md) + - [Cheat Sheet](cheat-sheet.md) + - [Required RBAC Rules](tidb-operator-rbac.md) + - Tools + - [tkctl](use-tkctl.md) + - [TiDB Toolkit](tidb-toolkit.md) + - Configure + - [Configure tidb-drainer Chart](configure-tidb-binlog-drainer.md) + - [Log Collection](logs-collection.md) + - [Monitoring and Alert on Kubernetes](monitor-kubernetes.md) + - [PingCAP Clinic Diagnostic Data](clinic-data-collection.md) +- Release Notes + - v1.5 + - [1.5 GA](releases/release-1.5.0.md) + - [1.5.0-beta.1](releases/release-1.5.0-beta.1.md) + - v1.4 + - [1.4.5](releases/release-1.4.5.md) + - [1.4.4](releases/release-1.4.4.md) + - [1.4.3](releases/release-1.4.3.md) + - [1.4.2](releases/release-1.4.2.md) + - [1.4.1](releases/release-1.4.1.md) + - [1.4 GA](releases/release-1.4.0.md) + - [1.4.0-beta.3](releases/release-1.4.0-beta.3.md) + - [1.4.0-beta.2](releases/release-1.4.0-beta.2.md) + - [1.4.0-beta.1](releases/release-1.4.0-beta.1.md) + - [1.4.0-alpha.1](releases/release-1.4.0-alpha.1.md) + - v1.3 + - [1.3.10](releases/release-1.3.10.md) + - [1.3.9](releases/release-1.3.9.md) + - [1.3.8](releases/release-1.3.8.md) + - [1.3.7](releases/release-1.3.7.md) + - [1.3.6](releases/release-1.3.6.md) + - [1.3.5](releases/release-1.3.5.md) + - [1.3.4](releases/release-1.3.4.md) + - [1.3.3](releases/release-1.3.3.md) + - [1.3.2](releases/release-1.3.2.md) + - [1.3.1](releases/release-1.3.1.md) + - [1.3 GA](releases/release-1.3.0.md) + - [1.3.0-beta.1](releases/release-1.3.0-beta.1.md) + - v1.2 + - [1.2.7](releases/release-1.2.7.md) + - [1.2.6](releases/release-1.2.6.md) + - [1.2.5](releases/release-1.2.5.md) + - [1.2.4](releases/release-1.2.4.md) + - [1.2.3](releases/release-1.2.3.md) + - [1.2.2](releases/release-1.2.2.md) + - [1.2.1](releases/release-1.2.1.md) + - [1.2 GA](releases/release-1.2.0.md) + - 
[1.2.0-rc.2](releases/release-1.2.0-rc.2.md) + - [1.2.0-rc.1](releases/release-1.2.0-rc.1.md) + - [1.2.0-beta.2](releases/release-1.2.0-beta.2.md) + - [1.2.0-beta.1](releases/release-1.2.0-beta.1.md) + - [1.2.0-alpha.1](releases/release-1.2.0-alpha.1.md) + - v1.1 + - [1.1.15](releases/release-1.1.15.md) + - [1.1.14](releases/release-1.1.14.md) + - [1.1.13](releases/release-1.1.13.md) + - [1.1.12](releases/release-1.1.12.md) + - [1.1.11](releases/release-1.1.11.md) + - [1.1.10](releases/release-1.1.10.md) + - [1.1.9](releases/release-1.1.9.md) + - [1.1.8](releases/release-1.1.8.md) + - [1.1.7](releases/release-1.1.7.md) + - [1.1.6](releases/release-1.1.6.md) + - [1.1.5](releases/release-1.1.5.md) + - [1.1.4](releases/release-1.1.4.md) + - [1.1.3](releases/release-1.1.3.md) + - [1.1.2](releases/release-1.1.2.md) + - [1.1.1](releases/release-1.1.1.md) + - [1.1 GA](releases/release-1.1-ga.md) + - [1.1.0-rc.4](releases/release-1.1.0-rc.4.md) + - [1.1.0-rc.3](releases/release-1.1.0-rc.3.md) + - [1.1.0-rc.2](releases/release-1.1.0-rc.2.md) + - [1.1.0-rc.1](releases/release-1.1.0-rc.1.md) + - [1.1.0-beta.2](releases/release-1.1.0-beta.2.md) + - [1.1.0-beta.1](releases/release-1.1.0-beta.1.md) + - v1.0 + - [1.0.7](releases/release-1.0.7.md) + - [1.0.6](releases/release-1.0.6.md) + - [1.0.5](releases/release-1.0.5.md) + - [1.0.4](releases/release-1.0.4.md) + - [1.0.3](releases/release-1.0.3.md) + - [1.0.2](releases/release-1.0.2.md) + - [1.0.1](releases/release-1.0.1.md) + - [1.0 GA](releases/release-1.0-ga.md) + - [1.0.0-rc.1](releases/release-1.0.0-rc.1.md) + - [1.0.0-beta.3](releases/release-1.0.0-beta.3.md) + - [1.0.0-beta.2](releases/release-1.0.0-beta.2.md) + - [1.0.0-beta.1-p2](releases/release-1.0.0-beta.1-p2.md) + - [1.0.0-beta.1-p1](releases/release-1.0.0-beta.1-p1.md) + - [1.0.0-beta.1](releases/release-1.0.0-beta.1.md) + - [1.0.0-beta.0](releases/release-1.0.0-beta.0.md) + - v0 + - [0.4.0](releases/release-0.4.0.md) + - [0.3.1](releases/release-0.3.1.md) + - 
[0.3.0](releases/release-0.3.0.md) + - [0.2.1](releases/release-0.2.1.md) + - [0.2.0](releases/release-0.2.0.md) + - [0.1.0](releases/release-0.1.0.md) diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/grant-permissions-to-remote-storage.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/grant-permissions-to-remote-storage.md new file mode 100644 index 00000000..2e660799 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/grant-permissions-to-remote-storage.md @@ -0,0 +1,207 @@ +--- +title: Grant Permissions to Remote Storage +summary: Learn how to grant permissions to access remote storage for backup and restore. +--- + +# Grant Permissions to Remote Storage + +This document describes how to grant permissions to access remote storage for backup and restore. During the backup process, TiDB cluster data is backed up to the remote storage. During the restore process, the backup data is restored from the remote storage to the TiDB cluster. + +## AWS account permissions + +Amazon Web Service (AWS) provides different methods to grant permissions for different types of Kubernetes clusters. This document describes the following three methods. + +### Grant permissions by AccessKey and SecretKey + +The AWS client can read `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` from the process environment variables to obtain the associated user or role permissions. + +Create the `s3-secret` secret by running the following command. Use the AWS account's AccessKey and SecretKey. The secret stores the credential used for accessing S3-compatible storage. 
+ +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic s3-secret --from-literal=access_key=xxx --from-literal=secret_key=yyy --namespace=test1 +``` + +### Grant permissions by associating IAM with Pod + +If you associate the user's [IAM](https://aws.amazon.com/cn/iam/) role with the resources of the running Pods, the processes running in the Pods can have the permissions of the role. This method is provided by [`kube2iam`](https://github.com/jtblin/kube2iam). + +> **Note:** +> +> - When you use this method to grant permissions, you can [create the `kube2iam` environment](https://github.com/jtblin/kube2iam#usage) in the Kubernetes cluster and deploy TiDB Operator and the TiDB cluster. +> - This method is not applicable to the [`hostNetwork`](https://kubernetes.io/docs/concepts/policy/pod-security-policy) mode. Make sure the value of `spec.tikv.hostNetwork` is set to `false`. + +1. Create an IAM role. + + First, [create an IAM User](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) for your account. + + Then, give the required permission to the IAM role you have created. Refer to [Adding and Removing IAM Identity Permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) for details. + + Because the `Backup` CR needs to access the Amazon S3 storage, the IAM role is granted the `AmazonS3FullAccess` permission. 
+ + When backing up a TiDB cluster using EBS volume snapshots, besides the `AmazonS3FullAccess` permission, the following permissions are also required: + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + +2. Associate IAM with the TiKV Pod: + + When you use BR to back up TiDB data, the TiKV Pod also needs to perform read and write operations on S3-compatible storage as the BR Pod does. Therefore, you need to add annotations to the TiKV Pod to associate it with the IAM role. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"annotations":{"iam.amazonaws.com/role":"arn:aws:iam::123456789012:role/user"}}}}' + ``` + + After the TiKV Pod is restarted, check whether the Pod has the annotation. + +> **Note:** +> +> `arn:aws:iam::123456789012:role/user` is the IAM role created in Step 1. + +### Grant permissions by associating IAM with ServiceAccount + +If you associate the user's [IAM](https://aws.amazon.com/cn/iam/) role with [`serviceAccount`](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#serviceaccount) of Kubernetes, the Pods using the `serviceAccount` can have the permissions of the role. This method is provided by [`EKS Pod Identity Webhook`](https://github.com/aws/amazon-eks-pod-identity-webhook). + +When you use this method to grant permissions, you can [create the EKS cluster](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/create-cluster.html) and deploy TiDB Operator and the TiDB cluster. + +1. 
Enable the IAM role for the `serviceAccount` in the cluster: + + Refer to [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). + +2. Create the IAM role: + + [Create an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) and grant the `AmazonS3FullAccess` permissions to the role. Edit the role's `Trust relationships` to grant tidb-backup-manager the access to this IAM role. + + When backing up a TiDB cluster using EBS volume snapshots, besides the `AmazonS3FullAccess` permission, the following permissions are also required: + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + + At the same time, edit the role's `Trust relationships` to grant tidb-controller-manager the access to this IAM role. + +3. Associate the IAM role with the `ServiceAccount` resources. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl annotate sa tidb-backup-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=test1 + ``` + + When backing up or restoring a TiDB cluster using EBS volume snapshots, you need to associate the IAM role with the `ServiceAccount` resources of tidb-controller-manager. + + ```shell + kubectl annotate sa tidb-controller-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=tidb-admin + ``` + + Restart the tidb-controller-manager Pod of TiDB Operator to make the configured `ServiceAccount` take effect. + +4. 
Associate the `ServiceAccount` with the TiKV Pod: + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"serviceAccount": "tidb-backup-manager"}}}' + ``` + + Modify the value of `spec.tikv.serviceAccount` to `tidb-backup-manager`. After the TiKV Pod is restarted, check whether the Pod's `serviceAccountName` is changed. + +> **Note:** +> +> `arn:aws:iam::123456789012:role/user` is the IAM role created in Step 2. + +## GCS account permissions + +### Grant permissions by the service account + +Create the `gcs-secret` secret which stores the credential used to access GCS. The `google-credentials.json` file stores the service account key that you have downloaded from the Google Cloud console. Refer to [Google Cloud documentation](https://cloud.google.com/docs/authentication/getting-started) for details. + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic gcs-secret --from-file=credentials=./google-credentials.json -n test1 +``` + +## Azure account permissions + +Azure provides different methods to grant permissions for different types of Kubernetes clusters. This document describes the following two methods. + +### Grant permissions by access key + +The Azure client can read `AZURE_STORAGE_ACCOUNT` and `AZURE_STORAGE_KEY` from the process environment variables to obtain the associated user or role permissions. + +Run the following command to create the `azblob-secret` secret and use your Azure account access key to grant permissions. The secret stores the credential used for accessing Azure Blob Storage. 
+ +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic azblob-secret --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_STORAGE_KEY=yyy --namespace=test1 +``` + +### Grant permissions by Azure AD + +The Azure client can read `AZURE_STORAGE_ACCOUNT`, `AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, and `AZURE_CLIENT_SECRET` to obtain the associated user or role permissions. + +1. Create the `azblob-secret-ad` secret by running the following command. Use the Active Directory (AD) of your Azure account. The secret stores the credential used for accessing Azure Blob Storage. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl create secret generic azblob-secret-ad --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_CLIENT_ID=yyy --from-literal=AZURE_TENANT_ID=zzz --from-literal=AZURE_CLIENT_SECRET=aaa --namespace=test1 + ``` + +2. Associate the secret with the TiKV Pod: + + When you use BR to back up TiDB data, the TiKV Pod also needs to perform read and write operations on Azure Blob Storage as the BR Pod does. Therefore, you need to associate the TiKV Pod with the secret. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"envFrom":[{"secretRef":{"name":"azblob-secret-ad"}}]}}}' + ``` + + After the TiKV Pod is restarted, check whether the Pod has the environment variables. diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/releases/release-1.5.0.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/releases/release-1.5.0.md new file mode 100644 index 00000000..5034c822 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/releases/release-1.5.0.md @@ -0,0 +1,41 @@ +--- +title: TiDB Operator 1.5.0 Release Notes +summary: Learn about new features, improvements, and bug fixes in TiDB Operator 1.5.0. 
+--- + +# TiDB Operator 1.5.0 Release Notes + +Release date: August 4, 2023 + +TiDB Operator version: 1.5.0 + +## Rolling update changes + +If TiFlash is deployed in a TiDB cluster that is v7.1.0 or later, the TiFlash component will be rolling updated after TiDB Operator is upgraded to v1.5.0 due to [#5075](https://github.com/pingcap/tidb-operator/pull/5075). + +## New features + +- Add the BR Federation Manager component to orchestrate `Backup` and `Restore` custom resources (CR) across multiple Kubernetes clusters ([#4996](https://github.com/pingcap/tidb-operator/pull/4996), [@csuzhangxc](https://github.com/csuzhangxc)) +- Support using the `VolumeBackup` CR to back up a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots ([#5013](https://github.com/pingcap/tidb-operator/pull/5013), [@WangLe1321](https://github.com/WangLe1321)) +- Support using the `VolumeRestore` CR to restore a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots ([#5039](https://github.com/pingcap/tidb-operator/pull/5039), [@WangLe1321](https://github.com/WangLe1321)) +- Support using the `VolumeBackupSchedule` CR to automatically back up a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots ([#5036](https://github.com/pingcap/tidb-operator/pull/5036), [@BornChanger](https://github.com/BornChanger)) +- Support backing up CRs related to `TidbCluster` when backing up a TiDB cluster deployed across multiple Kubernetes based on EBS snapshots ([#5207](https://github.com/pingcap/tidb-operator/pull/5207), [@WangLe1321](https://github.com/WangLe1321)) + +## Improvements + +- Add the `startUpScriptVersion` field for DM master to specify the version of the startup script ([#4971](https://github.com/pingcap/tidb-operator/pull/4971), [@hanlins](https://github.com/hanlins)) +- Support `spec.preferIPv6` for DmCluster, TidbDashboard, TidbMonitor, and TidbNGMonitoring 
([#4977](https://github.com/pingcap/tidb-operator/pull/4977), [@KanShiori](https://github.com/KanShiori)) +- Support setting expiration time for TiKV leader eviction and PD leader transfer ([#4997](https://github.com/pingcap/tidb-operator/pull/4997), [@Tema](https://github.com/Tema)) +- Support setting toleration for `TidbInitializer` ([#5047](https://github.com/pingcap/tidb-operator/pull/5047), [@csuzhangxc](https://github.com/csuzhangxc)) +- Support configuring the timeout for PD start ([#5071](https://github.com/pingcap/tidb-operator/pull/5071), [@oliviachenairbnb](https://github.com/oliviachenairbnb)) +- Skip evicting leaders for TiKV when changing PVC size to avoid leader eviction blocked caused by low disk space ([#5101](https://github.com/pingcap/tidb-operator/pull/5101), [@csuzhangxc](https://github.com/csuzhangxc)) +- Support updating annotations and labels in services for PD, TiKV, TiFlash, TiProxy, DM-master, and DM-worker ([#4973](https://github.com/pingcap/tidb-operator/pull/4973), [@wxiaomou](https://github.com/wxiaomou)) +- Enable volume resizing by default for PV expansion ([#5167](https://github.com/pingcap/tidb-operator/pull/5167), [@liubog2008](https://github.com/liubog2008)) + +## Bug fixes + +- Fix the quorum loss issue during TiKV upgrade due to some TiKV stores going down ([#4979](https://github.com/pingcap/tidb-operator/pull/4979), [@Tema](https://github.com/Tema)) +- Fix the quorum loss issue during PD upgrade due to some members going down ([#4995](https://github.com/pingcap/tidb-operator/pull/4995), [@Tema](https://github.com/Tema)) +- Fix the issue that TiDB Operator panics when no Kubernetes cluster-level permission is configured ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), [@liubog2008](https://github.com/liubog2008)) +- Fix the issue that TiDB Operator might panic when `AdditionalVolumeMounts` is set for the `TidbCluster` CR ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), 
[@liubog2008](https://github.com/liubog2008)) +- Fix the issue that `baseImage` for the `TidbDashboard` CR is parsed incorrectly when custom image registry is used ([#5014](https://github.com/pingcap/tidb-operator/pull/5014), [@linkinghack](https://github.com/linkinghack)) diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/tidb-operator-overview.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/tidb-operator-overview.md new file mode 100644 index 00000000..35b2d76e --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/tidb-operator-overview.md @@ -0,0 +1,69 @@ +--- +title: TiDB Operator Overview +summary: Learn the overview of TiDB Operator. +aliases: ['/docs/tidb-in-kubernetes/dev/tidb-operator-overview/'] +--- + +# TiDB Operator Overview + +[TiDB Operator](https://github.com/pingcap/tidb-operator) is an automatic operation system for TiDB clusters on Kubernetes. It provides a full management life-cycle for TiDB including deployment, upgrades, scaling, backup, fail-over, and configuration changes. With TiDB Operator, TiDB can run seamlessly in the Kubernetes clusters deployed on a public cloud or in a self-hosted environment. 
+ +The corresponding relationship between TiDB Operator and TiDB versions is as follows: + +| TiDB versions | Compatible TiDB Operator versions | +|:---|:---| +| dev | dev | +| TiDB >= 7.1 | 1.5 (Recommended), 1.4 | +| 6.5 <= TiDB < 7.1 | 1.5, 1.4 (Recommended), 1.3 | +| 5.4 <= TiDB < 6.5 | 1.4, 1.3 (Recommended) | +| 5.1 <= TiDB < 5.4 | 1.4, 1.3 (Recommended), 1.2 | +| 3.0 <= TiDB < 5.1 | 1.4, 1.3 (Recommended), 1.2, 1.1 | +| 2.1 <= TiDB < 3.0 | 1.0 (End of support) | + +## Manage TiDB clusters using TiDB Operator + +TiDB Operator provides several ways to deploy TiDB clusters on Kubernetes: + ++ For test environment: + + - [Get Started](get-started.md) using kind, Minikube, or the Google Cloud Shell + ++ For production environment: + + + On public cloud: + - [Deploy TiDB on AWS EKS](deploy-on-aws-eks.md) + - [Deploy TiDB on Google Cloud GKE](deploy-on-gcp-gke.md) + - [Deploy TiDB on Azure AKS](deploy-on-azure-aks.md) + - [Deploy TiDB on Alibaba Cloud ACK](deploy-on-alibaba-cloud.md) + + - In an existing Kubernetes cluster: + + First install TiDB Operator on a Kubernetes cluster according to [Deploy TiDB Operator on Kubernetes](deploy-tidb-operator.md), then deploy your TiDB clusters according to [Deploy TiDB on General Kubernetes](deploy-on-general-kubernetes.md). + + You also need to adjust the configuration of the Kubernetes cluster based on [Prerequisites for TiDB on Kubernetes](prerequisites.md) and configure the local PV for your Kubernetes cluster to achieve low latency of local storage for TiKV according to [Local PV Configuration](configure-storage-class.md#local-pv-configuration). + +Before deploying TiDB on any of the above two environments, you can always refer to [TiDB Cluster Configuration Document](configure-a-tidb-cluster.md) to customize TiDB configurations. 
+ +After the deployment is complete, see the following documents to use, operate, and maintain TiDB clusters on Kubernetes: + ++ [Access the TiDB Cluster](access-tidb.md) ++ [Scale TiDB Cluster](scale-a-tidb-cluster.md) ++ [Upgrade a TiDB Cluster](upgrade-a-tidb-cluster.md) ++ [Change the Configuration of TiDB Cluster](configure-a-tidb-cluster.md) ++ [Back up and Restore a TiDB Cluster](backup-restore-overview.md) ++ [Automatic Failover](use-auto-failover.md) ++ [Monitor a TiDB Cluster on Kubernetes](monitor-a-tidb-cluster.md) ++ [View TiDB Logs on Kubernetes](view-logs.md) ++ [Maintain Kubernetes Nodes that Hold the TiDB Cluster](maintain-a-kubernetes-node.md) + +When a problem occurs and the cluster needs diagnosis, you can: + ++ See [TiDB FAQs on Kubernetes](faq.md) for any available solution; ++ See [Troubleshoot TiDB on Kubernetes](tips.md) to troubleshoot issues. + +TiDB on Kubernetes provides a dedicated command-line tool `tkctl` for cluster management and auxiliary diagnostics. Meanwhile, some of TiDB's tools are used differently on Kubernetes. You can: + ++ Use `tkctl` according to [`tkctl` Guide](use-tkctl.md); ++ See [Tools on Kubernetes](tidb-toolkit.md) to understand how TiDB tools are used on Kubernetes. + +Finally, when a new version of TiDB Operator is released, you can refer to [Upgrade TiDB Operator](upgrade-tidb-operator.md) to upgrade to the latest version. diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/whats-new-in-v1.5.md b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/whats-new-in-v1.5.md new file mode 100644 index 00000000..e0e4ddce --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb-in-kubernetes/release-6.7/whats-new-in-v1.5.md @@ -0,0 +1,31 @@ +--- +title: What's New in TiDB Operator 1.5 +summary: Learn about new features in TiDB Operator 1.5.0. 
+--- + +# What's New in TiDB Operator 1.5 + +TiDB Operator 1.5 introduces the following key features, which help you manage TiDB clusters and the tools more easily in terms of extensibility and usability. + +## Compatibility changes + +To use the `PreferDualStack` feature (enabled with `spec.preferIPv6: true`) introduced in [#4959](https://github.com/pingcap/tidb-operator/pull/4959), Kubernetes version >= v1.20 is required. + +## Rolling update changes + +If TiFlash is deployed in a TiDB cluster that is v7.1.0 or later, the TiFlash component will be rolling updated after TiDB Operator is upgraded to v1.5.0 due to [#5075](https://github.com/pingcap/tidb-operator/pull/5075). + +## Extensibility + +- Support specifying an initialization SQL file to be executed during the first bootstrap of TiDB with the `bootstrapSQLConfigMapName` field. +- Support setting `PreferDualStack` for all Service's `ipFamilyPolicy` with `spec.preferIPv6: true`. +- Support managing TiCDC and TiProxy with [Advanced StatefulSet](advanced-statefulset.md). +- Add the BR Federation Manager component to support the backup and restore of a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots. + +## Usability + +- Support using the `tidb.pingcap.com/pd-transfer-leader` annotation to restart PD Pods gracefully. +- Support using the `tidb.pingcap.com/tidb-graceful-shutdown` annotation to restart TiDB Pods gracefully. +- Allow users to define a strategy to restart failed backup jobs, enhancing backup stability. +- Add metrics for the reconciler and worker queue to improve observability. +- Add metrics for counting errors that occur during the reconciliation to improve observability. 
diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/master/TOC.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/TOC.md new file mode 100644 index 00000000..fb1aa16b --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/TOC.md @@ -0,0 +1,1169 @@ + + + +- [Docs Home](https://docs.pingcap.com/) +- About TiDB + - [TiDB Introduction](/overview.md) + - [TiDB 7.2 Release Notes](/releases/release-7.2.0.md) + - [Features](/basic-features.md) + - [MySQL Compatibility](/mysql-compatibility.md) + - [TiDB Limitations](/tidb-limitations.md) + - [Credits](/credits.md) + - [Roadmap](/tidb-roadmap.md) +- Quick Start + - [Try Out TiDB](/quick-start-with-tidb.md) + - [Try Out HTAP](/quick-start-with-htap.md) + - [Learn TiDB SQL](/basic-sql-operations.md) + - [Learn HTAP](/explore-htap.md) + - [Import Example Database](/import-example-data.md) +- Develop + - [Overview](/develop/dev-guide-overview.md) + - Quick Start + - [Build a TiDB Serverless Cluster](/develop/dev-guide-build-cluster-in-cloud.md) + - [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) + - Example Applications + - Java + - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) + - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) + - [Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) + - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + - Go + - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) + - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) + - Python + - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) + - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) + - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) + - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) + - [peewee](/develop/dev-guide-sample-application-python-peewee.md) + - Connect to TiDB + - [Choose 
Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) + - [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md) + - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) + - Design Database Schema + - [Overview](/develop/dev-guide-schema-design-overview.md) + - [Create a Database](/develop/dev-guide-create-database.md) + - [Create a Table](/develop/dev-guide-create-table.md) + - [Create a Secondary Index](/develop/dev-guide-create-secondary-indexes.md) + - Write Data + - [Insert Data](/develop/dev-guide-insert-data.md) + - [Update Data](/develop/dev-guide-update-data.md) + - [Delete Data](/develop/dev-guide-delete-data.md) + - [Periodically Delete Data Using Time to Live](/time-to-live.md) + - [Prepared Statements](/develop/dev-guide-prepared-statement.md) + - Read Data + - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) + - [Multi-table Join Queries](/develop/dev-guide-join-tables.md) + - [Subquery](/develop/dev-guide-use-subqueries.md) + - [Paginate Results](/develop/dev-guide-paginate-results.md) + - [Views](/develop/dev-guide-use-views.md) + - [Temporary Tables](/develop/dev-guide-use-temporary-tables.md) + - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) + - Read Replica Data + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - Transaction + - [Overview](/develop/dev-guide-transaction-overview.md) + - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) + - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) + - Optimize + - [Overview](/develop/dev-guide-optimize-sql-overview.md) + - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) + - [Best Practices for 
Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) + - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) + - Other Optimization Methods + - [Avoid Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) + - [Unique Serial Number Generation](/develop/dev-guide-unique-serial-number-generation.md) + - Troubleshoot + - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) + - [Unstable Result Set](/develop/dev-guide-unstable-result-set.md) + - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) + - Reference + - [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) + - Guidelines + - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) + - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) + - Legacy Docs + - [For Django](/develop/dev-guide-outdated-for-django.md) + - Cloud Native Development Environment + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - Third-Party Support + - [Third-Party Tools Supported by TiDB](/develop/dev-guide-third-party-support.md) + - [Known Incompatibility Issues with Third-Party Tools](/develop/dev-guide-third-party-tools-compatibility.md) + - [ProxySQL Integration Guide](/develop/dev-guide-proxysql-integration.md) + - [Amazon AppFlow Integration Guide](/develop/dev-guide-aws-appflow-integration.md) +- Deploy + - [Software and Hardware Requirements](/hardware-and-software-requirements.md) + - [Environment Configuration Checklist](/check-before-deployment.md) + - Plan Cluster Topology + - [Minimal Topology](/minimal-deployment-topology.md) + - [TiFlash Topology](/tiflash-deployment-topology.md) + - [TiCDC Topology](/ticdc-deployment-topology.md) + - [TiDB Binlog Topology](/tidb-binlog-deployment-topology.md) + - [TiSpark Topology](/tispark-deployment-topology.md) + - [Cross-DC Topology](/geo-distributed-deployment-topology.md) + - [Hybrid 
Topology](/hybrid-deployment-topology.md) + - Install and Start + - [Use TiUP](/production-deployment-using-tiup.md) + - [Deploy on Kubernetes](/tidb-in-kubernetes.md) + - [Verify Cluster Status](/post-installation-check.md) + - Test Cluster Performance + - [Test TiDB Using Sysbench](/benchmark/benchmark-tidb-using-sysbench.md) + - [Test TiDB Using TPC-C](/benchmark/benchmark-tidb-using-tpcc.md) + - [Test TiDB Using CH-benCHmark](/benchmark/benchmark-tidb-using-ch.md) +- Migrate + - [Overview](/migration-overview.md) + - [Migration Tools](/migration-tools.md) + - Migration Scenarios + - [Migrate from Aurora](/migrate-aurora-to-tidb.md) + - [Migrate MySQL of Small Datasets](/migrate-small-mysql-to-tidb.md) + - [Migrate MySQL of Large Datasets](/migrate-large-mysql-to-tidb.md) + - [Migrate and Merge MySQL Shards of Small Datasets](/migrate-small-mysql-shards-to-tidb.md) + - [Migrate and Merge MySQL Shards of Large Datasets](/migrate-large-mysql-shards-to-tidb.md) + - [Migrate from CSV Files](/migrate-from-csv-files-to-tidb.md) + - [Migrate from SQL Files](/migrate-from-sql-files-to-tidb.md) + - [Migrate from Parquet Files](/migrate-from-parquet-files-to-tidb.md) + - [Migrate from One TiDB Cluster to Another TiDB Cluster](/migrate-from-tidb-to-tidb.md) + - [Migrate from TiDB to MySQL-compatible Databases](/migrate-from-tidb-to-mysql.md) + - Advanced Migration + - [Continuous Replication with gh-ost or pt-osc](/migrate-with-pt-ghost.md) + - [Migrate to a Downstream Table with More Columns](/migrate-with-more-columns-downstream.md) + - [Filter Binlog Events](/filter-binlog-event.md) + - [Filter DML Events Using SQL Expressions](/filter-dml-event.md) +- Integrate + - [Overview](/integration-overview.md) + - Integration Scenarios + - [Integrate with Confluent and Snowflake](/ticdc/integrate-confluent-using-ticdc.md) + - [Integrate with Apache Kafka and Apache Flink](/replicate-data-to-kafka.md) +- Maintain + - Upgrade + - [Use TiUP](/upgrade-tidb-using-tiup.md) + - [Use 
TiDB Operator](https://docs.pingcap.com/tidb-in-kubernetes/stable/upgrade-a-tidb-cluster) + - [TiDB Smooth Upgrade](/smooth-upgrade-tidb.md) + - [TiFlash v6.2.0 Upgrade Guide](/tiflash-620-upgrade-guide.md) + - Scale + - [Use TiUP (Recommended)](/scale-tidb-using-tiup.md) + - [Use TiDB Operator](https://docs.pingcap.com/tidb-in-kubernetes/stable/scale-a-tidb-cluster) + - Backup and Restore + - [Overview](/br/backup-and-restore-overview.md) + - Architecture + - [Architecture Overview](/br/backup-and-restore-design.md) + - [Snapshot Backup and Restore Architecture](/br/br-snapshot-architecture.md) + - [Log Backup and PITR Architecture](/br/br-log-architecture.md) + - Use BR + - [Use Overview](/br/br-use-overview.md) + - [Snapshot Backup and Restore Guide](/br/br-snapshot-guide.md) + - [Log Backup and PITR Guide](/br/br-pitr-guide.md) + - [Use Cases](/br/backup-and-restore-use-cases.md) + - [Backup Storages](/br/backup-and-restore-storages.md) + - BR CLI Manuals + - [Overview](/br/use-br-command-line-tool.md) + - [Snapshot Backup and Restore Command Manual](/br/br-snapshot-manual.md) + - [Log Backup and PITR Command Manual](/br/br-pitr-manual.md) + - References + - BR Features + - [Backup Auto-Tune](/br/br-auto-tune.md) + - [Batch Create Table](/br/br-batch-create-table.md) + - [Checkpoint Backup](/br/br-checkpoint-backup.md) + - [Checkpoint Restore](/br/br-checkpoint-restore.md) + - [Back up and Restore Data Using Dumpling and TiDB Lightning](/backup-and-restore-using-dumpling-lightning.md) + - [Back Up and Restore RawKV](/br/rawkv-backup-and-restore.md) + - [Incremental Backup and Restore](/br/br-incremental-guide.md) + - Cluster Disaster Recovery (DR) + - [DR Solutions Overview](/dr-solution-introduction.md) + - [Primary-Secondary DR](/dr-secondary-cluster.md) + - [Multi-Replica Cluster DR](/dr-multi-replica.md) + - [BR-based DR](/dr-backup-restore.md) + - [Resource Control](/tidb-resource-control.md) + - [Configure Time Zone](/configure-time-zone.md) + - [Daily 
Checklist](/daily-check.md) + - [Maintain TiFlash](/tiflash/maintain-tiflash.md) + - [Maintain TiDB Using TiUP](/maintain-tidb-using-tiup.md) + - [Modify Configuration Dynamically](/dynamic-config.md) + - [Online Unsafe Recovery](/online-unsafe-recovery.md) + - [Replicate Data Between Primary and Secondary Clusters](/replicate-between-primary-and-secondary-clusters.md) +- Monitor and Alert + - [Monitoring Framework Overview](/tidb-monitoring-framework.md) + - [Monitoring API](/tidb-monitoring-api.md) + - [Deploy Monitoring Services](/deploy-monitoring-services.md) + - [Export Grafana Snapshots](/exporting-grafana-snapshots.md) + - [TiDB Cluster Alert Rules](/alert-rules.md) + - [TiFlash Alert Rules](/tiflash/tiflash-alert-rules.md) + - [Customize Configurations of Monitoring Servers](/tiup/customized-montior-in-tiup-environment.md) + - [BR Monitoring and Alert](/br/br-monitoring-and-alert.md) +- Troubleshoot + - Issue Summary + - [TiDB Troubleshooting Map](/tidb-troubleshooting-map.md) + - [Troubleshoot TiDB Cluster Setup](/troubleshoot-tidb-cluster.md) + - [Troubleshoot TiFlash](/tiflash/troubleshoot-tiflash.md) + - Issue Scenarios + - Slow Queries + - [Identify Slow Queries](/identify-slow-queries.md) + - [Analyze Slow Queries](/analyze-slow-queries.md) + - [TiDB OOM](/troubleshoot-tidb-oom.md) + - [Hotspot](/troubleshoot-hot-spot-issues.md) + - [Increased Read and Write Latency](/troubleshoot-cpu-issues.md) + - [Write Conflicts in Optimistic Transactions](/troubleshoot-write-conflicts.md) + - [High Disk I/O Usage](/troubleshoot-high-disk-io.md) + - [Lock Conflicts](/troubleshoot-lock-conflicts.md) + - [Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) + - Diagnostic Methods + - [SQL Diagnostics](/information-schema/information-schema-sql-diagnostics.md) + - [Statement Summary Tables](/statement-summary-tables.md) + - [Identify Expensive Queries Using Top SQL](/dashboard/top-sql.md) + - [Identify Expensive Queries Using 
Logs](/identify-expensive-queries.md) + - [Save and Restore the On-Site Information of a Cluster](/sql-plan-replayer.md) + - [Support Resources](/support.md) +- Performance Tuning + - Tuning Guide + - [Performance Tuning Overview](/performance-tuning-overview.md) + - [Performance Analysis and Tuning](/performance-tuning-methods.md) + - [Performance Tuning Practices for OLTP Scenarios](/performance-tuning-practices.md) + - [Latency Breakdown](/latency-breakdown.md) + - [TiDB Best Practices on Public Cloud](/best-practices-on-public-cloud.md) + - Configuration Tuning + - [Tune Operating System Performance](/tune-operating-system.md) + - [Tune TiDB Memory](/configure-memory-usage.md) + - [Tune TiKV Threads](/tune-tikv-thread-performance.md) + - [Tune TiKV Memory](/tune-tikv-memory-performance.md) + - [TiKV Follower Read](/follower-read.md) + - [Tune Region Performance](/tune-region-performance.md) + - [Tune TiFlash Performance](/tiflash/tune-tiflash-performance.md) + - [Coprocessor Cache](/coprocessor-cache.md) + - Garbage Collection (GC) + - [Overview](/garbage-collection-overview.md) + - [Configuration](/garbage-collection-configuration.md) + - SQL Tuning + - [Overview](/sql-tuning-overview.md) + - Understanding the Query Execution Plan + - [Overview](/explain-overview.md) + - [`EXPLAIN` Walkthrough](/explain-walkthrough.md) + - [Indexes](/explain-indexes.md) + - [Joins](/explain-joins.md) + - [MPP Queries](/explain-mpp.md) + - [Subqueries](/explain-subqueries.md) + - [Aggregation](/explain-aggregation.md) + - [Views](/explain-views.md) + - [Partitions](/explain-partitions.md) + - [Index Merge](/explain-index-merge.md) + - SQL Optimization Process + - [Overview](/sql-optimization-concepts.md) + - Logic Optimization + - [Overview](/sql-logical-optimization.md) + - [Subquery Related Optimizations](/subquery-optimization.md) + - [Column Pruning](/column-pruning.md) + - [Decorrelation of Correlated Subquery](/correlated-subquery-optimization.md) + - [Eliminate 
Max/Min](/max-min-eliminate.md) + - [Predicates Push Down](/predicate-push-down.md) + - [Partition Pruning](/partition-pruning.md) + - [TopN and Limit Push Down](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - [Derive TopN or Limit from Window Functions](/derive-topn-from-window.md) + - Physical Optimization + - [Overview](/sql-physical-optimization.md) + - [Index Selection](/choose-index.md) + - [Statistics](/statistics.md) + - [Extended Statistics](/extended-statistics.md) + - [Wrong Index Solution](/wrong-index-solution.md) + - [Distinct Optimization](/agg-distinct-optimization.md) + - [Cost Model](/cost-model.md) + - [Prepared Execution Plan Cache](/sql-prepared-plan-cache.md) + - [Non-Prepared Execution Plan Cache](/sql-non-prepared-plan-cache.md) + - Control Execution Plans + - [Overview](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [SQL Plan Management](/sql-plan-management.md) + - [The Blocklist of Optimization Rules and Expression Pushdown](/blocklist-control-plan.md) + - [Optimizer Fix Controls](/optimizer-fix-controls.md) +- Tutorials + - [Multiple Availability Zones in One Region Deployment](/multi-data-centers-in-one-city-deployment.md) + - [Three Availability Zones in Two Regions Deployment](/three-data-centers-in-two-cities-deployment.md) + - [Two Availability Zones in One Region Deployment](/two-data-centers-in-one-city-deployment.md) + - Read Historical Data + - Use Stale Read (Recommended) + - [Usage Scenarios of Stale Read](/stale-read.md) + - [Perform Stale Read Using `As OF TIMESTAMP`](/as-of-timestamp.md) + - [Perform Stale Read Using `tidb_read_staleness`](/tidb-read-staleness.md) + - [Perform Stale Read Using `tidb_external_ts`](/tidb-external-ts.md) + - [Use the `tidb_snapshot` System Variable](/read-historical-data.md) + - Best Practices + - [Use TiDB](/best-practices/tidb-best-practices.md) + - [Java Application Development](/best-practices/java-app-best-practices.md) + - [Use 
HAProxy](/best-practices/haproxy-best-practices.md) + - [Highly Concurrent Write](/best-practices/high-concurrency-best-practices.md) + - [Grafana Monitoring](/best-practices/grafana-monitor-best-practices.md) + - [PD Scheduling](/best-practices/pd-scheduling-best-practices.md) + - [TiKV Performance Tuning with Massive Regions](/best-practices/massive-regions-best-practices.md) + - [Three-node Hybrid Deployment](/best-practices/three-nodes-hybrid-deployment.md) + - [Local Read Under Three Data Centers Deployment](/best-practices/three-dc-local-read.md) + - [Use UUIDs](/best-practices/uuid.md) + - [Read-Only Storage Nodes](/best-practices/readonly-nodes.md) + - [Use Placement Rules](/configure-placement-rules.md) + - [Use Load Base Split](/configure-load-base-split.md) + - [Use Store Limit](/configure-store-limit.md) + - [DDL Execution Principles and Best Practices](/ddl-introduction.md) +- TiDB Tools + - [Overview](/ecosystem-tool-user-guide.md) + - [Use Cases](/ecosystem-tool-user-case.md) + - [Download](/download-ecosystem-tools.md) + - TiUP + - [Documentation Map](/tiup/tiup-documentation-guide.md) + - [Overview](/tiup/tiup-overview.md) + - [Terminology and Concepts](/tiup/tiup-terminology-and-concepts.md) + - [Manage TiUP Components](/tiup/tiup-component-management.md) + - [FAQ](/tiup/tiup-faq.md) + - [Troubleshooting Guide](/tiup/tiup-troubleshooting-guide.md) + - Command Reference + - [Overview](/tiup/tiup-reference.md) + - TiUP Commands + - [tiup clean](/tiup/tiup-command-clean.md) + - [tiup completion](/tiup/tiup-command-completion.md) + - [tiup env](/tiup/tiup-command-env.md) + - [tiup help](/tiup/tiup-command-help.md) + - [tiup install](/tiup/tiup-command-install.md) + - [tiup list](/tiup/tiup-command-list.md) + - tiup mirror + - [Overview](/tiup/tiup-command-mirror.md) + - [tiup mirror clone](/tiup/tiup-command-mirror-clone.md) + - [tiup mirror genkey](/tiup/tiup-command-mirror-genkey.md) + - [tiup mirror grant](/tiup/tiup-command-mirror-grant.md) + - 
[tiup mirror init](/tiup/tiup-command-mirror-init.md) + - [tiup mirror merge](/tiup/tiup-command-mirror-merge.md) + - [tiup mirror modify](/tiup/tiup-command-mirror-modify.md) + - [tiup mirror publish](/tiup/tiup-command-mirror-publish.md) + - [tiup mirror rotate](/tiup/tiup-command-mirror-rotate.md) + - [tiup mirror set](/tiup/tiup-command-mirror-set.md) + - [tiup mirror sign](/tiup/tiup-command-mirror-sign.md) + - [tiup status](/tiup/tiup-command-status.md) + - [tiup telemetry](/tiup/tiup-command-telemetry.md) + - [tiup uninstall](/tiup/tiup-command-uninstall.md) + - [tiup update](/tiup/tiup-command-update.md) + - TiUP Cluster Commands + - [Overview](/tiup/tiup-component-cluster.md) + - [tiup cluster audit](/tiup/tiup-component-cluster-audit.md) + - [tiup cluster audit cleanup](/tiup/tiup-component-cluster-audit-cleanup.md) + - [tiup cluster check](/tiup/tiup-component-cluster-check.md) + - [tiup cluster clean](/tiup/tiup-component-cluster-clean.md) + - [tiup cluster deploy](/tiup/tiup-component-cluster-deploy.md) + - [tiup cluster destroy](/tiup/tiup-component-cluster-destroy.md) + - [tiup cluster disable](/tiup/tiup-component-cluster-disable.md) + - [tiup cluster display](/tiup/tiup-component-cluster-display.md) + - [tiup cluster edit-config](/tiup/tiup-component-cluster-edit-config.md) + - [tiup cluster enable](/tiup/tiup-component-cluster-enable.md) + - [tiup cluster help](/tiup/tiup-component-cluster-help.md) + - [tiup cluster import](/tiup/tiup-component-cluster-import.md) + - [tiup cluster list](/tiup/tiup-component-cluster-list.md) + - [tiup cluster meta backup](/tiup/tiup-component-cluster-meta-backup.md) + - [tiup cluster meta restore](/tiup/tiup-component-cluster-meta-restore.md) + - [tiup cluster patch](/tiup/tiup-component-cluster-patch.md) + - [tiup cluster prune](/tiup/tiup-component-cluster-prune.md) + - [tiup cluster reload](/tiup/tiup-component-cluster-reload.md) + - [tiup cluster rename](/tiup/tiup-component-cluster-rename.md) + - [tiup cluster 
replay](/tiup/tiup-component-cluster-replay.md) + - [tiup cluster restart](/tiup/tiup-component-cluster-restart.md) + - [tiup cluster scale-in](/tiup/tiup-component-cluster-scale-in.md) + - [tiup cluster scale-out](/tiup/tiup-component-cluster-scale-out.md) + - [tiup cluster start](/tiup/tiup-component-cluster-start.md) + - [tiup cluster stop](/tiup/tiup-component-cluster-stop.md) + - [tiup cluster template](/tiup/tiup-component-cluster-template.md) + - [tiup cluster upgrade](/tiup/tiup-component-cluster-upgrade.md) + - TiUP DM Commands + - [Overview](/tiup/tiup-component-dm.md) + - [tiup dm audit](/tiup/tiup-component-dm-audit.md) + - [tiup dm deploy](/tiup/tiup-component-dm-deploy.md) + - [tiup dm destroy](/tiup/tiup-component-dm-destroy.md) + - [tiup dm disable](/tiup/tiup-component-dm-disable.md) + - [tiup dm display](/tiup/tiup-component-dm-display.md) + - [tiup dm edit-config](/tiup/tiup-component-dm-edit-config.md) + - [tiup dm enable](/tiup/tiup-component-dm-enable.md) + - [tiup dm help](/tiup/tiup-component-dm-help.md) + - [tiup dm import](/tiup/tiup-component-dm-import.md) + - [tiup dm list](/tiup/tiup-component-dm-list.md) + - [tiup dm patch](/tiup/tiup-component-dm-patch.md) + - [tiup dm prune](/tiup/tiup-component-dm-prune.md) + - [tiup dm reload](/tiup/tiup-component-dm-reload.md) + - [tiup dm replay](/tiup/tiup-component-dm-replay.md) + - [tiup dm restart](/tiup/tiup-component-dm-restart.md) + - [tiup dm scale-in](/tiup/tiup-component-dm-scale-in.md) + - [tiup dm scale-out](/tiup/tiup-component-dm-scale-out.md) + - [tiup dm start](/tiup/tiup-component-dm-start.md) + - [tiup dm stop](/tiup/tiup-component-dm-stop.md) + - [tiup dm template](/tiup/tiup-component-dm-template.md) + - [tiup dm upgrade](/tiup/tiup-component-dm-upgrade.md) + - [TiDB Cluster Topology Reference](/tiup/tiup-cluster-topology-reference.md) + - [DM Cluster Topology Reference](/tiup/tiup-dm-topology-reference.md) + - [Mirror Reference Guide](/tiup/tiup-mirror-reference.md) + - TiUP 
Components + - [tiup-playground](/tiup/tiup-playground.md) + - [tiup-cluster](/tiup/tiup-cluster.md) + - [tiup-mirror](/tiup/tiup-mirror.md) + - [tiup-bench](/tiup/tiup-bench.md) + - [TiDB Operator](/tidb-operator-overview.md) + - TiDB Data Migration + - [About TiDB Data Migration](/dm/dm-overview.md) + - [Architecture](/dm/dm-arch.md) + - [Quick Start](/dm/quick-start-with-dm.md) + - [Best Practices](/dm/dm-best-practices.md) + - Deploy a DM cluster + - [Hardware and Software Requirements](/dm/dm-hardware-and-software-requirements.md) + - [Use TiUP (Recommended)](/dm/deploy-a-dm-cluster-using-tiup.md) + - [Use TiUP Offline](/dm/deploy-a-dm-cluster-using-tiup-offline.md) + - [Use Binary](/dm/deploy-a-dm-cluster-using-binary.md) + - [Use Kubernetes](https://docs.pingcap.com/tidb-in-kubernetes/dev/deploy-tidb-dm) + - Tutorials + - [Create a Data Source](/dm/quick-start-create-source.md) + - [Manage Data Sources](/dm/dm-manage-source.md) + - [Configure Tasks](/dm/dm-task-configuration-guide.md) + - [Shard Merge](/dm/dm-shard-merge.md) + - [Table Routing](/dm/dm-table-routing.md) + - [Block and Allow Lists](/dm/dm-block-allow-table-lists.md) + - [Binlog Event Filter](/dm/dm-binlog-event-filter.md) + - [Filter DMLs Using SQL Expressions](/dm/feature-expression-filter.md) + - [Online DDL Tool Support](/dm/dm-online-ddl-tool-support.md) + - Manage a Data Migration Task + - [Precheck a Task](/dm/dm-precheck.md) + - [Create a Task](/dm/dm-create-task.md) + - [Query Status](/dm/dm-query-status.md) + - [Pause a Task](/dm/dm-pause-task.md) + - [Resume a Task](/dm/dm-resume-task.md) + - [Stop a Task](/dm/dm-stop-task.md) + - Advanced Tutorials + - Merge and Migrate Data from Sharded Tables + - [Overview](/dm/feature-shard-merge.md) + - [Pessimistic Mode](/dm/feature-shard-merge-pessimistic.md) + - [Optimistic Mode](/dm/feature-shard-merge-optimistic.md) + - [Manually Handle Sharding DDL Lock](/dm/manually-handling-sharding-ddl-locks.md) + - [Migrate from MySQL Databases that 
Use GH-ost/PT-osc](/dm/feature-online-ddl.md) + - [Migrate Data to a Downstream TiDB Table with More Columns](/migrate-with-more-columns-downstream.md) + - [Continuous Data Validation](/dm/dm-continuous-data-validation.md) + - Maintain + - Cluster Upgrade + - [Maintain DM Clusters Using TiUP (Recommended)](/dm/maintain-dm-using-tiup.md) + - [Manually Upgrade from v1.0.x to v2.0+](/dm/manually-upgrade-dm-1.0-to-2.0.md) + - Tools + - [Manage Using WebUI](/dm/dm-webui-guide.md) + - [Manage Using dmctl](/dm/dmctl-introduction.md) + - Performance Tuning + - [Benchmarks](/dm/dm-benchmark-v5.4.0.md) + - [Optimize Configurations](/dm/dm-tune-configuration.md) + - [Test DM Performance](/dm/dm-performance-test.md) + - [Handle Performance Issues](/dm/dm-handle-performance-issues.md) + - Manage Data Sources + - [Switch the MySQL Instance to Be Migrated](/dm/usage-scenario-master-slave-switch.md) + - Manage Tasks + - [Handle Failed DDL Statements](/dm/handle-failed-ddl-statements.md) + - [Manage Schemas of Tables to be Migrated](/dm/dm-manage-schema.md) + - [Export and Import Data Sources and Task Configurations of Clusters](/dm/dm-export-import-config.md) + - [Handle Alerts](/dm/dm-handle-alerts.md) + - [Daily Check](/dm/dm-daily-check.md) + - Reference + - Architecture + - [DM-worker](/dm/dm-worker-intro.md) + - [Safe Mode](/dm/dm-safe-mode.md) + - [Relay Log](/dm/relay-log.md) + - [DDL Handling](/dm/dm-ddl-compatible.md) + - Mechanism + - [DML Replication Mechanism](/dm/dm-replication-logic.md) + - Command Line + - [DM-master & DM-worker](/dm/dm-command-line-flags.md) + - Configuration Files + - [Overview](/dm/dm-config-overview.md) + - [Upstream Database Configurations](/dm/dm-source-configuration-file.md) + - [Task Configurations](/dm/task-configuration-file-full.md) + - [DM-master Configuration](/dm/dm-master-configuration-file.md) + - [DM-worker Configuration](/dm/dm-worker-configuration-file.md) + - [Table Selector](/dm/table-selector.md) + - 
[OpenAPI](/dm/dm-open-api.md) + - [Compatibility Catalog](/dm/dm-compatibility-catalog.md) + - Secure + - [Enable TLS for DM Connections](/dm/dm-enable-tls.md) + - [Generate Self-signed Certificates](/dm/dm-generate-self-signed-certificates.md) + - Monitoring and Alerts + - [Monitoring Metrics](/dm/monitor-a-dm-cluster.md) + - [Alert Rules](/dm/dm-alert-rules.md) + - [Error Codes](/dm/dm-error-handling.md#handle-common-errors) + - [Glossary](/dm/dm-glossary.md) + - Example + - [Migrate Data Using DM](/dm/migrate-data-using-dm.md) + - [Create a Data Migration Task](/dm/quick-start-create-task.md) + - [Best Practices of Data Migration in the Shard Merge Scenario](/dm/shard-merge-best-practices.md) + - Troubleshoot + - [FAQ](/dm/dm-faq.md) + - [Handle Errors](/dm/dm-error-handling.md) + - [Release Notes](/dm/dm-release-notes.md) + - TiDB Lightning + - [Overview](/tidb-lightning/tidb-lightning-overview.md) + - [Get Started](/get-started-with-tidb-lightning.md) + - [Deploy TiDB Lightning](/tidb-lightning/deploy-tidb-lightning.md) + - [Target Database Requirements](/tidb-lightning/tidb-lightning-requirements.md) + - Data Sources + - [Data Match Rules](/tidb-lightning/tidb-lightning-data-source.md) + - [CSV](/tidb-lightning/tidb-lightning-data-source.md#csv) + - [SQL](/tidb-lightning/tidb-lightning-data-source.md#sql) + - [Parquet](/tidb-lightning/tidb-lightning-data-source.md#parquet) + - [Customized File](/tidb-lightning/tidb-lightning-data-source.md#match-customized-files) + - Physical Import Mode + - [Requirements and Limitations](/tidb-lightning/tidb-lightning-physical-import-mode.md) + - [Use Physical Import Mode](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md) + - Logical Import Mode + - [Requirements and Limitations](/tidb-lightning/tidb-lightning-logical-import-mode.md) + - [Use Logical Import Mode](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md) + - [Prechecks](/tidb-lightning/tidb-lightning-prechecks.md) + - [Table 
Filter](/table-filter.md) + - [Checkpoints](/tidb-lightning/tidb-lightning-checkpoints.md) + - [Import Data in Parallel](/tidb-lightning/tidb-lightning-distributed-import.md) + - [Error Resolution](/tidb-lightning/tidb-lightning-error-resolution.md) + - [Troubleshooting](/tidb-lightning/troubleshoot-tidb-lightning.md) + - Reference + - [Configuration File](/tidb-lightning/tidb-lightning-configuration.md) + - [Command Line Flags](/tidb-lightning/tidb-lightning-command-line-full.md) + - [Monitoring](/tidb-lightning/monitor-tidb-lightning.md) + - [Web Interface](/tidb-lightning/tidb-lightning-web-interface.md) + - [FAQ](/tidb-lightning/tidb-lightning-faq.md) + - [Glossary](/tidb-lightning/tidb-lightning-glossary.md) + - [Dumpling](/dumpling-overview.md) + - TiCDC + - [Overview](/ticdc/ticdc-overview.md) + - [Deploy and Maintain](/ticdc/deploy-ticdc.md) + - Changefeed + - [Overview](/ticdc/ticdc-changefeed-overview.md) + - Create Changefeeds + - [Replicate Data to MySQL-compatible Databases](/ticdc/ticdc-sink-to-mysql.md) + - [Replicate Data to Kafka](/ticdc/ticdc-sink-to-kafka.md) + - [Replicate Data to Storage Services](/ticdc/ticdc-sink-to-cloud-storage.md) + - [Manage Changefeeds](/ticdc/ticdc-manage-changefeed.md) + - [Log Filter](/ticdc/ticdc-filter.md) + - [Bidirectional Replication](/ticdc/ticdc-bidirectional-replication.md) + - [Data Integrity Validation for Single-Row Data](/ticdc/ticdc-integrity-check.md) + - Monitor and Alert + - [Monitoring Metrics Summary](/ticdc/ticdc-summary-monitor.md) + - [Monitoring Metrics Details](/ticdc/monitor-ticdc.md) + - [Alert Rules](/ticdc/ticdc-alert-rules.md) + - Reference + - [Architecture](/ticdc/ticdc-architecture.md) + - [TiCDC Server Configurations](/ticdc/ticdc-server-config.md) + - [TiCDC Changefeed Configurations](/ticdc/ticdc-changefeed-config.md) + - Output Protocols + - [TiCDC Avro Protocol](/ticdc/ticdc-avro-protocol.md) + - [TiCDC Canal-JSON Protocol](/ticdc/ticdc-canal-json.md) + - [TiCDC Open 
Protocol](/ticdc/ticdc-open-protocol.md) + - [TiCDC CSV Protocol](/ticdc/ticdc-csv.md) + - [TiCDC Open API v2](/ticdc/ticdc-open-api-v2.md) + - [TiCDC Open API v1](/ticdc/ticdc-open-api.md) + - [Guide for Developing a Storage Sink Consumer](/ticdc/ticdc-storage-consumer-dev-guide.md) + - [Compatibility](/ticdc/ticdc-compatibility.md) + - [Troubleshoot](/ticdc/troubleshoot-ticdc.md) + - [FAQs](/ticdc/ticdc-faq.md) + - [Glossary](/ticdc/ticdc-glossary.md) + - TiDB Binlog + - [Overview](/tidb-binlog/tidb-binlog-overview.md) + - [Quick Start](/tidb-binlog/get-started-with-tidb-binlog.md) + - [Deploy](/tidb-binlog/deploy-tidb-binlog.md) + - [Maintain](/tidb-binlog/maintain-tidb-binlog-cluster.md) + - [Configure](/tidb-binlog/tidb-binlog-configuration-file.md) + - [Pump](/tidb-binlog/tidb-binlog-configuration-file.md#pump) + - [Drainer](/tidb-binlog/tidb-binlog-configuration-file.md#drainer) + - [Upgrade](/tidb-binlog/upgrade-tidb-binlog.md) + - [Monitor](/tidb-binlog/monitor-tidb-binlog-cluster.md) + - [Reparo](/tidb-binlog/tidb-binlog-reparo.md) + - [binlogctl](/tidb-binlog/binlog-control.md) + - [Binlog Consumer Client](/tidb-binlog/binlog-consumer-client.md) + - [TiDB Binlog Relay Log](/tidb-binlog/tidb-binlog-relay-log.md) + - [Bidirectional Replication Between TiDB Clusters](/tidb-binlog/bidirectional-replication-between-tidb-clusters.md) + - [Glossary](/tidb-binlog/tidb-binlog-glossary.md) + - Troubleshoot + - [Troubleshoot](/tidb-binlog/troubleshoot-tidb-binlog.md) + - [Handle Errors](/tidb-binlog/handle-tidb-binlog-errors.md) + - [FAQ](/tidb-binlog/tidb-binlog-faq.md) + - PingCAP Clinic Diagnostic Service + - [Overview](/clinic/clinic-introduction.md) + - [Quick Start](/clinic/quick-start-with-clinic.md) + - [Troubleshoot Clusters Using PingCAP Clinic](/clinic/clinic-user-guide-for-tiup.md) + - [PingCAP Clinic Diagnostic Data](/clinic/clinic-data-instruction-for-tiup.md) + - TiSpark + - [User Guide](/tispark-overview.md) + - sync-diff-inspector + - 
[Overview](/sync-diff-inspector/sync-diff-inspector-overview.md) + - [Data Check for Tables with Different Schema/Table Names](/sync-diff-inspector/route-diff.md) + - [Data Check in the Sharding Scenario](/sync-diff-inspector/shard-diff.md) + - [Data Check for TiDB Upstream/Downstream Clusters](/sync-diff-inspector/upstream-downstream-diff.md) + - [Data Check in the DM Replication Scenario](/sync-diff-inspector/dm-diff.md) +- Reference + - Cluster Architecture + - [Overview](/tidb-architecture.md) + - [Storage](/tidb-storage.md) + - [Computing](/tidb-computing.md) + - [Scheduling](/tidb-scheduling.md) + - Storage Engine - TiKV + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - [Titan Overview](/storage-engine/titan-overview.md) + - [Titan Configuration](/storage-engine/titan-configuration.md) + - [Partitioned Raft KV](/partitioned-raft-kv.md) + - Storage Engine - TiFlash + - [Overview](/tiflash/tiflash-overview.md) + - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) + - [Use TiDB to Read TiFlash Replicas](/tiflash/use-tidb-to-read-tiflash.md) + - [Use TiSpark to Read TiFlash Replicas](/tiflash/use-tispark-to-read-tiflash.md) + - [Use MPP Mode](/tiflash/use-tiflash-mpp-mode.md) + - [Use FastScan](/tiflash/use-fastscan.md) + - [Disaggregated Storage and Compute Architecture and S3 Support](/tiflash/tiflash-disaggregated-and-s3.md) + - [Supported Push-down Calculations](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash Query Result Materialization](/tiflash/tiflash-results-materialization.md) + - [TiFlash Late Materialization](/tiflash/tiflash-late-materialization.md) + - [Spill to Disk](/tiflash/tiflash-spill-disk.md) + - [Data Validation](/tiflash/tiflash-data-validation.md) + - [Compatibility](/tiflash/tiflash-compatibility.md) + - [Pipeline Execution Model](/tiflash/tiflash-pipeline-model.md) + - [System Variables](/system-variables.md) + - Configuration File Parameters + - 
[tidb-server](/tidb-configuration-file.md) + - [tikv-server](/tikv-configuration-file.md) + - [tiflash-server](/tiflash/tiflash-configuration.md) + - [pd-server](/pd-configuration-file.md) + - CLI + - [tikv-ctl](/tikv-control.md) + - [pd-ctl](/pd-control.md) + - [tidb-ctl](/tidb-control.md) + - [pd-recover](/pd-recover.md) + - Command Line Flags + - [tidb-server](/command-line-flags-for-tidb-configuration.md) + - [tikv-server](/command-line-flags-for-tikv-configuration.md) + - [tiflash-server](/tiflash/tiflash-command-line-flags.md) + - [pd-server](/command-line-flags-for-pd-configuration.md) + - Key Monitoring Metrics + - [Overview](/grafana-overview-dashboard.md) + - [Performance Overview](/grafana-performance-overview-dashboard.md) + - [TiDB](/grafana-tidb-dashboard.md) + - [PD](/grafana-pd-dashboard.md) + - [TiKV](/grafana-tikv-dashboard.md) + - [TiFlash](/tiflash/monitor-tiflash.md) + - [TiCDC](/ticdc/monitor-ticdc.md) + - [Resource Control](/grafana-resource-control-dashboard.md) + - Security + - [Enable TLS Between TiDB Clients and Servers](/enable-tls-between-clients-and-servers.md) + - [Enable TLS Between TiDB Components](/enable-tls-between-components.md) + - [Generate Self-signed Certificates](/generate-self-signed-certificates.md) + - [Encryption at Rest](/encryption-at-rest.md) + - [Enable Encryption for Disk Spill](/enable-disk-spill-encrypt.md) + - [Log Redaction](/log-redaction.md) + - Privileges + - [Security Compatibility with MySQL](/security-compatibility-with-mysql.md) + - [Privilege Management](/privilege-management.md) + - [User Account Management](/user-account-management.md) + - [TiDB Password Management](/password-management.md) + - [Role-Based Access Control](/role-based-access-control.md) + - [Certificate-Based Authentication](/certificate-authentication.md) + - SQL + - SQL Language Structure and Syntax + - Attributes + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - 
[SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [Literal Values](/literal-values.md) + - [Schema Object Names](/schema-object-names.md) + - [Keywords and Reserved Words](/keywords.md) + - [User-Defined Variables](/user-defined-variables.md) + - [Expression Syntax](/expression-syntax.md) + - [Comment Syntax](/comment-syntax.md) + - SQL Statements + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN CLEANUP`](/sql-statements/sql-statement-admin-cleanup.md) + - [`ADMIN PAUSE DDL`](/sql-statements/sql-statement-admin-pause-ddl.md) + - [`ADMIN RECOVER INDEX`](/sql-statements/sql-statement-admin-recover.md) + - [`ADMIN RESUME DDL`](/sql-statements/sql-statement-admin-resume-ddl.md) + - [`ADMIN SHOW DDL [JOBS|JOB QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ADMIN SHOW TELEMETRY`](/sql-statements/sql-statement-admin-show-telemetry.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`ALTER INSTANCE`](/sql-statements/sql-statement-alter-instance.md) + - [`ALTER PLACEMENT POLICY`](/sql-statements/sql-statement-alter-placement-policy.md) + - [`ALTER RESOURCE GROUP`](/sql-statements/sql-statement-alter-resource-group.md) + - [`ALTER TABLE`](/sql-statements/sql-statement-alter-table.md) + - [`ALTER TABLE COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BACKUP`](/sql-statements/sql-statement-backup.md) + - 
[`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CALIBRATE RESOURCE`](/sql-statements/sql-statement-calibrate-resource.md) + - [`CANCEL IMPORT JOB`](/sql-statements/sql-statement-cancel-import-job.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CHANGE DRAINER`](/sql-statements/sql-statement-change-drainer.md) + - [`CHANGE PUMP`](/sql-statements/sql-statement-change-pump.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-create-placement-policy.md) + - [`CREATE RESOURCE GROUP`](/sql-statements/sql-statement-create-resource-group.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`DROP PLACEMENT 
POLICY`](/sql-statements/sql-statement-drop-placement-policy.md) + - [`DROP RESOURCE GROUP`](/sql-statements/sql-statement-drop-resource-group.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER TO TIMESTAMP`](/sql-statements/sql-statement-flashback-to-timestamp.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT `](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT `](/sql-statements/sql-statement-grant-role.md) + - [`IMPORT INTO`](/sql-statements/sql-statement-import-into.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOAD DATA`](/sql-statements/sql-statement-load-data.md) + - [`LOAD STATS`](/sql-statements/sql-statement-load-stats.md) + - [`LOCK STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`LOCK TABLES` and `UNLOCK TABLES`](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME 
USER`](/sql-statements/sql-statement-rename-user.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`RESTORE`](/sql-statements/sql-statement-restore.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET RESOURCE GROUP`](/sql-statements/sql-statement-set-resource-group.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] `](/sql-statements/sql-statement-set-variable.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [BACKUPS|RESTORES]`](/sql-statements/sql-statement-show-backups.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW [FULL] COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CONFIG`](/sql-statements/sql-statement-show-config.md) + - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) + - [`SHOW CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-show-create-placement-policy.md) + - [`SHOW CREATE RESOURCE 
GROUP`](/sql-statements/sql-statement-show-create-resource-group.md) + - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW DRAINER STATUS`](/sql-statements/sql-statement-show-drainer-status.md) + - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW [FULL] FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW IMPORT JOB`](/sql-statements/sql-statement-show-import-job.md) + - [`SHOW INDEX [FROM|IN]`](/sql-statements/sql-statement-show-index.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW KEYS [FROM|IN]`](/sql-statements/sql-statement-show-keys.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PLACEMENT`](/sql-statements/sql-statement-show-placement.md) + - [`SHOW PLACEMENT FOR`](/sql-statements/sql-statement-show-placement-for.md) + - [`SHOW PLACEMENT LABELS`](/sql-statements/sql-statement-show-placement-labels.md) + - [`SHOW PLUGINS`](/sql-statements/sql-statement-show-plugins.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW [FULL] PROCESSSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW PUMP STATUS`](/sql-statements/sql-statement-show-pump-status.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-histograms.md) + - [`SHOW 
STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW [FULL] TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`SHUTDOWN`](/sql-statements/sql-statement-shutdown.md) + - [`SPLIT REGION`](/sql-statements/sql-statement-split-region.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - Data Types + - [Overview](/data-type-overview.md) + - [Default Values](/data-type-default-values.md) + - [Numeric Types](/data-type-numeric.md) + - [Date and Time Types](/data-type-date-and-time.md) + - [String Types](/data-type-string.md) + - [JSON Type](/data-type-json.md) + - Functions and Operators + - [Overview](/functions-and-operators/functions-and-operators-overview.md) + - [Type Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [Operators](/functions-and-operators/operators.md) + - [Control Flow Functions](/functions-and-operators/control-flow-functions.md) + - [String Functions](/functions-and-operators/string-functions.md) + - 
[Numeric Functions and Operators](/functions-and-operators/numeric-functions-and-operators.md) + - [Date and Time Functions](/functions-and-operators/date-and-time-functions.md) + - [Bit Functions and Operators](/functions-and-operators/bit-functions-and-operators.md) + - [Cast Functions and Operators](/functions-and-operators/cast-functions-and-operators.md) + - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) + - [Locking Functions](/functions-and-operators/locking-functions.md) + - [Information Functions](/functions-and-operators/information-functions.md) + - [JSON Functions](/functions-and-operators/json-functions.md) + - [Aggregate (GROUP BY) Functions](/functions-and-operators/aggregate-group-by-functions.md) + - [Window Functions](/functions-and-operators/window-functions.md) + - [Miscellaneous Functions](/functions-and-operators/miscellaneous-functions.md) + - [Precision Math](/functions-and-operators/precision-math.md) + - [Set Operations](/functions-and-operators/set-operators.md) + - [List of Expressions for Pushdown](/functions-and-operators/expressions-pushed-down.md) + - [TiDB Specific Functions](/functions-and-operators/tidb-functions.md) + - [Comparisons between Functions and Syntax of Oracle and TiDB](/oracle-functions-to-tidb.md) + - [Clustered Indexes](/clustered-indexes.md) + - [Constraints](/constraints.md) + - [Generated Columns](/generated-columns.md) + - [SQL Mode](/sql-mode.md) + - [Table Attributes](/table-attributes.md) + - Transactions + - [Overview](/transaction-overview.md) + - [Isolation Levels](/transaction-isolation-levels.md) + - [Optimistic Transactions](/optimistic-transaction.md) + - [Pessimistic Transactions](/pessimistic-transaction.md) + - [Non-Transactional DML Statements](/non-transactional-dml.md) + - [Views](/views.md) + - [Partitioning](/partitioned-table.md) + - [Temporary Tables](/temporary-tables.md) + - [Cached Tables](/cached-tables.md) + - [FOREIGN KEY 
Constraints](/foreign-key.md) + - Character Set and Collation + - [Overview](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - [Placement Rules in SQL](/placement-rules-in-sql.md) + - System Tables + - [`mysql`](/mysql-schema.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`CLUSTER_CONFIG`](/information-schema/information-schema-cluster-config.md) + - [`CLUSTER_HARDWARE`](/information-schema/information-schema-cluster-hardware.md) + - [`CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) + - [`CLUSTER_LOAD`](/information-schema/information-schema-cluster-load.md) + - [`CLUSTER_LOG`](/information-schema/information-schema-cluster-log.md) + - [`CLUSTER_SYSTEMINFO`](/information-schema/information-schema-cluster-systeminfo.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`INSPECTION_RESULT`](/information-schema/information-schema-inspection-result.md) + - [`INSPECTION_RULES`](/information-schema/information-schema-inspection-rules.md) + - 
[`INSPECTION_SUMMARY`](/information-schema/information-schema-inspection-summary.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`METRICS_SUMMARY`](/information-schema/information-schema-metrics-summary.md) + - [`METRICS_TABLES`](/information-schema/information-schema-metrics-tables.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PLACEMENT_POLICIES`](/information-schema/information-schema-placement-policies.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`RESOURCE_GROUPS`](/information-schema/information-schema-resource-groups.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`SLOW_QUERY`](/information-schema/information-schema-slow-query.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_HOT_REGIONS`](/information-schema/information-schema-tidb-hot-regions.md) + - [`TIDB_HOT_REGIONS_HISTORY`](/information-schema/information-schema-tidb-hot-regions-history.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_SERVERS_INFO`](/information-schema/information-schema-tidb-servers-info.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - 
[`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIFLASH_SEGMENTS`](/information-schema/information-schema-tiflash-segments.md) + - [`TIFLASH_TABLES`](/information-schema/information-schema-tiflash-tables.md) + - [`TIKV_REGION_PEERS`](/information-schema/information-schema-tikv-region-peers.md) + - [`TIKV_REGION_STATUS`](/information-schema/information-schema-tikv-region-status.md) + - [`TIKV_STORE_STATUS`](/information-schema/information-schema-tikv-store-status.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - [`METRICS_SCHEMA`](/metrics-schema.md) + - [Metadata Lock](/metadata-lock.md) + - UI + - TiDB Dashboard + - [Overview](/dashboard/dashboard-intro.md) + - Maintain + - [Deploy](/dashboard/dashboard-ops-deploy.md) + - [Reverse Proxy](/dashboard/dashboard-ops-reverse-proxy.md) + - [User Management](/dashboard/dashboard-user.md) + - [Secure](/dashboard/dashboard-ops-security.md) + - [Access](/dashboard/dashboard-access.md) + - [Overview Page](/dashboard/dashboard-overview.md) + - [Cluster Info Page](/dashboard/dashboard-cluster-info.md) + - [Top SQL Page](/dashboard/top-sql.md) + - [Key Visualizer Page](/dashboard/dashboard-key-visualizer.md) + - [Metrics Relation Graph](/dashboard/dashboard-metrics-relation.md) + - SQL Statements Analysis + - [SQL Statements Page](/dashboard/dashboard-statement-list.md) + - [SQL Details Page](/dashboard/dashboard-statement-details.md) + - [Slow Queries Page](/dashboard/dashboard-slow-query.md) + - Cluster Diagnostics + - [Access Cluster Diagnostics Page](/dashboard/dashboard-diagnostics-access.md) + - [View Diagnostics Report](/dashboard/dashboard-diagnostics-report.md) + - [Use 
Diagnostics](/dashboard/dashboard-diagnostics-usage.md) + - [Monitoring Page](/dashboard/dashboard-monitoring.md) + - [Search Logs Page](/dashboard/dashboard-log-search.md) + - [Resource Manager Page](/dashboard/dashboard-resource-manager.md) + - Instance Profiling + - [Manual Profiling](/dashboard/dashboard-profiling.md) + - [Continuous Profiling](/dashboard/continuous-profiling.md) + - Session Management and Configuration + - [Share Session](/dashboard/dashboard-session-share.md) + - [Configure SSO](/dashboard/dashboard-session-sso.md) + - [FAQ](/dashboard/dashboard-faq.md) + - [Telemetry](/telemetry.md) + - [Errors Codes](/error-codes.md) + - [Table Filter](/table-filter.md) + - [Schedule Replicas by Topology Labels](/schedule-replicas-by-topology-labels.md) + - Internal Components + - [TiDB Backend Task Distributed Execution Framework](/tidb-distributed-execution-framework.md) +- FAQs + - [FAQ Summary](/faq/faq-overview.md) + - [TiDB FAQs](/faq/tidb-faq.md) + - [SQL FAQs](/faq/sql-faq.md) + - [Deployment FAQs](/faq/deploy-and-maintain-faq.md) + - [Migration FAQs](/faq/migration-tidb-faq.md) + - [Upgrade FAQs](/faq/upgrade-faq.md) + - [Monitoring FAQs](/faq/monitor-faq.md) + - [Cluster Management FAQs](/faq/manage-cluster-faq.md) + - [High Availability FAQs](/faq/high-availability-faq.md) + - [High Reliability FAQs](/faq/high-reliability-faq.md) + - [Backup and Restore FAQs](/faq/backup-and-restore-faq.md) +- Release Notes + - [All Releases](/releases/release-notes.md) + - [Release Timeline](/releases/release-timeline.md) + - [TiDB Versioning](/releases/versioning.md) + - [TiDB Installation Packages](/binary-package.md) + - v7.2 + - [7.2.0-DMR](/releases/release-7.2.0.md) + - v7.1 + - [7.1.0](/releases/release-7.1.0.md) + - v7.0 + - [7.0.0-DMR](/releases/release-7.0.0.md) + - v6.6 + - [6.6.0-DMR](/releases/release-6.6.0.md) + - v6.5 + - [6.5.3](/releases/release-6.5.3.md) + - [6.5.2](/releases/release-6.5.2.md) + - [6.5.1](/releases/release-6.5.1.md) + - 
[6.5.0](/releases/release-6.5.0.md) + - v6.4 + - [6.4.0-DMR](/releases/release-6.4.0.md) + - v6.3 + - [6.3.0-DMR](/releases/release-6.3.0.md) + - v6.2 + - [6.2.0-DMR](/releases/release-6.2.0.md) + - v6.1 + - [6.1.6](/releases/release-6.1.6.md) + - [6.1.5](/releases/release-6.1.5.md) + - [6.1.4](/releases/release-6.1.4.md) + - [6.1.3](/releases/release-6.1.3.md) + - [6.1.2](/releases/release-6.1.2.md) + - [6.1.1](/releases/release-6.1.1.md) + - [6.1.0](/releases/release-6.1.0.md) + - v6.0 + - [6.0.0-DMR](/releases/release-6.0.0-dmr.md) + - v5.4 + - [5.4.3](/releases/release-5.4.3.md) + - [5.4.2](/releases/release-5.4.2.md) + - [5.4.1](/releases/release-5.4.1.md) + - [5.4.0](/releases/release-5.4.0.md) + - v5.3 + - [5.3.4](/releases/release-5.3.4.md) + - [5.3.3](/releases/release-5.3.3.md) + - [5.3.2](/releases/release-5.3.2.md) + - [5.3.1](/releases/release-5.3.1.md) + - [5.3.0](/releases/release-5.3.0.md) + - v5.2 + - [5.2.4](/releases/release-5.2.4.md) + - [5.2.3](/releases/release-5.2.3.md) + - [5.2.2](/releases/release-5.2.2.md) + - [5.2.1](/releases/release-5.2.1.md) + - [5.2.0](/releases/release-5.2.0.md) + - v5.1 + - [5.1.5](/releases/release-5.1.5.md) + - [5.1.4](/releases/release-5.1.4.md) + - [5.1.3](/releases/release-5.1.3.md) + - [5.1.2](/releases/release-5.1.2.md) + - [5.1.1](/releases/release-5.1.1.md) + - [5.1.0](/releases/release-5.1.0.md) + - v5.0 + - [5.0.6](/releases/release-5.0.6.md) + - [5.0.5](/releases/release-5.0.5.md) + - [5.0.4](/releases/release-5.0.4.md) + - [5.0.3](/releases/release-5.0.3.md) + - [5.0.2](/releases/release-5.0.2.md) + - [5.0.1](/releases/release-5.0.1.md) + - [5.0 GA](/releases/release-5.0.0.md) + - [5.0.0-rc](/releases/release-5.0.0-rc.md) + - v4.0 + - [4.0.16](/releases/release-4.0.16.md) + - [4.0.15](/releases/release-4.0.15.md) + - [4.0.14](/releases/release-4.0.14.md) + - [4.0.13](/releases/release-4.0.13.md) + - [4.0.12](/releases/release-4.0.12.md) + - [4.0.11](/releases/release-4.0.11.md) + - 
[4.0.10](/releases/release-4.0.10.md) + - [4.0.9](/releases/release-4.0.9.md) + - [4.0.8](/releases/release-4.0.8.md) + - [4.0.7](/releases/release-4.0.7.md) + - [4.0.6](/releases/release-4.0.6.md) + - [4.0.5](/releases/release-4.0.5.md) + - [4.0.4](/releases/release-4.0.4.md) + - [4.0.3](/releases/release-4.0.3.md) + - [4.0.2](/releases/release-4.0.2.md) + - [4.0.1](/releases/release-4.0.1.md) + - [4.0 GA](/releases/release-4.0-ga.md) + - [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) + - [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) + - [4.0.0-rc](/releases/release-4.0.0-rc.md) + - [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) + - [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) + - [4.0.0-beta](/releases/release-4.0.0-beta.md) + - v3.1 + - [3.1.2](/releases/release-3.1.2.md) + - [3.1.1](/releases/release-3.1.1.md) + - [3.1.0 GA](/releases/release-3.1.0-ga.md) + - [3.1.0-rc](/releases/release-3.1.0-rc.md) + - [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) + - [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) + - [3.1.0-beta](/releases/release-3.1.0-beta.md) + - v3.0 + - [3.0.20](/releases/release-3.0.20.md) + - [3.0.19](/releases/release-3.0.19.md) + - [3.0.18](/releases/release-3.0.18.md) + - [3.0.17](/releases/release-3.0.17.md) + - [3.0.16](/releases/release-3.0.16.md) + - [3.0.15](/releases/release-3.0.15.md) + - [3.0.14](/releases/release-3.0.14.md) + - [3.0.13](/releases/release-3.0.13.md) + - [3.0.12](/releases/release-3.0.12.md) + - [3.0.11](/releases/release-3.0.11.md) + - [3.0.10](/releases/release-3.0.10.md) + - [3.0.9](/releases/release-3.0.9.md) + - [3.0.8](/releases/release-3.0.8.md) + - [3.0.7](/releases/release-3.0.7.md) + - [3.0.6](/releases/release-3.0.6.md) + - [3.0.5](/releases/release-3.0.5.md) + - [3.0.4](/releases/release-3.0.4.md) + - [3.0.3](/releases/release-3.0.3.md) + - [3.0.2](/releases/release-3.0.2.md) + - [3.0.1](/releases/release-3.0.1.md) + - [3.0 GA](/releases/release-3.0-ga.md) + - 
[3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) + - [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) + - [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) + - [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) + - [3.0.0-beta](/releases/release-3.0-beta.md) + - v2.1 + - [2.1.19](/releases/release-2.1.19.md) + - [2.1.18](/releases/release-2.1.18.md) + - [2.1.17](/releases/release-2.1.17.md) + - [2.1.16](/releases/release-2.1.16.md) + - [2.1.15](/releases/release-2.1.15.md) + - [2.1.14](/releases/release-2.1.14.md) + - [2.1.13](/releases/release-2.1.13.md) + - [2.1.12](/releases/release-2.1.12.md) + - [2.1.11](/releases/release-2.1.11.md) + - [2.1.10](/releases/release-2.1.10.md) + - [2.1.9](/releases/release-2.1.9.md) + - [2.1.8](/releases/release-2.1.8.md) + - [2.1.7](/releases/release-2.1.7.md) + - [2.1.6](/releases/release-2.1.6.md) + - [2.1.5](/releases/release-2.1.5.md) + - [2.1.4](/releases/release-2.1.4.md) + - [2.1.3](/releases/release-2.1.3.md) + - [2.1.2](/releases/release-2.1.2.md) + - [2.1.1](/releases/release-2.1.1.md) + - [2.1 GA](/releases/release-2.1-ga.md) + - [2.1 RC5](/releases/release-2.1-rc.5.md) + - [2.1 RC4](/releases/release-2.1-rc.4.md) + - [2.1 RC3](/releases/release-2.1-rc.3.md) + - [2.1 RC2](/releases/release-2.1-rc.2.md) + - [2.1 RC1](/releases/release-2.1-rc.1.md) + - [2.1 Beta](/releases/release-2.1-beta.md) + - v2.0 + - [2.0.11](/releases/release-2.0.11.md) + - [2.0.10](/releases/release-2.0.10.md) + - [2.0.9](/releases/release-2.0.9.md) + - [2.0.8](/releases/release-2.0.8.md) + - [2.0.7](/releases/release-2.0.7.md) + - [2.0.6](/releases/release-2.0.6.md) + - [2.0.5](/releases/release-2.0.5.md) + - [2.0.4](/releases/release-2.0.4.md) + - [2.0.3](/releases/release-2.0.3.md) + - [2.0.2](/releases/release-2.0.2.md) + - [2.0.1](/releases/release-2.0.1.md) + - [2.0](/releases/release-2.0-ga.md) + - [2.0 RC5](/releases/release-2.0-rc.5.md) + - [2.0 RC4](/releases/release-2.0-rc.4.md) + - [2.0 RC3](/releases/release-2.0-rc.3.md) + - [2.0 
RC1](/releases/release-2.0-rc.1.md) + - [1.1 Beta](/releases/release-1.1-beta.md) + - [1.1 Alpha](/releases/release-1.1-alpha.md) + - v1.0 + - [1.0.8](/releases/release-1.0.8.md) + - [1.0.7](/releases/release-1.0.7.md) + - [1.0.6](/releases/release-1.0.6.md) + - [1.0.5](/releases/release-1.0.5.md) + - [1.0.4](/releases/release-1.0.4.md) + - [1.0.3](/releases/release-1.0.3.md) + - [1.0.2](/releases/release-1.0.2.md) + - [1.0.1](/releases/release-1.0.1.md) + - [1.0](/releases/release-1.0-ga.md) + - [Pre-GA](/releases/release-pre-ga.md) + - [RC4](/releases/release-rc.4.md) + - [RC3](/releases/release-rc.3.md) + - [RC2](/releases/release-rc.2.md) + - [RC1](/releases/release-rc.1.md) +- [Glossary](/glossary.md) diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/master/releases/release-7.2.0.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/releases/release-7.2.0.md new file mode 100644 index 00000000..15d25c97 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/releases/release-7.2.0.md @@ -0,0 +1,328 @@ +--- +title: TiDB 7.2.0 Release Notes +summary: Learn about the new features, compatibility changes, improvements, and bug fixes in TiDB 7.2.0. +--- + +# TiDB 7.2.0 Release Notes + +Release date: June 29, 2023 + +TiDB version: 7.2.0 + +Quick access: [Quick start](https://docs.pingcap.com/tidb/v7.2/quick-start-with-tidb) | [Installation packages](https://www.pingcap.com/download/?version=v7.2.0#version-list) + +7.2.0 introduces the following key features and improvements: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CategoryFeatureDescription
Scalability and PerformanceResource groups support managing runaway queries (experimental)You can now manage query timeout with more granularity, allowing for different behaviors based on query classifications. Queries meeting your specified threshold can be deprioritized or terminated. +
TiFlash supports the pipeline execution model (experimental)TiFlash supports a pipeline execution model to optimize thread resource control.
SQLSupport a new SQL statement, IMPORT INTO, to enable data import using the TiDB service, itself (experimental)To simplify the deployment and maintenance of TiDB Lightning, TiDB introduces a new SQL statement IMPORT INTO, which integrates physical import mode of TiDB Lightning, including remote import from Amazon S3 or Google Cloud Storage (GCS) directly into TiDB.
DB Operations and ObservabilityDDL supports pause and resume operations (experimental)This new capability lets you temporarily suspend resource-intensive DDL operations, such as index creation, to conserve resources and minimize the impact on online traffic. You can seamlessly resume these operations when ready, without the need to cancel and restart. This feature enhances resource utilization, improves user experience, and streamlines schema changes.
+ +## Feature details + +### Performance + +* Support pushing down the following two [window functions](/tiflash/tiflash-supported-pushdown-calculations.md) to TiFlash [#7427](https://github.com/pingcap/tiflash/issues/7427) @[xzhangxian1008](https://github.com/xzhangxian1008) + + * `FIRST_VALUE` + * `LAST_VALUE` + +* TiFlash supports the pipeline execution model (experimental) [#6518](https://github.com/pingcap/tiflash/issues/6518) @[SeaRise](https://github.com/SeaRise) + + Prior to v7.2.0, each task in the TiFlash engine must individually request thread resources during execution. TiFlash controls the number of tasks to limit thread resource usage and prevent overuse, but this issue could not be completely eliminated. To address this problem, starting from v7.2.0, TiFlash introduces a pipeline execution model. This model centrally manages all thread resources and schedules task execution uniformly, maximizing the utilization of thread resources while avoiding resource overuse. To enable or disable the pipeline execution model, modify the [`tidb_enable_tiflash_pipeline_model`](/system-variables.md#tidb_enable_tiflash_pipeline_model-new-in-v720) system variable. + + For more information, see [documentation](/tiflash/tiflash-pipeline-model.md). + +* TiFlash reduces the latency of schema replication [#7630](https://github.com/pingcap/tiflash/issues/7630) @[hongyunyan](https://github.com/hongyunyan) + + When the schema of a table changes, TiFlash needs to replicate the latest schema from TiKV in a timely manner. Before v7.2.0, when TiFlash accesses table data and detects a table schema change within a database, TiFlash needs to replicate the schemas of all tables in this database again, including those tables without TiFlash replicas. As a result, in a database with a large number of tables, even if you only need to read data from a single table using TiFlash, you might experience significant latency to wait for TiFlash to complete the schema replication of all tables. 
+ + In v7.2.0, TiFlash optimizes the schema replication mechanism and supports only replicating schemas of tables with TiFlash replicas. When a schema change is detected for a table with TiFlash replicas, TiFlash only replicates the schema of that table, which reduces the latency of schema replication of TiFlash and minimizes the impact of DDL operations on TiFlash data replication. This optimization is automatically applied and does not require any manual configuration. + +* Improve the performance of statistics collection [#44725](https://github.com/pingcap/tidb/issues/44725) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) + + TiDB v7.2.0 optimizes the statistics collection strategy, skipping some of the duplicate information and information that is of little value to the optimizer. The overall speed of statistics collection has been improved by 30%. This improvement allows TiDB to update the statistics of the database in a more timely manner, making the generated execution plans more accurate, thus improving the overall database performance. + + By default, statistics collection skips the columns of the `JSON`, `BLOB`, `MEDIUMBLOB`, and `LONGBLOB` types. You can modify the default behavior by setting the [`tidb_analyze_skip_column_types`](/system-variables.md#tidb_analyze_skip_column_types-new-in-v720) system variable. TiDB supports skipping the `JSON`, `BLOB`, and `TEXT` types and their subtypes. + + For more information, see [documentation](/system-variables.md#tidb_analyze_skip_column_types-new-in-v720). + +* Improve the performance of checking data and index consistency [#43693](https://github.com/pingcap/tidb/issues/43693) @[wjhuang2016](https://github.com/wjhuang2016) + + The [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) statement is used to check the consistency between data in a table and its corresponding indexes. 
In v7.2.0, TiDB optimizes the method for checking data consistency and improves the execution efficiency of [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) greatly. In scenarios with large amounts of data, this optimization can provide a performance boost of hundreds of times. + + The optimization is enabled by default ([`tidb_enable_fast_table_check`](/system-variables.md#tidb_enable_fast_table_check-new-in-v720) is `ON` by default) to greatly reduce the time required for data consistency checks in large-scale tables and enhance operational efficiency. + + For more information, see [documentation](/system-variables.md#tidb_enable_fast_table_check-new-in-v720). + +### Reliability + +* Automatically manage queries that consume more resources than expected (experimental) [#43691](https://github.com/pingcap/tidb/issues/43691) @[Connor1996](https://github.com/Connor1996) @[CabinfeverB](https://github.com/CabinfeverB) @[glorv](https://github.com/glorv) @[HuSharp](https://github.com/HuSharp) @[nolouch](https://github.com/nolouch) + + The most common challenge to database stability is the degradation of overall database performance caused by abrupt SQL performance problems. There are many causes for SQL performance issues, such as new SQL statements that have not been fully tested, drastic changes in data volume, and abrupt changes in execution plans. These issues are difficult to completely avoid at the root. TiDB v7.2.0 provides the ability to manage queries that consume more resources than expected. This feature can quickly reduce the scope of impact when a performance issue occurs. + + To manage these queries, you can set the maximum execution time of queries for a resource group. When the execution time of a query exceeds this limit, the query is automatically deprioritized or cancelled. You can also set a period of time to immediately match identified queries by text or execution plan. 
This helps prevent high concurrency of the problematic queries during the identification phase that could consume more resources than expected. + + Automatic management of queries that consume more resources than expected provides you with an effective means to quickly respond to unexpected query performance problems. This feature can reduce the impact of the problem on overall database performance, thereby improving database stability. + + For more information, see [documentation](/tidb-resource-control.md#manage-queries-that-consume-more-resources-than-expected-runaway-queries). + +* Enhance the capability of creating a binding according to a historical execution plan [#39199](https://github.com/pingcap/tidb/issues/39199) @[qw4990](https://github.com/qw4990) + + TiDB v7.2.0 enhances the capability of [creating a binding according to a historical execution plan](/sql-plan-management.md#create-a-binding-according-to-a-historical-execution-plan). This feature improves the parsing and binding process for complex statements, making the bindings more stable, and supports the following new hints: + + - [`AGG_TO_COP()`](/optimizer-hints.md#agg_to_cop) + - [`LIMIT_TO_COP()`](/optimizer-hints.md#limit_to_cop) + - [`ORDER_INDEX`](/optimizer-hints.md#order_indext1_name-idx1_name--idx2_name-) + - [`NO_ORDER_INDEX()`](/optimizer-hints.md#no_order_indext1_name-idx1_name--idx2_name-) + + For more information, see [documentation](/sql-plan-management.md). + +* Introduce the Optimizer Fix Controls mechanism to provide fine-grained control over optimizer behaviors [#43169](https://github.com/pingcap/tidb/issues/43169) @[time-and-fate](https://github.com/time-and-fate) + + To generate more reasonable execution plans, the behavior of the TiDB optimizer evolves over product iterations. However, in some particular scenarios, the changes might lead to performance regression. 
TiDB v7.2.0 introduces Optimizer Fix Controls to let you control some of the fine-grained behaviors of the optimizer. This enables you to roll back or control some new changes. + + Each controllable behavior is described by a GitHub issue corresponding to the fix number. All controllable behaviors are listed in [Optimizer Fix Controls](/optimizer-fix-controls.md). You can set a target value for one or more behaviors by setting the [`tidb_opt_fix_control`](/system-variables.md#tidb_opt_fix_control-new-in-v710) system variable to achieve behavior control. + + The Optimizer Fix Controls mechanism helps you control the TiDB optimizer at a granular level. It provides a new means of fixing performance issues caused by the upgrade process and improves the stability of TiDB. + + For more information, see [documentation](/optimizer-fix-controls.md). + +* Lightweight statistics initialization becomes generally available (GA) [#42160](https://github.com/pingcap/tidb/issues/42160) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) + + Starting from v7.2.0, the lightweight statistics initialization feature becomes GA. Lightweight statistics initialization can significantly reduce the number of statistics that must be loaded during startup, thus improving the speed of loading statistics. This feature increases the stability of TiDB in complex runtime environments and reduces the impact on the overall service when TiDB nodes restart. + + For newly created clusters of v7.2.0 or later versions, TiDB loads lightweight statistics by default during TiDB startup and will wait for the loading to finish before providing services. For clusters upgraded from earlier versions, you can set the TiDB configuration items [`lite-init-stats`](/tidb-configuration-file.md#lite-init-stats-new-in-v710) and [`force-init-stats`](/tidb-configuration-file.md#force-init-stats-new-in-v710) to `true` to enable this feature. + + For more information, see [documentation](/statistics.md#load-statistics). 
+ +### SQL + +* Support the `CHECK` constraints [#41711](https://github.com/pingcap/tidb/issues/41711) @[fzzf678](https://github.com/fzzf678) + + Starting from v7.2.0, you can use `CHECK` constraints to restrict the values of one or more columns in a table to meet your specified conditions. When a `CHECK` constraint is added to a table, TiDB checks whether the constraint is satisfied before inserting or updating data in the table. Only the data that satisfies the constraint can be written. + + This feature is disabled by default. You can set the [`tidb_enable_check_constraint`](/system-variables.md#tidb_enable_check_constraint-new-in-v720) system variable to `ON` to enable it. + + For more information, see [documentation](/constraints.md#check). + +### DB operations + +* DDL jobs support pause and resume operations (experimental) [#18015](https://github.com/pingcap/tidb/issues/18015) @[godouxm](https://github.com/godouxm) + + Before TiDB v7.2.0, when a DDL job encounters a business peak during execution, you can only manually cancel the DDL job to reduce its impact on the business. In v7.2.0, TiDB introduces pause and resume operations for DDL jobs. These operations let you pause DDL jobs during a peak and resume them after the peak ends, thus avoiding impact on your application workloads. + + For example, you can pause and resume multiple DDL jobs using `ADMIN PAUSE DDL JOBS` or `ADMIN RESUME DDL JOBS`: + + ```sql + ADMIN PAUSE DDL JOBS 1,2; + ADMIN RESUME DDL JOBS 1,2; + ``` + + For more information, see [documentation](/ddl-introduction.md#ddl-related-commands). + +### Data migration + +* Introduce a new SQL statement `IMPORT INTO` to improve data import efficiency greatly (experimental) [#42930](https://github.com/pingcap/tidb/issues/42930) @[D3Hunter](https://github.com/D3Hunter) + + The `IMPORT INTO` statement integrates the [Physical Import Mode](/tidb-lightning/tidb-lightning-physical-import-mode.md) capability of TiDB Lightning. 
With this statement, you can quickly import data in formats such as CSV, SQL, and PARQUET into an empty table in TiDB. This import method eliminates the need for a separate deployment and management of TiDB Lightning, thereby reducing the complexity of data import and greatly improving import efficiency. + + For data files stored in Amazon S3 or GCS, when the [Backend task distributed execution framework](/tidb-distributed-execution-framework.md) is enabled, `IMPORT INTO` also supports splitting a data import job into multiple sub-jobs and scheduling them to multiple TiDB nodes for parallel import, which further enhances import performance. + + For more information, see [documentation](/sql-statements/sql-statement-import-into.md). + +* TiDB Lightning supports importing source files with the Latin-1 character set into TiDB [#44434](https://github.com/pingcap/tidb/issues/44434) @[lance6716](https://github.com/lance6716) + + With this feature, you can directly import source files with the Latin-1 character set into TiDB using TiDB Lightning. Before v7.2.0, importing such files requires your additional preprocessing or conversion. Starting from v7.2.0, you only need to specify `character-set = "latin1"` when configuring the TiDB Lightning import task. Then, TiDB Lightning automatically handles the character set conversion during the import process to ensure data integrity and accuracy. + + For more information, see [documentation](/tidb-lightning/tidb-lightning-configuration.md#tidb-lightning-task). + +## Compatibility changes + +> **Note:** +> +> This section provides compatibility changes you need to know when you upgrade from v7.1.0 to the current version (v7.2.0). If you are upgrading from v7.0.0 or earlier versions to the current version, you might also need to check the compatibility changes introduced in intermediate versions. 
+ +### System variables + +| Variable name | Change type | Description | +|--------|------------------------------|------| +| [`last_insert_id`](/system-variables.md#last_insert_id) | Modified | Changes the maximum value from `9223372036854775807` to `18446744073709551615` to be consistent with that of MySQL. | +| [`tidb_enable_non_prepared_plan_cache`](/system-variables.md#tidb_enable_non_prepared_plan_cache) | Modified | Changes the default value from `OFF` to `ON` after further tests, meaning that non-prepared execution plan cache is enabled. | +| [`tidb_remove_orderby_in_subquery`](/system-variables.md#tidb_remove_orderby_in_subquery-new-in-v610) | Modified | Changes the default value from `OFF` to `ON` after further tests, meaning that the optimizer removes the `ORDER BY` clause in a subquery. | +| [`tidb_analyze_skip_column_types`](/system-variables.md#tidb_analyze_skip_column_types-new-in-v720) | Newly added | Controls which types of columns are skipped for statistics collection when executing the `ANALYZE` command to collect statistics. The variable is only applicable for [`tidb_analyze_version = 2`](/system-variables.md#tidb_analyze_version-new-in-v510). When using the syntax of `ANALYZE TABLE t COLUMNS c1, ..., cn`, if the type of a specified column is included in `tidb_analyze_skip_column_types`, the statistics of this column will not be collected. | +| [`tidb_enable_check_constraint`](/system-variables.md#tidb_enable_check_constraint-new-in-v720) | Newly added | Controls whether to enable `CHECK` constraints. The default value is `OFF`, which means this feature is disabled. | +| [`tidb_enable_fast_table_check`](/system-variables.md#tidb_enable_fast_table_check-new-in-v720) | Newly added | Controls whether to use a checksum-based approach to quickly check the consistency of data and indexes in a table. The default value is `ON`, which means this feature is enabled. 
| +| [`tidb_enable_tiflash_pipeline_model`](/system-variables.md#tidb_enable_tiflash_pipeline_model-new-in-v720) | Newly added | Controls whether to enable the new execution model of TiFlash, the [pipeline model](/tiflash/tiflash-pipeline-model.md). The default value is `OFF`, which means the pipeline model is disabled. | +| [`tidb_expensive_txn_time_threshold`](/system-variables.md#tidb_expensive_txn_time_threshold-new-in-v720) | Newly added | Controls the threshold for logging expensive transactions, which is 600 seconds by default. When the duration of a transaction exceeds the threshold, and the transaction is neither committed nor rolled back, it is considered an expensive transaction and will be logged. | + +### Configuration file parameters + +| Configuration file | Configuration parameter | Change type | Description | +| -------- | -------- | -------- | -------- | +| TiDB | [`lite-init-stats`](/tidb-configuration-file.md#lite-init-stats-new-in-v710) | Modified | Changes the default value from `false` to `true` after further tests, meaning that TiDB uses lightweight statistics initialization by default during TiDB startup to improve the initialization efficiency. | +| TiDB | [`force-init-stats`](/tidb-configuration-file.md#force-init-stats-new-in-v710) | Modified | Changes the default value from `false` to `true` to align with [`lite-init-stats`](/tidb-configuration-file.md#lite-init-stats-new-in-v710), meaning that TiDB waits for statistics initialization to finish before providing services during TiDB startup. | +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].compaction-guard-min-output-file-size](/tikv-configuration-file.md#compaction-guard-min-output-file-size) | Modified | Changes the default value from `"8MB"` to `"1MB"` to reduce the data volume of compaction tasks in RocksDB. 
| +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].optimize-filters-for-memory](/tikv-configuration-file.md#optimize-filters-for-memory-new-in-v720) | Newly added | Controls whether to generate Bloom/Ribbon filters that minimize memory internal fragmentation. | +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].periodic-compaction-seconds](/tikv-configuration-file.md#periodic-compaction-seconds-new-in-v720) | Newly added | Controls the time interval for periodic compaction. SST files with updates older than this value will be selected for compaction and rewritten to the same level where these SST files originally reside. | +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].ribbon-filter-above-level](/tikv-configuration-file.md#ribbon-filter-above-level-new-in-v720) | Newly added | Controls whether to use Ribbon filters for levels greater than or equal to this value and use non-block-based bloom filters for levels less than this value. | +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].ttl](/tikv-configuration-file.md#ttl-new-in-v720) | Newly added | SST files with updates older than the TTL will be automatically selected for compaction. | +| TiDB Lightning | `send-kv-pairs` | Deprecated | Starting from v7.2.0, the parameter `send-kv-pairs` is deprecated. You can use [`send-kv-size`](/tidb-lightning/tidb-lightning-configuration.md) to control the maximum size of one request when sending data to TiKV in physical import mode. | +| TiDB Lightning | [`character-set`](/tidb-lightning/tidb-lightning-configuration.md#tidb-lightning-task) | Modified | Introduces a new value option `latin1` for the supported character sets of data import. You can use this option to import source files with the Latin-1 character set. | +| TiDB Lightning | [`send-kv-size`](/tidb-lightning/tidb-lightning-configuration.md) | Newly added | Specify the maximum size of one request when sending data to TiKV in physical import mode. 
When the size of key-value pairs reaches the specified threshold, TiDB Lightning will immediately send them to TiKV. This avoids the OOM problems caused by TiDB Lightning nodes accumulating too many key-value pairs in memory when importing large wide tables. By adjusting this parameter, you can find a balance between memory usage and import speed, improving the stability and efficiency of the import process. | +| Data Migration | [`strict-optimistic-shard-mode`](/dm/feature-shard-merge-optimistic.md) | Newly added | This configuration item is used to be compatible with the DDL shard merge behavior in TiDB Data Migration v2.0. You can enable this configuration item in optimistic mode. After this is enabled, the replication task will be interrupted when it encounters a Type 2 DDL statement. In scenarios where there are dependencies between DDL changes in multiple tables, a timely interruption can be made. You need to manually process the DDL statements of each table before resuming the replication task to ensure data consistency between the upstream and the downstream. | +| TiCDC | [`sink.protocol`](/ticdc/ticdc-changefeed-config.md) | Modified | Introduces a new value option `"open-protocol"` when the downstream is Kafka. Specifies the protocol format used for encoding messages. | +| TiCDC | [`sink.delete-only-output-handle-key-columns`](/ticdc/ticdc-changefeed-config.md) | Newly added | Specifies the output of DELETE events. This parameter is valid only for `"canal-json"` and `"open-protocol"` protocols. The default value is `false`, which means outputting all columns. When you set it to `true`, only primary key columns or unique index columns are output. 
| + +## Improvements + ++ TiDB + + - Optimize the logic of constructing index scan range so that it supports converting complex conditions into index scan range [#41572](https://github.com/pingcap/tidb/issues/41572) [#44389](https://github.com/pingcap/tidb/issues/44389) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) + - Add new monitoring metrics `Stale Read OPS` and `Stale Read Traffic` [#43325](https://github.com/pingcap/tidb/issues/43325) @[you06](https://github.com/you06) + - When the retry leader of stale read encounters a lock, TiDB forcibly retries with the leader after resolving the lock, which avoids unnecessary overhead [#43659](https://github.com/pingcap/tidb/issues/43659) @[you06](https://github.com/you06) + - Use estimated time to calculate stale read ts and reduce the overhead of stale read [#44215](https://github.com/pingcap/tidb/issues/44215) @[you06](https://github.com/you06) + - Add logs and system variables for long-running transactions [#41471](https://github.com/pingcap/tidb/issues/41471) @[crazycs520](https://github.com/crazycs520) + - Support connecting to TiDB through the compressed MySQL protocol, which improves the performance of data-intensive queries under low bandwidth networks and saves bandwidth costs. This supports both `zlib` and `zstd` based compression. 
[#22605](https://github.com/pingcap/tidb/issues/22605) @[dveeden](https://github.com/dveeden)
+    - Recognize both `utf8` and `utf8mb3` as the legacy three-byte UTF-8 character set encodings, which facilitates the migration of tables with legacy UTF-8 encodings from MySQL 8.0 to TiDB [#26226](https://github.com/pingcap/tidb/issues/26226) @[dveeden](https://github.com/dveeden)
+    - Support using `:=` for assignment in `UPDATE` statements [#44751](https://github.com/pingcap/tidb/issues/44751) @[CbcWestwolf](https://github.com/CbcWestwolf)
+
++ TiKV
+
+    - Support configuring the retry interval of PD connections in scenarios such as connection request failures using `pd.retry-interval` [#14964](https://github.com/tikv/tikv/issues/14964) @[rleungx](https://github.com/rleungx)
+    - Optimize the resource control scheduling algorithm by incorporating the global resource usage [#14604](https://github.com/tikv/tikv/issues/14604) @[Connor1996](https://github.com/Connor1996)
+    - Use gzip compression for `check_leader` requests to reduce traffic [#14553](https://github.com/tikv/tikv/issues/14553) @[you06](https://github.com/you06)
+    - Add related metrics for `check_leader` requests [#14658](https://github.com/tikv/tikv/issues/14658) @[you06](https://github.com/you06)
+    - Provide detailed time information during TiKV handling write commands [#12362](https://github.com/tikv/tikv/issues/12362) @[cfzjywxk](https://github.com/cfzjywxk)
+
++ PD
+
+    - Use a separate gRPC connection for PD leader election to prevent the impact of other requests [#6403](https://github.com/tikv/pd/issues/6403) @[rleungx](https://github.com/rleungx)
+    - Enable the bucket splitting by default to mitigate hotspot issues in multi-Region scenarios [#6433](https://github.com/tikv/pd/issues/6433) @[bufferflies](https://github.com/bufferflies)
+
++ Tools
+
+    + Backup & Restore (BR)
+
+        - Support access to Azure Blob Storage by shared access signature (SAS) [#44199](https://github.com/pingcap/tidb/issues/44199) 
@[Leavrth](https://github.com/Leavrth)
+
+    + TiCDC
+
+        - Optimize the structure of the directory where data files are stored when a DDL operation occurs in the scenario of replication to an object storage service [#8891](https://github.com/pingcap/tiflow/issues/8891) @[CharlesCheung96](https://github.com/CharlesCheung96)
+        - Support the OAUTHBEARER authentication in the scenario of replication to Kafka [#8865](https://github.com/pingcap/tiflow/issues/8865) @[hi-rustin](https://github.com/hi-rustin)
+        - Add the option of outputting only the handle keys for the `DELETE` operation in the scenario of replication to Kafka [#9143](https://github.com/pingcap/tiflow/issues/9143) @[3AceShowHand](https://github.com/3AceShowHand)
+
+    + TiDB Data Migration (DM)
+
+        - Support reading compressed binlogs in MySQL 8.0 as a data source for incremental replication [#6381](https://github.com/pingcap/tiflow/issues/6381) @[dveeden](https://github.com/dveeden)
+
+    + TiDB Lightning
+
+        - Optimize the retry mechanism during import to avoid errors caused by leader switching [#44478](https://github.com/pingcap/tidb/pull/44478) @[lance6716](https://github.com/lance6716)
+        - Verify checksum through SQL after import to improve stability of verification [#41941](https://github.com/pingcap/tidb/issues/41941) @[GMHDBJD](https://github.com/GMHDBJD)
+        - Optimize TiDB Lightning OOM issues when importing wide tables [#43853](https://github.com/pingcap/tidb/issues/43853) @[D3Hunter](https://github.com/D3Hunter)
+
+## Bug fixes
+
++ TiDB
+
+    - Fix the issue that the query with CTE causes TiDB to hang [#43749](https://github.com/pingcap/tidb/issues/43749) [#36896](https://github.com/pingcap/tidb/issues/36896) @[guo-shaoge](https://github.com/guo-shaoge)
+    - Fix the issue that the `min, max` query result is incorrect [#43805](https://github.com/pingcap/tidb/issues/43805) @[wshwsh12](https://github.com/wshwsh12)
+    - Fix the issue that the `SHOW PROCESSLIST` statement cannot display the TxnStart of the 
transaction of the statement with a long subquery time [#40851](https://github.com/pingcap/tidb/issues/40851) @[crazycs520](https://github.com/crazycs520) + - Fix the issue that the stale read global optimization does not take effect due to the lack of `TxnScope` in Coprocessor tasks [#43365](https://github.com/pingcap/tidb/issues/43365) @[you06](https://github.com/you06) + - Fix the issue that follower read does not handle flashback errors before retrying, which causes query errors [#43673](https://github.com/pingcap/tidb/issues/43673) @[you06](https://github.com/you06) + - Fix the issue that data and indexes are inconsistent when the `ON UPDATE` statement does not correctly update the primary key [#44565](https://github.com/pingcap/tidb/issues/44565) @[zyguan](https://github.com/zyguan) + - Modify the upper limit of the `UNIX_TIMESTAMP()` function to `3001-01-19 03:14:07.999999 UTC` to be consistent with that of MySQL 8.0.28 or later versions [#43987](https://github.com/pingcap/tidb/issues/43987) @[YangKeao](https://github.com/YangKeao) + - Fix the issue that adding an index fails in the ingest mode [#44137](https://github.com/pingcap/tidb/issues/44137) @[tangenta](https://github.com/tangenta) + - Fix the issue that canceling a DDL task in the rollback state causes errors in related metadata [#44143](https://github.com/pingcap/tidb/issues/44143) @[wjhuang2016](https://github.com/wjhuang2016) + - Fix the issue that using `memTracker` with cursor fetch causes memory leaks [#44254](https://github.com/pingcap/tidb/issues/44254) @[YangKeao](https://github.com/YangKeao) + - Fix the issue that dropping a database causes slow GC progress [#33069](https://github.com/pingcap/tidb/issues/33069) @[tiancaiamao](https://github.com/tiancaiamao) + - Fix the issue that TiDB returns an error when the corresponding rows in partitioned tables cannot be found in the probe phase of index join [#43686](https://github.com/pingcap/tidb/issues/43686) 
@[AilinKid](https://github.com/AilinKid) @[mjonss](https://github.com/mjonss) + - Fix the issue that there is no warning when using `SUBPARTITION` to create partitioned tables [#41198](https://github.com/pingcap/tidb/issues/41198) [#41200](https://github.com/pingcap/tidb/issues/41200) @[mjonss](https://github.com/mjonss) + - Fix the issue that when a query is killed because it exceeds `MAX_EXECUTION_TIME`, the returned error message is inconsistent with that of MySQL [#43031](https://github.com/pingcap/tidb/issues/43031) @[dveeden](https://github.com/dveeden) + - Fix the issue that the `LEADING` hint does not support querying block aliases [#44645](https://github.com/pingcap/tidb/issues/44645) @[qw4990](https://github.com/qw4990) + - Modify the return type of the `LAST_INSERT_ID()` function from VARCHAR to LONGLONG to be consistent with that of MySQL [#44574](https://github.com/pingcap/tidb/issues/44574) @[Defined2014](https://github.com/Defined2014) + - Fix the issue that incorrect results might be returned when using a common table expression (CTE) in statements with non-correlated subqueries [#44051](https://github.com/pingcap/tidb/issues/44051) @[winoros](https://github.com/winoros) + - Fix the issue that Join Reorder might cause incorrect outer join results [#44314](https://github.com/pingcap/tidb/issues/44314) @[AilinKid](https://github.com/AilinKid) + - Fix the issue that `PREPARE stmt FROM "ANALYZE TABLE xxx"` might be killed by `tidb_mem_quota_query` [#44320](https://github.com/pingcap/tidb/issues/44320) @[chrysan](https://github.com/chrysan) + ++ TiKV + + - Fix the issue that the transaction returns an incorrect value when TiKV handles stale pessimistic lock conflicts [#13298](https://github.com/tikv/tikv/issues/13298) @[cfzjywxk](https://github.com/cfzjywxk) + - Fix the issue that in-memory pessimistic lock might cause flashback failures and data inconsistency [#13303](https://github.com/tikv/tikv/issues/13303) @[JmPotato](https://github.com/JmPotato) + 
- Fix the issue that the fair lock might be incorrect when TiKV handles stale requests [#13298](https://github.com/tikv/tikv/issues/13298) @[cfzjywxk](https://github.com/cfzjywxk) + - Fix the issue that `autocommit` and `point get replica read` might break linearizability [#14715](https://github.com/tikv/tikv/issues/14715) @[cfzjywxk](https://github.com/cfzjywxk) + ++ PD + + - Fix the issue that redundant replicas cannot be automatically repaired in some corner cases [#6573](https://github.com/tikv/pd/issues/6573) @[nolouch](https://github.com/nolouch) + ++ TiFlash + + - Fix the issue that queries might consume more memory than needed when the data on the Join build side is very large and contains many small string type columns [#7416](https://github.com/pingcap/tiflash/issues/7416) @[yibin87](https://github.com/yibin87) + ++ Tools + + + Backup & Restore (BR) + + - Fix the issue that `checksum mismatch` is falsely reported in some cases [#44472](https://github.com/pingcap/tidb/issues/44472) @[Leavrth](https://github.com/Leavrth) + - Fix the issue that `resolved lock timeout` is falsely reported in some cases [#43236](https://github.com/pingcap/tidb/issues/43236) @[YuJuncen](https://github.com/YuJuncen) + - Fix the issue that TiDB might panic when restoring statistics information [#44490](https://github.com/pingcap/tidb/issues/44490) @[tangenta](https://github.com/tangenta) + + + TiCDC + + - Fix the issue that Resolved TS does not advance properly in some cases [#8963](https://github.com/pingcap/tiflow/issues/8963) @[CharlesCheung96](https://github.com/CharlesCheung96) + - Fix the issue that the `UPDATE` operation cannot output old values when the Avro or CSV protocol is used [#9086](https://github.com/pingcap/tiflow/issues/9086) @[3AceShowHand](https://github.com/3AceShowHand) + - Fix the issue of excessive downstream pressure caused by reading downstream metadata too frequently when replicating data to Kafka [#8959](https://github.com/pingcap/tiflow/issues/8959) 
@[hi-rustin](https://github.com/hi-rustin) + - Fix the issue of too many downstream logs caused by frequently setting the downstream bidirectional replication-related variables when replicating data to TiDB or MySQL [#9180](https://github.com/pingcap/tiflow/issues/9180) @[asddongmen](https://github.com/asddongmen) + - Fix the issue that the PD node crashing causes the TiCDC node to restart [#8868](https://github.com/pingcap/tiflow/issues/8868) @[asddongmen](https://github.com/asddongmen) + - Fix the issue that TiCDC cannot create a changefeed with a downstream Kafka-on-Pulsar [#8892](https://github.com/pingcap/tiflow/issues/8892) @[hi-rustin](https://github.com/hi-rustin) + + + TiDB Lightning + + - Fix the TiDB Lightning panic issue when `experimental.allow-expression-index` is enabled and the default value is UUID [#44497](https://github.com/pingcap/tidb/issues/44497) @[lichunzhu](https://github.com/lichunzhu) + - Fix the TiDB Lightning panic issue when a task exits while dividing a data file [#43195](https://github.com/pingcap/tidb/issues/43195) @[lance6716](https://github.com/lance6716) + +## Contributors + +We would like to thank the following contributors from the TiDB community: + +- [asjdf](https://github.com/asjdf) +- [blacktear23](https://github.com/blacktear23) +- [Cavan-xu](https://github.com/Cavan-xu) +- [darraes](https://github.com/darraes) +- [demoManito](https://github.com/demoManito) +- [dhysum](https://github.com/dhysum) +- [HappyUncle](https://github.com/HappyUncle) +- [jiyfhust](https://github.com/jiyfhust) +- [L-maple](https://github.com/L-maple) +- [nyurik](https://github.com/nyurik) +- [SeigeC](https://github.com/SeigeC) +- [tangjingyu97](https://github.com/tangjingyu97) \ No newline at end of file diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/master/releases/release-notes.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/releases/release-notes.md new file mode 100644 index 00000000..923f3cb2 --- /dev/null +++ 
b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/releases/release-notes.md @@ -0,0 +1,230 @@ +--- +title: Release Notes +aliases: ['/docs/dev/releases/release-notes/','/docs/dev/releases/rn/'] +--- + +# TiDB Release Notes + +## 7.2 + +- [7.2.0-DMR](/releases/release-7.2.0.md): 2023-06-29 + +## 7.1 + +- [7.1.0](/releases/release-7.1.0.md): 2023-05-31 + +## 7.0 + +- [7.0.0-DMR](/releases/release-7.0.0.md): 2023-03-30 + +## 6.6 + +- [6.6.0-DMR](/releases/release-6.6.0.md): 2023-02-20 + +## 6.5 + +- [6.5.3](/releases/release-6.5.3.md): 2023-06-14 +- [6.5.2](/releases/release-6.5.2.md): 2023-04-21 +- [6.5.1](/releases/release-6.5.1.md): 2023-03-10 +- [6.5.0](/releases/release-6.5.0.md): 2022-12-29 + +## 6.4 + +- [6.4.0-DMR](/releases/release-6.4.0.md): 2022-11-17 + +## 6.3 + +- [6.3.0-DMR](/releases/release-6.3.0.md): 2022-09-30 + +## 6.2 + +- [6.2.0-DMR](/releases/release-6.2.0.md): 2022-08-23 + +## 6.1 + +- [6.1.6](/releases/release-6.1.6.md): 2023-04-12 +- [6.1.5](/releases/release-6.1.5.md): 2023-02-28 +- [6.1.4](/releases/release-6.1.4.md): 2023-02-08 +- [6.1.3](/releases/release-6.1.3.md): 2022-12-05 +- [6.1.2](/releases/release-6.1.2.md): 2022-10-24 +- [6.1.1](/releases/release-6.1.1.md): 2022-09-01 +- [6.1.0](/releases/release-6.1.0.md): 2022-06-13 + +## 6.0 + +- [6.0.0-DMR](/releases/release-6.0.0-dmr.md): 2022-04-07 + +## 5.4 + +- [5.4.3](/releases/release-5.4.3.md): 2022-10-13 +- [5.4.2](/releases/release-5.4.2.md): 2022-07-08 +- [5.4.1](/releases/release-5.4.1.md): 2022-05-13 +- [5.4.0](/releases/release-5.4.0.md): 2022-02-15 + +## 5.3 + +- [5.3.4](/releases/release-5.3.4.md): 2022-11-24 +- [5.3.3](/releases/release-5.3.3.md): 2022-09-14 +- [5.3.2](/releases/release-5.3.2.md): 2022-06-29 +- [5.3.1](/releases/release-5.3.1.md): 2022-03-03 +- [5.3.0](/releases/release-5.3.0.md): 2021-11-30 + +## 5.2 + +- [5.2.4](/releases/release-5.2.4.md): 2022-04-26 +- [5.2.3](/releases/release-5.2.3.md): 2021-12-03 +- [5.2.2](/releases/release-5.2.2.md): 2021-10-29 
+- [5.2.1](/releases/release-5.2.1.md): 2021-09-09 +- [5.2.0](/releases/release-5.2.0.md): 2021-08-27 + +## 5.1 + +- [5.1.5](/releases/release-5.1.5.md): 2022-12-28 +- [5.1.4](/releases/release-5.1.4.md): 2022-02-22 +- [5.1.3](/releases/release-5.1.3.md): 2021-12-03 +- [5.1.2](/releases/release-5.1.2.md): 2021-09-27 +- [5.1.1](/releases/release-5.1.1.md): 2021-07-30 +- [5.1.0](/releases/release-5.1.0.md): 2021-06-24 + +## 5.0 + +- [5.0.6](/releases/release-5.0.6.md): 2021-12-31 +- [5.0.5](/releases/release-5.0.5.md): 2021-12-03 +- [5.0.4](/releases/release-5.0.4.md): 2021-09-27 +- [5.0.3](/releases/release-5.0.3.md): 2021-07-02 +- [5.0.2](/releases/release-5.0.2.md): 2021-06-10 +- [5.0.1](/releases/release-5.0.1.md): 2021-04-24 +- [5.0.0](/releases/release-5.0.0.md): 2021-04-07 +- [5.0.0-rc](/releases/release-5.0.0-rc.md): 2021-01-12 + +## 4.0 + +- [4.0.16](/releases/release-4.0.16.md): 2021-12-17 +- [4.0.15](/releases/release-4.0.15.md): 2021-09-27 +- [4.0.14](/releases/release-4.0.14.md): 2021-07-27 +- [4.0.13](/releases/release-4.0.13.md): 2021-05-28 +- [4.0.12](/releases/release-4.0.12.md): 2021-04-02 +- [4.0.11](/releases/release-4.0.11.md): 2021-02-26 +- [4.0.10](/releases/release-4.0.10.md): 2021-01-15 +- [4.0.9](/releases/release-4.0.9.md): 2020-12-21 +- [4.0.8](/releases/release-4.0.8.md): 2020-10-30 +- [4.0.7](/releases/release-4.0.7.md): 2020-09-29 +- [4.0.6](/releases/release-4.0.6.md): 2020-09-15 +- [4.0.5](/releases/release-4.0.5.md): 2020-08-31 +- [4.0.4](/releases/release-4.0.4.md): 2020-07-31 +- [4.0.3](/releases/release-4.0.3.md): 2020-07-24 +- [4.0.2](/releases/release-4.0.2.md): 2020-07-01 +- [4.0.1](/releases/release-4.0.1.md): 2020-06-12 +- [4.0.0](/releases/release-4.0-ga.md): 2020-05-28 +- [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md): 2020-05-15 +- [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md): 2020-04-28 +- [4.0.0-rc](/releases/release-4.0.0-rc.md): 2020-04-08 +- [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md): 2020-03-18 +- 
[4.0.0-beta.1](/releases/release-4.0.0-beta.1.md): 2020-02-28 +- [4.0.0-beta](/releases/release-4.0.0-beta.md): 2020-01-17 + +## 3.1 + +- [3.1.2](/releases/release-3.1.2.md): 2020-06-04 +- [3.1.1](/releases/release-3.1.1.md): 2020-04-30 +- [3.1.0](/releases/release-3.1.0-ga.md): 2020-04-16 +- [3.1.0-rc](/releases/release-3.1.0-rc.md): 2020-04-02 +- [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md): 2020-03-09 +- [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md): 2020-01-10 +- [3.1.0-beta](/releases/release-3.1.0-beta.md): 2019-12-20 + +## 3.0 + +- [3.0.20](/releases/release-3.0.20.md): 2020-12-25 +- [3.0.19](/releases/release-3.0.19.md): 2020-09-25 +- [3.0.18](/releases/release-3.0.18.md): 2020-08-21 +- [3.0.17](/releases/release-3.0.17.md): 2020-08-03 +- [3.0.16](/releases/release-3.0.16.md): 2020-07-03 +- [3.0.15](/releases/release-3.0.15.md): 2020-06-05 +- [3.0.14](/releases/release-3.0.14.md): 2020-05-09 +- [3.0.13](/releases/release-3.0.13.md): 2020-04-22 +- [3.0.12](/releases/release-3.0.12.md): 2020-03-16 +- [3.0.11](/releases/release-3.0.11.md): 2020-03-04 +- [3.0.10](/releases/release-3.0.10.md): 2020-02-20 +- [3.0.9](/releases/release-3.0.9.md): 2020-01-14 +- [3.0.8](/releases/release-3.0.8.md): 2019-12-31 +- [3.0.7](/releases/release-3.0.7.md): 2019-12-04 +- [3.0.6](/releases/release-3.0.6.md): 2019-11-28 +- [3.0.5](/releases/release-3.0.5.md): 2019-10-25 +- [3.0.4](/releases/release-3.0.4.md): 2019-10-08 +- [3.0.3](/releases/release-3.0.3.md): 2019-08-29 +- [3.0.2](/releases/release-3.0.2.md): 2019-08-07 +- [3.0.1](/releases/release-3.0.1.md): 2019-07-16 +- [3.0.0](/releases/release-3.0-ga.md): 2019-06-28 +- [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md): 2019-06-21 +- [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md): 2019-05-28 +- [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md): 2019-05-10 +- [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md): 2019-03-26 +- [3.0.0-beta](/releases/release-3.0-beta.md): 2019-01-19 + +## 2.1 + +- 
[2.1.19](/releases/release-2.1.19.md): 2019-12-27 +- [2.1.18](/releases/release-2.1.18.md): 2019-11-04 +- [2.1.17](/releases/release-2.1.17.md): 2019-09-11 +- [2.1.16](/releases/release-2.1.16.md): 2019-08-15 +- [2.1.15](/releases/release-2.1.15.md): 2019-07-18 +- [2.1.14](/releases/release-2.1.14.md): 2019-07-04 +- [2.1.13](/releases/release-2.1.13.md): 2019-06-21 +- [2.1.12](/releases/release-2.1.12.md): 2019-06-13 +- [2.1.11](/releases/release-2.1.11.md): 2019-06-03 +- [2.1.10](/releases/release-2.1.10.md): 2019-05-22 +- [2.1.9](/releases/release-2.1.9.md): 2019-05-06 +- [2.1.8](/releases/release-2.1.8.md): 2019-04-12 +- [2.1.7](/releases/release-2.1.7.md): 2019-03-28 +- [2.1.6](/releases/release-2.1.6.md): 2019-03-15 +- [2.1.5](/releases/release-2.1.5.md): 2019-02-28 +- [2.1.4](/releases/release-2.1.4.md): 2019-02-15 +- [2.1.3](/releases/release-2.1.3.md): 2019-01-28 +- [2.1.2](/releases/release-2.1.2.md): 2018-12-22 +- [2.1.1](/releases/release-2.1.1.md): 2018-12-12 +- [2.1.0](/releases/release-2.1-ga.md): 2018-11-30 +- [2.1.0-rc.5](/releases/release-2.1-rc.5.md): 2018-11-12 +- [2.1.0-rc.4](/releases/release-2.1-rc.4.md): 2018-10-23 +- [2.1.0-rc.3](/releases/release-2.1-rc.3.md): 2018-09-29 +- [2.1.0-rc.2](/releases/release-2.1-rc.2.md): 2018-09-14 +- [2.1.0-rc.1](/releases/release-2.1-rc.1.md): 2018-08-24 +- [2.1.0-beta](/releases/release-2.1-beta.md): 2018-06-29 + +## 2.0 + +- [2.0.11](/releases/release-2.0.11.md): 2019-01-03 +- [2.0.10](/releases/release-2.0.10.md): 2018-12-18 +- [2.0.9](/releases/release-2.0.9.md): 2018-11-19 +- [2.0.8](/releases/release-2.0.8.md): 2018-10-16 +- [2.0.7](/releases/release-2.0.7.md): 2018-09-07 +- [2.0.6](/releases/release-2.0.6.md): 2018-08-06 +- [2.0.5](/releases/release-2.0.5.md): 2018-07-06 +- [2.0.4](/releases/release-2.0.4.md): 2018-06-15 +- [2.0.3](/releases/release-2.0.3.md): 2018-06-01 +- [2.0.2](/releases/release-2.0.2.md): 2018-05-21 +- [2.0.1](/releases/release-2.0.1.md): 2018-05-16 +- 
[2.0.0](/releases/release-2.0-ga.md): 2018-04-27 +- [2.0.0-rc.5](/releases/release-2.0-rc.5.md): 2018-04-17 +- [2.0.0-rc.4](/releases/release-2.0-rc.4.md): 2018-03-30 +- [2.0.0-rc.3](/releases/release-2.0-rc.3.md): 2018-03-23 +- [2.0.0-rc.1](/releases/release-2.0-rc.1.md): 2018-03-09 +- [1.1.0-beta](/releases/release-1.1-beta.md): 2018-02-24 +- [1.1.0-alpha](/releases/release-1.1-alpha.md): 2018-01-19 + +## 1.0 + +- [1.0.8](/releases/release-1.0.8.md): 2018-02-11 +- [1.0.7](/releases/release-1.0.7.md): 2018-01-22 +- [1.0.6](/releases/release-1.0.6.md): 2018-01-08 +- [1.0.5](/releases/release-1.0.5.md): 2017-12-26 +- [1.0.4](/releases/release-1.0.4.md): 2017-12-11 +- [1.0.3](/releases/release-1.0.3.md): 2017-11-28 +- [1.0.2](/releases/release-1.0.2.md): 2017-11-13 +- [1.0.1](/releases/release-1.0.1.md): 2017-11-01 +- [1.0.0](/releases/release-1.0-ga.md): 2017-10-16 +- [Pre-GA](/releases/release-pre-ga.md): 2017-08-30 +- [rc4](/releases/release-rc.4.md): 2017-08-04 +- [rc3](/releases/release-rc.3.md): 2017-06-16 +- [rc2](/releases/release-rc.2.md): 2017-03-01 +- [rc1](/releases/release-rc.1.md): 2016-12-23 diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/master/releases/release-timeline.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/releases/release-timeline.md new file mode 100644 index 00000000..4ccf13b1 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/releases/release-timeline.md @@ -0,0 +1,172 @@ +--- +title: TiDB Release Timeline +summary: Learn about the TiDB release timeline. +--- + +# TiDB Release Timeline + +This document shows all the released TiDB versions in reverse chronological order. 
+ +| Version | Release Date | +| :--- | :--- | +| [7.2.0-DMR](/releases/release-7.2.0.md) | 2023-06-29 | +| [6.5.3](/releases/release-6.5.3.md) | 2023-06-14 | +| [7.1.0](/releases/release-7.1.0.md) | 2023-05-31 | +| [6.5.2](/releases/release-6.5.2.md) | 2023-04-21 | +| [6.1.6](/releases/release-6.1.6.md) | 2023-04-12 | +| [7.0.0-DMR](/releases/release-7.0.0.md) | 2023-03-30 | +| [6.5.1](/releases/release-6.5.1.md) | 2023-03-10 | +| [6.1.5](/releases/release-6.1.5.md) | 2023-02-28 | +| [6.6.0-DMR](/releases/release-6.6.0.md) | 2023-02-20 | +| [6.1.4](/releases/release-6.1.4.md) | 2023-02-08 | +| [6.5.0](/releases/release-6.5.0.md) | 2022-12-29 | +| [5.1.5](/releases/release-5.1.5.md) | 2022-12-28 | +| [6.1.3](/releases/release-6.1.3.md) | 2022-12-05 | +| [5.3.4](/releases/release-5.3.4.md) | 2022-11-24 | +| [6.4.0-DMR](/releases/release-6.4.0.md) | 2022-11-17 | +| [6.1.2](/releases/release-6.1.2.md) | 2022-10-24 | +| [5.4.3](/releases/release-5.4.3.md) | 2022-10-13 | +| [6.3.0-DMR](/releases/release-6.3.0.md) | 2022-09-30 | +| [5.3.3](/releases/release-5.3.3.md) | 2022-09-14 | +| [6.1.1](/releases/release-6.1.1.md) | 2022-09-01 | +| [6.2.0-DMR](/releases/release-6.2.0.md) | 2022-08-23 | +| [5.4.2](/releases/release-5.4.2.md) | 2022-07-08 | +| [5.3.2](/releases/release-5.3.2.md) | 2022-06-29 | +| [6.1.0](/releases/release-6.1.0.md) | 2022-06-13 | +| [5.4.1](/releases/release-5.4.1.md) | 2022-05-13 | +| [5.2.4](/releases/release-5.2.4.md) | 2022-04-26 | +| [6.0.0-DMR](/releases/release-6.0.0-dmr.md) | 2022-04-07 | +| [5.3.1](/releases/release-5.3.1.md) | 2022-03-03 | +| [5.1.4](/releases/release-5.1.4.md) | 2022-02-22 | +| [5.4.0](/releases/release-5.4.0.md) | 2022-02-15 | +| [5.0.6](/releases/release-5.0.6.md) | 2021-12-31 | +| [4.0.16](/releases/release-4.0.16.md) | 2021-12-17 | +| [5.1.3](/releases/release-5.1.3.md) | 2021-12-03 | +| [5.0.5](/releases/release-5.0.5.md) | 2021-12-03 | +| [5.2.3](/releases/release-5.2.3.md) | 2021-12-03 | +| 
[5.3.0](/releases/release-5.3.0.md) | 2021-11-30 | +| [5.2.2](/releases/release-5.2.2.md) | 2021-10-29 | +| [5.1.2](/releases/release-5.1.2.md) | 2021-09-27 | +| [5.0.4](/releases/release-5.0.4.md) | 2021-09-27 | +| [4.0.15](/releases/release-4.0.15.md) | 2021-09-27 | +| [5.2.1](/releases/release-5.2.1.md) | 2021-09-09 | +| [5.2.0](/releases/release-5.2.0.md) | 2021-08-27 | +| [5.1.1](/releases/release-5.1.1.md) | 2021-07-30 | +| [4.0.14](/releases/release-4.0.14.md) | 2021-07-27 | +| [5.0.3](/releases/release-5.0.3.md) | 2021-07-02 | +| [5.1.0](/releases/release-5.1.0.md) | 2021-06-24 | +| [5.0.2](/releases/release-5.0.2.md) | 2021-06-10 | +| [4.0.13](/releases/release-4.0.13.md) | 2021-05-28 | +| [5.0.1](/releases/release-5.0.1.md) | 2021-04-24 | +| [5.0.0](/releases/release-5.0.0.md) | 2021-04-07 | +| [4.0.12](/releases/release-4.0.12.md) | 2021-04-02 | +| [4.0.11](/releases/release-4.0.11.md) | 2021-02-26 | +| [4.0.10](/releases/release-4.0.10.md) | 2021-01-15 | +| [5.0.0-rc](/releases/release-5.0.0-rc.md) | 2021-01-12 | +| [3.0.20](/releases/release-3.0.20.md) | 2020-12-25 | +| [4.0.9](/releases/release-4.0.9.md) | 2020-12-21 | +| [4.0.8](/releases/release-4.0.8.md) | 2020-10-30 | +| [4.0.7](/releases/release-4.0.7.md) | 2020-09-29 | +| [3.0.19](/releases/release-3.0.19.md) | 2020-09-25 | +| [4.0.6](/releases/release-4.0.6.md) | 2020-09-15 | +| [4.0.5](/releases/release-4.0.5.md) | 2020-08-31 | +| [3.0.18](/releases/release-3.0.18.md) | 2020-08-21 | +| [3.0.17](/releases/release-3.0.17.md) | 2020-08-03 | +| [4.0.4](/releases/release-4.0.4.md) | 2020-07-31 | +| [4.0.3](/releases/release-4.0.3.md) | 2020-07-24 | +| [3.0.16](/releases/release-3.0.16.md) | 2020-07-03 | +| [4.0.2](/releases/release-4.0.2.md) | 2020-07-01 | +| [4.0.1](/releases/release-4.0.1.md) | 2020-06-12 | +| [3.0.15](/releases/release-3.0.15.md) | 2020-06-05 | +| [3.1.2](/releases/release-3.1.2.md) | 2020-06-04 | +| [4.0.0](/releases/release-4.0-ga.md) | 2020-05-28 | +| 
[4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) | 2020-05-15 | +| [3.0.14](/releases/release-3.0.14.md) | 2020-05-09 | +| [3.1.1](/releases/release-3.1.1.md) | 2020-04-30 | +| [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) | 2020-04-28 | +| [3.0.13](/releases/release-3.0.13.md) | 2020-04-22 | +| [3.1.0](/releases/release-3.1.0-ga.md) | 2020-04-16 | +| [4.0.0-rc](/releases/release-4.0.0-rc.md) | 2020-04-08 | +| [3.1.0-rc](/releases/release-3.1.0-rc.md) | 2020-04-02 | +| [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) | 2020-03-18 | +| [3.0.12](/releases/release-3.0.12.md) | 2020-03-16 | +| [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) | 2020-03-09 | +| [3.0.11](/releases/release-3.0.11.md) | 2020-03-04 | +| [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) | 2020-02-28 | +| [3.0.10](/releases/release-3.0.10.md) | 2020-02-20 | +| [4.0.0-beta](/releases/release-4.0.0-beta.md) | 2020-01-17 | +| [3.0.9](/releases/release-3.0.9.md) | 2020-01-14 | +| [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) | 2020-01-10 | +| [3.0.8](/releases/release-3.0.8.md) | 2019-12-31 | +| [2.1.19](/releases/release-2.1.19.md) | 2019-12-27 | +| [3.1.0-beta](/releases/release-3.1.0-beta.md) | 2019-12-20 | +| [3.0.7](/releases/release-3.0.7.md) | 2019-12-04 | +| [3.0.6](/releases/release-3.0.6.md) | 2019-11-28 | +| [2.1.18](/releases/release-2.1.18.md) | 2019-11-04 | +| [3.0.5](/releases/release-3.0.5.md) | 2019-10-25 | +| [3.0.4](/releases/release-3.0.4.md) | 2019-10-08 | +| [2.1.17](/releases/release-2.1.17.md) | 2019-09-11 | +| [3.0.3](/releases/release-3.0.3.md) | 2019-08-29 | +| [2.1.16](/releases/release-2.1.16.md) | 2019-08-15 | +| [3.0.2](/releases/release-3.0.2.md) | 2019-08-07 | +| [2.1.15](/releases/release-2.1.15.md) | 2019-07-18 | +| [3.0.1](/releases/release-3.0.1.md) | 2019-07-16 | +| [2.1.14](/releases/release-2.1.14.md) | 2019-07-04 | +| [3.0.0](/releases/release-3.0-ga.md) | 2019-06-28 | +| [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) | 2019-06-21 | +| 
[2.1.13](/releases/release-2.1.13.md) | 2019-06-21 | +| [2.1.12](/releases/release-2.1.12.md) | 2019-06-13 | +| [2.1.11](/releases/release-2.1.11.md) | 2019-06-03 | +| [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) | 2019-05-28 | +| [2.1.10](/releases/release-2.1.10.md) | 2019-05-22 | +| [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) | 2019-05-10 | +| [2.1.9](/releases/release-2.1.9.md) | 2019-05-06 | +| [2.1.8](/releases/release-2.1.8.md) | 2019-04-12 | +| [2.1.7](/releases/release-2.1.7.md) | 2019-03-28 | +| [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) | 2019-03-26 | +| [2.1.6](/releases/release-2.1.6.md) | 2019-03-15 | +| [2.1.5](/releases/release-2.1.5.md) | 2019-02-28 | +| [2.1.4](/releases/release-2.1.4.md) | 2019-02-15 | +| [2.1.3](/releases/release-2.1.3.md) | 2019-01-28 | +| [3.0.0-beta](/releases/release-3.0-beta.md) | 2019-01-19 | +| [2.0.11](/releases/release-2.0.11.md) | 2019-01-03 | +| [2.1.2](/releases/release-2.1.2.md) | 2018-12-22 | +| [2.0.10](/releases/release-2.0.10.md) | 2018-12-18 | +| [2.1.1](/releases/release-2.1.1.md) | 2018-12-12 | +| [2.1.0](/releases/release-2.1-ga.md) | 2018-11-30 | +| [2.0.9](/releases/release-2.0.9.md) | 2018-11-19 | +| [2.1.0-rc.5](/releases/release-2.1-rc.5.md) | 2018-11-12 | +| [2.1.0-rc.4](/releases/release-2.1-rc.4.md) | 2018-10-23 | +| [2.0.8](/releases/release-2.0.8.md) | 2018-10-16 | +| [2.1.0-rc.3](/releases/release-2.1-rc.3.md) | 2018-09-29 | +| [2.1.0-rc.2](/releases/release-2.1-rc.2.md) | 2018-09-14 | +| [2.0.7](/releases/release-2.0.7.md) | 2018-09-07 | +| [2.1.0-rc.1](/releases/release-2.1-rc.1.md) | 2018-08-24 | +| [2.0.6](/releases/release-2.0.6.md) | 2018-08-06 | +| [2.0.5](/releases/release-2.0.5.md) | 2018-07-06 | +| [2.1.0-beta](/releases/release-2.1-beta.md) | 2018-06-29 | +| [2.0.4](/releases/release-2.0.4.md) | 2018-06-15 | +| [2.0.3](/releases/release-2.0.3.md) | 2018-06-01 | +| [2.0.2](/releases/release-2.0.2.md) | 2018-05-21 | +| [2.0.1](/releases/release-2.0.1.md) | 2018-05-16 | +| 
[2.0.0](/releases/release-2.0-ga.md) | 2018-04-27 | +| [2.0.0-rc.5](/releases/release-2.0-rc.5.md) | 2018-04-17 | +| [2.0.0-rc.4](/releases/release-2.0-rc.4.md) | 2018-03-30 | +| [2.0.0-rc.3](/releases/release-2.0-rc.3.md) | 2018-03-23 | +| [2.0.0-rc.1](/releases/release-2.0-rc.1.md) | 2018-03-09 | +| [1.1.0-beta](/releases/release-1.1-beta.md) | 2018-02-24 | +| [1.0.8](/releases/release-1.0.8.md) | 2018-02-11 | +| [1.0.7](/releases/release-1.0.7.md) | 2018-01-22 | +| [1.1.0-alpha](/releases/release-1.1-alpha.md) | 2018-01-19 | +| [1.0.6](/releases/release-1.0.6.md) | 2018-01-08 | +| [1.0.5](/releases/release-1.0.5.md) | 2017-12-26 | +| [1.0.4](/releases/release-1.0.4.md) | 2017-12-11 | +| [1.0.3](/releases/release-1.0.3.md) | 2017-11-28 | +| [1.0.2](/releases/release-1.0.2.md) | 2017-11-13 | +| [1.0.1](/releases/release-1.0.1.md) | 2017-11-01 | +| [1.0.0](/releases/release-1.0-ga.md) | 2017-10-16 | +| [Pre-GA](/releases/release-pre-ga.md) | 2017-08-30 | +| [rc4](/releases/release-rc.4.md) | 2017-08-04 | +| [rc3](/releases/release-rc.3.md) | 2017-06-16 | +| [rc2](/releases/release-rc.2.md) | 2017-03-01 | +| [rc1](/releases/release-rc.1.md) | 2016-12-23 | \ No newline at end of file diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/master/upgrade-tidb-using-tiup.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/upgrade-tidb-using-tiup.md new file mode 100644 index 00000000..700c2d41 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb/master/upgrade-tidb-using-tiup.md @@ -0,0 +1,292 @@ +--- +title: Upgrade TiDB Using TiUP +summary: Learn how to upgrade TiDB using TiUP. +aliases: ['/docs/dev/upgrade-tidb-using-tiup/','/docs/dev/how-to/upgrade/using-tiup/','/tidb/dev/upgrade-tidb-using-tiup-offline','/docs/dev/upgrade-tidb-using-tiup-offline/'] +--- + +# Upgrade TiDB Using TiUP + +This document is targeted for the following upgrade paths: + +- Upgrade from TiDB 4.0 versions to TiDB 7.1. 
+- Upgrade from TiDB 5.0-5.4 versions to TiDB 7.1. +- Upgrade from TiDB 6.0-6.6 to TiDB 7.1. +- Upgrade from TiDB 7.0 to TiDB 7.1. + +> **Warning:** +> +> 1. You cannot upgrade TiFlash online from versions earlier than 5.3 to 5.3 or later. Instead, you must first stop all the TiFlash instances of the early version, and then upgrade the cluster offline. If other components (such as TiDB and TiKV) do not support an online upgrade, follow the instructions in warnings in [Online upgrade](#online-upgrade). +> 2. **DO NOT** run DDL statements during the upgrade process. Otherwise, the issue of undefined behavior might occur. +> 3. **DO NOT** upgrade a TiDB cluster when a DDL statement is being executed in the cluster (usually for the time-consuming DDL statements such as `ADD INDEX` and the column type changes). Before the upgrade, it is recommended to use the [`ADMIN SHOW DDL`](/sql-statements/sql-statement-admin-show-ddl.md) command to check whether the TiDB cluster has an ongoing DDL job. If the cluster has a DDL job, to upgrade the cluster, wait until the DDL execution is finished or use the [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) command to cancel the DDL job before you upgrade the cluster. +> +> If the TiDB version before upgrade is v7.1.0 or later, you can ignore the preceding warnings 2 and 3. For more information, see [TiDB Smooth Upgrade](/smooth-upgrade-tidb.md). + +> **Note:** +> +> If your cluster to be upgraded is v3.1 or an earlier version (v3.0 or v2.1), the direct upgrade to v7.1.0 is not supported. You need to upgrade your cluster first to v4.0 and then to v7.1.0. + +## Upgrade caveat + +- TiDB currently does not support version downgrade or rolling back to an earlier version after the upgrade. 
+- For the v4.0 cluster managed using TiDB Ansible, you need to import the cluster to TiUP (`tiup cluster`) for new management according to [Upgrade TiDB Using TiUP (v4.0)](https://docs.pingcap.com/tidb/v4.0/upgrade-tidb-using-tiup#import-tidb-ansible-and-the-inventoryini-configuration-to-tiup). Then you can upgrade the cluster to v7.1.0 according to this document. +- To update versions earlier than v3.0 to v7.1.0: + 1. Update this version to 3.0 using [TiDB Ansible](https://docs.pingcap.com/tidb/v3.0/upgrade-tidb-using-ansible). + 2. Use TiUP (`tiup cluster`) to import the TiDB Ansible configuration. + 3. Update the 3.0 version to 4.0 according to [Upgrade TiDB Using TiUP (v4.0)](https://docs.pingcap.com/tidb/v4.0/upgrade-tidb-using-tiup#import-tidb-ansible-and-the-inventoryini-configuration-to-tiup). + 4. Upgrade the cluster to v7.1.0 according to this document. +- Support upgrading the versions of TiDB Binlog, TiCDC, TiFlash, and other components. +- When upgrading TiFlash from versions earlier than v6.3.0 to v6.3.0 and later versions, note that the CPU must support the AVX2 instruction set under the Linux AMD64 architecture and the ARMv8 instruction set architecture under the Linux ARM64 architecture. For details, see the description in [v6.3.0 Release Notes](/releases/release-6.3.0.md#others). +- For detailed compatibility changes of different versions, see the [Release Notes](/releases/release-notes.md) of each version. Modify your cluster configuration according to the "Compatibility Changes" section of the corresponding release notes. +- For clusters that upgrade from versions earlier than v5.3 to v5.3 or later versions, the default deployed Prometheus will upgrade from v2.8.1 to v2.27.1. Prometheus v2.27.1 provides more features and fixes a security issue. Compared with v2.8.1, alert time representation in v2.27.1 is changed. 
For more details, see [Prometheus commit](https://github.com/prometheus/prometheus/commit/7646cbca328278585be15fa615e22f2a50b47d06).
+ +{{< copyable "shell-regular" >}} + +```shell +tar xzvf tidb-community-server-${version}-linux-amd64.tar.gz +sh tidb-community-server-${version}-linux-amd64/local_install.sh +source /home/tidb/.bash_profile +``` + +After the overwrite upgrade, run the following command to merge the server and toolkit offline mirrors to the server directory: + +{{< copyable "shell-regular" >}} + +```bash +tar xf tidb-community-toolkit-${version}-linux-amd64.tar.gz +ls -ld tidb-community-server-${version}-linux-amd64 tidb-community-toolkit-${version}-linux-amd64 +cd tidb-community-server-${version}-linux-amd64/ +cp -rp keys ~/.tiup/ +tiup mirror merge ../tidb-community-toolkit-${version}-linux-amd64 +``` + +After merging the mirrors, run the following command to upgrade the TiUP Cluster component: + +{{< copyable "shell-regular" >}} + +```shell +tiup update cluster +``` + +Now, the offline mirror has been upgraded successfully. If an error occurs during TiUP operation after the overwriting, it might be that the `manifest` is not updated. You can try `rm -rf ~/.tiup/manifests/*` before running TiUP again. + +### Step 3: Edit TiUP topology configuration file + +> **Note:** +> +> Skip this step if one of the following situations applies: +> +> + You have not modified the configuration parameters of the original cluster. Or you have modified the configuration parameters using `tiup cluster` but no more modification is needed. +> + After the upgrade, you want to use v7.1.0's default parameter values for the unmodified configuration items. + +1. Enter the `vi` editing mode to edit the topology file: + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster edit-config + ``` + +2. Refer to the format of [topology](https://github.com/pingcap/tiup/blob/master/embed/examples/cluster/topology.example.yaml) configuration template and fill the parameters you want to modify in the `server_configs` section of the topology file. + +3. 
After the modification, enter `:` + `w` + `q` to save the change and exit the editing mode. Enter `Y` to confirm the change. + +> **Note:** +> +> Before you upgrade the cluster to v7.1.0, make sure that the parameters you have modified in v4.0 are compatible in v7.1.0.
+- Cluster backup: It is recommended to execute the [`SHOW [BACKUPS|RESTORES]`](/sql-statements/sql-statement-show-backups.md) statement to check whether there is an ongoing backup or restore task in the cluster. If yes, wait for its completion before performing an upgrade. + +## Upgrade the TiDB cluster + +This section describes how to upgrade the TiDB cluster and verify the version after the upgrade. + +### Upgrade the TiDB cluster to a specified version + +You can upgrade your cluster in one of the two ways: online upgrade and offline upgrade. + +By default, TiUP Cluster upgrades the TiDB cluster using the online method, which means that the TiDB cluster can still provide services during the upgrade process. With the online method, the leaders are migrated one by one on each node before the upgrade and restart. Therefore, for a large-scale cluster, it takes a long time to complete the entire upgrade operation. + +If your application has a maintenance window for the database to be stopped for maintenance, you can use the offline upgrade method to quickly perform the upgrade operation. + +#### Online upgrade + +{{< copyable "shell-regular" >}} + +```shell +tiup cluster upgrade +``` + +For example, if you want to upgrade the cluster to v7.1.0: + +{{< copyable "shell-regular" >}} + +```shell +tiup cluster upgrade v7.1.0 +``` + +> **Note:** +> +> + An online upgrade upgrades all components one by one. During the upgrade of TiKV, all leaders in a TiKV instance are evicted before stopping the instance. The default timeout time is 5 minutes (300 seconds). The instance is directly stopped after this timeout time. +> +> + You can use the `--force` parameter to upgrade the cluster immediately without evicting the leader. However, the errors that occur during the upgrade will be ignored, which means that you are not notified of any upgrade failure. Therefore, use the `--force` parameter with caution. 
+> +> + To keep a stable performance, make sure that all leaders in a TiKV instance are evicted before stopping the instance. You can set `--transfer-timeout` to a larger value, for example, `--transfer-timeout 3600` (unit: second). +> +> + To upgrade TiFlash from versions earlier than 5.3 to 5.3 or later, you should stop TiFlash and then upgrade it. The following steps help you upgrade TiFlash without interrupting other components: +> 1. Stop the TiFlash instance: `tiup cluster stop <cluster-name> -R tiflash` +> 2. Upgrade the TiDB cluster without restarting it (only updating the files): `tiup cluster upgrade <cluster-name> <version> --offline`, such as `tiup cluster upgrade <cluster-name> v6.3.0 --offline` +> 3. Reload the TiDB cluster: `tiup cluster reload <cluster-name>`. After the reload, the TiFlash instance is started and you do not need to manually start it. +> +> + Try to avoid creating a new clustered index table when you apply rolling updates to the clusters using TiDB Binlog. + +#### Offline upgrade + +1. Before the offline upgrade, you first need to stop the entire cluster. + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster stop <cluster-name> + ``` + +2. Use the `upgrade` command with the `--offline` option to perform the offline upgrade. Fill in the name of your cluster for `<cluster-name>` and the version to upgrade to for `<version>`, such as `v7.1.0`. + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster upgrade <cluster-name> <version> --offline + ``` + +3. After the upgrade, the cluster will not be automatically restarted. You need to use the `start` command to restart it. + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster start <cluster-name> + ``` + +### Verify the cluster version + +Execute the `display` command to view the latest cluster version `TiDB Version`: + +{{< copyable "shell-regular" >}} + +```shell +tiup cluster display <cluster-name> +``` + +``` +Cluster type: tidb +Cluster name: <cluster-name> +Cluster version: v7.1.0 +``` + +## FAQ + +This section describes common problems encountered when updating the TiDB cluster using TiUP. 
+ +### If an error occurs and the upgrade is interrupted, how to resume the upgrade after fixing this error? + +Re-execute the `tiup cluster upgrade` command to resume the upgrade. The upgrade operation restarts the nodes that have been previously upgraded. If you do not want the upgraded nodes to be restarted, use the `replay` sub-command to retry the operation: + +1. Execute `tiup cluster audit` to see the operation records: + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster audit + ``` + + Find the failed upgrade operation record and keep the ID of this operation record. The ID is the `<audit-id>` value in the next step. + +2. Execute `tiup cluster replay <audit-id>` to retry the corresponding operation: + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster replay <audit-id> + ``` + +### The evict leader has waited too long during the upgrade. How to skip this step for a quick upgrade? + +You can specify `--force`. Then the processes of transferring PD leader and evicting TiKV leader are skipped during the upgrade. The cluster is directly restarted to update the version, which has a great impact on the cluster that runs online. In the following command, `<version>` is the version to upgrade to, such as `v7.1.0`. + +{{< copyable "shell-regular" >}} + +```shell +tiup cluster upgrade <cluster-name> <version> --force +``` + +### How to update the version of tools such as pd-ctl after upgrading the TiDB cluster? 
+ +You can upgrade the tool version by using TiUP to install the `ctl` component of the corresponding version: + +{{< copyable "shell-regular" >}} + +```shell +tiup install ctl:v7.1.0 +``` diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/TOC.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/TOC.md new file mode 100644 index 00000000..fb1aa16b --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/TOC.md @@ -0,0 +1,1169 @@ + + + +- [Docs Home](https://docs.pingcap.com/) +- About TiDB + - [TiDB Introduction](/overview.md) + - [TiDB 7.2 Release Notes](/releases/release-7.2.0.md) + - [Features](/basic-features.md) + - [MySQL Compatibility](/mysql-compatibility.md) + - [TiDB Limitations](/tidb-limitations.md) + - [Credits](/credits.md) + - [Roadmap](/tidb-roadmap.md) +- Quick Start + - [Try Out TiDB](/quick-start-with-tidb.md) + - [Try Out HTAP](/quick-start-with-htap.md) + - [Learn TiDB SQL](/basic-sql-operations.md) + - [Learn HTAP](/explore-htap.md) + - [Import Example Database](/import-example-data.md) +- Develop + - [Overview](/develop/dev-guide-overview.md) + - Quick Start + - [Build a TiDB Serverless Cluster](/develop/dev-guide-build-cluster-in-cloud.md) + - [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) + - Example Applications + - Java + - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) + - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) + - [Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) + - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + - Go + - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) + - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) + - Python + - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) + - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) + - 
[PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) + - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) + - [peewee](/develop/dev-guide-sample-application-python-peewee.md) + - Connect to TiDB + - [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) + - [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md) + - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) + - Design Database Schema + - [Overview](/develop/dev-guide-schema-design-overview.md) + - [Create a Database](/develop/dev-guide-create-database.md) + - [Create a Table](/develop/dev-guide-create-table.md) + - [Create a Secondary Index](/develop/dev-guide-create-secondary-indexes.md) + - Write Data + - [Insert Data](/develop/dev-guide-insert-data.md) + - [Update Data](/develop/dev-guide-update-data.md) + - [Delete Data](/develop/dev-guide-delete-data.md) + - [Periodically Delete Data Using Time to Live](/time-to-live.md) + - [Prepared Statements](/develop/dev-guide-prepared-statement.md) + - Read Data + - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) + - [Multi-table Join Queries](/develop/dev-guide-join-tables.md) + - [Subquery](/develop/dev-guide-use-subqueries.md) + - [Paginate Results](/develop/dev-guide-paginate-results.md) + - [Views](/develop/dev-guide-use-views.md) + - [Temporary Tables](/develop/dev-guide-use-temporary-tables.md) + - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) + - Read Replica Data + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - Transaction + - [Overview](/develop/dev-guide-transaction-overview.md) + - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) 
+ - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) + - Optimize + - [Overview](/develop/dev-guide-optimize-sql-overview.md) + - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) + - [Best Practices for Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) + - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) + - Other Optimization Methods + - [Avoid Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) + - [Unique Serial Number Generation](/develop/dev-guide-unique-serial-number-generation.md) + - Troubleshoot + - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) + - [Unstable Result Set](/develop/dev-guide-unstable-result-set.md) + - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) + - Reference + - [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) + - Guidelines + - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) + - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) + - Legacy Docs + - [For Django](/develop/dev-guide-outdated-for-django.md) + - Cloud Native Development Environment + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - Third-Party Support + - [Third-Party Tools Supported by TiDB](/develop/dev-guide-third-party-support.md) + - [Known Incompatibility Issues with Third-Party Tools](/develop/dev-guide-third-party-tools-compatibility.md) + - [ProxySQL Integration Guide](/develop/dev-guide-proxysql-integration.md) + - [Amazon AppFlow Integration Guide](/develop/dev-guide-aws-appflow-integration.md) +- Deploy + - [Software and Hardware Requirements](/hardware-and-software-requirements.md) + - [Environment Configuration Checklist](/check-before-deployment.md) + - Plan Cluster Topology + - [Minimal Topology](/minimal-deployment-topology.md) + - [TiFlash Topology](/tiflash-deployment-topology.md) + - [TiCDC 
Topology](/ticdc-deployment-topology.md) + - [TiDB Binlog Topology](/tidb-binlog-deployment-topology.md) + - [TiSpark Topology](/tispark-deployment-topology.md) + - [Cross-DC Topology](/geo-distributed-deployment-topology.md) + - [Hybrid Topology](/hybrid-deployment-topology.md) + - Install and Start + - [Use TiUP](/production-deployment-using-tiup.md) + - [Deploy on Kubernetes](/tidb-in-kubernetes.md) + - [Verify Cluster Status](/post-installation-check.md) + - Test Cluster Performance + - [Test TiDB Using Sysbench](/benchmark/benchmark-tidb-using-sysbench.md) + - [Test TiDB Using TPC-C](/benchmark/benchmark-tidb-using-tpcc.md) + - [Test TiDB Using CH-benCHmark](/benchmark/benchmark-tidb-using-ch.md) +- Migrate + - [Overview](/migration-overview.md) + - [Migration Tools](/migration-tools.md) + - Migration Scenarios + - [Migrate from Aurora](/migrate-aurora-to-tidb.md) + - [Migrate MySQL of Small Datasets](/migrate-small-mysql-to-tidb.md) + - [Migrate MySQL of Large Datasets](/migrate-large-mysql-to-tidb.md) + - [Migrate and Merge MySQL Shards of Small Datasets](/migrate-small-mysql-shards-to-tidb.md) + - [Migrate and Merge MySQL Shards of Large Datasets](/migrate-large-mysql-shards-to-tidb.md) + - [Migrate from CSV Files](/migrate-from-csv-files-to-tidb.md) + - [Migrate from SQL Files](/migrate-from-sql-files-to-tidb.md) + - [Migrate from Parquet Files](/migrate-from-parquet-files-to-tidb.md) + - [Migrate from One TiDB Cluster to Another TiDB Cluster](/migrate-from-tidb-to-tidb.md) + - [Migrate from TiDB to MySQL-compatible Databases](/migrate-from-tidb-to-mysql.md) + - Advanced Migration + - [Continuous Replication with gh-ost or pt-osc](/migrate-with-pt-ghost.md) + - [Migrate to a Downstream Table with More Columns](/migrate-with-more-columns-downstream.md) + - [Filter Binlog Events](/filter-binlog-event.md) + - [Filter DML Events Using SQL Expressions](/filter-dml-event.md) +- Integrate + - [Overview](/integration-overview.md) + - Integration Scenarios + - 
[Integrate with Confluent and Snowflake](/ticdc/integrate-confluent-using-ticdc.md) + - [Integrate with Apache Kafka and Apache Flink](/replicate-data-to-kafka.md) +- Maintain + - Upgrade + - [Use TiUP](/upgrade-tidb-using-tiup.md) + - [Use TiDB Operator](https://docs.pingcap.com/tidb-in-kubernetes/stable/upgrade-a-tidb-cluster) + - [TiDB Smooth Upgrade](/smooth-upgrade-tidb.md) + - [TiFlash v6.2.0 Upgrade Guide](/tiflash-620-upgrade-guide.md) + - Scale + - [Use TiUP (Recommended)](/scale-tidb-using-tiup.md) + - [Use TiDB Operator](https://docs.pingcap.com/tidb-in-kubernetes/stable/scale-a-tidb-cluster) + - Backup and Restore + - [Overview](/br/backup-and-restore-overview.md) + - Architecture + - [Architecture Overview](/br/backup-and-restore-design.md) + - [Snapshot Backup and Restore Architecture](/br/br-snapshot-architecture.md) + - [Log Backup and PITR Architecture](/br/br-log-architecture.md) + - Use BR + - [Use Overview](/br/br-use-overview.md) + - [Snapshot Backup and Restore Guide](/br/br-snapshot-guide.md) + - [Log Backup and PITR Guide](/br/br-pitr-guide.md) + - [Use Cases](/br/backup-and-restore-use-cases.md) + - [Backup Storages](/br/backup-and-restore-storages.md) + - BR CLI Manuals + - [Overview](/br/use-br-command-line-tool.md) + - [Snapshot Backup and Restore Command Manual](/br/br-snapshot-manual.md) + - [Log Backup and PITR Command Manual](/br/br-pitr-manual.md) + - References + - BR Features + - [Backup Auto-Tune](/br/br-auto-tune.md) + - [Batch Create Table](/br/br-batch-create-table.md) + - [Checkpoint Backup](/br/br-checkpoint-backup.md) + - [Checkpoint Restore](/br/br-checkpoint-restore.md) + - [Back up and Restore Data Using Dumpling and TiDB Lightning](/backup-and-restore-using-dumpling-lightning.md) + - [Back Up and Restore RawKV](/br/rawkv-backup-and-restore.md) + - [Incremental Backup and Restore](/br/br-incremental-guide.md) + - Cluster Disaster Recovery (DR) + - [DR Solutions Overview](/dr-solution-introduction.md) + - 
[Primary-Secondary DR](/dr-secondary-cluster.md) + - [Multi-Replica Cluster DR](/dr-multi-replica.md) + - [BR-based DR](/dr-backup-restore.md) + - [Resource Control](/tidb-resource-control.md) + - [Configure Time Zone](/configure-time-zone.md) + - [Daily Checklist](/daily-check.md) + - [Maintain TiFlash](/tiflash/maintain-tiflash.md) + - [Maintain TiDB Using TiUP](/maintain-tidb-using-tiup.md) + - [Modify Configuration Dynamically](/dynamic-config.md) + - [Online Unsafe Recovery](/online-unsafe-recovery.md) + - [Replicate Data Between Primary and Secondary Clusters](/replicate-between-primary-and-secondary-clusters.md) +- Monitor and Alert + - [Monitoring Framework Overview](/tidb-monitoring-framework.md) + - [Monitoring API](/tidb-monitoring-api.md) + - [Deploy Monitoring Services](/deploy-monitoring-services.md) + - [Export Grafana Snapshots](/exporting-grafana-snapshots.md) + - [TiDB Cluster Alert Rules](/alert-rules.md) + - [TiFlash Alert Rules](/tiflash/tiflash-alert-rules.md) + - [Customize Configurations of Monitoring Servers](/tiup/customized-montior-in-tiup-environment.md) + - [BR Monitoring and Alert](/br/br-monitoring-and-alert.md) +- Troubleshoot + - Issue Summary + - [TiDB Troubleshooting Map](/tidb-troubleshooting-map.md) + - [Troubleshoot TiDB Cluster Setup](/troubleshoot-tidb-cluster.md) + - [Troubleshoot TiFlash](/tiflash/troubleshoot-tiflash.md) + - Issue Scenarios + - Slow Queries + - [Identify Slow Queries](/identify-slow-queries.md) + - [Analyze Slow Queries](/analyze-slow-queries.md) + - [TiDB OOM](/troubleshoot-tidb-oom.md) + - [Hotspot](/troubleshoot-hot-spot-issues.md) + - [Increased Read and Write Latency](/troubleshoot-cpu-issues.md) + - [Write Conflicts in Optimistic Transactions](/troubleshoot-write-conflicts.md) + - [High Disk I/O Usage](/troubleshoot-high-disk-io.md) + - [Lock Conflicts](/troubleshoot-lock-conflicts.md) + - [Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) + - Diagnostic Methods + - 
[SQL Diagnostics](/information-schema/information-schema-sql-diagnostics.md) + - [Statement Summary Tables](/statement-summary-tables.md) + - [Identify Expensive Queries Using Top SQL](/dashboard/top-sql.md) + - [Identify Expensive Queries Using Logs](/identify-expensive-queries.md) + - [Save and Restore the On-Site Information of a Cluster](/sql-plan-replayer.md) + - [Support Resources](/support.md) +- Performance Tuning + - Tuning Guide + - [Performance Tuning Overview](/performance-tuning-overview.md) + - [Performance Analysis and Tuning](/performance-tuning-methods.md) + - [Performance Tuning Practices for OLTP Scenarios](/performance-tuning-practices.md) + - [Latency Breakdown](/latency-breakdown.md) + - [TiDB Best Practices on Public Cloud](/best-practices-on-public-cloud.md) + - Configuration Tuning + - [Tune Operating System Performance](/tune-operating-system.md) + - [Tune TiDB Memory](/configure-memory-usage.md) + - [Tune TiKV Threads](/tune-tikv-thread-performance.md) + - [Tune TiKV Memory](/tune-tikv-memory-performance.md) + - [TiKV Follower Read](/follower-read.md) + - [Tune Region Performance](/tune-region-performance.md) + - [Tune TiFlash Performance](/tiflash/tune-tiflash-performance.md) + - [Coprocessor Cache](/coprocessor-cache.md) + - Garbage Collection (GC) + - [Overview](/garbage-collection-overview.md) + - [Configuration](/garbage-collection-configuration.md) + - SQL Tuning + - [Overview](/sql-tuning-overview.md) + - Understanding the Query Execution Plan + - [Overview](/explain-overview.md) + - [`EXPLAIN` Walkthrough](/explain-walkthrough.md) + - [Indexes](/explain-indexes.md) + - [Joins](/explain-joins.md) + - [MPP Queries](/explain-mpp.md) + - [Subqueries](/explain-subqueries.md) + - [Aggregation](/explain-aggregation.md) + - [Views](/explain-views.md) + - [Partitions](/explain-partitions.md) + - [Index Merge](/explain-index-merge.md) + - SQL Optimization Process + - [Overview](/sql-optimization-concepts.md) + - Logic Optimization + - 
[Overview](/sql-logical-optimization.md) + - [Subquery Related Optimizations](/subquery-optimization.md) + - [Column Pruning](/column-pruning.md) + - [Decorrelation of Correlated Subquery](/correlated-subquery-optimization.md) + - [Eliminate Max/Min](/max-min-eliminate.md) + - [Predicates Push Down](/predicate-push-down.md) + - [Partition Pruning](/partition-pruning.md) + - [TopN and Limit Push Down](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - [Derive TopN or Limit from Window Functions](/derive-topn-from-window.md) + - Physical Optimization + - [Overview](/sql-physical-optimization.md) + - [Index Selection](/choose-index.md) + - [Statistics](/statistics.md) + - [Extended Statistics](/extended-statistics.md) + - [Wrong Index Solution](/wrong-index-solution.md) + - [Distinct Optimization](/agg-distinct-optimization.md) + - [Cost Model](/cost-model.md) + - [Prepared Execution Plan Cache](/sql-prepared-plan-cache.md) + - [Non-Prepared Execution Plan Cache](/sql-non-prepared-plan-cache.md) + - Control Execution Plans + - [Overview](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [SQL Plan Management](/sql-plan-management.md) + - [The Blocklist of Optimization Rules and Expression Pushdown](/blocklist-control-plan.md) + - [Optimizer Fix Controls](/optimizer-fix-controls.md) +- Tutorials + - [Multiple Availability Zones in One Region Deployment](/multi-data-centers-in-one-city-deployment.md) + - [Three Availability Zones in Two Regions Deployment](/three-data-centers-in-two-cities-deployment.md) + - [Two Availability Zones in One Region Deployment](/two-data-centers-in-one-city-deployment.md) + - Read Historical Data + - Use Stale Read (Recommended) + - [Usage Scenarios of Stale Read](/stale-read.md) + - [Perform Stale Read Using `As OF TIMESTAMP`](/as-of-timestamp.md) + - [Perform Stale Read Using `tidb_read_staleness`](/tidb-read-staleness.md) + - [Perform Stale Read Using `tidb_external_ts`](/tidb-external-ts.md) + - 
[Use the `tidb_snapshot` System Variable](/read-historical-data.md) + - Best Practices + - [Use TiDB](/best-practices/tidb-best-practices.md) + - [Java Application Development](/best-practices/java-app-best-practices.md) + - [Use HAProxy](/best-practices/haproxy-best-practices.md) + - [Highly Concurrent Write](/best-practices/high-concurrency-best-practices.md) + - [Grafana Monitoring](/best-practices/grafana-monitor-best-practices.md) + - [PD Scheduling](/best-practices/pd-scheduling-best-practices.md) + - [TiKV Performance Tuning with Massive Regions](/best-practices/massive-regions-best-practices.md) + - [Three-node Hybrid Deployment](/best-practices/three-nodes-hybrid-deployment.md) + - [Local Read Under Three Data Centers Deployment](/best-practices/three-dc-local-read.md) + - [Use UUIDs](/best-practices/uuid.md) + - [Read-Only Storage Nodes](/best-practices/readonly-nodes.md) + - [Use Placement Rules](/configure-placement-rules.md) + - [Use Load Base Split](/configure-load-base-split.md) + - [Use Store Limit](/configure-store-limit.md) + - [DDL Execution Principles and Best Practices](/ddl-introduction.md) +- TiDB Tools + - [Overview](/ecosystem-tool-user-guide.md) + - [Use Cases](/ecosystem-tool-user-case.md) + - [Download](/download-ecosystem-tools.md) + - TiUP + - [Documentation Map](/tiup/tiup-documentation-guide.md) + - [Overview](/tiup/tiup-overview.md) + - [Terminology and Concepts](/tiup/tiup-terminology-and-concepts.md) + - [Manage TiUP Components](/tiup/tiup-component-management.md) + - [FAQ](/tiup/tiup-faq.md) + - [Troubleshooting Guide](/tiup/tiup-troubleshooting-guide.md) + - Command Reference + - [Overview](/tiup/tiup-reference.md) + - TiUP Commands + - [tiup clean](/tiup/tiup-command-clean.md) + - [tiup completion](/tiup/tiup-command-completion.md) + - [tiup env](/tiup/tiup-command-env.md) + - [tiup help](/tiup/tiup-command-help.md) + - [tiup install](/tiup/tiup-command-install.md) + - [tiup list](/tiup/tiup-command-list.md) + - tiup mirror + - 
[Overview](/tiup/tiup-command-mirror.md) + - [tiup mirror clone](/tiup/tiup-command-mirror-clone.md) + - [tiup mirror genkey](/tiup/tiup-command-mirror-genkey.md) + - [tiup mirror grant](/tiup/tiup-command-mirror-grant.md) + - [tiup mirror init](/tiup/tiup-command-mirror-init.md) + - [tiup mirror merge](/tiup/tiup-command-mirror-merge.md) + - [tiup mirror modify](/tiup/tiup-command-mirror-modify.md) + - [tiup mirror publish](/tiup/tiup-command-mirror-publish.md) + - [tiup mirror rotate](/tiup/tiup-command-mirror-rotate.md) + - [tiup mirror set](/tiup/tiup-command-mirror-set.md) + - [tiup mirror sign](/tiup/tiup-command-mirror-sign.md) + - [tiup status](/tiup/tiup-command-status.md) + - [tiup telemetry](/tiup/tiup-command-telemetry.md) + - [tiup uninstall](/tiup/tiup-command-uninstall.md) + - [tiup update](/tiup/tiup-command-update.md) + - TiUP Cluster Commands + - [Overview](/tiup/tiup-component-cluster.md) + - [tiup cluster audit](/tiup/tiup-component-cluster-audit.md) + - [tiup cluster audit cleanup](/tiup/tiup-component-cluster-audit-cleanup.md) + - [tiup cluster check](/tiup/tiup-component-cluster-check.md) + - [tiup cluster clean](/tiup/tiup-component-cluster-clean.md) + - [tiup cluster deploy](/tiup/tiup-component-cluster-deploy.md) + - [tiup cluster destroy](/tiup/tiup-component-cluster-destroy.md) + - [tiup cluster disable](/tiup/tiup-component-cluster-disable.md) + - [tiup cluster display](/tiup/tiup-component-cluster-display.md) + - [tiup cluster edit-config](/tiup/tiup-component-cluster-edit-config.md) + - [tiup cluster enable](/tiup/tiup-component-cluster-enable.md) + - [tiup cluster help](/tiup/tiup-component-cluster-help.md) + - [tiup cluster import](/tiup/tiup-component-cluster-import.md) + - [tiup cluster list](/tiup/tiup-component-cluster-list.md) + - [tiup cluster meta backup](/tiup/tiup-component-cluster-meta-backup.md) + - [tiup cluster meta restore](/tiup/tiup-component-cluster-meta-restore.md) + - [tiup cluster 
patch](/tiup/tiup-component-cluster-patch.md) + - [tiup cluster prune](/tiup/tiup-component-cluster-prune.md) + - [tiup cluster reload](/tiup/tiup-component-cluster-reload.md) + - [tiup cluster rename](/tiup/tiup-component-cluster-rename.md) + - [tiup cluster replay](/tiup/tiup-component-cluster-replay.md) + - [tiup cluster restart](/tiup/tiup-component-cluster-restart.md) + - [tiup cluster scale-in](/tiup/tiup-component-cluster-scale-in.md) + - [tiup cluster scale-out](/tiup/tiup-component-cluster-scale-out.md) + - [tiup cluster start](/tiup/tiup-component-cluster-start.md) + - [tiup cluster stop](/tiup/tiup-component-cluster-stop.md) + - [tiup cluster template](/tiup/tiup-component-cluster-template.md) + - [tiup cluster upgrade](/tiup/tiup-component-cluster-upgrade.md) + - TiUP DM Commands + - [Overview](/tiup/tiup-component-dm.md) + - [tiup dm audit](/tiup/tiup-component-dm-audit.md) + - [tiup dm deploy](/tiup/tiup-component-dm-deploy.md) + - [tiup dm destroy](/tiup/tiup-component-dm-destroy.md) + - [tiup dm disable](/tiup/tiup-component-dm-disable.md) + - [tiup dm display](/tiup/tiup-component-dm-display.md) + - [tiup dm edit-config](/tiup/tiup-component-dm-edit-config.md) + - [tiup dm enable](/tiup/tiup-component-dm-enable.md) + - [tiup dm help](/tiup/tiup-component-dm-help.md) + - [tiup dm import](/tiup/tiup-component-dm-import.md) + - [tiup dm list](/tiup/tiup-component-dm-list.md) + - [tiup dm patch](/tiup/tiup-component-dm-patch.md) + - [tiup dm prune](/tiup/tiup-component-dm-prune.md) + - [tiup dm reload](/tiup/tiup-component-dm-reload.md) + - [tiup dm replay](/tiup/tiup-component-dm-replay.md) + - [tiup dm restart](/tiup/tiup-component-dm-restart.md) + - [tiup dm scale-in](/tiup/tiup-component-dm-scale-in.md) + - [tiup dm scale-out](/tiup/tiup-component-dm-scale-out.md) + - [tiup dm start](/tiup/tiup-component-dm-start.md) + - [tiup dm stop](/tiup/tiup-component-dm-stop.md) + - [tiup dm template](/tiup/tiup-component-dm-template.md) + - [tiup dm 
upgrade](/tiup/tiup-component-dm-upgrade.md) + - [TiDB Cluster Topology Reference](/tiup/tiup-cluster-topology-reference.md) + - [DM Cluster Topology Reference](/tiup/tiup-dm-topology-reference.md) + - [Mirror Reference Guide](/tiup/tiup-mirror-reference.md) + - TiUP Components + - [tiup-playground](/tiup/tiup-playground.md) + - [tiup-cluster](/tiup/tiup-cluster.md) + - [tiup-mirror](/tiup/tiup-mirror.md) + - [tiup-bench](/tiup/tiup-bench.md) + - [TiDB Operator](/tidb-operator-overview.md) + - TiDB Data Migration + - [About TiDB Data Migration](/dm/dm-overview.md) + - [Architecture](/dm/dm-arch.md) + - [Quick Start](/dm/quick-start-with-dm.md) + - [Best Practices](/dm/dm-best-practices.md) + - Deploy a DM cluster + - [Hardware and Software Requirements](/dm/dm-hardware-and-software-requirements.md) + - [Use TiUP (Recommended)](/dm/deploy-a-dm-cluster-using-tiup.md) + - [Use TiUP Offline](/dm/deploy-a-dm-cluster-using-tiup-offline.md) + - [Use Binary](/dm/deploy-a-dm-cluster-using-binary.md) + - [Use Kubernetes](https://docs.pingcap.com/tidb-in-kubernetes/dev/deploy-tidb-dm) + - Tutorials + - [Create a Data Source](/dm/quick-start-create-source.md) + - [Manage Data Sources](/dm/dm-manage-source.md) + - [Configure Tasks](/dm/dm-task-configuration-guide.md) + - [Shard Merge](/dm/dm-shard-merge.md) + - [Table Routing](/dm/dm-table-routing.md) + - [Block and Allow Lists](/dm/dm-block-allow-table-lists.md) + - [Binlog Event Filter](/dm/dm-binlog-event-filter.md) + - [Filter DMLs Using SQL Expressions](/dm/feature-expression-filter.md) + - [Online DDL Tool Support](/dm/dm-online-ddl-tool-support.md) + - Manage a Data Migration Task + - [Precheck a Task](/dm/dm-precheck.md) + - [Create a Task](/dm/dm-create-task.md) + - [Query Status](/dm/dm-query-status.md) + - [Pause a Task](/dm/dm-pause-task.md) + - [Resume a Task](/dm/dm-resume-task.md) + - [Stop a Task](/dm/dm-stop-task.md) + - Advanced Tutorials + - Merge and Migrate Data from Sharded Tables + - 
[Overview](/dm/feature-shard-merge.md) + - [Pessimistic Mode](/dm/feature-shard-merge-pessimistic.md) + - [Optimistic Mode](/dm/feature-shard-merge-optimistic.md) + - [Manually Handle Sharding DDL Lock](/dm/manually-handling-sharding-ddl-locks.md) + - [Migrate from MySQL Databases that Use GH-ost/PT-osc](/dm/feature-online-ddl.md) + - [Migrate Data to a Downstream TiDB Table with More Columns](/migrate-with-more-columns-downstream.md) + - [Continuous Data Validation](/dm/dm-continuous-data-validation.md) + - Maintain + - Cluster Upgrade + - [Maintain DM Clusters Using TiUP (Recommended)](/dm/maintain-dm-using-tiup.md) + - [Manually Upgrade from v1.0.x to v2.0+](/dm/manually-upgrade-dm-1.0-to-2.0.md) + - Tools + - [Manage Using WebUI](/dm/dm-webui-guide.md) + - [Manage Using dmctl](/dm/dmctl-introduction.md) + - Performance Tuning + - [Benchmarks](/dm/dm-benchmark-v5.4.0.md) + - [Optimize Configurations](/dm/dm-tune-configuration.md) + - [Test DM Performance](/dm/dm-performance-test.md) + - [Handle Performance Issues](/dm/dm-handle-performance-issues.md) + - Manage Data Sources + - [Switch the MySQL Instance to Be Migrated](/dm/usage-scenario-master-slave-switch.md) + - Manage Tasks + - [Handle Failed DDL Statements](/dm/handle-failed-ddl-statements.md) + - [Manage Schemas of Tables to be Migrated](/dm/dm-manage-schema.md) + - [Export and Import Data Sources and Task Configurations of Clusters](/dm/dm-export-import-config.md) + - [Handle Alerts](/dm/dm-handle-alerts.md) + - [Daily Check](/dm/dm-daily-check.md) + - Reference + - Architecture + - [DM-worker](/dm/dm-worker-intro.md) + - [Safe Mode](/dm/dm-safe-mode.md) + - [Relay Log](/dm/relay-log.md) + - [DDL Handling](/dm/dm-ddl-compatible.md) + - Mechanism + - [DML Replication Mechanism](/dm/dm-replication-logic.md) + - Command Line + - [DM-master & DM-worker](/dm/dm-command-line-flags.md) + - Configuration Files + - [Overview](/dm/dm-config-overview.md) + - [Upstream Database 
Configurations](/dm/dm-source-configuration-file.md) + - [Task Configurations](/dm/task-configuration-file-full.md) + - [DM-master Configuration](/dm/dm-master-configuration-file.md) + - [DM-worker Configuration](/dm/dm-worker-configuration-file.md) + - [Table Selector](/dm/table-selector.md) + - [OpenAPI](/dm/dm-open-api.md) + - [Compatibility Catalog](/dm/dm-compatibility-catalog.md) + - Secure + - [Enable TLS for DM Connections](/dm/dm-enable-tls.md) + - [Generate Self-signed Certificates](/dm/dm-generate-self-signed-certificates.md) + - Monitoring and Alerts + - [Monitoring Metrics](/dm/monitor-a-dm-cluster.md) + - [Alert Rules](/dm/dm-alert-rules.md) + - [Error Codes](/dm/dm-error-handling.md#handle-common-errors) + - [Glossary](/dm/dm-glossary.md) + - Example + - [Migrate Data Using DM](/dm/migrate-data-using-dm.md) + - [Create a Data Migration Task](/dm/quick-start-create-task.md) + - [Best Practices of Data Migration in the Shard Merge Scenario](/dm/shard-merge-best-practices.md) + - Troubleshoot + - [FAQ](/dm/dm-faq.md) + - [Handle Errors](/dm/dm-error-handling.md) + - [Release Notes](/dm/dm-release-notes.md) + - TiDB Lightning + - [Overview](/tidb-lightning/tidb-lightning-overview.md) + - [Get Started](/get-started-with-tidb-lightning.md) + - [Deploy TiDB Lightning](/tidb-lightning/deploy-tidb-lightning.md) + - [Target Database Requirements](/tidb-lightning/tidb-lightning-requirements.md) + - Data Sources + - [Data Match Rules](/tidb-lightning/tidb-lightning-data-source.md) + - [CSV](/tidb-lightning/tidb-lightning-data-source.md#csv) + - [SQL](/tidb-lightning/tidb-lightning-data-source.md#sql) + - [Parquet](/tidb-lightning/tidb-lightning-data-source.md#parquet) + - [Customized File](/tidb-lightning/tidb-lightning-data-source.md#match-customized-files) + - Physical Import Mode + - [Requirements and Limitations](/tidb-lightning/tidb-lightning-physical-import-mode.md) + - [Use Physical Import 
Mode](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md) + - Logical Import Mode + - [Requirements and Limitations](/tidb-lightning/tidb-lightning-logical-import-mode.md) + - [Use Logical Import Mode](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md) + - [Prechecks](/tidb-lightning/tidb-lightning-prechecks.md) + - [Table Filter](/table-filter.md) + - [Checkpoints](/tidb-lightning/tidb-lightning-checkpoints.md) + - [Import Data in Parallel](/tidb-lightning/tidb-lightning-distributed-import.md) + - [Error Resolution](/tidb-lightning/tidb-lightning-error-resolution.md) + - [Troubleshooting](/tidb-lightning/troubleshoot-tidb-lightning.md) + - Reference + - [Configuration File](/tidb-lightning/tidb-lightning-configuration.md) + - [Command Line Flags](/tidb-lightning/tidb-lightning-command-line-full.md) + - [Monitoring](/tidb-lightning/monitor-tidb-lightning.md) + - [Web Interface](/tidb-lightning/tidb-lightning-web-interface.md) + - [FAQ](/tidb-lightning/tidb-lightning-faq.md) + - [Glossary](/tidb-lightning/tidb-lightning-glossary.md) + - [Dumpling](/dumpling-overview.md) + - TiCDC + - [Overview](/ticdc/ticdc-overview.md) + - [Deploy and Maintain](/ticdc/deploy-ticdc.md) + - Changefeed + - [Overview](/ticdc/ticdc-changefeed-overview.md) + - Create Changefeeds + - [Replicate Data to MySQL-compatible Databases](/ticdc/ticdc-sink-to-mysql.md) + - [Replicate Data to Kafka](/ticdc/ticdc-sink-to-kafka.md) + - [Replicate Data to Storage Services](/ticdc/ticdc-sink-to-cloud-storage.md) + - [Manage Changefeeds](/ticdc/ticdc-manage-changefeed.md) + - [Log Filter](/ticdc/ticdc-filter.md) + - [Bidirectional Replication](/ticdc/ticdc-bidirectional-replication.md) + - [Data Integrity Validation for Single-Row Data](/ticdc/ticdc-integrity-check.md) + - Monitor and Alert + - [Monitoring Metrics Summary](/ticdc/ticdc-summary-monitor.md) + - [Monitoring Metrics Details](/ticdc/monitor-ticdc.md) + - [Alert Rules](/ticdc/ticdc-alert-rules.md) + - Reference + - 
[Architecture](/ticdc/ticdc-architecture.md) + - [TiCDC Server Configurations](/ticdc/ticdc-server-config.md) + - [TiCDC Changefeed Configurations](/ticdc/ticdc-changefeed-config.md) + - Output Protocols + - [TiCDC Avro Protocol](/ticdc/ticdc-avro-protocol.md) + - [TiCDC Canal-JSON Protocol](/ticdc/ticdc-canal-json.md) + - [TiCDC Open Protocol](/ticdc/ticdc-open-protocol.md) + - [TiCDC CSV Protocol](/ticdc/ticdc-csv.md) + - [TiCDC Open API v2](/ticdc/ticdc-open-api-v2.md) + - [TiCDC Open API v1](/ticdc/ticdc-open-api.md) + - [Guide for Developing a Storage Sink Consumer](/ticdc/ticdc-storage-consumer-dev-guide.md) + - [Compatibility](/ticdc/ticdc-compatibility.md) + - [Troubleshoot](/ticdc/troubleshoot-ticdc.md) + - [FAQs](/ticdc/ticdc-faq.md) + - [Glossary](/ticdc/ticdc-glossary.md) + - TiDB Binlog + - [Overview](/tidb-binlog/tidb-binlog-overview.md) + - [Quick Start](/tidb-binlog/get-started-with-tidb-binlog.md) + - [Deploy](/tidb-binlog/deploy-tidb-binlog.md) + - [Maintain](/tidb-binlog/maintain-tidb-binlog-cluster.md) + - [Configure](/tidb-binlog/tidb-binlog-configuration-file.md) + - [Pump](/tidb-binlog/tidb-binlog-configuration-file.md#pump) + - [Drainer](/tidb-binlog/tidb-binlog-configuration-file.md#drainer) + - [Upgrade](/tidb-binlog/upgrade-tidb-binlog.md) + - [Monitor](/tidb-binlog/monitor-tidb-binlog-cluster.md) + - [Reparo](/tidb-binlog/tidb-binlog-reparo.md) + - [binlogctl](/tidb-binlog/binlog-control.md) + - [Binlog Consumer Client](/tidb-binlog/binlog-consumer-client.md) + - [TiDB Binlog Relay Log](/tidb-binlog/tidb-binlog-relay-log.md) + - [Bidirectional Replication Between TiDB Clusters](/tidb-binlog/bidirectional-replication-between-tidb-clusters.md) + - [Glossary](/tidb-binlog/tidb-binlog-glossary.md) + - Troubleshoot + - [Troubleshoot](/tidb-binlog/troubleshoot-tidb-binlog.md) + - [Handle Errors](/tidb-binlog/handle-tidb-binlog-errors.md) + - [FAQ](/tidb-binlog/tidb-binlog-faq.md) + - PingCAP Clinic Diagnostic Service + - 
[Overview](/clinic/clinic-introduction.md) + - [Quick Start](/clinic/quick-start-with-clinic.md) + - [Troubleshoot Clusters Using PingCAP Clinic](/clinic/clinic-user-guide-for-tiup.md) + - [PingCAP Clinic Diagnostic Data](/clinic/clinic-data-instruction-for-tiup.md) + - TiSpark + - [User Guide](/tispark-overview.md) + - sync-diff-inspector + - [Overview](/sync-diff-inspector/sync-diff-inspector-overview.md) + - [Data Check for Tables with Different Schema/Table Names](/sync-diff-inspector/route-diff.md) + - [Data Check in the Sharding Scenario](/sync-diff-inspector/shard-diff.md) + - [Data Check for TiDB Upstream/Downstream Clusters](/sync-diff-inspector/upstream-downstream-diff.md) + - [Data Check in the DM Replication Scenario](/sync-diff-inspector/dm-diff.md) +- Reference + - Cluster Architecture + - [Overview](/tidb-architecture.md) + - [Storage](/tidb-storage.md) + - [Computing](/tidb-computing.md) + - [Scheduling](/tidb-scheduling.md) + - Storage Engine - TiKV + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - [Titan Overview](/storage-engine/titan-overview.md) + - [Titan Configuration](/storage-engine/titan-configuration.md) + - [Partitioned Raft KV](/partitioned-raft-kv.md) + - Storage Engine - TiFlash + - [Overview](/tiflash/tiflash-overview.md) + - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) + - [Use TiDB to Read TiFlash Replicas](/tiflash/use-tidb-to-read-tiflash.md) + - [Use TiSpark to Read TiFlash Replicas](/tiflash/use-tispark-to-read-tiflash.md) + - [Use MPP Mode](/tiflash/use-tiflash-mpp-mode.md) + - [Use FastScan](/tiflash/use-fastscan.md) + - [Disaggregated Storage and Compute Architecture and S3 Support](/tiflash/tiflash-disaggregated-and-s3.md) + - [Supported Push-down Calculations](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash Query Result Materialization](/tiflash/tiflash-results-materialization.md) + - [TiFlash Late 
Materialization](/tiflash/tiflash-late-materialization.md) + - [Spill to Disk](/tiflash/tiflash-spill-disk.md) + - [Data Validation](/tiflash/tiflash-data-validation.md) + - [Compatibility](/tiflash/tiflash-compatibility.md) + - [Pipeline Execution Model](/tiflash/tiflash-pipeline-model.md) + - [System Variables](/system-variables.md) + - Configuration File Parameters + - [tidb-server](/tidb-configuration-file.md) + - [tikv-server](/tikv-configuration-file.md) + - [tiflash-server](/tiflash/tiflash-configuration.md) + - [pd-server](/pd-configuration-file.md) + - CLI + - [tikv-ctl](/tikv-control.md) + - [pd-ctl](/pd-control.md) + - [tidb-ctl](/tidb-control.md) + - [pd-recover](/pd-recover.md) + - Command Line Flags + - [tidb-server](/command-line-flags-for-tidb-configuration.md) + - [tikv-server](/command-line-flags-for-tikv-configuration.md) + - [tiflash-server](/tiflash/tiflash-command-line-flags.md) + - [pd-server](/command-line-flags-for-pd-configuration.md) + - Key Monitoring Metrics + - [Overview](/grafana-overview-dashboard.md) + - [Performance Overview](/grafana-performance-overview-dashboard.md) + - [TiDB](/grafana-tidb-dashboard.md) + - [PD](/grafana-pd-dashboard.md) + - [TiKV](/grafana-tikv-dashboard.md) + - [TiFlash](/tiflash/monitor-tiflash.md) + - [TiCDC](/ticdc/monitor-ticdc.md) + - [Resource Control](/grafana-resource-control-dashboard.md) + - Security + - [Enable TLS Between TiDB Clients and Servers](/enable-tls-between-clients-and-servers.md) + - [Enable TLS Between TiDB Components](/enable-tls-between-components.md) + - [Generate Self-signed Certificates](/generate-self-signed-certificates.md) + - [Encryption at Rest](/encryption-at-rest.md) + - [Enable Encryption for Disk Spill](/enable-disk-spill-encrypt.md) + - [Log Redaction](/log-redaction.md) + - Privileges + - [Security Compatibility with MySQL](/security-compatibility-with-mysql.md) + - [Privilege Management](/privilege-management.md) + - [User Account 
Management](/user-account-management.md) + - [TiDB Password Management](/password-management.md) + - [Role-Based Access Control](/role-based-access-control.md) + - [Certificate-Based Authentication](/certificate-authentication.md) + - SQL + - SQL Language Structure and Syntax + - Attributes + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [Literal Values](/literal-values.md) + - [Schema Object Names](/schema-object-names.md) + - [Keywords and Reserved Words](/keywords.md) + - [User-Defined Variables](/user-defined-variables.md) + - [Expression Syntax](/expression-syntax.md) + - [Comment Syntax](/comment-syntax.md) + - SQL Statements + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN CLEANUP`](/sql-statements/sql-statement-admin-cleanup.md) + - [`ADMIN PAUSE DDL`](/sql-statements/sql-statement-admin-pause-ddl.md) + - [`ADMIN RECOVER INDEX`](/sql-statements/sql-statement-admin-recover.md) + - [`ADMIN RESUME DDL`](/sql-statements/sql-statement-admin-resume-ddl.md) + - [`ADMIN SHOW DDL [JOBS|JOB QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ADMIN SHOW TELEMETRY`](/sql-statements/sql-statement-admin-show-telemetry.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`ALTER INSTANCE`](/sql-statements/sql-statement-alter-instance.md) + - [`ALTER PLACEMENT POLICY`](/sql-statements/sql-statement-alter-placement-policy.md) + - [`ALTER RESOURCE 
GROUP`](/sql-statements/sql-statement-alter-resource-group.md) + - [`ALTER TABLE`](/sql-statements/sql-statement-alter-table.md) + - [`ALTER TABLE COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BACKUP`](/sql-statements/sql-statement-backup.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CALIBRATE RESOURCE`](/sql-statements/sql-statement-calibrate-resource.md) + - [`CANCEL IMPORT JOB`](/sql-statements/sql-statement-cancel-import-job.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CHANGE DRAINER`](/sql-statements/sql-statement-change-drainer.md) + - [`CHANGE PUMP`](/sql-statements/sql-statement-change-pump.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-create-placement-policy.md) + - [`CREATE RESOURCE GROUP`](/sql-statements/sql-statement-create-resource-group.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - 
[`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`DROP PLACEMENT POLICY`](/sql-statements/sql-statement-drop-placement-policy.md) + - [`DROP RESOURCE GROUP`](/sql-statements/sql-statement-drop-resource-group.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER TO TIMESTAMP`](/sql-statements/sql-statement-flashback-to-timestamp.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT `](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT `](/sql-statements/sql-statement-grant-role.md) + - [`IMPORT INTO`](/sql-statements/sql-statement-import-into.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOAD DATA`](/sql-statements/sql-statement-load-data.md) + - [`LOAD STATS`](/sql-statements/sql-statement-load-stats.md) + - [`LOCK 
STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`LOCK TABLES` and `UNLOCK TABLES`](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME USER`](/sql-statements/sql-statement-rename-user.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`RESTORE`](/sql-statements/sql-statement-restore.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET RESOURCE GROUP`](/sql-statements/sql-statement-set-resource-group.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] `](/sql-statements/sql-statement-set-variable.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [BACKUPS|RESTORES]`](/sql-statements/sql-statement-show-backups.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW [FULL] COLUMNS 
FROM`](/sql-statements/sql-statement-show-columns-from.md) +    - [`SHOW CONFIG`](/sql-statements/sql-statement-show-config.md) +    - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) +    - [`SHOW CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-show-create-placement-policy.md) +    - [`SHOW CREATE RESOURCE GROUP`](/sql-statements/sql-statement-show-create-resource-group.md) +    - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) +    - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) +    - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) +    - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) +    - [`SHOW DRAINER STATUS`](/sql-statements/sql-statement-show-drainer-status.md) +    - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) +    - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) +    - [`SHOW [FULL] FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) +    - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) +    - [`SHOW IMPORT JOB`](/sql-statements/sql-statement-show-import-job.md) +    - [`SHOW INDEX [FROM|IN]`](/sql-statements/sql-statement-show-index.md) +    - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) +    - [`SHOW KEYS [FROM|IN]`](/sql-statements/sql-statement-show-keys.md) +    - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) +    - [`SHOW PLACEMENT`](/sql-statements/sql-statement-show-placement.md) +    - [`SHOW PLACEMENT FOR`](/sql-statements/sql-statement-show-placement-for.md) +    - [`SHOW PLACEMENT LABELS`](/sql-statements/sql-statement-show-placement-labels.md) +    - [`SHOW PLUGINS`](/sql-statements/sql-statement-show-plugins.md) +    - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) +    - [`SHOW [FULL] PROCESSLIST`](/sql-statements/sql-statement-show-processlist.md) +    - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) +    - 
[`SHOW PUMP STATUS`](/sql-statements/sql-statement-show-pump-status.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-histograms.md) + - [`SHOW STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW [FULL] TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`SHUTDOWN`](/sql-statements/sql-statement-shutdown.md) + - [`SPLIT REGION`](/sql-statements/sql-statement-split-region.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - Data Types + - [Overview](/data-type-overview.md) + - [Default Values](/data-type-default-values.md) + - [Numeric Types](/data-type-numeric.md) + - [Date and Time Types](/data-type-date-and-time.md) + - [String Types](/data-type-string.md) + - [JSON Type](/data-type-json.md) + - Functions and Operators + - [Overview](/functions-and-operators/functions-and-operators-overview.md) + - [Type 
Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [Operators](/functions-and-operators/operators.md) + - [Control Flow Functions](/functions-and-operators/control-flow-functions.md) + - [String Functions](/functions-and-operators/string-functions.md) + - [Numeric Functions and Operators](/functions-and-operators/numeric-functions-and-operators.md) + - [Date and Time Functions](/functions-and-operators/date-and-time-functions.md) + - [Bit Functions and Operators](/functions-and-operators/bit-functions-and-operators.md) + - [Cast Functions and Operators](/functions-and-operators/cast-functions-and-operators.md) + - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) + - [Locking Functions](/functions-and-operators/locking-functions.md) + - [Information Functions](/functions-and-operators/information-functions.md) + - [JSON Functions](/functions-and-operators/json-functions.md) + - [Aggregate (GROUP BY) Functions](/functions-and-operators/aggregate-group-by-functions.md) + - [Window Functions](/functions-and-operators/window-functions.md) + - [Miscellaneous Functions](/functions-and-operators/miscellaneous-functions.md) + - [Precision Math](/functions-and-operators/precision-math.md) + - [Set Operations](/functions-and-operators/set-operators.md) + - [List of Expressions for Pushdown](/functions-and-operators/expressions-pushed-down.md) + - [TiDB Specific Functions](/functions-and-operators/tidb-functions.md) + - [Comparisons between Functions and Syntax of Oracle and TiDB](/oracle-functions-to-tidb.md) + - [Clustered Indexes](/clustered-indexes.md) + - [Constraints](/constraints.md) + - [Generated Columns](/generated-columns.md) + - [SQL Mode](/sql-mode.md) + - [Table Attributes](/table-attributes.md) + - Transactions + - [Overview](/transaction-overview.md) + - [Isolation Levels](/transaction-isolation-levels.md) + - [Optimistic 
Transactions](/optimistic-transaction.md) + - [Pessimistic Transactions](/pessimistic-transaction.md) + - [Non-Transactional DML Statements](/non-transactional-dml.md) + - [Views](/views.md) + - [Partitioning](/partitioned-table.md) + - [Temporary Tables](/temporary-tables.md) + - [Cached Tables](/cached-tables.md) + - [FOREIGN KEY Constraints](/foreign-key.md) + - Character Set and Collation + - [Overview](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - [Placement Rules in SQL](/placement-rules-in-sql.md) + - System Tables + - [`mysql`](/mysql-schema.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`CLUSTER_CONFIG`](/information-schema/information-schema-cluster-config.md) + - [`CLUSTER_HARDWARE`](/information-schema/information-schema-cluster-hardware.md) + - [`CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) + - [`CLUSTER_LOAD`](/information-schema/information-schema-cluster-load.md) + - [`CLUSTER_LOG`](/information-schema/information-schema-cluster-log.md) + - [`CLUSTER_SYSTEMINFO`](/information-schema/information-schema-cluster-systeminfo.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - 
[`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`INSPECTION_RESULT`](/information-schema/information-schema-inspection-result.md) + - [`INSPECTION_RULES`](/information-schema/information-schema-inspection-rules.md) + - [`INSPECTION_SUMMARY`](/information-schema/information-schema-inspection-summary.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`METRICS_SUMMARY`](/information-schema/information-schema-metrics-summary.md) + - [`METRICS_TABLES`](/information-schema/information-schema-metrics-tables.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PLACEMENT_POLICIES`](/information-schema/information-schema-placement-policies.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`RESOURCE_GROUPS`](/information-schema/information-schema-resource-groups.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`SLOW_QUERY`](/information-schema/information-schema-slow-query.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - 
[`TIDB_HOT_REGIONS`](/information-schema/information-schema-tidb-hot-regions.md) + - [`TIDB_HOT_REGIONS_HISTORY`](/information-schema/information-schema-tidb-hot-regions-history.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_SERVERS_INFO`](/information-schema/information-schema-tidb-servers-info.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIFLASH_SEGMENTS`](/information-schema/information-schema-tiflash-segments.md) + - [`TIFLASH_TABLES`](/information-schema/information-schema-tiflash-tables.md) + - [`TIKV_REGION_PEERS`](/information-schema/information-schema-tikv-region-peers.md) + - [`TIKV_REGION_STATUS`](/information-schema/information-schema-tikv-region-status.md) + - [`TIKV_STORE_STATUS`](/information-schema/information-schema-tikv-store-status.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - [`METRICS_SCHEMA`](/metrics-schema.md) + - [Metadata Lock](/metadata-lock.md) + - UI + - TiDB Dashboard + - [Overview](/dashboard/dashboard-intro.md) + - Maintain + - [Deploy](/dashboard/dashboard-ops-deploy.md) + - [Reverse Proxy](/dashboard/dashboard-ops-reverse-proxy.md) + - [User Management](/dashboard/dashboard-user.md) + - [Secure](/dashboard/dashboard-ops-security.md) + - [Access](/dashboard/dashboard-access.md) + - [Overview Page](/dashboard/dashboard-overview.md) + - [Cluster Info Page](/dashboard/dashboard-cluster-info.md) + - [Top SQL Page](/dashboard/top-sql.md) + - [Key Visualizer Page](/dashboard/dashboard-key-visualizer.md) + - [Metrics Relation Graph](/dashboard/dashboard-metrics-relation.md) + - SQL Statements Analysis + - [SQL 
Statements Page](/dashboard/dashboard-statement-list.md) +        - [SQL Details Page](/dashboard/dashboard-statement-details.md) +      - [Slow Queries Page](/dashboard/dashboard-slow-query.md) +      - Cluster Diagnostics +        - [Access Cluster Diagnostics Page](/dashboard/dashboard-diagnostics-access.md) +        - [View Diagnostics Report](/dashboard/dashboard-diagnostics-report.md) +        - [Use Diagnostics](/dashboard/dashboard-diagnostics-usage.md) +      - [Monitoring Page](/dashboard/dashboard-monitoring.md) +      - [Search Logs Page](/dashboard/dashboard-log-search.md) +      - [Resource Manager Page](/dashboard/dashboard-resource-manager.md) +      - Instance Profiling +        - [Manual Profiling](/dashboard/dashboard-profiling.md) +        - [Continuous Profiling](/dashboard/continuous-profiling.md) +      - Session Management and Configuration +        - [Share Session](/dashboard/dashboard-session-share.md) +        - [Configure SSO](/dashboard/dashboard-session-sso.md) +      - [FAQ](/dashboard/dashboard-faq.md) +  - [Telemetry](/telemetry.md) +  - [Error Codes](/error-codes.md) +  - [Table Filter](/table-filter.md) +  - [Schedule Replicas by Topology Labels](/schedule-replicas-by-topology-labels.md) +  - Internal Components +    - [TiDB Backend Task Distributed Execution Framework](/tidb-distributed-execution-framework.md) +- FAQs +  - [FAQ Summary](/faq/faq-overview.md) +  - [TiDB FAQs](/faq/tidb-faq.md) +  - [SQL FAQs](/faq/sql-faq.md) +  - [Deployment FAQs](/faq/deploy-and-maintain-faq.md) +  - [Migration FAQs](/faq/migration-tidb-faq.md) +  - [Upgrade FAQs](/faq/upgrade-faq.md) +  - [Monitoring FAQs](/faq/monitor-faq.md) +  - [Cluster Management FAQs](/faq/manage-cluster-faq.md) +  - [High Availability FAQs](/faq/high-availability-faq.md) +  - [High Reliability FAQs](/faq/high-reliability-faq.md) +  - [Backup and Restore FAQs](/faq/backup-and-restore-faq.md) +- Release Notes +  - [All Releases](/releases/release-notes.md) +  - [Release Timeline](/releases/release-timeline.md) +  - [TiDB Versioning](/releases/versioning.md) +  - [TiDB Installation 
Packages](/binary-package.md) + - v7.2 + - [7.2.0-DMR](/releases/release-7.2.0.md) + - v7.1 + - [7.1.0](/releases/release-7.1.0.md) + - v7.0 + - [7.0.0-DMR](/releases/release-7.0.0.md) + - v6.6 + - [6.6.0-DMR](/releases/release-6.6.0.md) + - v6.5 + - [6.5.3](/releases/release-6.5.3.md) + - [6.5.2](/releases/release-6.5.2.md) + - [6.5.1](/releases/release-6.5.1.md) + - [6.5.0](/releases/release-6.5.0.md) + - v6.4 + - [6.4.0-DMR](/releases/release-6.4.0.md) + - v6.3 + - [6.3.0-DMR](/releases/release-6.3.0.md) + - v6.2 + - [6.2.0-DMR](/releases/release-6.2.0.md) + - v6.1 + - [6.1.6](/releases/release-6.1.6.md) + - [6.1.5](/releases/release-6.1.5.md) + - [6.1.4](/releases/release-6.1.4.md) + - [6.1.3](/releases/release-6.1.3.md) + - [6.1.2](/releases/release-6.1.2.md) + - [6.1.1](/releases/release-6.1.1.md) + - [6.1.0](/releases/release-6.1.0.md) + - v6.0 + - [6.0.0-DMR](/releases/release-6.0.0-dmr.md) + - v5.4 + - [5.4.3](/releases/release-5.4.3.md) + - [5.4.2](/releases/release-5.4.2.md) + - [5.4.1](/releases/release-5.4.1.md) + - [5.4.0](/releases/release-5.4.0.md) + - v5.3 + - [5.3.4](/releases/release-5.3.4.md) + - [5.3.3](/releases/release-5.3.3.md) + - [5.3.2](/releases/release-5.3.2.md) + - [5.3.1](/releases/release-5.3.1.md) + - [5.3.0](/releases/release-5.3.0.md) + - v5.2 + - [5.2.4](/releases/release-5.2.4.md) + - [5.2.3](/releases/release-5.2.3.md) + - [5.2.2](/releases/release-5.2.2.md) + - [5.2.1](/releases/release-5.2.1.md) + - [5.2.0](/releases/release-5.2.0.md) + - v5.1 + - [5.1.5](/releases/release-5.1.5.md) + - [5.1.4](/releases/release-5.1.4.md) + - [5.1.3](/releases/release-5.1.3.md) + - [5.1.2](/releases/release-5.1.2.md) + - [5.1.1](/releases/release-5.1.1.md) + - [5.1.0](/releases/release-5.1.0.md) + - v5.0 + - [5.0.6](/releases/release-5.0.6.md) + - [5.0.5](/releases/release-5.0.5.md) + - [5.0.4](/releases/release-5.0.4.md) + - [5.0.3](/releases/release-5.0.3.md) + - [5.0.2](/releases/release-5.0.2.md) + - [5.0.1](/releases/release-5.0.1.md) + 
- [5.0 GA](/releases/release-5.0.0.md) + - [5.0.0-rc](/releases/release-5.0.0-rc.md) + - v4.0 + - [4.0.16](/releases/release-4.0.16.md) + - [4.0.15](/releases/release-4.0.15.md) + - [4.0.14](/releases/release-4.0.14.md) + - [4.0.13](/releases/release-4.0.13.md) + - [4.0.12](/releases/release-4.0.12.md) + - [4.0.11](/releases/release-4.0.11.md) + - [4.0.10](/releases/release-4.0.10.md) + - [4.0.9](/releases/release-4.0.9.md) + - [4.0.8](/releases/release-4.0.8.md) + - [4.0.7](/releases/release-4.0.7.md) + - [4.0.6](/releases/release-4.0.6.md) + - [4.0.5](/releases/release-4.0.5.md) + - [4.0.4](/releases/release-4.0.4.md) + - [4.0.3](/releases/release-4.0.3.md) + - [4.0.2](/releases/release-4.0.2.md) + - [4.0.1](/releases/release-4.0.1.md) + - [4.0 GA](/releases/release-4.0-ga.md) + - [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) + - [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) + - [4.0.0-rc](/releases/release-4.0.0-rc.md) + - [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) + - [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) + - [4.0.0-beta](/releases/release-4.0.0-beta.md) + - v3.1 + - [3.1.2](/releases/release-3.1.2.md) + - [3.1.1](/releases/release-3.1.1.md) + - [3.1.0 GA](/releases/release-3.1.0-ga.md) + - [3.1.0-rc](/releases/release-3.1.0-rc.md) + - [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) + - [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) + - [3.1.0-beta](/releases/release-3.1.0-beta.md) + - v3.0 + - [3.0.20](/releases/release-3.0.20.md) + - [3.0.19](/releases/release-3.0.19.md) + - [3.0.18](/releases/release-3.0.18.md) + - [3.0.17](/releases/release-3.0.17.md) + - [3.0.16](/releases/release-3.0.16.md) + - [3.0.15](/releases/release-3.0.15.md) + - [3.0.14](/releases/release-3.0.14.md) + - [3.0.13](/releases/release-3.0.13.md) + - [3.0.12](/releases/release-3.0.12.md) + - [3.0.11](/releases/release-3.0.11.md) + - [3.0.10](/releases/release-3.0.10.md) + - [3.0.9](/releases/release-3.0.9.md) + - [3.0.8](/releases/release-3.0.8.md) + - 
[3.0.7](/releases/release-3.0.7.md) + - [3.0.6](/releases/release-3.0.6.md) + - [3.0.5](/releases/release-3.0.5.md) + - [3.0.4](/releases/release-3.0.4.md) + - [3.0.3](/releases/release-3.0.3.md) + - [3.0.2](/releases/release-3.0.2.md) + - [3.0.1](/releases/release-3.0.1.md) + - [3.0 GA](/releases/release-3.0-ga.md) + - [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) + - [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) + - [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) + - [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) + - [3.0.0-beta](/releases/release-3.0-beta.md) + - v2.1 + - [2.1.19](/releases/release-2.1.19.md) + - [2.1.18](/releases/release-2.1.18.md) + - [2.1.17](/releases/release-2.1.17.md) + - [2.1.16](/releases/release-2.1.16.md) + - [2.1.15](/releases/release-2.1.15.md) + - [2.1.14](/releases/release-2.1.14.md) + - [2.1.13](/releases/release-2.1.13.md) + - [2.1.12](/releases/release-2.1.12.md) + - [2.1.11](/releases/release-2.1.11.md) + - [2.1.10](/releases/release-2.1.10.md) + - [2.1.9](/releases/release-2.1.9.md) + - [2.1.8](/releases/release-2.1.8.md) + - [2.1.7](/releases/release-2.1.7.md) + - [2.1.6](/releases/release-2.1.6.md) + - [2.1.5](/releases/release-2.1.5.md) + - [2.1.4](/releases/release-2.1.4.md) + - [2.1.3](/releases/release-2.1.3.md) + - [2.1.2](/releases/release-2.1.2.md) + - [2.1.1](/releases/release-2.1.1.md) + - [2.1 GA](/releases/release-2.1-ga.md) + - [2.1 RC5](/releases/release-2.1-rc.5.md) + - [2.1 RC4](/releases/release-2.1-rc.4.md) + - [2.1 RC3](/releases/release-2.1-rc.3.md) + - [2.1 RC2](/releases/release-2.1-rc.2.md) + - [2.1 RC1](/releases/release-2.1-rc.1.md) + - [2.1 Beta](/releases/release-2.1-beta.md) + - v2.0 + - [2.0.11](/releases/release-2.0.11.md) + - [2.0.10](/releases/release-2.0.10.md) + - [2.0.9](/releases/release-2.0.9.md) + - [2.0.8](/releases/release-2.0.8.md) + - [2.0.7](/releases/release-2.0.7.md) + - [2.0.6](/releases/release-2.0.6.md) + - [2.0.5](/releases/release-2.0.5.md) + - 
[2.0.4](/releases/release-2.0.4.md) + - [2.0.3](/releases/release-2.0.3.md) + - [2.0.2](/releases/release-2.0.2.md) + - [2.0.1](/releases/release-2.0.1.md) + - [2.0](/releases/release-2.0-ga.md) + - [2.0 RC5](/releases/release-2.0-rc.5.md) + - [2.0 RC4](/releases/release-2.0-rc.4.md) + - [2.0 RC3](/releases/release-2.0-rc.3.md) + - [2.0 RC1](/releases/release-2.0-rc.1.md) + - [1.1 Beta](/releases/release-1.1-beta.md) + - [1.1 Alpha](/releases/release-1.1-alpha.md) + - v1.0 + - [1.0.8](/releases/release-1.0.8.md) + - [1.0.7](/releases/release-1.0.7.md) + - [1.0.6](/releases/release-1.0.6.md) + - [1.0.5](/releases/release-1.0.5.md) + - [1.0.4](/releases/release-1.0.4.md) + - [1.0.3](/releases/release-1.0.3.md) + - [1.0.2](/releases/release-1.0.2.md) + - [1.0.1](/releases/release-1.0.1.md) + - [1.0](/releases/release-1.0-ga.md) + - [Pre-GA](/releases/release-pre-ga.md) + - [RC4](/releases/release-rc.4.md) + - [RC3](/releases/release-rc.3.md) + - [RC2](/releases/release-rc.2.md) + - [RC1](/releases/release-rc.1.md) +- [Glossary](/glossary.md) diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/releases/release-7.2.0.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/releases/release-7.2.0.md new file mode 100644 index 00000000..15d25c97 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/releases/release-7.2.0.md @@ -0,0 +1,328 @@ +--- +title: TiDB 7.2.0 Release Notes +summary: Learn about the new features, compatibility changes, improvements, and bug fixes in TiDB 7.2.0. +--- + +# TiDB 7.2.0 Release Notes + +Release date: June 29, 2023 + +TiDB version: 7.2.0 + +Quick access: [Quick start](https://docs.pingcap.com/tidb/v7.2/quick-start-with-tidb) | [Installation packages](https://www.pingcap.com/download/?version=v7.2.0#version-list) + +7.2.0 introduces the following key features and improvements: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CategoryFeatureDescription
Scalability and PerformanceResource groups support managing runaway queries (experimental)You can now manage query timeout with more granularity, allowing for different behaviors based on query classifications. Queries meeting your specified threshold can be deprioritized or terminated. +
TiFlash supports the pipeline execution model (experimental)TiFlash supports a pipeline execution model to optimize thread resource control.
SQLSupport a new SQL statement, IMPORT INTO, to enable data import using the TiDB service, itself (experimental)To simplify the deployment and maintenance of TiDB Lightning, TiDB introduces a new SQL statement IMPORT INTO, which integrates physical import mode of TiDB Lightning, including remote import from Amazon S3 or Google Cloud Storage (GCS) directly into TiDB.
DB Operations and ObservabilityDDL supports pause and resume operations (experimental)This new capability lets you temporarily suspend resource-intensive DDL operations, such as index creation, to conserve resources and minimize the impact on online traffic. You can seamlessly resume these operations when ready, without the need to cancel and restart. This feature enhances resource utilization, improves user experience, and streamlines schema changes.
+ +## Feature details + +### Performance + +* Support pushing down the following two [window functions](/tiflash/tiflash-supported-pushdown-calculations.md) to TiFlash [#7427](https://github.com/pingcap/tiflash/issues/7427) @[xzhangxian1008](https://github.com/xzhangxian1008) + + * `FIRST_VALUE` + * `LAST_VALUE` + +* TiFlash supports the pipeline execution model (experimental) [#6518](https://github.com/pingcap/tiflash/issues/6518) @[SeaRise](https://github.com/SeaRise) + + Prior to v7.2.0, each task in the TiFlash engine must individually request thread resources during execution. TiFlash controls the number of tasks to limit thread resource usage and prevent overuse, but this issue could not be completely eliminated. To address this problem, starting from v7.2.0, TiFlash introduces a pipeline execution model. This model centrally manages all thread resources and schedules task execution uniformly, maximizing the utilization of thread resources while avoiding resource overuse. To enable or disable the pipeline execution model, modify the [`tidb_enable_tiflash_pipeline_model`](/system-variables.md#tidb_enable_tiflash_pipeline_model-new-in-v720) system variable. + + For more information, see [documentation](/tiflash/tiflash-pipeline-model.md). + +* TiFlash reduces the latency of schema replication [#7630](https://github.com/pingcap/tiflash/issues/7630) @[hongyunyan](https://github.com/hongyunyan) + + When the schema of a table changes, TiFlash needs to replicate the latest schema from TiKV in a timely manner. Before v7.2.0, when TiFlash accesses table data and detects a table schema change within a database, TiFlash needs to replicate the schemas of all tables in this database again, including those tables without TiFlash replicas. As a result, in a database with a large number of tables, even if you only need to read data from a single table using TiFlash, you might experience significant latency to wait for TiFlash to complete the schema replication of all tables. 
+ + In v7.2.0, TiFlash optimizes the schema replication mechanism and supports only replicating schemas of tables with TiFlash replicas. When a schema change is detected for a table with TiFlash replicas, TiFlash only replicates the schema of that table, which reduces the latency of schema replication of TiFlash and minimizes the impact of DDL operations on TiFlash data replication. This optimization is automatically applied and does not require any manual configuration. + +* Improve the performance of statistics collection [#44725](https://github.com/pingcap/tidb/issues/44725) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) + + TiDB v7.2.0 optimizes the statistics collection strategy, skipping some of the duplicate information and information that is of little value to the optimizer. The overall speed of statistics collection has been improved by 30%. This improvement allows TiDB to update the statistics of the database in a more timely manner, making the generated execution plans more accurate, thus improving the overall database performance. + + By default, statistics collection skips the columns of the `JSON`, `BLOB`, `MEDIUMBLOB`, and `LONGBLOB` types. You can modify the default behavior by setting the [`tidb_analyze_skip_column_types`](/system-variables.md#tidb_analyze_skip_column_types-new-in-v720) system variable. TiDB supports skipping the `JSON`, `BLOB`, and `TEXT` types and their subtypes. + + For more information, see [documentation](/system-variables.md#tidb_analyze_skip_column_types-new-in-v720). + +* Improve the performance of checking data and index consistency [#43693](https://github.com/pingcap/tidb/issues/43693) @[wjhuang2016](https://github.com/wjhuang2016) + + The [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) statement is used to check the consistency between data in a table and its corresponding indexes. 
In v7.2.0, TiDB optimizes the method for checking data consistency and improves the execution efficiency of [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) greatly. In scenarios with large amounts of data, this optimization can provide a performance boost of hundreds of times. + + The optimization is enabled by default ([`tidb_enable_fast_table_check`](/system-variables.md#tidb_enable_fast_table_check-new-in-v720) is `ON` by default) to greatly reduce the time required for data consistency checks in large-scale tables and enhance operational efficiency. + + For more information, see [documentation](/system-variables.md#tidb_enable_fast_table_check-new-in-v720). + +### Reliability + +* Automatically manage queries that consume more resources than expected (experimental) [#43691](https://github.com/pingcap/tidb/issues/43691) @[Connor1996](https://github.com/Connor1996) @[CabinfeverB](https://github.com/CabinfeverB) @[glorv](https://github.com/glorv) @[HuSharp](https://github.com/HuSharp) @[nolouch](https://github.com/nolouch) + + The most common challenge to database stability is the degradation of overall database performance caused by abrupt SQL performance problems. There are many causes for SQL performance issues, such as new SQL statements that have not been fully tested, drastic changes in data volume, and abrupt changes in execution plans. These issues are difficult to completely avoid at the root. TiDB v7.2.0 provides the ability to manage queries that consume more resources than expected. This feature can quickly reduce the scope of impact when a performance issue occurs. + + To manage these queries, you can set the maximum execution time of queries for a resource group. When the execution time of a query exceeds this limit, the query is automatically deprioritized or cancelled. You can also set a period of time to immediately match identified queries by text or execution plan. 
This helps prevent high concurrency of the problematic queries during the identification phase that could consume more resources than expected. + + Automatic management of queries that consume more resources than expected provides you with an effective means to quickly respond to unexpected query performance problems. This feature can reduce the impact of the problem on overall database performance, thereby improving database stability. + + For more information, see [documentation](/tidb-resource-control.md#manage-queries-that-consume-more-resources-than-expected-runaway-queries). + +* Enhance the capability of creating a binding according to a historical execution plan [#39199](https://github.com/pingcap/tidb/issues/39199) @[qw4990](https://github.com/qw4990) + + TiDB v7.2.0 enhances the capability of [creating a binding according to a historical execution plan](/sql-plan-management.md#create-a-binding-according-to-a-historical-execution-plan). This feature improves the parsing and binding process for complex statements, making the bindings more stable, and supports the following new hints: + + - [`AGG_TO_COP()`](/optimizer-hints.md#agg_to_cop) + - [`LIMIT_TO_COP()`](/optimizer-hints.md#limit_to_cop) + - [`ORDER_INDEX`](/optimizer-hints.md#order_indext1_name-idx1_name--idx2_name-) + - [`NO_ORDER_INDEX()`](/optimizer-hints.md#no_order_indext1_name-idx1_name--idx2_name-) + + For more information, see [documentation](/sql-plan-management.md). + +* Introduce the Optimizer Fix Controls mechanism to provide fine-grained control over optimizer behaviors [#43169](https://github.com/pingcap/tidb/issues/43169) @[time-and-fate](https://github.com/time-and-fate) + + To generate more reasonable execution plans, the behavior of the TiDB optimizer evolves over product iterations. However, in some particular scenarios, the changes might lead to performance regression. 
TiDB v7.2.0 introduces Optimizer Fix Controls to let you control some of the fine-grained behaviors of the optimizer. This enables you to roll back or control some new changes. + + Each controllable behavior is described by a GitHub issue corresponding to the fix number. All controllable behaviors are listed in [Optimizer Fix Controls](/optimizer-fix-controls.md). You can set a target value for one or more behaviors by setting the [`tidb_opt_fix_control`](/system-variables.md#tidb_opt_fix_control-new-in-v710) system variable to achieve behavior control. + + The Optimizer Fix Controls mechanism helps you control the TiDB optimizer at a granular level. It provides a new means of fixing performance issues caused by the upgrade process and improves the stability of TiDB. + + For more information, see [documentation](/optimizer-fix-controls.md). + +* Lightweight statistics initialization becomes generally available (GA) [#42160](https://github.com/pingcap/tidb/issues/42160) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) + + Starting from v7.2.0, the lightweight statistics initialization feature becomes GA. Lightweight statistics initialization can significantly reduce the number of statistics that must be loaded during startup, thus improving the speed of loading statistics. This feature increases the stability of TiDB in complex runtime environments and reduces the impact on the overall service when TiDB nodes restart. + + For newly created clusters of v7.2.0 or later versions, TiDB loads lightweight statistics by default during TiDB startup and will wait for the loading to finish before providing services. For clusters upgraded from earlier versions, you can set the TiDB configuration items [`lite-init-stats`](/tidb-configuration-file.md#lite-init-stats-new-in-v710) and [`force-init-stats`](/tidb-configuration-file.md#force-init-stats-new-in-v710) to `true` to enable this feature. + + For more information, see [documentation](/statistics.md#load-statistics). 
+ +### SQL + +* Support the `CHECK` constraints [#41711](https://github.com/pingcap/tidb/issues/41711) @[fzzf678](https://github.com/fzzf678) + + Starting from v7.2.0, you can use `CHECK` constraints to restrict the values of one or more columns in a table to meet your specified conditions. When a `CHECK` constraint is added to a table, TiDB checks whether the constraint is satisfied before inserting or updating data in the table. Only the data that satisfies the constraint can be written. + + This feature is disabled by default. You can set the [`tidb_enable_check_constraint`](/system-variables.md#tidb_enable_check_constraint-new-in-v720) system variable to `ON` to enable it. + + For more information, see [documentation](/constraints.md#check). + +### DB operations + +* DDL jobs support pause and resume operations (experimental) [#18015](https://github.com/pingcap/tidb/issues/18015) @[godouxm](https://github.com/godouxm) + + Before TiDB v7.2.0, when a DDL job encounters a business peak during execution, you can only manually cancel the DDL job to reduce its impact on the business. In v7.2.0, TiDB introduces pause and resume operations for DDL jobs. These operations let you pause DDL jobs during a peak and resume them after the peak ends, thus avoiding impact on your application workloads. + + For example, you can pause and resume multiple DDL jobs using `ADMIN PAUSE DDL JOBS` or `ADMIN RESUME DDL JOBS`: + + ```sql + ADMIN PAUSE DDL JOBS 1,2; + ADMIN RESUME DDL JOBS 1,2; + ``` + + For more information, see [documentation](/ddl-introduction.md#ddl-related-commands). + +### Data migration + +* Introduce a new SQL statement `IMPORT INTO` to improve data import efficiency greatly (experimental) [#42930](https://github.com/pingcap/tidb/issues/42930) @[D3Hunter](https://github.com/D3Hunter) + + The `IMPORT INTO` statement integrates the [Physical Import Mode](/tidb-lightning/tidb-lightning-physical-import-mode.md) capability of TiDB Lightning. 
With this statement, you can quickly import data in formats such as CSV, SQL, and PARQUET into an empty table in TiDB. This import method eliminates the need for a separate deployment and management of TiDB Lightning, thereby reducing the complexity of data import and greatly improving import efficiency. + + For data files stored in Amazon S3 or GCS, when the [Backend task distributed execution framework](/tidb-distributed-execution-framework.md) is enabled, `IMPORT INTO` also supports splitting a data import job into multiple sub-jobs and scheduling them to multiple TiDB nodes for parallel import, which further enhances import performance. + + For more information, see [documentation](/sql-statements/sql-statement-import-into.md). + +* TiDB Lightning supports importing source files with the Latin-1 character set into TiDB [#44434](https://github.com/pingcap/tidb/issues/44434) @[lance6716](https://github.com/lance6716) + + With this feature, you can directly import source files with the Latin-1 character set into TiDB using TiDB Lightning. Before v7.2.0, importing such files requires your additional preprocessing or conversion. Starting from v7.2.0, you only need to specify `character-set = "latin1"` when configuring the TiDB Lightning import task. Then, TiDB Lightning automatically handles the character set conversion during the import process to ensure data integrity and accuracy. + + For more information, see [documentation](/tidb-lightning/tidb-lightning-configuration.md#tidb-lightning-task). + +## Compatibility changes + +> **Note:** +> +> This section provides compatibility changes you need to know when you upgrade from v7.1.0 to the current version (v7.2.0). If you are upgrading from v7.0.0 or earlier versions to the current version, you might also need to check the compatibility changes introduced in intermediate versions. 
+ +### System variables + +| Variable name | Change type | Description | +|--------|------------------------------|------| +| [`last_insert_id`](/system-variables.md#last_insert_id) | Modified | Changes the maximum value from `9223372036854775807` to `18446744073709551615` to be consistent with that of MySQL. | +| [`tidb_enable_non_prepared_plan_cache`](/system-variables.md#tidb_enable_non_prepared_plan_cache) | Modified | Changes the default value from `OFF` to `ON` after further tests, meaning that non-prepared execution plan cache is enabled. | +| [`tidb_remove_orderby_in_subquery`](/system-variables.md#tidb_remove_orderby_in_subquery-new-in-v610) | Modified | Changes the default value from `OFF` to `ON` after further tests, meaning that the optimizer removes the `ORDER BY` clause in a subquery. | +| [`tidb_analyze_skip_column_types`](/system-variables.md#tidb_analyze_skip_column_types-new-in-v720) | Newly added | Controls which types of columns are skipped for statistics collection when executing the `ANALYZE` command to collect statistics. The variable is only applicable for [`tidb_analyze_version = 2`](/system-variables.md#tidb_analyze_version-new-in-v510). When using the syntax of `ANALYZE TABLE t COLUMNS c1, ..., cn`, if the type of a specified column is included in `tidb_analyze_skip_column_types`, the statistics of this column will not be collected. | +| [`tidb_enable_check_constraint`](/system-variables.md#tidb_enable_check_constraint-new-in-v720) | Newly added | Controls whether to enable `CHECK` constraints. The default value is `OFF`, which means this feature is disabled. | +| [`tidb_enable_fast_table_check`](/system-variables.md#tidb_enable_fast_table_check-new-in-v720) | Newly added | Controls whether to use a checksum-based approach to quickly check the consistency of data and indexes in a table. The default value is `ON`, which means this feature is enabled. 
| +| [`tidb_enable_tiflash_pipeline_model`](/system-variables.md#tidb_enable_tiflash_pipeline_model-new-in-v720) | Newly added | Controls whether to enable the new execution model of TiFlash, the [pipeline model](/tiflash/tiflash-pipeline-model.md). The default value is `OFF`, which means the pipeline model is disabled. | +| [`tidb_expensive_txn_time_threshold`](/system-variables.md#tidb_expensive_txn_time_threshold-new-in-v720) | Newly added | Controls the threshold for logging expensive transactions, which is 600 seconds by default. When the duration of a transaction exceeds the threshold, and the transaction is neither committed nor rolled back, it is considered an expensive transaction and will be logged. | + +### Configuration file parameters + +| Configuration file | Configuration parameter | Change type | Description | +| -------- | -------- | -------- | -------- | +| TiDB | [`lite-init-stats`](/tidb-configuration-file.md#lite-init-stats-new-in-v710) | Modified | Changes the default value from `false` to `true` after further tests, meaning that TiDB uses lightweight statistics initialization by default during TiDB startup to improve the initialization efficiency. | +| TiDB | [`force-init-stats`](/tidb-configuration-file.md#force-init-stats-new-in-v710) | Modified | Changes the default value from `false` to `true` to align with [`lite-init-stats`](/tidb-configuration-file.md#lite-init-stats-new-in-v710), meaning that TiDB waits for statistics initialization to finish before providing services during TiDB startup. | +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].compaction-guard-min-output-file-size](/tikv-configuration-file.md#compaction-guard-min-output-file-size) | Modified | Changes the default value from `"8MB"` to `"1MB"` to reduce the data volume of compaction tasks in RocksDB. 
| +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].optimize-filters-for-memory](/tikv-configuration-file.md#optimize-filters-for-memory-new-in-v720) | Newly added | Controls whether to generate Bloom/Ribbon filters that minimize memory internal fragmentation. | +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].periodic-compaction-seconds](/tikv-configuration-file.md#periodic-compaction-seconds-new-in-v720) | Newly added | Controls the time interval for periodic compaction. SST files with updates older than this value will be selected for compaction and rewritten to the same level where these SST files originally reside. | +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].ribbon-filter-above-level](/tikv-configuration-file.md#ribbon-filter-above-level-new-in-v720) | Newly added | Controls whether to use Ribbon filters for levels greater than or equal to this value and use non-block-based bloom filters for levels less than this value. | +| TiKV | [rocksdb.\[defaultcf\|writecf\|lockcf\].ttl](/tikv-configuration-file.md#ttl-new-in-v720) | Newly added | SST files with updates older than the TTL will be automatically selected for compaction. | +| TiDB Lightning | `send-kv-pairs` | Deprecated | Starting from v7.2.0, the parameter `send-kv-pairs` is deprecated. You can use [`send-kv-size`](/tidb-lightning/tidb-lightning-configuration.md) to control the maximum size of one request when sending data to TiKV in physical import mode. | +| TiDB Lightning | [`character-set`](/tidb-lightning/tidb-lightning-configuration.md#tidb-lightning-task) | Modified | Introduces a new value option `latin1` for the supported character sets of data import. You can use this option to import source files with the Latin-1 character set. | +| TiDB Lightning | [`send-kv-size`](/tidb-lightning/tidb-lightning-configuration.md) | Newly added | Specify the maximum size of one request when sending data to TiKV in physical import mode. 
When the size of key-value pairs reaches the specified threshold, TiDB Lightning will immediately send them to TiKV. This avoids the OOM problems caused by TiDB Lightning nodes accumulating too many key-value pairs in memory when importing large wide tables. By adjusting this parameter, you can find a balance between memory usage and import speed, improving the stability and efficiency of the import process. | +| Data Migration | [`strict-optimistic-shard-mode`](/dm/feature-shard-merge-optimistic.md) | Newly added | This configuration item is used to be compatible with the DDL shard merge behavior in TiDB Data Migration v2.0. You can enable this configuration item in optimistic mode. After this is enabled, the replication task will be interrupted when it encounters a Type 2 DDL statement. In scenarios where there are dependencies between DDL changes in multiple tables, a timely interruption can be made. You need to manually process the DDL statements of each table before resuming the replication task to ensure data consistency between the upstream and the downstream. | +| TiCDC | [`sink.protocol`](/ticdc/ticdc-changefeed-config.md) | Modified | Introduces a new value option `"open-protocol"` when the downstream is Kafka. Specifies the protocol format used for encoding messages. | +| TiCDC | [`sink.delete-only-output-handle-key-columns`](/ticdc/ticdc-changefeed-config.md) | Newly added | Specifies the output of DELETE events. This parameter is valid only for `"canal-json"` and `"open-protocol"` protocols. The default value is `false`, which means outputting all columns. When you set it to `true`, only primary key columns or unique index columns are output. 
| + +## Improvements + ++ TiDB + + - Optimize the logic of constructing index scan range so that it supports converting complex conditions into index scan range [#41572](https://github.com/pingcap/tidb/issues/41572) [#44389](https://github.com/pingcap/tidb/issues/44389) @[xuyifangreeneyes](https://github.com/xuyifangreeneyes) + - Add new monitoring metrics `Stale Read OPS` and `Stale Read Traffic` [#43325](https://github.com/pingcap/tidb/issues/43325) @[you06](https://github.com/you06) + - When the retry leader of stale read encounters a lock, TiDB forcibly retries with the leader after resolving the lock, which avoids unnecessary overhead [#43659](https://github.com/pingcap/tidb/issues/43659) @[you06](https://github.com/you06) + - Use estimated time to calculate stale read ts and reduce the overhead of stale read [#44215](https://github.com/pingcap/tidb/issues/44215) @[you06](https://github.com/you06) + - Add logs and system variables for long-running transactions [#41471](https://github.com/pingcap/tidb/issues/41471) @[crazycs520](https://github.com/crazycs520) + - Support connecting to TiDB through the compressed MySQL protocol, which improves the performance of data-intensive queries under low bandwidth networks and saves bandwidth costs. This supports both `zlib` and `zstd` based compression. 
[#22605](https://github.com/pingcap/tidb/issues/22605) @[dveeden](https://github.com/dveeden) + - Recognize both `utf8` and `utf8mb3` as the legacy three-byte UTF-8 character set encodings, which facilitates the migration of tables with legacy UTF-8 encodings from MySQL 8.0 to TiDB [#26226](https://github.com/pingcap/tidb/issues/26226) @[dveeden](https://github.com/dveeden) + - Support using `:=` for assignment in `UPDATE` statements [#44751](https://github.com/pingcap/tidb/issues/44751) @[CbcWestwolf](https://github.com/CbcWestwolf) + ++ TiKV + + - Support configuring the retry interval of PD connections in scenarios such as connection request failures using `pd.retry-interval` [#14964](https://github.com/tikv/tikv/issues/14964) @[rleungx](https://github.com/rleungx) + - Optimize the resource control scheduling algorithm by incorporating the global resource usage [#14604](https://github.com/tikv/tikv/issues/14604) @[Connor1996](https://github.com/Connor1996) + - Use gzip compression for `check_leader` requests to reduce traffic [#14553](https://github.com/tikv/tikv/issues/14553) @[you06](https://github.com/you06) + - Add related metrics for `check_leader` requests [#14658](https://github.com/tikv/tikv/issues/14658) @[you06](https://github.com/you06) + - Provide detailed time information during TiKV handling write commands [#12362](https://github.com/tikv/tikv/issues/12362) @[cfzjywxk](https://github.com/cfzjywxk) + ++ PD + + - Use a separate gRPC connection for PD leader election to prevent the impact of other requests [#6403](https://github.com/tikv/pd/issues/6403) @[rleungx](https://github.com/rleungx) + - Enable the bucket splitting by default to mitigate hotspot issues in multi-Region scenarios [#6433](https://github.com/tikv/pd/issues/6433) @[bufferflies](https://github.com/bufferflies) + ++ Tools + + + Backup & Restore (BR) + + - Support access to Azure Blob Storage by shared access signature (SAS) [#44199](https://github.com/pingcap/tidb/issues/44199) 
@[Leavrth](https://github.com/Leavrth) + + + TiCDC + + - Optimize the structure of the directory where data files are stored when a DDL operation occurs in the scenario of replication to an object storage service [#8891](https://github.com/pingcap/tiflow/issues/8891) @[CharlesCheung96](https://github.com/CharlesCheung96) + - Support the OAUTHBEARER authentication in the scenario of replication to Kafka [#8865](https://github.com/pingcap/tiflow/issues/8865) @[hi-rustin](https://github.com/hi-rustin) + - Add the option of outputting only the handle keys for the `DELETE` operation in the scenario of replication to Kafka [#9143](https://github.com/pingcap/tiflow/issues/9143) @[3AceShowHand](https://github.com/3AceShowHand) + + + TiDB Data Migration (DM) + + - Support reading compressed binlogs in MySQL 8.0 as a data source for incremental replication [#6381](https://github.com/pingcap/tiflow/issues/6381) @[dveeden](https://github.com/dveeden) + + + TiDB Lightning + + - Optimize the retry mechanism during import to avoid errors caused by leader switching [#44478](https://github.com/pingcap/tidb/pull/44478) @[lance6716](https://github.com/lance6716) + - Verify checksum through SQL after import to improve stability of verification [#41941](https://github.com/pingcap/tidb/issues/41941) @[GMHDBJD](https://github.com/GMHDBJD) + - Optimize TiDB Lightning OOM issues when importing wide tables [#43853](https://github.com/pingcap/tidb/issues/43853) @[D3Hunter](https://github.com/D3Hunter) + +## Bug fixes + ++ TiDB + + - Fix the issue that the query with CTE causes TiDB to hang [#43749](https://github.com/pingcap/tidb/issues/43749) [#36896](https://github.com/pingcap/tidb/issues/36896) @[guo-shaoge](https://github.com/guo-shaoge) + - Fix the issue that the `min, max` query result is incorrect [#43805](https://github.com/pingcap/tidb/issues/43805) @[wshwsh12](https://github.com/wshwsh12) + - Fix the issue that the `SHOW PROCESSLIST` statement cannot display the TxnStart of the 
transaction of the statement with a long subquery time [#40851](https://github.com/pingcap/tidb/issues/40851) @[crazycs520](https://github.com/crazycs520) + - Fix the issue that the stale read global optimization does not take effect due to the lack of `TxnScope` in Coprocessor tasks [#43365](https://github.com/pingcap/tidb/issues/43365) @[you06](https://github.com/you06) + - Fix the issue that follower read does not handle flashback errors before retrying, which causes query errors [#43673](https://github.com/pingcap/tidb/issues/43673) @[you06](https://github.com/you06) + - Fix the issue that data and indexes are inconsistent when the `ON UPDATE` statement does not correctly update the primary key [#44565](https://github.com/pingcap/tidb/issues/44565) @[zyguan](https://github.com/zyguan) + - Modify the upper limit of the `UNIX_TIMESTAMP()` function to `3001-01-19 03:14:07.999999 UTC` to be consistent with that of MySQL 8.0.28 or later versions [#43987](https://github.com/pingcap/tidb/issues/43987) @[YangKeao](https://github.com/YangKeao) + - Fix the issue that adding an index fails in the ingest mode [#44137](https://github.com/pingcap/tidb/issues/44137) @[tangenta](https://github.com/tangenta) + - Fix the issue that canceling a DDL task in the rollback state causes errors in related metadata [#44143](https://github.com/pingcap/tidb/issues/44143) @[wjhuang2016](https://github.com/wjhuang2016) + - Fix the issue that using `memTracker` with cursor fetch causes memory leaks [#44254](https://github.com/pingcap/tidb/issues/44254) @[YangKeao](https://github.com/YangKeao) + - Fix the issue that dropping a database causes slow GC progress [#33069](https://github.com/pingcap/tidb/issues/33069) @[tiancaiamao](https://github.com/tiancaiamao) + - Fix the issue that TiDB returns an error when the corresponding rows in partitioned tables cannot be found in the probe phase of index join [#43686](https://github.com/pingcap/tidb/issues/43686) 
@[AilinKid](https://github.com/AilinKid) @[mjonss](https://github.com/mjonss) + - Fix the issue that there is no warning when using `SUBPARTITION` to create partitioned tables [#41198](https://github.com/pingcap/tidb/issues/41198) [#41200](https://github.com/pingcap/tidb/issues/41200) @[mjonss](https://github.com/mjonss) + - Fix the issue that when a query is killed because it exceeds `MAX_EXECUTION_TIME`, the returned error message is inconsistent with that of MySQL [#43031](https://github.com/pingcap/tidb/issues/43031) @[dveeden](https://github.com/dveeden) + - Fix the issue that the `LEADING` hint does not support querying block aliases [#44645](https://github.com/pingcap/tidb/issues/44645) @[qw4990](https://github.com/qw4990) + - Modify the return type of the `LAST_INSERT_ID()` function from VARCHAR to LONGLONG to be consistent with that of MySQL [#44574](https://github.com/pingcap/tidb/issues/44574) @[Defined2014](https://github.com/Defined2014) + - Fix the issue that incorrect results might be returned when using a common table expression (CTE) in statements with non-correlated subqueries [#44051](https://github.com/pingcap/tidb/issues/44051) @[winoros](https://github.com/winoros) + - Fix the issue that Join Reorder might cause incorrect outer join results [#44314](https://github.com/pingcap/tidb/issues/44314) @[AilinKid](https://github.com/AilinKid) + - Fix the issue that `PREPARE stmt FROM "ANALYZE TABLE xxx"` might be killed by `tidb_mem_quota_query` [#44320](https://github.com/pingcap/tidb/issues/44320) @[chrysan](https://github.com/chrysan) + ++ TiKV + + - Fix the issue that the transaction returns an incorrect value when TiKV handles stale pessimistic lock conflicts [#13298](https://github.com/tikv/tikv/issues/13298) @[cfzjywxk](https://github.com/cfzjywxk) + - Fix the issue that in-memory pessimistic lock might cause flashback failures and data inconsistency [#13303](https://github.com/tikv/tikv/issues/13303) @[JmPotato](https://github.com/JmPotato) + 
- Fix the issue that the fair lock might be incorrect when TiKV handles stale requests [#13298](https://github.com/tikv/tikv/issues/13298) @[cfzjywxk](https://github.com/cfzjywxk) + - Fix the issue that `autocommit` and `point get replica read` might break linearizability [#14715](https://github.com/tikv/tikv/issues/14715) @[cfzjywxk](https://github.com/cfzjywxk) + ++ PD + + - Fix the issue that redundant replicas cannot be automatically repaired in some corner cases [#6573](https://github.com/tikv/pd/issues/6573) @[nolouch](https://github.com/nolouch) + ++ TiFlash + + - Fix the issue that queries might consume more memory than needed when the data on the Join build side is very large and contains many small string type columns [#7416](https://github.com/pingcap/tiflash/issues/7416) @[yibin87](https://github.com/yibin87) + ++ Tools + + + Backup & Restore (BR) + + - Fix the issue that `checksum mismatch` is falsely reported in some cases [#44472](https://github.com/pingcap/tidb/issues/44472) @[Leavrth](https://github.com/Leavrth) + - Fix the issue that `resolved lock timeout` is falsely reported in some cases [#43236](https://github.com/pingcap/tidb/issues/43236) @[YuJuncen](https://github.com/YuJuncen) + - Fix the issue that TiDB might panic when restoring statistics information [#44490](https://github.com/pingcap/tidb/issues/44490) @[tangenta](https://github.com/tangenta) + + + TiCDC + + - Fix the issue that Resolved TS does not advance properly in some cases [#8963](https://github.com/pingcap/tiflow/issues/8963) @[CharlesCheung96](https://github.com/CharlesCheung96) + - Fix the issue that the `UPDATE` operation cannot output old values when the Avro or CSV protocol is used [#9086](https://github.com/pingcap/tiflow/issues/9086) @[3AceShowHand](https://github.com/3AceShowHand) + - Fix the issue of excessive downstream pressure caused by reading downstream metadata too frequently when replicating data to Kafka [#8959](https://github.com/pingcap/tiflow/issues/8959) 
@[hi-rustin](https://github.com/hi-rustin) + - Fix the issue of too many downstream logs caused by frequently setting the downstream bidirectional replication-related variables when replicating data to TiDB or MySQL [#9180](https://github.com/pingcap/tiflow/issues/9180) @[asddongmen](https://github.com/asddongmen) + - Fix the issue that the PD node crashing causes the TiCDC node to restart [#8868](https://github.com/pingcap/tiflow/issues/8868) @[asddongmen](https://github.com/asddongmen) + - Fix the issue that TiCDC cannot create a changefeed with a downstream Kafka-on-Pulsar [#8892](https://github.com/pingcap/tiflow/issues/8892) @[hi-rustin](https://github.com/hi-rustin) + + + TiDB Lightning + + - Fix the TiDB Lightning panic issue when `experimental.allow-expression-index` is enabled and the default value is UUID [#44497](https://github.com/pingcap/tidb/issues/44497) @[lichunzhu](https://github.com/lichunzhu) + - Fix the TiDB Lightning panic issue when a task exits while dividing a data file [#43195](https://github.com/pingcap/tidb/issues/43195) @[lance6716](https://github.com/lance6716) + +## Contributors + +We would like to thank the following contributors from the TiDB community: + +- [asjdf](https://github.com/asjdf) +- [blacktear23](https://github.com/blacktear23) +- [Cavan-xu](https://github.com/Cavan-xu) +- [darraes](https://github.com/darraes) +- [demoManito](https://github.com/demoManito) +- [dhysum](https://github.com/dhysum) +- [HappyUncle](https://github.com/HappyUncle) +- [jiyfhust](https://github.com/jiyfhust) +- [L-maple](https://github.com/L-maple) +- [nyurik](https://github.com/nyurik) +- [SeigeC](https://github.com/SeigeC) +- [tangjingyu97](https://github.com/tangjingyu97) \ No newline at end of file diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/releases/release-notes.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/releases/release-notes.md new file mode 100644 index 00000000..923f3cb2 --- /dev/null +++ 
b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/releases/release-notes.md @@ -0,0 +1,230 @@ +--- +title: Release Notes +aliases: ['/docs/dev/releases/release-notes/','/docs/dev/releases/rn/'] +--- + +# TiDB Release Notes + +## 7.2 + +- [7.2.0-DMR](/releases/release-7.2.0.md): 2023-06-29 + +## 7.1 + +- [7.1.0](/releases/release-7.1.0.md): 2023-05-31 + +## 7.0 + +- [7.0.0-DMR](/releases/release-7.0.0.md): 2023-03-30 + +## 6.6 + +- [6.6.0-DMR](/releases/release-6.6.0.md): 2023-02-20 + +## 6.5 + +- [6.5.3](/releases/release-6.5.3.md): 2023-06-14 +- [6.5.2](/releases/release-6.5.2.md): 2023-04-21 +- [6.5.1](/releases/release-6.5.1.md): 2023-03-10 +- [6.5.0](/releases/release-6.5.0.md): 2022-12-29 + +## 6.4 + +- [6.4.0-DMR](/releases/release-6.4.0.md): 2022-11-17 + +## 6.3 + +- [6.3.0-DMR](/releases/release-6.3.0.md): 2022-09-30 + +## 6.2 + +- [6.2.0-DMR](/releases/release-6.2.0.md): 2022-08-23 + +## 6.1 + +- [6.1.6](/releases/release-6.1.6.md): 2023-04-12 +- [6.1.5](/releases/release-6.1.5.md): 2023-02-28 +- [6.1.4](/releases/release-6.1.4.md): 2023-02-08 +- [6.1.3](/releases/release-6.1.3.md): 2022-12-05 +- [6.1.2](/releases/release-6.1.2.md): 2022-10-24 +- [6.1.1](/releases/release-6.1.1.md): 2022-09-01 +- [6.1.0](/releases/release-6.1.0.md): 2022-06-13 + +## 6.0 + +- [6.0.0-DMR](/releases/release-6.0.0-dmr.md): 2022-04-07 + +## 5.4 + +- [5.4.3](/releases/release-5.4.3.md): 2022-10-13 +- [5.4.2](/releases/release-5.4.2.md): 2022-07-08 +- [5.4.1](/releases/release-5.4.1.md): 2022-05-13 +- [5.4.0](/releases/release-5.4.0.md): 2022-02-15 + +## 5.3 + +- [5.3.4](/releases/release-5.3.4.md): 2022-11-24 +- [5.3.3](/releases/release-5.3.3.md): 2022-09-14 +- [5.3.2](/releases/release-5.3.2.md): 2022-06-29 +- [5.3.1](/releases/release-5.3.1.md): 2022-03-03 +- [5.3.0](/releases/release-5.3.0.md): 2021-11-30 + +## 5.2 + +- [5.2.4](/releases/release-5.2.4.md): 2022-04-26 +- [5.2.3](/releases/release-5.2.3.md): 2021-12-03 +- [5.2.2](/releases/release-5.2.2.md): 
2021-10-29 +- [5.2.1](/releases/release-5.2.1.md): 2021-09-09 +- [5.2.0](/releases/release-5.2.0.md): 2021-08-27 + +## 5.1 + +- [5.1.5](/releases/release-5.1.5.md): 2022-12-28 +- [5.1.4](/releases/release-5.1.4.md): 2022-02-22 +- [5.1.3](/releases/release-5.1.3.md): 2021-12-03 +- [5.1.2](/releases/release-5.1.2.md): 2021-09-27 +- [5.1.1](/releases/release-5.1.1.md): 2021-07-30 +- [5.1.0](/releases/release-5.1.0.md): 2021-06-24 + +## 5.0 + +- [5.0.6](/releases/release-5.0.6.md): 2021-12-31 +- [5.0.5](/releases/release-5.0.5.md): 2021-12-03 +- [5.0.4](/releases/release-5.0.4.md): 2021-09-27 +- [5.0.3](/releases/release-5.0.3.md): 2021-07-02 +- [5.0.2](/releases/release-5.0.2.md): 2021-06-10 +- [5.0.1](/releases/release-5.0.1.md): 2021-04-24 +- [5.0.0](/releases/release-5.0.0.md): 2021-04-07 +- [5.0.0-rc](/releases/release-5.0.0-rc.md): 2021-01-12 + +## 4.0 + +- [4.0.16](/releases/release-4.0.16.md): 2021-12-17 +- [4.0.15](/releases/release-4.0.15.md): 2021-09-27 +- [4.0.14](/releases/release-4.0.14.md): 2021-07-27 +- [4.0.13](/releases/release-4.0.13.md): 2021-05-28 +- [4.0.12](/releases/release-4.0.12.md): 2021-04-02 +- [4.0.11](/releases/release-4.0.11.md): 2021-02-26 +- [4.0.10](/releases/release-4.0.10.md): 2021-01-15 +- [4.0.9](/releases/release-4.0.9.md): 2020-12-21 +- [4.0.8](/releases/release-4.0.8.md): 2020-10-30 +- [4.0.7](/releases/release-4.0.7.md): 2020-09-29 +- [4.0.6](/releases/release-4.0.6.md): 2020-09-15 +- [4.0.5](/releases/release-4.0.5.md): 2020-08-31 +- [4.0.4](/releases/release-4.0.4.md): 2020-07-31 +- [4.0.3](/releases/release-4.0.3.md): 2020-07-24 +- [4.0.2](/releases/release-4.0.2.md): 2020-07-01 +- [4.0.1](/releases/release-4.0.1.md): 2020-06-12 +- [4.0.0](/releases/release-4.0-ga.md): 2020-05-28 +- [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md): 2020-05-15 +- [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md): 2020-04-28 +- [4.0.0-rc](/releases/release-4.0.0-rc.md): 2020-04-08 +- [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md): 2020-03-18 +- 
[4.0.0-beta.1](/releases/release-4.0.0-beta.1.md): 2020-02-28 +- [4.0.0-beta](/releases/release-4.0.0-beta.md): 2020-01-17 + +## 3.1 + +- [3.1.2](/releases/release-3.1.2.md): 2020-06-04 +- [3.1.1](/releases/release-3.1.1.md): 2020-04-30 +- [3.1.0](/releases/release-3.1.0-ga.md): 2020-04-16 +- [3.1.0-rc](/releases/release-3.1.0-rc.md): 2020-04-02 +- [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md): 2020-03-09 +- [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md): 2020-01-10 +- [3.1.0-beta](/releases/release-3.1.0-beta.md): 2019-12-20 + +## 3.0 + +- [3.0.20](/releases/release-3.0.20.md): 2020-12-25 +- [3.0.19](/releases/release-3.0.19.md): 2020-09-25 +- [3.0.18](/releases/release-3.0.18.md): 2020-08-21 +- [3.0.17](/releases/release-3.0.17.md): 2020-08-03 +- [3.0.16](/releases/release-3.0.16.md): 2020-07-03 +- [3.0.15](/releases/release-3.0.15.md): 2020-06-05 +- [3.0.14](/releases/release-3.0.14.md): 2020-05-09 +- [3.0.13](/releases/release-3.0.13.md): 2020-04-22 +- [3.0.12](/releases/release-3.0.12.md): 2020-03-16 +- [3.0.11](/releases/release-3.0.11.md): 2020-03-04 +- [3.0.10](/releases/release-3.0.10.md): 2020-02-20 +- [3.0.9](/releases/release-3.0.9.md): 2020-01-14 +- [3.0.8](/releases/release-3.0.8.md): 2019-12-31 +- [3.0.7](/releases/release-3.0.7.md): 2019-12-04 +- [3.0.6](/releases/release-3.0.6.md): 2019-11-28 +- [3.0.5](/releases/release-3.0.5.md): 2019-10-25 +- [3.0.4](/releases/release-3.0.4.md): 2019-10-08 +- [3.0.3](/releases/release-3.0.3.md): 2019-08-29 +- [3.0.2](/releases/release-3.0.2.md): 2019-08-07 +- [3.0.1](/releases/release-3.0.1.md): 2019-07-16 +- [3.0.0](/releases/release-3.0-ga.md): 2019-06-28 +- [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md): 2019-06-21 +- [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md): 2019-05-28 +- [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md): 2019-05-10 +- [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md): 2019-03-26 +- [3.0.0-beta](/releases/release-3.0-beta.md): 2019-01-19 + +## 2.1 + +- 
[2.1.19](/releases/release-2.1.19.md): 2019-12-27 +- [2.1.18](/releases/release-2.1.18.md): 2019-11-04 +- [2.1.17](/releases/release-2.1.17.md): 2019-09-11 +- [2.1.16](/releases/release-2.1.16.md): 2019-08-15 +- [2.1.15](/releases/release-2.1.15.md): 2019-07-18 +- [2.1.14](/releases/release-2.1.14.md): 2019-07-04 +- [2.1.13](/releases/release-2.1.13.md): 2019-06-21 +- [2.1.12](/releases/release-2.1.12.md): 2019-06-13 +- [2.1.11](/releases/release-2.1.11.md): 2019-06-03 +- [2.1.10](/releases/release-2.1.10.md): 2019-05-22 +- [2.1.9](/releases/release-2.1.9.md): 2019-05-06 +- [2.1.8](/releases/release-2.1.8.md): 2019-04-12 +- [2.1.7](/releases/release-2.1.7.md): 2019-03-28 +- [2.1.6](/releases/release-2.1.6.md): 2019-03-15 +- [2.1.5](/releases/release-2.1.5.md): 2019-02-28 +- [2.1.4](/releases/release-2.1.4.md): 2019-02-15 +- [2.1.3](/releases/release-2.1.3.md): 2019-01-28 +- [2.1.2](/releases/release-2.1.2.md): 2018-12-22 +- [2.1.1](/releases/release-2.1.1.md): 2018-12-12 +- [2.1.0](/releases/release-2.1-ga.md): 2018-11-30 +- [2.1.0-rc.5](/releases/release-2.1-rc.5.md): 2018-11-12 +- [2.1.0-rc.4](/releases/release-2.1-rc.4.md): 2018-10-23 +- [2.1.0-rc.3](/releases/release-2.1-rc.3.md): 2018-09-29 +- [2.1.0-rc.2](/releases/release-2.1-rc.2.md): 2018-09-14 +- [2.1.0-rc.1](/releases/release-2.1-rc.1.md): 2018-08-24 +- [2.1.0-beta](/releases/release-2.1-beta.md): 2018-06-29 + +## 2.0 + +- [2.0.11](/releases/release-2.0.11.md): 2019-01-03 +- [2.0.10](/releases/release-2.0.10.md): 2018-12-18 +- [2.0.9](/releases/release-2.0.9.md): 2018-11-19 +- [2.0.8](/releases/release-2.0.8.md): 2018-10-16 +- [2.0.7](/releases/release-2.0.7.md): 2018-09-07 +- [2.0.6](/releases/release-2.0.6.md): 2018-08-06 +- [2.0.5](/releases/release-2.0.5.md): 2018-07-06 +- [2.0.4](/releases/release-2.0.4.md): 2018-06-15 +- [2.0.3](/releases/release-2.0.3.md): 2018-06-01 +- [2.0.2](/releases/release-2.0.2.md): 2018-05-21 +- [2.0.1](/releases/release-2.0.1.md): 2018-05-16 +- 
[2.0.0](/releases/release-2.0-ga.md): 2018-04-27 +- [2.0.0-rc.5](/releases/release-2.0-rc.5.md): 2018-04-17 +- [2.0.0-rc.4](/releases/release-2.0-rc.4.md): 2018-03-30 +- [2.0.0-rc.3](/releases/release-2.0-rc.3.md): 2018-03-23 +- [2.0.0-rc.1](/releases/release-2.0-rc.1.md): 2018-03-09 +- [1.1.0-beta](/releases/release-1.1-beta.md): 2018-02-24 +- [1.1.0-alpha](/releases/release-1.1-alpha.md): 2018-01-19 + +## 1.0 + +- [1.0.8](/releases/release-1.0.8.md): 2018-02-11 +- [1.0.7](/releases/release-1.0.7.md): 2018-01-22 +- [1.0.6](/releases/release-1.0.6.md): 2018-01-08 +- [1.0.5](/releases/release-1.0.5.md): 2017-12-26 +- [1.0.4](/releases/release-1.0.4.md): 2017-12-11 +- [1.0.3](/releases/release-1.0.3.md): 2017-11-28 +- [1.0.2](/releases/release-1.0.2.md): 2017-11-13 +- [1.0.1](/releases/release-1.0.1.md): 2017-11-01 +- [1.0.0](/releases/release-1.0-ga.md): 2017-10-16 +- [Pre-GA](/releases/release-pre-ga.md): 2017-08-30 +- [rc4](/releases/release-rc.4.md): 2017-08-04 +- [rc3](/releases/release-rc.3.md): 2017-06-16 +- [rc2](/releases/release-rc.2.md): 2017-03-01 +- [rc1](/releases/release-rc.1.md): 2016-12-23 diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/releases/release-timeline.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/releases/release-timeline.md new file mode 100644 index 00000000..4ccf13b1 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/releases/release-timeline.md @@ -0,0 +1,172 @@ +--- +title: TiDB Release Timeline +summary: Learn about the TiDB release timeline. +--- + +# TiDB Release Timeline + +This document shows all the released TiDB versions in reverse chronological order. 
+ +| Version | Release Date | +| :--- | :--- | +| [7.2.0-DMR](/releases/release-7.2.0.md) | 2023-06-29 | +| [6.5.3](/releases/release-6.5.3.md) | 2023-06-14 | +| [7.1.0](/releases/release-7.1.0.md) | 2023-05-31 | +| [6.5.2](/releases/release-6.5.2.md) | 2023-04-21 | +| [6.1.6](/releases/release-6.1.6.md) | 2023-04-12 | +| [7.0.0-DMR](/releases/release-7.0.0.md) | 2023-03-30 | +| [6.5.1](/releases/release-6.5.1.md) | 2023-03-10 | +| [6.1.5](/releases/release-6.1.5.md) | 2023-02-28 | +| [6.6.0-DMR](/releases/release-6.6.0.md) | 2023-02-20 | +| [6.1.4](/releases/release-6.1.4.md) | 2023-02-08 | +| [6.5.0](/releases/release-6.5.0.md) | 2022-12-29 | +| [5.1.5](/releases/release-5.1.5.md) | 2022-12-28 | +| [6.1.3](/releases/release-6.1.3.md) | 2022-12-05 | +| [5.3.4](/releases/release-5.3.4.md) | 2022-11-24 | +| [6.4.0-DMR](/releases/release-6.4.0.md) | 2022-11-17 | +| [6.1.2](/releases/release-6.1.2.md) | 2022-10-24 | +| [5.4.3](/releases/release-5.4.3.md) | 2022-10-13 | +| [6.3.0-DMR](/releases/release-6.3.0.md) | 2022-09-30 | +| [5.3.3](/releases/release-5.3.3.md) | 2022-09-14 | +| [6.1.1](/releases/release-6.1.1.md) | 2022-09-01 | +| [6.2.0-DMR](/releases/release-6.2.0.md) | 2022-08-23 | +| [5.4.2](/releases/release-5.4.2.md) | 2022-07-08 | +| [5.3.2](/releases/release-5.3.2.md) | 2022-06-29 | +| [6.1.0](/releases/release-6.1.0.md) | 2022-06-13 | +| [5.4.1](/releases/release-5.4.1.md) | 2022-05-13 | +| [5.2.4](/releases/release-5.2.4.md) | 2022-04-26 | +| [6.0.0-DMR](/releases/release-6.0.0-dmr.md) | 2022-04-07 | +| [5.3.1](/releases/release-5.3.1.md) | 2022-03-03 | +| [5.1.4](/releases/release-5.1.4.md) | 2022-02-22 | +| [5.4.0](/releases/release-5.4.0.md) | 2022-02-15 | +| [5.0.6](/releases/release-5.0.6.md) | 2021-12-31 | +| [4.0.16](/releases/release-4.0.16.md) | 2021-12-17 | +| [5.1.3](/releases/release-5.1.3.md) | 2021-12-03 | +| [5.0.5](/releases/release-5.0.5.md) | 2021-12-03 | +| [5.2.3](/releases/release-5.2.3.md) | 2021-12-03 | +| 
[5.3.0](/releases/release-5.3.0.md) | 2021-11-30 | +| [5.2.2](/releases/release-5.2.2.md) | 2021-10-29 | +| [5.1.2](/releases/release-5.1.2.md) | 2021-09-27 | +| [5.0.4](/releases/release-5.0.4.md) | 2021-09-27 | +| [4.0.15](/releases/release-4.0.15.md) | 2021-09-27 | +| [5.2.1](/releases/release-5.2.1.md) | 2021-09-09 | +| [5.2.0](/releases/release-5.2.0.md) | 2021-08-27 | +| [5.1.1](/releases/release-5.1.1.md) | 2021-07-30 | +| [4.0.14](/releases/release-4.0.14.md) | 2021-07-27 | +| [5.0.3](/releases/release-5.0.3.md) | 2021-07-02 | +| [5.1.0](/releases/release-5.1.0.md) | 2021-06-24 | +| [5.0.2](/releases/release-5.0.2.md) | 2021-06-10 | +| [4.0.13](/releases/release-4.0.13.md) | 2021-05-28 | +| [5.0.1](/releases/release-5.0.1.md) | 2021-04-24 | +| [5.0.0](/releases/release-5.0.0.md) | 2021-04-07 | +| [4.0.12](/releases/release-4.0.12.md) | 2021-04-02 | +| [4.0.11](/releases/release-4.0.11.md) | 2021-02-26 | +| [4.0.10](/releases/release-4.0.10.md) | 2021-01-15 | +| [5.0.0-rc](/releases/release-5.0.0-rc.md) | 2021-01-12 | +| [3.0.20](/releases/release-3.0.20.md) | 2020-12-25 | +| [4.0.9](/releases/release-4.0.9.md) | 2020-12-21 | +| [4.0.8](/releases/release-4.0.8.md) | 2020-10-30 | +| [4.0.7](/releases/release-4.0.7.md) | 2020-09-29 | +| [3.0.19](/releases/release-3.0.19.md) | 2020-09-25 | +| [4.0.6](/releases/release-4.0.6.md) | 2020-09-15 | +| [4.0.5](/releases/release-4.0.5.md) | 2020-08-31 | +| [3.0.18](/releases/release-3.0.18.md) | 2020-08-21 | +| [3.0.17](/releases/release-3.0.17.md) | 2020-08-03 | +| [4.0.4](/releases/release-4.0.4.md) | 2020-07-31 | +| [4.0.3](/releases/release-4.0.3.md) | 2020-07-24 | +| [3.0.16](/releases/release-3.0.16.md) | 2020-07-03 | +| [4.0.2](/releases/release-4.0.2.md) | 2020-07-01 | +| [4.0.1](/releases/release-4.0.1.md) | 2020-06-12 | +| [3.0.15](/releases/release-3.0.15.md) | 2020-06-05 | +| [3.1.2](/releases/release-3.1.2.md) | 2020-06-04 | +| [4.0.0](/releases/release-4.0-ga.md) | 2020-05-28 | +| 
[4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) | 2020-05-15 | +| [3.0.14](/releases/release-3.0.14.md) | 2020-05-09 | +| [3.1.1](/releases/release-3.1.1.md) | 2020-04-30 | +| [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) | 2020-04-28 | +| [3.0.13](/releases/release-3.0.13.md) | 2020-04-22 | +| [3.1.0](/releases/release-3.1.0-ga.md) | 2020-04-16 | +| [4.0.0-rc](/releases/release-4.0.0-rc.md) | 2020-04-08 | +| [3.1.0-rc](/releases/release-3.1.0-rc.md) | 2020-04-02 | +| [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) | 2020-03-18 | +| [3.0.12](/releases/release-3.0.12.md) | 2020-03-16 | +| [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) | 2020-03-09 | +| [3.0.11](/releases/release-3.0.11.md) | 2020-03-04 | +| [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) | 2020-02-28 | +| [3.0.10](/releases/release-3.0.10.md) | 2020-02-20 | +| [4.0.0-beta](/releases/release-4.0.0-beta.md) | 2020-01-17 | +| [3.0.9](/releases/release-3.0.9.md) | 2020-01-14 | +| [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) | 2020-01-10 | +| [3.0.8](/releases/release-3.0.8.md) | 2019-12-31 | +| [2.1.19](/releases/release-2.1.19.md) | 2019-12-27 | +| [3.1.0-beta](/releases/release-3.1.0-beta.md) | 2019-12-20 | +| [3.0.7](/releases/release-3.0.7.md) | 2019-12-04 | +| [3.0.6](/releases/release-3.0.6.md) | 2019-11-28 | +| [2.1.18](/releases/release-2.1.18.md) | 2019-11-04 | +| [3.0.5](/releases/release-3.0.5.md) | 2019-10-25 | +| [3.0.4](/releases/release-3.0.4.md) | 2019-10-08 | +| [2.1.17](/releases/release-2.1.17.md) | 2019-09-11 | +| [3.0.3](/releases/release-3.0.3.md) | 2019-08-29 | +| [2.1.16](/releases/release-2.1.16.md) | 2019-08-15 | +| [3.0.2](/releases/release-3.0.2.md) | 2019-08-07 | +| [2.1.15](/releases/release-2.1.15.md) | 2019-07-18 | +| [3.0.1](/releases/release-3.0.1.md) | 2019-07-16 | +| [2.1.14](/releases/release-2.1.14.md) | 2019-07-04 | +| [3.0.0](/releases/release-3.0-ga.md) | 2019-06-28 | +| [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) | 2019-06-21 | +| 
[2.1.13](/releases/release-2.1.13.md) | 2019-06-21 | +| [2.1.12](/releases/release-2.1.12.md) | 2019-06-13 | +| [2.1.11](/releases/release-2.1.11.md) | 2019-06-03 | +| [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) | 2019-05-28 | +| [2.1.10](/releases/release-2.1.10.md) | 2019-05-22 | +| [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) | 2019-05-10 | +| [2.1.9](/releases/release-2.1.9.md) | 2019-05-06 | +| [2.1.8](/releases/release-2.1.8.md) | 2019-04-12 | +| [2.1.7](/releases/release-2.1.7.md) | 2019-03-28 | +| [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) | 2019-03-26 | +| [2.1.6](/releases/release-2.1.6.md) | 2019-03-15 | +| [2.1.5](/releases/release-2.1.5.md) | 2019-02-28 | +| [2.1.4](/releases/release-2.1.4.md) | 2019-02-15 | +| [2.1.3](/releases/release-2.1.3.md) | 2019-01-28 | +| [3.0.0-beta](/releases/release-3.0-beta.md) | 2019-01-19 | +| [2.0.11](/releases/release-2.0.11.md) | 2019-01-03 | +| [2.1.2](/releases/release-2.1.2.md) | 2018-12-22 | +| [2.0.10](/releases/release-2.0.10.md) | 2018-12-18 | +| [2.1.1](/releases/release-2.1.1.md) | 2018-12-12 | +| [2.1.0](/releases/release-2.1-ga.md) | 2018-11-30 | +| [2.0.9](/releases/release-2.0.9.md) | 2018-11-19 | +| [2.1.0-rc.5](/releases/release-2.1-rc.5.md) | 2018-11-12 | +| [2.1.0-rc.4](/releases/release-2.1-rc.4.md) | 2018-10-23 | +| [2.0.8](/releases/release-2.0.8.md) | 2018-10-16 | +| [2.1.0-rc.3](/releases/release-2.1-rc.3.md) | 2018-09-29 | +| [2.1.0-rc.2](/releases/release-2.1-rc.2.md) | 2018-09-14 | +| [2.0.7](/releases/release-2.0.7.md) | 2018-09-07 | +| [2.1.0-rc.1](/releases/release-2.1-rc.1.md) | 2018-08-24 | +| [2.0.6](/releases/release-2.0.6.md) | 2018-08-06 | +| [2.0.5](/releases/release-2.0.5.md) | 2018-07-06 | +| [2.1.0-beta](/releases/release-2.1-beta.md) | 2018-06-29 | +| [2.0.4](/releases/release-2.0.4.md) | 2018-06-15 | +| [2.0.3](/releases/release-2.0.3.md) | 2018-06-01 | +| [2.0.2](/releases/release-2.0.2.md) | 2018-05-21 | +| [2.0.1](/releases/release-2.0.1.md) | 2018-05-16 | +| 
[2.0.0](/releases/release-2.0-ga.md) | 2018-04-27 | +| [2.0.0-rc.5](/releases/release-2.0-rc.5.md) | 2018-04-17 | +| [2.0.0-rc.4](/releases/release-2.0-rc.4.md) | 2018-03-30 | +| [2.0.0-rc.3](/releases/release-2.0-rc.3.md) | 2018-03-23 | +| [2.0.0-rc.1](/releases/release-2.0-rc.1.md) | 2018-03-09 | +| [1.1.0-beta](/releases/release-1.1-beta.md) | 2018-02-24 | +| [1.0.8](/releases/release-1.0.8.md) | 2018-02-11 | +| [1.0.7](/releases/release-1.0.7.md) | 2018-01-22 | +| [1.1.0-alpha](/releases/release-1.1-alpha.md) | 2018-01-19 | +| [1.0.6](/releases/release-1.0.6.md) | 2018-01-08 | +| [1.0.5](/releases/release-1.0.5.md) | 2017-12-26 | +| [1.0.4](/releases/release-1.0.4.md) | 2017-12-11 | +| [1.0.3](/releases/release-1.0.3.md) | 2017-11-28 | +| [1.0.2](/releases/release-1.0.2.md) | 2017-11-13 | +| [1.0.1](/releases/release-1.0.1.md) | 2017-11-01 | +| [1.0.0](/releases/release-1.0-ga.md) | 2017-10-16 | +| [Pre-GA](/releases/release-pre-ga.md) | 2017-08-30 | +| [rc4](/releases/release-rc.4.md) | 2017-08-04 | +| [rc3](/releases/release-rc.3.md) | 2017-06-16 | +| [rc2](/releases/release-rc.2.md) | 2017-03-01 | +| [rc1](/releases/release-rc.1.md) | 2016-12-23 | \ No newline at end of file diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/upgrade-tidb-using-tiup.md b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/upgrade-tidb-using-tiup.md new file mode 100644 index 00000000..700c2d41 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidb/release-6.7/upgrade-tidb-using-tiup.md @@ -0,0 +1,292 @@ +--- +title: Upgrade TiDB Using TiUP +summary: Learn how to upgrade TiDB using TiUP. +aliases: ['/docs/dev/upgrade-tidb-using-tiup/','/docs/dev/how-to/upgrade/using-tiup/','/tidb/dev/upgrade-tidb-using-tiup-offline','/docs/dev/upgrade-tidb-using-tiup-offline/'] +--- + +# Upgrade TiDB Using TiUP + +This document is targeted for the following upgrade paths: + +- Upgrade from TiDB 4.0 versions to TiDB 7.1. 
+- Upgrade from TiDB 5.0-5.4 versions to TiDB 7.1. +- Upgrade from TiDB 6.0-6.6 to TiDB 7.1. +- Upgrade from TiDB 7.0 to TiDB 7.1. + +> **Warning:** +> +> 1. You cannot upgrade TiFlash online from versions earlier than 5.3 to 5.3 or later. Instead, you must first stop all the TiFlash instances of the early version, and then upgrade the cluster offline. If other components (such as TiDB and TiKV) do not support an online upgrade, follow the instructions in warnings in [Online upgrade](#online-upgrade). +> 2. **DO NOT** run DDL statements during the upgrade process. Otherwise, the issue of undefined behavior might occur. +> 3. **DO NOT** upgrade a TiDB cluster when a DDL statement is being executed in the cluster (usually for the time-consuming DDL statements such as `ADD INDEX` and the column type changes). Before the upgrade, it is recommended to use the [`ADMIN SHOW DDL`](/sql-statements/sql-statement-admin-show-ddl.md) command to check whether the TiDB cluster has an ongoing DDL job. If the cluster has a DDL job, to upgrade the cluster, wait until the DDL execution is finished or use the [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) command to cancel the DDL job before you upgrade the cluster. +> +> If the TiDB version before upgrade is v7.1.0 or later, you can ignore the preceding warnings 2 and 3. For more information, see [TiDB Smooth Upgrade](/smooth-upgrade-tidb.md). + +> **Note:** +> +> If your cluster to be upgraded is v3.1 or an earlier version (v3.0 or v2.1), the direct upgrade to v7.1.0 is not supported. You need to upgrade your cluster first to v4.0 and then to v7.1.0. + +## Upgrade caveat + +- TiDB currently does not support version downgrade or rolling back to an earlier version after the upgrade. 
+- For the v4.0 cluster managed using TiDB Ansible, you need to import the cluster to TiUP (`tiup cluster`) for new management according to [Upgrade TiDB Using TiUP (v4.0)](https://docs.pingcap.com/tidb/v4.0/upgrade-tidb-using-tiup#import-tidb-ansible-and-the-inventoryini-configuration-to-tiup). Then you can upgrade the cluster to v7.1.0 according to this document. +- To update versions earlier than v3.0 to v7.1.0: + 1. Update this version to 3.0 using [TiDB Ansible](https://docs.pingcap.com/tidb/v3.0/upgrade-tidb-using-ansible). + 2. Use TiUP (`tiup cluster`) to import the TiDB Ansible configuration. + 3. Update the 3.0 version to 4.0 according to [Upgrade TiDB Using TiUP (v4.0)](https://docs.pingcap.com/tidb/v4.0/upgrade-tidb-using-tiup#import-tidb-ansible-and-the-inventoryini-configuration-to-tiup). + 4. Upgrade the cluster to v7.1.0 according to this document. +- Support upgrading the versions of TiDB Binlog, TiCDC, TiFlash, and other components. +- When upgrading TiFlash from versions earlier than v6.3.0 to v6.3.0 and later versions, note that the CPU must support the AVX2 instruction set under the Linux AMD64 architecture and the ARMv8 instruction set architecture under the Linux ARM64 architecture. For details, see the description in [v6.3.0 Release Notes](/releases/release-6.3.0.md#others). +- For detailed compatibility changes of different versions, see the [Release Notes](/releases/release-notes.md) of each version. Modify your cluster configuration according to the "Compatibility Changes" section of the corresponding release notes. +- For clusters that upgrade from versions earlier than v5.3 to v5.3 or later versions, the default deployed Prometheus will upgrade from v2.8.1 to v2.27.1. Prometheus v2.27.1 provides more features and fixes a security issue. Compared with v2.8.1, alert time representation in v2.27.1 is changed. 
For more details, see this [Prometheus commit](https://github.com/prometheus/prometheus/commit/7646cbca328278585be15fa615e22f2a50b47d06).
+ +{{< copyable "shell-regular" >}} + +```shell +tar xzvf tidb-community-server-${version}-linux-amd64.tar.gz +sh tidb-community-server-${version}-linux-amd64/local_install.sh +source /home/tidb/.bash_profile +``` + +After the overwrite upgrade, run the following command to merge the server and toolkit offline mirrors to the server directory: + +{{< copyable "shell-regular" >}} + +```bash +tar xf tidb-community-toolkit-${version}-linux-amd64.tar.gz +ls -ld tidb-community-server-${version}-linux-amd64 tidb-community-toolkit-${version}-linux-amd64 +cd tidb-community-server-${version}-linux-amd64/ +cp -rp keys ~/.tiup/ +tiup mirror merge ../tidb-community-toolkit-${version}-linux-amd64 +``` + +After merging the mirrors, run the following command to upgrade the TiUP Cluster component: + +{{< copyable "shell-regular" >}} + +```shell +tiup update cluster +``` + +Now, the offline mirror has been upgraded successfully. If an error occurs during TiUP operation after the overwriting, it might be that the `manifest` is not updated. You can try `rm -rf ~/.tiup/manifests/*` before running TiUP again. + +### Step 3: Edit TiUP topology configuration file + +> **Note:** +> +> Skip this step if one of the following situations applies: +> +> + You have not modified the configuration parameters of the original cluster. Or you have modified the configuration parameters using `tiup cluster` but no more modification is needed. +> + After the upgrade, you want to use v7.1.0's default parameter values for the unmodified configuration items. + +1. Enter the `vi` editing mode to edit the topology file: + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster edit-config + ``` + +2. Refer to the format of [topology](https://github.com/pingcap/tiup/blob/master/embed/examples/cluster/topology.example.yaml) configuration template and fill the parameters you want to modify in the `server_configs` section of the topology file. + +3. 
After the modification, enter `:` + `w` + `q` to save the change and exit the editing mode. Enter Y to confirm the change. + +> **Note:** +> +> Before you upgrade the cluster to v7.1.0, make sure that the parameters you have modified in v4.0 are compatible in v7.1.0. For details, see [TiKV Configuration File](/tikv-configuration-file.md). + +### Step 4: Check the health status of the current cluster + +To avoid the undefined behaviors or other issues during the upgrade, it is recommended to check the health status of Regions of the current cluster before the upgrade. To do that, you can use the `check` sub-command. + +{{< copyable "shell-regular" >}} + +```shell +tiup cluster check <cluster-name> --cluster +``` + +After the command is executed, the "Region status" check result will be output. + ++ If the result is "All Regions are healthy", all Regions in the current cluster are healthy and you can continue the upgrade. ++ If the result is "Regions are not fully healthy: m miss-peer, n pending-peer" with the "Please fix unhealthy regions before other operations." prompt, some Regions in the current cluster are abnormal. You need to troubleshoot the anomalies until the check result becomes "All Regions are healthy". Then you can continue the upgrade. + +### Step 5: Check the DDL and backup status of the cluster + +To avoid undefined behaviors or other unexpected problems during the upgrade, it is recommended to check the following items before the upgrade. + +- Cluster DDLs: It is recommended to execute the [`ADMIN SHOW DDL`](/sql-statements/sql-statement-admin-show-ddl.md) statement to check whether there is an ongoing DDL job. If yes, wait for its execution or cancel it by executing the [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) statement before performing an upgrade. 
+- Cluster backup: It is recommended to execute the [`SHOW [BACKUPS|RESTORES]`](/sql-statements/sql-statement-show-backups.md) statement to check whether there is an ongoing backup or restore task in the cluster. If yes, wait for its completion before performing an upgrade. + +## Upgrade the TiDB cluster + +This section describes how to upgrade the TiDB cluster and verify the version after the upgrade. + +### Upgrade the TiDB cluster to a specified version + +You can upgrade your cluster in one of the two ways: online upgrade and offline upgrade. + +By default, TiUP Cluster upgrades the TiDB cluster using the online method, which means that the TiDB cluster can still provide services during the upgrade process. With the online method, the leaders are migrated one by one on each node before the upgrade and restart. Therefore, for a large-scale cluster, it takes a long time to complete the entire upgrade operation. + +If your application has a maintenance window for the database to be stopped for maintenance, you can use the offline upgrade method to quickly perform the upgrade operation. + +#### Online upgrade + +{{< copyable "shell-regular" >}} + +```shell +tiup cluster upgrade <cluster-name> <version> +``` + +For example, if you want to upgrade the cluster to v7.1.0: + +{{< copyable "shell-regular" >}} + +```shell +tiup cluster upgrade <cluster-name> v7.1.0 +``` + +> **Note:** +> +> + An online upgrade upgrades all components one by one. During the upgrade of TiKV, all leaders in a TiKV instance are evicted before stopping the instance. The default timeout time is 5 minutes (300 seconds). The instance is directly stopped after this timeout time. +> +> + You can use the `--force` parameter to upgrade the cluster immediately without evicting the leader. However, the errors that occur during the upgrade will be ignored, which means that you are not notified of any upgrade failure. Therefore, use the `--force` parameter with caution. 
+> +> + To keep a stable performance, make sure that all leaders in a TiKV instance are evicted before stopping the instance. You can set `--transfer-timeout` to a larger value, for example, `--transfer-timeout 3600` (unit: second). +> +> + To upgrade TiFlash from versions earlier than 5.3 to 5.3 or later, you should stop TiFlash and then upgrade it. The following steps help you upgrade TiFlash without interrupting other components: +> 1. Stop the TiFlash instance: `tiup cluster stop -R tiflash` +> 2. Upgrade the TiDB cluster without restarting it (only updating the files): `tiup cluster upgrade --offline`, such as `tiup cluster upgrade v6.3.0 --offline` +> 3. Reload the TiDB cluster: `tiup cluster reload `. After the reload, the TiFlash instance is started and you do not need to manually start it. +> +> + Try to avoid creating a new clustered index table when you apply rolling updates to the clusters using TiDB Binlog. + +#### Offline upgrade + +1. Before the offline upgrade, you first need to stop the entire cluster. + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster stop + ``` + +2. Use the `upgrade` command with the `--offline` option to perform the offline upgrade. Fill in the name of your cluster for `` and the version to upgrade to for ``, such as `v7.1.0`. + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster upgrade --offline + ``` + +3. After the upgrade, the cluster will not be automatically restarted. You need to use the `start` command to restart it. + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster start + ``` + +### Verify the cluster version + +Execute the `display` command to view the latest cluster version `TiDB Version`: + +{{< copyable "shell-regular" >}} + +```shell +tiup cluster display +``` + +``` +Cluster type: tidb +Cluster name: +Cluster version: v7.1.0 +``` + +## FAQ + +This section describes common problems encountered when updating the TiDB cluster using TiUP. 
+ +### If an error occurs and the upgrade is interrupted, how to resume the upgrade after fixing this error? + +Re-execute the `tiup cluster upgrade` command to resume the upgrade. The upgrade operation restarts the nodes that have been previously upgraded. If you do not want the upgraded nodes to be restarted, use the `replay` sub-command to retry the operation: + +1. Execute `tiup cluster audit` to see the operation records: + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster audit + ``` + + Find the failed upgrade operation record and keep the ID of this operation record. The ID is the `` value in the next step. + +2. Execute `tiup cluster replay ` to retry the corresponding operation: + + {{< copyable "shell-regular" >}} + + ```shell + tiup cluster replay + ``` + +### The evict leader has waited too long during the upgrade. How to skip this step for a quick upgrade? + +You can specify `--force`. Then the processes of transferring PD leader and evicting TiKV leader are skipped during the upgrade. The cluster is directly restarted to update the version, which has a great impact on the cluster that runs online. In the following command, `` is the version to upgrade to, such as `v7.1.0`. + +{{< copyable "shell-regular" >}} + +```shell +tiup cluster upgrade --force +``` + +### How to update the version of tools such as pd-ctl after upgrading the TiDB cluster? 
+ +You can upgrade the tool version by using TiUP to install the `ctl` component of the corresponding version: + +{{< copyable "shell-regular" >}} + +```shell +tiup install ctl:v7.1.0 +``` diff --git a/test/sync_mult_prs/data/markdown-pages/en/tidbcloud/master/TOC.md b/test/sync_mult_prs/data/markdown-pages/en/tidbcloud/master/TOC.md new file mode 100644 index 00000000..3e740e1a --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidbcloud/master/TOC.md @@ -0,0 +1,420 @@ + + + +- [Docs Home](https://docs.pingcap.com/) +- About TiDB Cloud + - [Why TiDB Cloud](/tidb-cloud/tidb-cloud-intro.md) + - [Architecture](/tidb-cloud/tidb-cloud-intro.md#architecture) + - [High Availability](/tidb-cloud/high-availability-with-multi-az.md) + - [MySQL Compatibility](/mysql-compatibility.md) +- Get Started + - [Try Out TiDB Cloud](/tidb-cloud/tidb-cloud-quickstart.md) + - [Try Out HTAP](/tidb-cloud/tidb-cloud-htap-quickstart.md) + - [Perform a PoC](/tidb-cloud/tidb-cloud-poc.md) +- Develop Applications + - [Overview](/develop/dev-guide-overview.md) + - Quick Start + - [Build a TiDB Developer Cluster](/develop/dev-guide-build-cluster-in-cloud.md) + - [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) + - Build a Simple CRUD App with TiDB + - [Java](/develop/dev-guide-sample-application-java.md) + - [Golang](/develop/dev-guide-sample-application-golang.md) + - Example Applications + - [Build a TiDB Application using Spring Boot](/develop/dev-guide-sample-application-spring-boot.md) + - Connect to TiDB + - [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) + - [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md) + - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) + - Design Database Schema + - [Overview](/develop/dev-guide-schema-design-overview.md) + - [Create a Database](/develop/dev-guide-create-database.md) + - [Create a Table](/develop/dev-guide-create-table.md) + - [Create a Secondary 
Index](/develop/dev-guide-create-secondary-indexes.md) + - Write Data + - [Insert Data](/develop/dev-guide-insert-data.md) + - [Update Data](/develop/dev-guide-update-data.md) + - [Delete Data](/develop/dev-guide-delete-data.md) + - [Prepared Statements](/develop/dev-guide-prepared-statement.md) + - Read Data + - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) + - [Multi-table Join Queries](/develop/dev-guide-join-tables.md) + - [Subquery](/develop/dev-guide-use-subqueries.md) + - [Paginate Results](/develop/dev-guide-paginate-results.md) + - [Views](/develop/dev-guide-use-views.md) + - [Temporary Tables](/develop/dev-guide-use-temporary-tables.md) + - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) + - Read Replica Data + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - Transaction + - [Overview](/develop/dev-guide-transaction-overview.md) + - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) + - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) + - Optimize + - [Overview](/develop/dev-guide-optimize-sql-overview.md) + - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) + - [Best Practices for Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) + - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) + - Other Optimization Methods + - [Avoid Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) + - [Unique Serial Number Generation](/develop/dev-guide-unique-serial-number-generation.md) + - Troubleshoot + - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) + - [Unstable Result 
Set](/develop/dev-guide-unstable-result-set.md) + - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) + - Reference + - [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) + - Guidelines + - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) + - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) + - Cloud Native Development Environment + - [Gitpod](/develop/dev-guide-playground-gitpod.md) +- Manage Cluster + - Plan Your Cluster + - [Select Your Cluster Tier](/tidb-cloud/select-cluster-tier.md) + - [Determine Your TiDB Size](/tidb-cloud/size-your-cluster.md) + - [Create a TiDB Cluster](/tidb-cloud/create-tidb-cluster.md) + - Connect to Your TiDB Cluster + - [Connect via a SQL Client](/tidb-cloud/connect-to-tidb-cluster.md) + - [Connect via SQL Shell](/tidb-cloud/connect-to-tidb-cluster.md#connect-via-sql-shell) + - [Set Up VPC Peering Connections](/tidb-cloud/set-up-vpc-peering-connections.md) + - Use an HTAP Cluster with TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) + - [Read Data from TiFlash](/tiflash/use-tidb-to-read-tiflash.md) + - [Use MPP Mode](/tiflash/use-tiflash-mpp-mode.md) + - [Supported Push-down Calculations](/tiflash/tiflash-supported-pushdown-calculations.md) + - [Compatibility](/tiflash/tiflash-compatibility.md) + - [Scale a TiDB Cluster](/tidb-cloud/scale-tidb-cluster.md) + - [Upgrade a TiDB Cluster](/tidb-cloud/upgrade-tidb-cluster.md) + - [Delete a TiDB Cluster](/tidb-cloud/delete-tidb-cluster.md) + - [Use TiDB Cloud API (Beta)](/tidb-cloud/api-overview.md) +- Migrate Data + - [Import Sample Data](/tidb-cloud/import-sample-data.md) + - Migrate Data into TiDB + - [Configure Amazon S3 Access and GCS Access](/tidb-cloud/config-s3-and-gcs-access.md) + - [Migrate from MySQL-Compatible Databases](/tidb-cloud/migrate-data-into-tidb.md) + - [Migrate Incremental Data from MySQL-Compatible 
Databases](/tidb-cloud/migrate-incremental-data-from-mysql.md) + - [Migrate from Amazon Aurora MySQL in Bulk](/tidb-cloud/migrate-from-aurora-bulk-import.md) + - [Import or Migrate from Amazon S3 or GCS to TiDB Cloud](/tidb-cloud/migrate-from-amazon-s3-or-gcs.md) + - [Import CSV Files from Amazon S3 or GCS into TiDB Cloud](/tidb-cloud/import-csv-files.md) + - [Import Apache Parquet Files from Amazon S3 or GCS into TiDB Cloud](/tidb-cloud/import-parquet-files.md) + - [Troubleshoot Access Denied Errors during Data Import from Amazon S3](/tidb-cloud/troubleshoot-import-access-denied-error.md) + - [Export Data from TiDB](/tidb-cloud/export-data-from-tidb-cloud.md) +- Back Up and Restore + - [Automatic Backup](/tidb-cloud/backup-and-restore.md) + - [Manual Backup](/tidb-cloud/backup-and-restore.md#manual-backup) + - [Restore](/tidb-cloud/backup-and-restore.md#restore) +- Monitor and Alert + - [Overview](/tidb-cloud/monitor-tidb-cluster.md) + - [Built-in Monitoring](/tidb-cloud/built-in-monitoring.md) + - [Built-in Alerting](/tidb-cloud/monitor-built-in-alerting.md) + - Third-Party Monitoring Integrations + - [Datadog Integration](/tidb-cloud/monitor-datadog-integration.md) + - [Prometheus and Grafana Integration](/tidb-cloud/monitor-prometheus-and-grafana-integration.md) +- Tune Performance + - [Overview](/tidb-cloud/tidb-cloud-tune-performance-overview.md) + - Analyze Performance + - [Statement Analysis](/tidb-cloud/tune-performance.md) + - [Key Visualizer](/tidb-cloud/tune-performance.md#key-visualizer) + - [Statement Summary Tables](/statement-summary-tables.md) + - SQL Tuning + - [Overview](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) + - Understanding the Query Execution Plan + - [Overview](/explain-overview.md) + - [`EXPLAIN` Walkthrough](/explain-walkthrough.md) + - [Indexes](/explain-indexes.md) + - [Joins](/explain-joins.md) + - [MPP Queries](/explain-mpp.md) + - [Subqueries](/explain-subqueries.md) + - [Aggregation](/explain-aggregation.md) + - 
[Views](/explain-views.md) + - [Partitions](/explain-partitions.md) + - SQL Optimization Process + - [Overview](/sql-optimization-concepts.md) + - Logic Optimization + - [Overview](/sql-logical-optimization.md) + - [Subquery Related Optimizations](/subquery-optimization.md) + - [Column Pruning](/column-pruning.md) + - [Decorrelation of Correlated Subquery](/correlated-subquery-optimization.md) + - [Eliminate Max/Min](/max-min-eliminate.md) + - [Predicates Push Down](/predicate-push-down.md) + - [Partition Pruning](/partition-pruning.md) + - [TopN and Limit Push Down](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - Physical Optimization + - [Overview](/sql-physical-optimization.md) + - [Index Selection](/choose-index.md) + - [Statistics](/statistics.md) + - [Wrong Index Solution](/wrong-index-solution.md) + - [Distinct Optimization](/agg-distinct-optimization.md) + - [Prepare Execution Plan Cache](/sql-prepared-plan-cache.md) + - Control Execution Plans + - [Overview](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [SQL Plan Management](/sql-plan-management.md) + - [The Blocklist of Optimization Rules and Expression Pushdown](/blocklist-control-plan.md) + - [TiKV Follower Read](/follower-read.md) + - [Coprocessor Cache](/coprocessor-cache.md) + - Garbage Collection (GC) + - [Overview](/garbage-collection-overview.md) + - [Configuration](/garbage-collection-configuration.md) + - [Tune TiFlash performance](/tiflash/tune-tiflash-performance.md) +- Manage User Access + - [Manage Console User Access](/tidb-cloud/manage-user-access.md) + - [Configure Cluster Security Settings](/tidb-cloud/configure-security-settings.md) +- Billing + - [Node Cost](/tidb-cloud/tidb-cloud-billing.md) + - [Backup Storage Cost](/tidb-cloud/tidb-cloud-billing.md#backup-storage-cost) + - [Data Transfer Cost](/tidb-cloud/tidb-cloud-billing.md#data-transfer-cost) + - [Invoices](/tidb-cloud/tidb-cloud-billing.md#invoices) + - [Billing 
Details](/tidb-cloud/tidb-cloud-billing.md#billing-details) + - [Credits](/tidb-cloud/tidb-cloud-billing.md#credits) + - [Payment Method Setting](/tidb-cloud/tidb-cloud-billing.md#payment-method) +- Reference + - TiDB Cluster Architecture + - [Overview](/tidb-architecture.md) + - [Storage](/tidb-storage.md) + - [Computing](/tidb-computing.md) + - [Scheduling](/tidb-scheduling.md) + - [TiDB Cloud Cluster Limits and Quotas](/tidb-cloud/limitations-and-quotas.md) + - [TiDB Limitations](/tidb-limitations.md) + - SQL + - [Explore SQL with TiDB](/basic-sql-operations.md) + - SQL Language Structure and Syntax + - Attributes + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [Literal Values](/literal-values.md) + - [Schema Object Names](/schema-object-names.md) + - [Keywords and Reserved Words](/keywords.md) + - [User-Defined Variables](/user-defined-variables.md) + - [Expression Syntax](/expression-syntax.md) + - [Comment Syntax](/comment-syntax.md) + - SQL Statements + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN SHOW DDL [JOBS|QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`ALTER TABLE`](/sql-statements/sql-statement-alter-table.md) + - [`ALTER TABLE COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - 
[`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CHANGE DRAINER`](/sql-statements/sql-statement-change-drainer.md) + - [`CHANGE PUMP`](/sql-statements/sql-statement-change-pump.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - 
[`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT `](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT `](/sql-statements/sql-statement-grant-role.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] `](/sql-statements/sql-statement-set-variable.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW 
BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW [FULL] COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW DRAINER STATUS`](/sql-statements/sql-statement-show-drainer-status.md) + - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW [FULL] FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW INDEX [FROM|IN]`](/sql-statements/sql-statement-show-index.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW KEYS [FROM|IN]`](/sql-statements/sql-statement-show-keys.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PLUGINS`](/sql-statements/sql-statement-show-plugins.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW [FULL] PROCESSSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW PUMP STATUS`](/sql-statements/sql-statement-show-pump-status.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-histograms.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW 
STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW [FULL] TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`SHUTDOWN`](/sql-statements/sql-statement-shutdown.md) + - [`SPLIT REGION`](/sql-statements/sql-statement-split-region.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - Data Types + - [Overview](/data-type-overview.md) + - [Default Values](/data-type-default-values.md) + - [Numeric Types](/data-type-numeric.md) + - [Date and Time Types](/data-type-date-and-time.md) + - [String Types](/data-type-string.md) + - [JSON Type](/data-type-json.md) + - Functions and Operators + - [Overview](/functions-and-operators/functions-and-operators-overview.md) + - [Type Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [Operators](/functions-and-operators/operators.md) + - [Control Flow Functions](/functions-and-operators/control-flow-functions.md) + - [String Functions](/functions-and-operators/string-functions.md) + - [Numeric Functions and Operators](/functions-and-operators/numeric-functions-and-operators.md) + - [Date and Time Functions](/functions-and-operators/date-and-time-functions.md) + - [Bit Functions and 
Operators](/functions-and-operators/bit-functions-and-operators.md) + - [Cast Functions and Operators](/functions-and-operators/cast-functions-and-operators.md) + - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) + - [Locking Functions](/functions-and-operators/locking-functions.md) + - [Information Functions](/functions-and-operators/information-functions.md) + - [JSON Functions](/functions-and-operators/json-functions.md) + - [Aggregate (GROUP BY) Functions](/functions-and-operators/aggregate-group-by-functions.md) + - [Window Functions](/functions-and-operators/window-functions.md) + - [Miscellaneous Functions](/functions-and-operators/miscellaneous-functions.md) + - [Precision Math](/functions-and-operators/precision-math.md) + - [Set Operations](/functions-and-operators/set-operators.md) + - [List of Expressions for Pushdown](/functions-and-operators/expressions-pushed-down.md) + - [TiDB Specific Functions](/functions-and-operators/tidb-functions.md) + - [Clustered Indexes](/clustered-indexes.md) + - [Constraints](/constraints.md) + - [Generated Columns](/generated-columns.md) + - [SQL Mode](/sql-mode.md) + - [Table Attributes](/table-attributes.md) + - Transactions + - [Overview](/transaction-overview.md) + - [Isolation Levels](/transaction-isolation-levels.md) + - [Optimistic Transactions](/optimistic-transaction.md) + - [Pessimistic Transactions](/pessimistic-transaction.md) + - [Non-Transactional DML Statements](/non-transactional-dml.md) + - [Views](/views.md) + - [Partitioning](/partitioned-table.md) + - [Temporary Tables](/temporary-tables.md) + - [Cached Tables](/cached-tables.md) + - Character Set and Collation + - [Overview](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - Read Historical Data + - Use Stale Read (Recommended) + - [Usage Scenarios of Stale Read](/stale-read.md) + - [Perform Stale Read Using `As OF TIMESTAMP`](/as-of-timestamp.md) + - [Perform Stale Read 
Using `tidb_read_staleness`](/tidb-read-staleness.md) + - [Use the `tidb_snapshot` System Variable](/read-historical-data.md) + - System Tables + - [`mysql`](/mysql-schema.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`SLOW_QUERY`](/information-schema/information-schema-slow-query.md) + - 
[`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_HOT_REGIONS_HISTORY`](/information-schema/information-schema-tidb-hot-regions-history.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_SERVERS_INFO`](/information-schema/information-schema-tidb-servers-info.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIKV_REGION_PEERS`](/information-schema/information-schema-tikv-region-peers.md) + - [`TIKV_REGION_STATUS`](/information-schema/information-schema-tikv-region-status.md) + - [`TIKV_STORE_STATUS`](/information-schema/information-schema-tikv-store-status.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - [System Variables](/system-variables.md) + - [API Reference](https://docs.pingcap.com/tidbcloud/api/v1beta) + - Storage Engines + - TiKV + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Dumpling](/dumpling-overview.md) + - [Table Filter](/table-filter.md) + - [Troubleshoot Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) +- [FAQs](/tidb-cloud/tidb-cloud-faq.md) +- Release Notes + - [2022](/tidb-cloud/release-notes-2022.md) + - [2021](/tidb-cloud/release-notes-2021.md) + - [2020](/tidb-cloud/release-notes-2020.md) +- [Support](/tidb-cloud/tidb-cloud-support.md) +- [Glossary](/tidb-cloud/tidb-cloud-glossary.md) diff --git 
a/test/sync_mult_prs/data/markdown-pages/en/tidbcloud/master/tidb-cloud/api-overview.md b/test/sync_mult_prs/data/markdown-pages/en/tidbcloud/master/tidb-cloud/api-overview.md new file mode 100644 index 00000000..7416ae07 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/en/tidbcloud/master/tidb-cloud/api-overview.md @@ -0,0 +1,36 @@ +--- +title: TiDB Cloud API Overview +summary: Learn about what is TiDB Cloud API, its features, and how to use API to manage your TiDB Cloud clusters. +--- + +# TiDB Cloud API Overview Beta + +> **Note:** +> +> TiDB Cloud API is still in beta and only available upon request. You can apply for API access by submitting a request: +> +> - Click **Help** in the lower-right corner of TiDB Cloud console. +> - In the dialog, fill in "Apply for TiDB Cloud API" in the **Description** field and click **Send**. +> +> You will receive an email for notification when the API is available for you. + +The TiDB Cloud API is a [REST interface](https://en.wikipedia.org/wiki/Representational_state_transfer) that provides you with programmatic access to manage administrative objects within TiDB Cloud. Through this API, you can manage resources automatically and efficiently: + +* Projects +* Clusters +* Backups +* Restores + +The API has the following features: + +- **JSON entities.** All entities are expressed in JSON. +- **HTTPS-only.** You can only access the API via HTTPS, ensuring all the data sent over the network is encrypted with TLS. +- **Key-based access and digest authentication.** Before you access TiDB Cloud API, you must generate an API key. All requests are authenticated through [HTTP Digest Authentication](https://en.wikipedia.org/wiki/Digest_access_authentication), ensuring the API key is never sent over the network. 
+ +To start using TiDB Cloud API, refer to the following resources: + +- [Get Started](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Get-Started) +- [Authentication](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Authentication) +- [Rate Limiting](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Rate-Limiting) +- [API Full References](https://docs.pingcap.com/tidbcloud/api/v1beta#tag/Project) +- [Changelog](https://docs.pingcap.com/tidbcloud/api/v1beta#section/API-Changelog) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/TOC.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/TOC.md new file mode 100644 index 00000000..a26a5e4d --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/TOC.md @@ -0,0 +1,214 @@ + + + +- [TiDB on Kubernetes 文档](https://docs.pingcap.com/zh/tidb-in-kubernetes/dev) +- 关于 TiDB Operator + - [简介](tidb-operator-overview.md) + - [v1.5 新特性](whats-new-in-v1.5.md) +- [快速上手](get-started.md) +- 部署 + - 自托管的 Kubernetes + - [集群环境要求](prerequisites.md) + - [配置 Storage Class](configure-storage-class.md) + - [部署 TiDB Operator](deploy-tidb-operator.md) + - [配置 TiDB 集群](configure-a-tidb-cluster.md) + - [部署 TiDB 集群](deploy-on-general-kubernetes.md) + - [初始化 TiDB 集群](initialize-a-cluster.md) + - [访问 TiDB 集群](access-tidb.md) + - 公有云的 Kubernetes + - [Amazon EKS](deploy-on-aws-eks.md) + - [Google Cloud GKE](deploy-on-gcp-gke.md) + - [Azure AKS](deploy-on-azure-aks.md) + - [阿里云 ACK](deploy-on-alibaba-cloud.md) + - [在 ARM64 机器上部署 TiDB 集群](deploy-cluster-on-arm64.md) + - [部署 TiDB HTAP 存储引擎 TiFlash](deploy-tiflash.md) + - 跨多个 Kubernetes 集群部署 TiDB 集群 + - [构建多个网络互通的 AWS EKS 集群](build-multi-aws-eks.md) + - [构建多个网络互通的 GKE 集群](build-multi-gcp-gke.md) + - [跨多个 Kubernetes 集群部署 TiDB 集群](deploy-tidb-cluster-across-multiple-kubernetes.md) + - [部署 TiDB 异构集群](deploy-heterogeneous-tidb-cluster.md) + - [部署增量数据同步工具 TiCDC](deploy-ticdc.md) + - [部署 Binlog 
收集工具](deploy-tidb-binlog.md) +- 监控与告警 + - [部署 TiDB 集群监控与告警](monitor-a-tidb-cluster.md) + - [使用 TiDB Dashboard 监控诊断 TiDB 集群](access-dashboard.md) + - [聚合多个 TiDB 集群的监控数据](aggregate-multiple-cluster-monitor-data.md) + - [跨多个 Kubernetes 集群监控 TiDB 集群](deploy-tidb-monitor-across-multiple-kubernetes.md) + - [开启 TidbMonitor 动态配置](enable-monitor-dynamic-configuration.md) + - [开启 TidbMonitor 分片功能](enable-monitor-shards.md) +- 数据迁移 + - [导入集群数据](restore-data-using-tidb-lightning.md) + - 从 MySQL 迁移 + - [部署 DM](deploy-tidb-dm.md) + - [使用 DM 迁移 MySQL 数据到 TiDB 集群](use-tidb-dm.md) + - [迁移 TiDB 至 Kubernetes](migrate-tidb-to-kubernetes.md) +- 运维管理 + - 安全 + - [为 MySQL 客户端开启 TLS](enable-tls-for-mysql-client.md) + - [为 TiDB 组件间开启 TLS](enable-tls-between-components.md) + - [为 DM 组件开启 TLS](enable-tls-for-dm.md) + - [同步数据到开启 TLS 的下游服务](enable-tls-for-ticdc-sink.md) + - [更新和替换 TLS 证书](renew-tls-certificate.md) + - [以非 root 用户运行](containers-run-as-non-root-user.md) + - [扩缩容](scale-a-tidb-cluster.md) + - 升级 + - [升级 TiDB 集群](upgrade-a-tidb-cluster.md) + - 升级 TiDB Operator + - [常规升级](upgrade-tidb-operator.md) + - [灰度升级](canary-upgrade-tidb-operator.md) + - 备份与恢复 + - [备份与恢复简介](backup-restore-overview.md) + - [备份与恢复 CR 介绍](backup-restore-cr.md) + - [远程存储访问授权](grant-permissions-to-remote-storage.md) + - 使用 Amazon S3 兼容的存储 + - [使用 BR 备份 TiDB 集群数据到兼容 S3 的存储](backup-to-aws-s3-using-br.md) + - [使用 BR 恢复 S3 兼容存储上的备份数据](restore-from-aws-s3-using-br.md) + - [使用 Dumpling 备份 TiDB 集群数据到兼容 S3 的存储](backup-to-s3.md) + - [使用 TiDB Lightning 恢复 S3 兼容存储上的备份数据](restore-from-s3.md) + - 使用 Google Cloud Storage + - [使用 BR 备份 TiDB 集群数据到 GCS](backup-to-gcs-using-br.md) + - [使用 BR 恢复 GCS 上的备份数据](restore-from-gcs-using-br.md) + - [使用 Dumpling 备份 TiDB 集群数据到 GCS](backup-to-gcs.md) + - [使用 TiDB Lightning 恢复 GCS 上的备份数据](restore-from-gcs.md) + - 使用 Azure Blob Storage + - [使用 BR 备份 TiDB 集群数据到 Azblob](backup-to-azblob-using-br.md) + - [使用 BR 恢复 Azblob 上的备份数据](restore-from-azblob-using-br.md) + - 使用持久卷 + - [使用 BR 备份 TiDB 
集群数据到持久卷](backup-to-pv-using-br.md) + - [使用 BR 恢复持久卷上的备份数据](restore-from-pv-using-br.md) + - 基于快照的备份和恢复 + - [功能架构](volume-snapshot-backup-restore.md) + - [基于 EBS 快照备份 TiDB 集群](backup-to-aws-s3-by-snapshot.md) + - [基于 EBS 快照恢复 TiDB 集群](restore-from-aws-s3-by-snapshot.md) + - [基于 EBS 卷快照备份恢复的性能介绍](backup-restore-snapshot-perf.md) + - [基于 EBS 快照备份恢复的常见问题](backup-restore-faq.md) + - 运维 + - [重启 TiDB 集群](restart-a-tidb-cluster.md) + - [销毁 TiDB 集群](destroy-a-tidb-cluster.md) + - [查看 TiDB 日志](view-logs.md) + - [修改 TiDB 集群配置](modify-tidb-configuration.md) + - [配置集群故障自动转移](use-auto-failover.md) + - [暂停 TiDB 集群同步](pause-sync-of-tidb-cluster.md) + - [挂起 TiDB 集群](suspend-tidb-cluster.md) + - [使用多套 TiDB Operator 单独管理不同的 TiDB 集群](deploy-multiple-tidb-operator.md) + - [维护 TiDB 集群所在的 Kubernetes 节点](maintain-a-kubernetes-node.md) + - [从 Helm 2 迁移到 Helm 3](migrate-to-helm3.md) + - 为 TiDB 集群更换节点 + - [更换云存储节点](replace-nodes-for-cloud-disk.md) + - [更换本地存储节点](replace-nodes-for-local-disk.md) + - 灾难恢复 + - [恢复误删的 TiDB 集群](recover-deleted-cluster.md) + - [恢复 PD 集群](pd-recover.md) +- 故障诊断 + - [使用技巧](tips.md) + - [部署错误](deploy-failures.md) + - [集群异常](exceptions.md) + - [网络问题](network-issues.md) + - [使用 PingCAP Clinic 诊断 TiDB 集群](clinic-user-guide.md) +- [常见问题](faq.md) +- 参考 + - 架构 + - [TiDB Operator 架构](architecture.md) + - [TiDB Scheduler 扩展调度器](tidb-scheduler.md) + - [增强型 StatefulSet 控制器](advanced-statefulset.md) + - [准入控制器](enable-admission-webhook.md) + - [Sysbench 性能测试](benchmark-sysbench.md) + - [API 参考文档](https://github.com/pingcap/tidb-operator/blob/master/docs/api-references/docs.md) + - [Cheat Sheet](cheat-sheet.md) + - [TiDB Operator RBAC 规则](tidb-operator-rbac.md) + - 工具 + - [tkctl](use-tkctl.md) + - [TiDB Toolkit](tidb-toolkit.md) + - 配置 + - [tidb-drainer chart 配置](configure-tidb-binlog-drainer.md) + - [日志收集](logs-collection.md) + - [Kubernetes 监控与告警](monitor-kubernetes.md) + - [PingCAP Clinic 数据采集范围说明](clinic-data-collection.md) +- 版本发布历史 + - v1.5 + - [1.5 
GA](releases/release-1.5.0.md) + - [1.5.0-beta.1](releases/release-1.5.0-beta.1.md) + - v1.4 + - [1.4.5](releases/release-1.4.5.md) + - [1.4.4](releases/release-1.4.4.md) + - [1.4.3](releases/release-1.4.3.md) + - [1.4.2](releases/release-1.4.2.md) + - [1.4.1](releases/release-1.4.1.md) + - [1.4 GA](releases/release-1.4.0.md) + - [1.4.0-beta.3](releases/release-1.4.0-beta.3.md) + - [1.4.0-beta.2](releases/release-1.4.0-beta.2.md) + - [1.4.0-beta.1](releases/release-1.4.0-beta.1.md) + - [1.4.0-alpha.1](releases/release-1.4.0-alpha.1.md) + - v1.3 + - [1.3.10](releases/release-1.3.10.md) + - [1.3.9](releases/release-1.3.9.md) + - [1.3.8](releases/release-1.3.8.md) + - [1.3.7](releases/release-1.3.7.md) + - [1.3.6](releases/release-1.3.6.md) + - [1.3.5](releases/release-1.3.5.md) + - [1.3.4](releases/release-1.3.4.md) + - [1.3.3](releases/release-1.3.3.md) + - [1.3.2](releases/release-1.3.2.md) + - [1.3.1](releases/release-1.3.1.md) + - [1.3 GA](releases/release-1.3.0.md) + - [1.3.0-beta.1](releases/release-1.3.0-beta.1.md) + - v1.2 + - [1.2.7](releases/release-1.2.7.md) + - [1.2.6](releases/release-1.2.6.md) + - [1.2.5](releases/release-1.2.5.md) + - [1.2.4](releases/release-1.2.4.md) + - [1.2.3](releases/release-1.2.3.md) + - [1.2.2](releases/release-1.2.2.md) + - [1.2.1](releases/release-1.2.1.md) + - [1.2 GA](releases/release-1.2.0.md) + - [1.2.0-rc.2](releases/release-1.2.0-rc.2.md) + - [1.2.0-rc.1](releases/release-1.2.0-rc.1.md) + - [1.2.0-beta.2](releases/release-1.2.0-beta.2.md) + - [1.2.0-beta.1](releases/release-1.2.0-beta.1.md) + - [1.2.0-alpha.1](releases/release-1.2.0-alpha.1.md) + - v1.1 + - [1.1.15](releases/release-1.1.15.md) + - [1.1.14](releases/release-1.1.14.md) + - [1.1.13](releases/release-1.1.13.md) + - [1.1.12](releases/release-1.1.12.md) + - [1.1.11](releases/release-1.1.11.md) + - [1.1.10](releases/release-1.1.10.md) + - [1.1.9](releases/release-1.1.9.md) + - [1.1.8](releases/release-1.1.8.md) + - [1.1.7](releases/release-1.1.7.md) + - 
[1.1.6](releases/release-1.1.6.md) + - [1.1.5](releases/release-1.1.5.md) + - [1.1.4](releases/release-1.1.4.md) + - [1.1.3](releases/release-1.1.3.md) + - [1.1.2](releases/release-1.1.2.md) + - [1.1.1](releases/release-1.1.1.md) + - [1.1 GA](releases/release-1.1-ga.md) + - [1.1.0-rc.4](releases/release-1.1.0-rc.4.md) + - [1.1.0-rc.3](releases/release-1.1.0-rc.3.md) + - [1.1.0-rc.2](releases/release-1.1.0-rc.2.md) + - [1.1.0-rc.1](releases/release-1.1.0-rc.1.md) + - [1.1.0-beta.2](releases/release-1.1.0-beta.2.md) + - [1.1.0-beta.1](releases/release-1.1.0-beta.1.md) + - v1.0 + - [1.0.7](releases/release-1.0.7.md) + - [1.0.6](releases/release-1.0.6.md) + - [1.0.5](releases/release-1.0.5.md) + - [1.0.4](releases/release-1.0.4.md) + - [1.0.3](releases/release-1.0.3.md) + - [1.0.2](releases/release-1.0.2.md) + - [1.0.1](releases/release-1.0.1.md) + - [1.0 GA](releases/release-1.0-ga.md) + - [1.0.0-rc.1](releases/release-1.0.0-rc.1.md) + - [1.0.0-beta.3](releases/release-1.0.0-beta.3.md) + - [1.0.0-beta.2](releases/release-1.0.0-beta.2.md) + - [1.0.0-beta.1-p2](releases/release-1.0.0-beta.1-p2.md) + - [1.0.0-beta.1-p1](releases/release-1.0.0-beta.1-p1.md) + - [1.0.0-beta.1](releases/release-1.0.0-beta.1.md) + - [1.0.0-beta.0](releases/release-1.0.0-beta.0.md) + - v0 + - [0.4.0](releases/release-0.4.0.md) + - [0.3.1](releases/release-0.3.1.md) + - [0.3.0](releases/release-0.3.0.md) + - [0.2.1](releases/release-0.2.1.md) + - [0.2.0](releases/release-0.2.0.md) + - [0.1.0](releases/release-0.1.0.md) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md new file mode 100644 index 00000000..deab93b7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md @@ -0,0 +1,205 @@ +--- +title: 远程存储访问授权 +summary: 介绍如何授权访问远程存储。 +--- + +# 
远程存储访问授权 + +本文详细描述了如何授权访问远程存储,以实现备份 TiDB 集群数据到远程存储或从远程存储恢复备份数据到 TiDB 集群。 + +## AWS 账号授权 + +在 AWS 云环境中,不同的类型的 Kubernetes 集群提供了不同的权限授予方式。本文分别介绍以下三种权限授予配置方式。 + +### 通过 AccessKey 和 SecretKey 授权 + +AWS 的客户端支持读取进程环境变量中的 `AWS_ACCESS_KEY_ID` 以及 `AWS_SECRET_ACCESS_KEY` 来获取与之相关联的用户或者角色的权限。 + +创建 `s3-secret` secret,在以下命令中使用 AWS 账号的 AccessKey 和 SecretKey 进行授权。该 secret 存放用于访问 S3 兼容存储的凭证。 + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic s3-secret --from-literal=access_key=xxx --from-literal=secret_key=yyy --namespace=test1 +``` + +### 通过 IAM 绑定 Pod 授权 + +通过将用户的 [IAM](https://aws.amazon.com/cn/iam/) 角色与所运行的 Pod 资源进行绑定,使 Pod 中运行的进程获得角色所拥有的权限,这种授权方式是由 [`kube2iam`](https://github.com/jtblin/kube2iam) 提供。 + +> **注意:** +> +> - 使用该授权模式时,可以参考 [kube2iam 文档](https://github.com/jtblin/kube2iam#usage)在 Kubernetes 集群中创建 kube2iam 环境,并且部署 TiDB Operator 以及 TiDB 集群。 +> - 该模式不适用于 [`hostNetwork`](https://kubernetes.io/docs/concepts/policy/pod-security-policy) 网络模式,请确保参数 `spec.tikv.hostNetwork` 的值为 `false`。 + +1. 创建 IAM 角色: + + 可以参考 [AWS 官方文档](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html)来为账号创建一个 IAM 角色,并且通过 [AWS 官方文档](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html)为 IAM 角色赋予需要的权限。由于 `Backup` 需要访问 AWS 的 S3 存储,所以这里给 IAM 赋予了 `AmazonS3FullAccess` 的权限。 + + 如果是进行基于 AWS Elastic Block Store (EBS) 快照的备份和恢复,除完整的 S3 权限 `AmazonS3FullAccess` 外,还需要以下权限: + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + +2. 
绑定 IAM 到 TiKV Pod: + + 在使用 BR 备份的过程中,TiKV Pod 和 BR Pod 一样需要对 S3 存储进行读写操作,所以这里需要给 TiKV Pod 打上 annotation 来绑定 IAM 角色。 + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"annotations":{"iam.amazonaws.com/role":"arn:aws:iam::123456789012:role/user"}}}}' + ``` + + 等到 TiKV Pod 重启后,查看 Pod 是否加上了这个 annotation。 + +> **注意:** +> +> `arn:aws:iam::123456789012:role/user` 为步骤 1 中创建的 IAM 角色。 + +### 通过 IAM 绑定 ServiceAccount 授权 + +通过将用户的 [IAM](https://aws.amazon.com/cn/iam/) 角色与 Kubernetes 中的 [`serviceAccount`](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#serviceaccount) 资源进行绑定,从而使得使用该 ServiceAccount 账号的 Pod 都拥有该角色所拥有的权限,这种授权方式由 [`EKS Pod Identity Webhook`](https://github.com/aws/amazon-eks-pod-identity-webhook) 服务提供。 + +使用该授权模式时,可以参考 [AWS 官方文档](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/create-cluster.html)创建 EKS 集群,并且部署 TiDB Operator 以及 TiDB 集群。 + +1. 在集群上为服务帐户启用 IAM 角色: + + 可以参考 [AWS 官方文档](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html)开启所在的 EKS 集群的 IAM 角色授权。 + +2. 创建 IAM 角色: + + 可以参考 [AWS 官方文档](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html)创建一个 IAM 角色,为角色赋予 `AmazonS3FullAccess` 的权限,并且编辑角色的 `Trust relationships`,赋予 tidb-backup-manager 使用此 IAM 角色的权限。 + + 如果是进行基于 AWS EBS 快照的备份和恢复,除完整的 S3 权限 `AmazonS3FullAccess` 外,还需要以下权限: + + {{< copyable "shell-regular" >}} + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + + 同时编辑角色的 `Trust relationships`,赋予 tidb-controller-manager 使用此 IAM 角色的权限。 + +3. 
绑定 IAM 到 ServiceAccount 资源上: + + {{< copyable "shell-regular" >}} + + ```shell + kubectl annotate sa tidb-backup-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=test1 + ``` + + 如果是进行基于 AWS EBS 快照的备份和恢复,需要绑定 IAM 到 tidb-controller-manager 的 ServiceAccount 上: + + ```shell + kubectl annotate sa tidb-controller-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=tidb-admin + ``` + + 重启 TiDB Operator 的 tidb-controller-manager Pod,使配置的 ServiceAccount 生效。 + +4. 将 ServiceAccount 绑定到 TiKV Pod: + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"serviceAccount": "tidb-backup-manager"}}}' + ``` + + 将 `spec.tikv.serviceAccount` 修改为 tidb-backup-manager,等到 TiKV Pod 重启后,查看 Pod 的 `serviceAccountName` 是否有变化。 + +> **注意:** +> +> `arn:aws:iam::123456789012:role/user` 为步骤 2 中创建的 IAM 角色。 + +## GCS 账号授权 + +### 通过服务账号密钥授权 + +创建 `gcs-secret` secret。该 secret 存放用于访问 GCS 的凭证。`google-credentials.json` 文件存放用户从 Google Cloud console 上下载的 service account key。具体操作参考 [Google Cloud 官方文档](https://cloud.google.com/docs/authentication/getting-started)。 + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic gcs-secret --from-file=credentials=./google-credentials.json -n test1 +``` + +## Azure 账号授权 + +在 Azure 云环境中,不同的类型的 Kubernetes 集群提供了不同的权限授予方式。本文分别介绍以下两种权限授予配置方式。 + +### 通过访问密钥授权 + +Azure 的客户端支持读取进程环境变量中的 `AZURE_STORAGE_ACCOUNT` 以及 `AZURE_STORAGE_KEY` 来获取与之相关联的用户或者角色的权限。 + +创建 `azblob-secret` secret,在以下命令中使用 Azure 账号的访问密钥进行授权。该 secret 存放用于访问 Azure Blob Storage 的凭证。 + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic azblob-secret --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_STORAGE_KEY=yyy --namespace=test1 +``` + +### 通过 Azure AD 授权 + +Azure 的客户端支持读取进程环境变量中的 `AZURE_STORAGE_ACCOUNT`、`AZURE_CLIENT_ID`、`AZURE_TENANT_ID`、`AZURE_CLIENT_SECRET` 来获取与之相关联的用户或者角色的权限。 + +1. 
创建 `azblob-secret-ad` secret,在以下命令中使用 Azure 账号的 AD 进行授权。该 secret 存放用于访问 Azure Blob Storage 的凭证。 + + {{< copyable "shell-regular" >}} + + ```shell + kubectl create secret generic azblob-secret-ad --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_CLIENT_ID=yyy --from-literal=AZURE_TENANT_ID=zzz --from-literal=AZURE_CLIENT_SECRET=aaa --namespace=test1 + ``` + +2. 绑定 secret 到 TiKV Pod: + + 在使用 BR 备份的过程中,TiKV Pod 和 BR Pod 一样需要对 Azure Blob Storage 进行读写操作,所以这里需要给 TiKV Pod 绑定 secret。 + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"envFrom":[{"secretRef":{"name":"azblob-secret-ad"}}]}}}' + ``` + + 等到 TiKV Pod 重启后,查看 Pod 是否加上了这些环境变量。 diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/releases/release-1.5.0.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/releases/release-1.5.0.md new file mode 100644 index 00000000..6804a9a2 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/releases/release-1.5.0.md @@ -0,0 +1,41 @@ +--- +title: TiDB Operator 1.5.0 Release Notes +summary: 了解 TiDB Operator 1.5.0 版本的新功能、优化提升,以及 Bug 修复。 +--- + +# TiDB Operator 1.5.0 Release Notes + +发布日期: 2023 年 8 月 4 日 + +TiDB Operator 版本:1.5.0 + +## 滚动升级改动 + +由于 [#5075](https://github.com/pingcap/tidb-operator/pull/5075) 的改动,如果 TiDB v7.1.0 或以上版本的集群中部署了 TiFlash,升级 TiDB Operator 到 v1.5.0 之后 TiFlash 组件会滚动升级。 + +## 新功能 + +- 新增 BR Federation Manager 组件,支持跨多个 Kubernetes 集群编排 `Backup` 和 `Restore` custom resources (CR) ([#4996](https://github.com/pingcap/tidb-operator/pull/4996), [@csuzhangxc](https://github.com/csuzhangxc)) +- 支持使用 `VolumeBackup` CR 对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的备份 ([#5013](https://github.com/pingcap/tidb-operator/pull/5013), [@WangLe1321](https://github.com/WangLe1321)) +- 支持使用 `VolumeRestore` CR 对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的恢复 ([#5039](https://github.com/pingcap/tidb-operator/pull/5039), 
[@WangLe1321](https://github.com/WangLe1321)) +- 支持使用 `VolumeBackupSchedule` CR 对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的自动备份 ([#5036](https://github.com/pingcap/tidb-operator/pull/5036), [@BornChanger](https://github.com/BornChanger)) +- 当对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的备份时,支持备份与 `TidbCluster` 相关的 CR 数据 ([#5207](https://github.com/pingcap/tidb-operator/pull/5207), [@WangLe1321](https://github.com/WangLe1321)) + +## 优化提升 + +- 为 DM master 添加 `startUpScriptVersion` 字段,支持设置启动脚本的版本 ([#4971](https://github.com/pingcap/tidb-operator/pull/4971), [@hanlins](https://github.com/hanlins)) +- 为 DmCluster、TidbDashboard、TidbMonitor 以及 TidbNGMonitoring 增加 `spec.preferIPv6` 支持 ([#4977](https://github.com/pingcap/tidb-operator/pull/4977), [@KanShiori](https://github.com/KanShiori)) +- 支持为 TiKV 驱逐 leader 和 PD 转移 leader 设置过期时间 ([#4997](https://github.com/pingcap/tidb-operator/pull/4997), [@Tema](https://github.com/Tema)) +- 支持为 `TidbInitializer` 设置 tolerations ([#5047](https://github.com/pingcap/tidb-operator/pull/5047), [@csuzhangxc](https://github.com/csuzhangxc)) +- 支持为 PD 设置启动超时时间 ([#5071](https://github.com/pingcap/tidb-operator/pull/5071), [@oliviachenairbnb](https://github.com/oliviachenairbnb)) +- 当 TiKV 在扩展 PVC 的大小时,不再执行驱逐 leader 操作,避免因磁盘容量不足而造成驱逐卡住 ([#5101](https://github.com/pingcap/tidb-operator/pull/5101), [@csuzhangxc](https://github.com/csuzhangxc)) +- 支持更新 PD、TiKV、TiFlash、TiProxy、DM-Master 与 DM-worker 组件 Service 的 annotation 与 label ([#4973](https://github.com/pingcap/tidb-operator/pull/4973), [@wxiaomou](https://github.com/wxiaomou)) +- 默认启用 volume resize,支持对 PV 的扩容 ([#5167](https://github.com/pingcap/tidb-operator/pull/5167), [@liubog2008](https://github.com/liubog2008)) + +## Bug 修复 + +- 修复升级 TiKV 时由于部分 store 下线而造成 quorum 丢失的问题 ([#4979](https://github.com/pingcap/tidb-operator/pull/4979), [@Tema](https://github.com/Tema)) +- 修复升级 PD 时由于部分 member 下线而造成 quorum 丢失的问题 ([#4995](https://github.com/pingcap/tidb-operator/pull/4995), [@Tema](https://github.com/Tema)) 
+- 修复 TiDB Operator 在未配置任何 Kubernetes 集群级别权限时 panic 的问题 ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), [@liubog2008](https://github.com/liubog2008)) +- 修复在 `TidbCluster` CR 中设置 `AdditionalVolumeMounts` 时 TiDB Operator 可能 panic 的问题 ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), [@liubog2008](https://github.com/liubog2008)) +- 修复 `TidbDashboard` CR 在使用自定义的 image registry 时解析 `baseImage` 错误的问题 ([#5014](https://github.com/pingcap/tidb-operator/pull/5014), [@linkinghack](https://github.com/linkinghack)) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/tidb-operator-overview.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/tidb-operator-overview.md new file mode 100644 index 00000000..fb34f208 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/tidb-operator-overview.md @@ -0,0 +1,71 @@ +--- +title: TiDB Operator 简介 +summary: 介绍 TiDB Operator 的整体架构及使用方式。 +aliases: ['/docs-cn/tidb-in-kubernetes/dev/tidb-operator-overview/'] +--- + +# TiDB Operator 简介 + +[TiDB Operator](https://github.com/pingcap/tidb-operator) 是 Kubernetes 上的 TiDB 集群自动运维系统,提供包括部署、升级、扩缩容、备份恢复、配置变更的 TiDB 全生命周期管理。借助 TiDB Operator,TiDB 可以无缝运行在公有云或自托管的 Kubernetes 集群上。 + +TiDB Operator 与适用的 TiDB 版本的对应关系如下: + +| TiDB 版本 | 适用的 TiDB Operator 版本 | +|:---|:---| +| dev | dev | +| TiDB >= 7.1 | 1.5(推荐),1.4 | +| 6.5 <= TiDB < 7.1 | 1.5, 1.4(推荐),1.3 | +| 5.4 <= TiDB < 6.5 | 1.4, 1.3(推荐) | +| 5.1 <= TiDB < 5.4 | 1.4,1.3(推荐),1.2 | +| 3.0 <= TiDB < 5.1 | 1.4,1.3(推荐),1.2,1.1 | +| 2.1 <= TiDB < v3.0| 1.0(停止维护) | + +## 使用 TiDB Operator 管理 TiDB 集群 + +TiDB Operator 提供了多种方式来部署 Kubernetes 上的 TiDB 集群: + ++ 测试环境: + + - [kind](get-started.md#方法一使用-kind-创建-kubernetes-集群) + - [Minikube](get-started.md#方法二使用-minikube-创建-kubernetes-集群) + - [Google Cloud 
Shell](https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https://github.com/pingcap/docs-tidb-operator&cloudshell_tutorial=zh/deploy-tidb-from-kubernetes-gke.md) + ++ 生产环境: + + - 在公有云上部署生产可用的 TiDB 集群并进行后续的运维管理; + + - [在 AWS EKS 上部署 TiDB 集群](deploy-on-aws-eks.md) + - [在 Google Cloud GKE 上部署 TiDB 集群](deploy-on-gcp-gke.md) + - [在 Azure AKS 上部署 TiDB 集群](deploy-on-azure-aks.md) + - [在阿里云 ACK 上部署 TiDB 集群](deploy-on-alibaba-cloud.md) + + - 在自托管的 Kubernetes 集群中部署 TiDB 集群: + + 首先按照[部署 TiDB Operator](deploy-tidb-operator.md)在集群中安装 TiDB Operator,再根据[在标准 Kubernetes 集群上部署 TiDB 集群](deploy-on-general-kubernetes.md)来部署你的 TiDB 集群。对于生产级 TiDB 集群,你还需要参考 [TiDB 集群环境要求](prerequisites.md)调整 Kubernetes 集群配置并根据[本地 PV 配置](configure-storage-class.md#本地-pv-配置)为你的 Kubernetes 集群配置本地 PV,以满足 TiKV 的低延迟本地存储需求。 + +在任何环境上部署前,都可以参考 [TiDB 集群配置](configure-a-tidb-cluster.md)来自定义 TiDB 配置。 + +部署完成后,你可以参考下面的文档进行 Kubernetes 上 TiDB 集群的使用和运维: + ++ [部署 TiDB 集群](deploy-on-general-kubernetes.md) ++ [访问 TiDB 集群](access-tidb.md) ++ [TiDB 集群扩缩容](scale-a-tidb-cluster.md) ++ [TiDB 集群升级](upgrade-a-tidb-cluster.md) ++ [TiDB 集群配置变更](configure-a-tidb-cluster.md) ++ [TiDB 集群备份与恢复](backup-restore-overview.md) ++ [配置 TiDB 集群故障自动转移](use-auto-failover.md) ++ [监控 TiDB 集群](monitor-a-tidb-cluster.md) ++ [查看 TiDB 日志](view-logs.md) ++ [维护 TiDB 所在的 Kubernetes 节点](maintain-a-kubernetes-node.md) + +当集群出现问题需要进行诊断时,你可以: + ++ 查阅 [Kubernetes 上的 TiDB FAQ](faq.md) 寻找是否存在现成的解决办法; ++ 参考 [Kubernetes 上的 TiDB 故障诊断](tips.md)解决故障。 + +Kubernetes 上的 TiDB 提供了专用的命令行工具 `tkctl` 用于集群管理和辅助诊断,同时,在 Kubernetes 上,TiDB 的部分生态工具的使用方法也有所不同,你可以: + ++ 参考 [`tkctl` 使用指南](use-tkctl.md) 来使用 `tkctl`; ++ 参考 [Kubernetes 上的 TiDB 相关工具使用指南](tidb-toolkit.md)来了解 TiDB 生态工具在 Kubernetes 上的使用方法。 + +最后,当 TiDB Operator 发布新版本时,你可以参考[升级 TiDB Operator](upgrade-tidb-operator.md) 进行版本更新。 diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/whats-new-in-v1.5.md 
b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/whats-new-in-v1.5.md new file mode 100644 index 00000000..595d35c8 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/master/whats-new-in-v1.5.md @@ -0,0 +1,31 @@ +--- +title: TiDB Operator v1.5 新特性 +summary: 了解 TiDB Operator 1.5.0 版本引入的新特性。 +--- + +# TiDB Operator v1.5 新特性 + +TiDB Operator v1.5 引入了以下关键特性,从扩展性、易用性等方面帮助你更轻松地管理 TiDB 集群及其周边工具。 + +## 兼容性改动 + +如需使用在 [#4959](https://github.com/pingcap/tidb-operator/pull/4959) 中引入的 `PreferDualStack` 特性(通过 `spec.preferIPv6: true` 启用),Kubernetes 版本需要大于等于 v1.20。 + +## 滚动升级改动 + +由于 [#5075](https://github.com/pingcap/tidb-operator/pull/5075) 的改动,如果 TiDB v7.1.0 或以上版本的集群中部署了 TiFlash,升级 TiDB Operator 到 v1.5.0 之后 TiFlash 组件会滚动升级。 + +## 扩展性 + +- 支持通过 `bootstrapSQLConfigMapName` 字段指定 TiDB 首次启动时所执行的初始 SQL 文件。 +- 支持通过配置 `spec.preferIPv6: true` 为所有组件的 Service 的 `ipFamilyPolicy` 配置 `PreferDualStack`。 +- 支持使用 [Advanced StatefulSet](advanced-statefulset.md) 管理 TiCDC 和 TiProxy。 +- 新增 BR Federation Manager 组件,支持对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS snapshot 的备份恢复。 + +## 易用性 + +- 支持通过为 PD Pod 加上 `tidb.pingcap.com/pd-transfer-leader` annotation 来优雅重启 PD Pod。 +- 支持通过为 TiDB Pod 加上 `tidb.pingcap.com/tidb-graceful-shutdown` annotation 来优雅重启 TiDB Pod。 +- 允许用户自定义策略来重启失败的备份任务,以提高备份的稳定性。 +- 添加与 reconciler 和 worker queue 相关的监控指标以提高可观测性。 +- 添加统计协调流程失败计数的监控指标以提高可观测性。 diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/TOC.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/TOC.md new file mode 100644 index 00000000..a26a5e4d --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/TOC.md @@ -0,0 +1,214 @@ + + + +- [TiDB on Kubernetes 文档](https://docs.pingcap.com/zh/tidb-in-kubernetes/dev) +- 关于 TiDB Operator + - [简介](tidb-operator-overview.md) + - [v1.5 新特性](whats-new-in-v1.5.md) +- [快速上手](get-started.md) +- 部署 + - 自托管的 Kubernetes + - [集群环境要求](prerequisites.md) + 
- [配置 Storage Class](configure-storage-class.md) + - [部署 TiDB Operator](deploy-tidb-operator.md) + - [配置 TiDB 集群](configure-a-tidb-cluster.md) + - [部署 TiDB 集群](deploy-on-general-kubernetes.md) + - [初始化 TiDB 集群](initialize-a-cluster.md) + - [访问 TiDB 集群](access-tidb.md) + - 公有云的 Kubernetes + - [Amazon EKS](deploy-on-aws-eks.md) + - [Google Cloud GKE](deploy-on-gcp-gke.md) + - [Azure AKS](deploy-on-azure-aks.md) + - [阿里云 ACK](deploy-on-alibaba-cloud.md) + - [在 ARM64 机器上部署 TiDB 集群](deploy-cluster-on-arm64.md) + - [部署 TiDB HTAP 存储引擎 TiFlash](deploy-tiflash.md) + - 跨多个 Kubernetes 集群部署 TiDB 集群 + - [构建多个网络互通的 AWS EKS 集群](build-multi-aws-eks.md) + - [构建多个网络互通的 GKE 集群](build-multi-gcp-gke.md) + - [跨多个 Kubernetes 集群部署 TiDB 集群](deploy-tidb-cluster-across-multiple-kubernetes.md) + - [部署 TiDB 异构集群](deploy-heterogeneous-tidb-cluster.md) + - [部署增量数据同步工具 TiCDC](deploy-ticdc.md) + - [部署 Binlog 收集工具](deploy-tidb-binlog.md) +- 监控与告警 + - [部署 TiDB 集群监控与告警](monitor-a-tidb-cluster.md) + - [使用 TiDB Dashboard 监控诊断 TiDB 集群](access-dashboard.md) + - [聚合多个 TiDB 集群的监控数据](aggregate-multiple-cluster-monitor-data.md) + - [跨多个 Kubernetes 集群监控 TiDB 集群](deploy-tidb-monitor-across-multiple-kubernetes.md) + - [开启 TidbMonitor 动态配置](enable-monitor-dynamic-configuration.md) + - [开启 TidbMonitor 分片功能](enable-monitor-shards.md) +- 数据迁移 + - [导入集群数据](restore-data-using-tidb-lightning.md) + - 从 MySQL 迁移 + - [部署 DM](deploy-tidb-dm.md) + - [使用 DM 迁移 MySQL 数据到 TiDB 集群](use-tidb-dm.md) + - [迁移 TiDB 至 Kubernetes](migrate-tidb-to-kubernetes.md) +- 运维管理 + - 安全 + - [为 MySQL 客户端开启 TLS](enable-tls-for-mysql-client.md) + - [为 TiDB 组件间开启 TLS](enable-tls-between-components.md) + - [为 DM 组件开启 TLS](enable-tls-for-dm.md) + - [同步数据到开启 TLS 的下游服务](enable-tls-for-ticdc-sink.md) + - [更新和替换 TLS 证书](renew-tls-certificate.md) + - [以非 root 用户运行](containers-run-as-non-root-user.md) + - [扩缩容](scale-a-tidb-cluster.md) + - 升级 + - [升级 TiDB 集群](upgrade-a-tidb-cluster.md) + - 升级 TiDB Operator + - [常规升级](upgrade-tidb-operator.md) + - 
[灰度升级](canary-upgrade-tidb-operator.md) + - 备份与恢复 + - [备份与恢复简介](backup-restore-overview.md) + - [备份与恢复 CR 介绍](backup-restore-cr.md) + - [远程存储访问授权](grant-permissions-to-remote-storage.md) + - 使用 Amazon S3 兼容的存储 + - [使用 BR 备份 TiDB 集群数据到兼容 S3 的存储](backup-to-aws-s3-using-br.md) + - [使用 BR 恢复 S3 兼容存储上的备份数据](restore-from-aws-s3-using-br.md) + - [使用 Dumpling 备份 TiDB 集群数据到兼容 S3 的存储](backup-to-s3.md) + - [使用 TiDB Lightning 恢复 S3 兼容存储上的备份数据](restore-from-s3.md) + - 使用 Google Cloud Storage + - [使用 BR 备份 TiDB 集群数据到 GCS](backup-to-gcs-using-br.md) + - [使用 BR 恢复 GCS 上的备份数据](restore-from-gcs-using-br.md) + - [使用 Dumpling 备份 TiDB 集群数据到 GCS](backup-to-gcs.md) + - [使用 TiDB Lightning 恢复 GCS 上的备份数据](restore-from-gcs.md) + - 使用 Azure Blob Storage + - [使用 BR 备份 TiDB 集群数据到 Azblob](backup-to-azblob-using-br.md) + - [使用 BR 恢复 Azblob 上的备份数据](restore-from-azblob-using-br.md) + - 使用持久卷 + - [使用 BR 备份 TiDB 集群数据到持久卷](backup-to-pv-using-br.md) + - [使用 BR 恢复持久卷上的备份数据](restore-from-pv-using-br.md) + - 基于快照的备份和恢复 + - [功能架构](volume-snapshot-backup-restore.md) + - [基于 EBS 快照备份 TiDB 集群](backup-to-aws-s3-by-snapshot.md) + - [基于 EBS 快照恢复 TiDB 集群](restore-from-aws-s3-by-snapshot.md) + - [基于 EBS 卷快照备份恢复的性能介绍](backup-restore-snapshot-perf.md) + - [基于 EBS 快照备份恢复的常见问题](backup-restore-faq.md) + - 运维 + - [重启 TiDB 集群](restart-a-tidb-cluster.md) + - [销毁 TiDB 集群](destroy-a-tidb-cluster.md) + - [查看 TiDB 日志](view-logs.md) + - [修改 TiDB 集群配置](modify-tidb-configuration.md) + - [配置集群故障自动转移](use-auto-failover.md) + - [暂停 TiDB 集群同步](pause-sync-of-tidb-cluster.md) + - [挂起 TiDB 集群](suspend-tidb-cluster.md) + - [使用多套 TiDB Operator 单独管理不同的 TiDB 集群](deploy-multiple-tidb-operator.md) + - [维护 TiDB 集群所在的 Kubernetes 节点](maintain-a-kubernetes-node.md) + - [从 Helm 2 迁移到 Helm 3](migrate-to-helm3.md) + - 为 TiDB 集群更换节点 + - [更换云存储节点](replace-nodes-for-cloud-disk.md) + - [更换本地存储节点](replace-nodes-for-local-disk.md) + - 灾难恢复 + - [恢复误删的 TiDB 集群](recover-deleted-cluster.md) + - [恢复 PD 集群](pd-recover.md) +- 故障诊断 + - [使用技巧](tips.md) + - 
[部署错误](deploy-failures.md) + - [集群异常](exceptions.md) + - [网络问题](network-issues.md) + - [使用 PingCAP Clinic 诊断 TiDB 集群](clinic-user-guide.md) +- [常见问题](faq.md) +- 参考 + - 架构 + - [TiDB Operator 架构](architecture.md) + - [TiDB Scheduler 扩展调度器](tidb-scheduler.md) + - [增强型 StatefulSet 控制器](advanced-statefulset.md) + - [准入控制器](enable-admission-webhook.md) + - [Sysbench 性能测试](benchmark-sysbench.md) + - [API 参考文档](https://github.com/pingcap/tidb-operator/blob/master/docs/api-references/docs.md) + - [Cheat Sheet](cheat-sheet.md) + - [TiDB Operator RBAC 规则](tidb-operator-rbac.md) + - 工具 + - [tkctl](use-tkctl.md) + - [TiDB Toolkit](tidb-toolkit.md) + - 配置 + - [tidb-drainer chart 配置](configure-tidb-binlog-drainer.md) + - [日志收集](logs-collection.md) + - [Kubernetes 监控与告警](monitor-kubernetes.md) + - [PingCAP Clinic 数据采集范围说明](clinic-data-collection.md) +- 版本发布历史 + - v1.5 + - [1.5 GA](releases/release-1.5.0.md) + - [1.5.0-beta.1](releases/release-1.5.0-beta.1.md) + - v1.4 + - [1.4.5](releases/release-1.4.5.md) + - [1.4.4](releases/release-1.4.4.md) + - [1.4.3](releases/release-1.4.3.md) + - [1.4.2](releases/release-1.4.2.md) + - [1.4.1](releases/release-1.4.1.md) + - [1.4 GA](releases/release-1.4.0.md) + - [1.4.0-beta.3](releases/release-1.4.0-beta.3.md) + - [1.4.0-beta.2](releases/release-1.4.0-beta.2.md) + - [1.4.0-beta.1](releases/release-1.4.0-beta.1.md) + - [1.4.0-alpha.1](releases/release-1.4.0-alpha.1.md) + - v1.3 + - [1.3.10](releases/release-1.3.10.md) + - [1.3.9](releases/release-1.3.9.md) + - [1.3.8](releases/release-1.3.8.md) + - [1.3.7](releases/release-1.3.7.md) + - [1.3.6](releases/release-1.3.6.md) + - [1.3.5](releases/release-1.3.5.md) + - [1.3.4](releases/release-1.3.4.md) + - [1.3.3](releases/release-1.3.3.md) + - [1.3.2](releases/release-1.3.2.md) + - [1.3.1](releases/release-1.3.1.md) + - [1.3 GA](releases/release-1.3.0.md) + - [1.3.0-beta.1](releases/release-1.3.0-beta.1.md) + - v1.2 + - [1.2.7](releases/release-1.2.7.md) + - [1.2.6](releases/release-1.2.6.md) + 
- [1.2.5](releases/release-1.2.5.md) + - [1.2.4](releases/release-1.2.4.md) + - [1.2.3](releases/release-1.2.3.md) + - [1.2.2](releases/release-1.2.2.md) + - [1.2.1](releases/release-1.2.1.md) + - [1.2 GA](releases/release-1.2.0.md) + - [1.2.0-rc.2](releases/release-1.2.0-rc.2.md) + - [1.2.0-rc.1](releases/release-1.2.0-rc.1.md) + - [1.2.0-beta.2](releases/release-1.2.0-beta.2.md) + - [1.2.0-beta.1](releases/release-1.2.0-beta.1.md) + - [1.2.0-alpha.1](releases/release-1.2.0-alpha.1.md) + - v1.1 + - [1.1.15](releases/release-1.1.15.md) + - [1.1.14](releases/release-1.1.14.md) + - [1.1.13](releases/release-1.1.13.md) + - [1.1.12](releases/release-1.1.12.md) + - [1.1.11](releases/release-1.1.11.md) + - [1.1.10](releases/release-1.1.10.md) + - [1.1.9](releases/release-1.1.9.md) + - [1.1.8](releases/release-1.1.8.md) + - [1.1.7](releases/release-1.1.7.md) + - [1.1.6](releases/release-1.1.6.md) + - [1.1.5](releases/release-1.1.5.md) + - [1.1.4](releases/release-1.1.4.md) + - [1.1.3](releases/release-1.1.3.md) + - [1.1.2](releases/release-1.1.2.md) + - [1.1.1](releases/release-1.1.1.md) + - [1.1 GA](releases/release-1.1-ga.md) + - [1.1.0-rc.4](releases/release-1.1.0-rc.4.md) + - [1.1.0-rc.3](releases/release-1.1.0-rc.3.md) + - [1.1.0-rc.2](releases/release-1.1.0-rc.2.md) + - [1.1.0-rc.1](releases/release-1.1.0-rc.1.md) + - [1.1.0-beta.2](releases/release-1.1.0-beta.2.md) + - [1.1.0-beta.1](releases/release-1.1.0-beta.1.md) + - v1.0 + - [1.0.7](releases/release-1.0.7.md) + - [1.0.6](releases/release-1.0.6.md) + - [1.0.5](releases/release-1.0.5.md) + - [1.0.4](releases/release-1.0.4.md) + - [1.0.3](releases/release-1.0.3.md) + - [1.0.2](releases/release-1.0.2.md) + - [1.0.1](releases/release-1.0.1.md) + - [1.0 GA](releases/release-1.0-ga.md) + - [1.0.0-rc.1](releases/release-1.0.0-rc.1.md) + - [1.0.0-beta.3](releases/release-1.0.0-beta.3.md) + - [1.0.0-beta.2](releases/release-1.0.0-beta.2.md) + - [1.0.0-beta.1-p2](releases/release-1.0.0-beta.1-p2.md) + - 
[1.0.0-beta.1-p1](releases/release-1.0.0-beta.1-p1.md) + - [1.0.0-beta.1](releases/release-1.0.0-beta.1.md) + - [1.0.0-beta.0](releases/release-1.0.0-beta.0.md) + - v0 + - [0.4.0](releases/release-0.4.0.md) + - [0.3.1](releases/release-0.3.1.md) + - [0.3.0](releases/release-0.3.0.md) + - [0.2.1](releases/release-0.2.1.md) + - [0.2.0](releases/release-0.2.0.md) + - [0.1.0](releases/release-0.1.0.md) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/grant-permissions-to-remote-storage.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/grant-permissions-to-remote-storage.md new file mode 100644 index 00000000..deab93b7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/grant-permissions-to-remote-storage.md @@ -0,0 +1,205 @@ +--- +title: 远程存储访问授权 +summary: 介绍如何授权访问远程存储。 +--- + +# 远程存储访问授权 + +本文详细描述了如何授权访问远程存储,以实现备份 TiDB 集群数据到远程存储或从远程存储恢复备份数据到 TiDB 集群。 + +## AWS 账号授权 + +在 AWS 云环境中,不同的类型的 Kubernetes 集群提供了不同的权限授予方式。本文分别介绍以下三种权限授予配置方式。 + +### 通过 AccessKey 和 SecretKey 授权 + +AWS 的客户端支持读取进程环境变量中的 `AWS_ACCESS_KEY_ID` 以及 `AWS_SECRET_ACCESS_KEY` 来获取与之相关联的用户或者角色的权限。 + +创建 `s3-secret` secret,在以下命令中使用 AWS 账号的 AccessKey 和 SecretKey 进行授权。该 secret 存放用于访问 S3 兼容存储的凭证。 + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic s3-secret --from-literal=access_key=xxx --from-literal=secret_key=yyy --namespace=test1 +``` + +### 通过 IAM 绑定 Pod 授权 + +通过将用户的 [IAM](https://aws.amazon.com/cn/iam/) 角色与所运行的 Pod 资源进行绑定,使 Pod 中运行的进程获得角色所拥有的权限,这种授权方式是由 [`kube2iam`](https://github.com/jtblin/kube2iam) 提供。 + +> **注意:** +> +> - 使用该授权模式时,可以参考 [kube2iam 文档](https://github.com/jtblin/kube2iam#usage)在 Kubernetes 集群中创建 kube2iam 环境,并且部署 TiDB Operator 以及 TiDB 集群。 +> - 该模式不适用于 [`hostNetwork`](https://kubernetes.io/docs/concepts/policy/pod-security-policy) 网络模式,请确保参数 `spec.tikv.hostNetwork` 的值为 `false`。 + +1. 
创建 IAM 角色: + + 可以参考 [AWS 官方文档](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html)来为账号创建一个 IAM 角色,并且通过 [AWS 官方文档](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html)为 IAM 角色赋予需要的权限。由于 `Backup` 需要访问 AWS 的 S3 存储,所以这里给 IAM 赋予了 `AmazonS3FullAccess` 的权限。 + + 如果是进行基于 AWS Elastic Block Store (EBS) 快照的备份和恢复,除完整的 S3 权限 `AmazonS3FullAccess` 外,还需要以下权限: + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + +2. 绑定 IAM 到 TiKV Pod: + + 在使用 BR 备份的过程中,TiKV Pod 和 BR Pod 一样需要对 S3 存储进行读写操作,所以这里需要给 TiKV Pod 打上 annotation 来绑定 IAM 角色。 + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"annotations":{"iam.amazonaws.com/role":"arn:aws:iam::123456789012:role/user"}}}}' + ``` + + 等到 TiKV Pod 重启后,查看 Pod 是否加上了这个 annotation。 + +> **注意:** +> +> `arn:aws:iam::123456789012:role/user` 为步骤 1 中创建的 IAM 角色。 + +### 通过 IAM 绑定 ServiceAccount 授权 + +通过将用户的 [IAM](https://aws.amazon.com/cn/iam/) 角色与 Kubernetes 中的 [`serviceAccount`](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#serviceaccount) 资源进行绑定,从而使得使用该 ServiceAccount 账号的 Pod 都拥有该角色所拥有的权限,这种授权方式由 [`EKS Pod Identity Webhook`](https://github.com/aws/amazon-eks-pod-identity-webhook) 服务提供。 + +使用该授权模式时,可以参考 [AWS 官方文档](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/create-cluster.html)创建 EKS 集群,并且部署 TiDB Operator 以及 TiDB 集群。 + +1. 在集群上为服务帐户启用 IAM 角色: + + 可以参考 [AWS 官方文档](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html)开启所在的 EKS 集群的 IAM 角色授权。 + +2. 
创建 IAM 角色: + + 可以参考 [AWS 官方文档](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html)创建一个 IAM 角色,为角色赋予 `AmazonS3FullAccess` 的权限,并且编辑角色的 `Trust relationships`,赋予 tidb-backup-manager 使用此 IAM 角色的权限。 + + 如果是进行基于 AWS EBS 快照的备份和恢复,除完整的 S3 权限 `AmazonS3FullAccess` 外,还需要以下权限: + + {{< copyable "shell-regular" >}} + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + + 同时编辑角色的 `Trust relationships`,赋予 tidb-controller-manager 使用此 IAM 角色的权限。 + +3. 绑定 IAM 到 ServiceAccount 资源上: + + {{< copyable "shell-regular" >}} + + ```shell + kubectl annotate sa tidb-backup-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=test1 + ``` + + 如果是进行基于 AWS EBS 快照的备份和恢复,需要绑定 IAM 到 tidb-controller-manager 的 ServiceAccount 上: + + ```shell + kubectl annotate sa tidb-controller-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=tidb-admin + ``` + + 重启 TiDB Operator 的 tidb-controller-manager Pod,使配置的 ServiceAccount 生效。 + +4. 
将 ServiceAccount 绑定到 TiKV Pod: + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"serviceAccount": "tidb-backup-manager"}}}' + ``` + + 将 `spec.tikv.serviceAccount` 修改为 tidb-backup-manager,等到 TiKV Pod 重启后,查看 Pod 的 `serviceAccountName` 是否有变化。 + +> **注意:** +> +> `arn:aws:iam::123456789012:role/user` 为步骤 2 中创建的 IAM 角色。 + +## GCS 账号授权 + +### 通过服务账号密钥授权 + +创建 `gcs-secret` secret。该 secret 存放用于访问 GCS 的凭证。`google-credentials.json` 文件存放用户从 Google Cloud console 上下载的 service account key。具体操作参考 [Google Cloud 官方文档](https://cloud.google.com/docs/authentication/getting-started)。 + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic gcs-secret --from-file=credentials=./google-credentials.json -n test1 +``` + +## Azure 账号授权 + +在 Azure 云环境中,不同的类型的 Kubernetes 集群提供了不同的权限授予方式。本文分别介绍以下两种权限授予配置方式。 + +### 通过访问密钥授权 + +Azure 的客户端支持读取进程环境变量中的 `AZURE_STORAGE_ACCOUNT` 以及 `AZURE_STORAGE_KEY` 来获取与之相关联的用户或者角色的权限。 + +创建 `azblob-secret` secret,在以下命令中使用 Azure 账号的访问密钥进行授权。该 secret 存放用于访问 Azure Blob Storage 的凭证。 + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic azblob-secret --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_STORAGE_KEY=yyy --namespace=test1 +``` + +### 通过 Azure AD 授权 + +Azure 的客户端支持读取进程环境变量中的 `AZURE_STORAGE_ACCOUNT`、`AZURE_CLIENT_ID`、`AZURE_TENANT_ID`、`AZURE_CLIENT_SECRET` 来获取与之相关联的用户或者角色的权限。 + +1. 创建 `azblob-secret-ad` secret,在以下命令中使用 Azure 账号的 AD 进行授权。该 secret 存放用于访问 Azure Blob Storage 的凭证。 + + {{< copyable "shell-regular" >}} + + ```shell + kubectl create secret generic azblob-secret-ad --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_CLIENT_ID=yyy --from-literal=AZURE_TENANT_ID=zzz --from-literal=AZURE_CLIENT_SECRET=aaa --namespace=test1 + ``` + +2. 
绑定 secret 到 TiKV Pod: + + 在使用 BR 备份的过程中,TiKV Pod 和 BR Pod 一样需要对 Azure Blob Storage 进行读写操作,所以这里需要给 TiKV Pod 绑定 secret。 + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"envFrom":[{"secretRef":{"name":"azblob-secret-ad"}}]}}}' + ``` + + 等到 TiKV Pod 重启后,查看 Pod 是否加上了这些环境变量。 diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/releases/release-1.5.0.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/releases/release-1.5.0.md new file mode 100644 index 00000000..6804a9a2 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/releases/release-1.5.0.md @@ -0,0 +1,41 @@ +--- +title: TiDB Operator 1.5.0 Release Notes +summary: 了解 TiDB Operator 1.5.0 版本的新功能、优化提升,以及 Bug 修复。 +--- + +# TiDB Operator 1.5.0 Release Notes + +发布日期: 2023 年 8 月 4 日 + +TiDB Operator 版本:1.5.0 + +## 滚动升级改动 + +由于 [#5075](https://github.com/pingcap/tidb-operator/pull/5075) 的改动,如果 TiDB v7.1.0 或以上版本的集群中部署了 TiFlash,升级 TiDB Operator 到 v1.5.0 之后 TiFlash 组件会滚动升级。 + +## 新功能 + +- 新增 BR Federation Manager 组件,支持跨多个 Kubernetes 集群编排 `Backup` 和 `Restore` custom resources (CR) ([#4996](https://github.com/pingcap/tidb-operator/pull/4996), [@csuzhangxc](https://github.com/csuzhangxc)) +- 支持使用 `VolumeBackup` CR 对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的备份 ([#5013](https://github.com/pingcap/tidb-operator/pull/5013), [@WangLe1321](https://github.com/WangLe1321)) +- 支持使用 `VolumeRestore` CR 对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的恢复 ([#5039](https://github.com/pingcap/tidb-operator/pull/5039), [@WangLe1321](https://github.com/WangLe1321)) +- 支持使用 `VolumeBackupSchedule` CR 对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的自动备份 ([#5036](https://github.com/pingcap/tidb-operator/pull/5036), [@BornChanger](https://github.com/BornChanger)) +- 当对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的备份时,支持备份与 `TidbCluster` 相关的 CR 数据 ([#5207](https://github.com/pingcap/tidb-operator/pull/5207), 
[@WangLe1321](https://github.com/WangLe1321)) + +## 优化提升 + +- 为 DM master 添加 `startUpScriptVersion` 字段,支持设置启动脚本的版本 ([#4971](https://github.com/pingcap/tidb-operator/pull/4971), [@hanlins](https://github.com/hanlins)) +- 为 DmCluster、TidbDashboard、TidbMonitor 以及 TidbNGMonitoring 增加 `spec.preferIPv6` 支持 ([#4977](https://github.com/pingcap/tidb-operator/pull/4977), [@KanShiori](https://github.com/KanShiori)) +- 支持为 TiKV 驱逐 leader 和 PD 转移 leader 设置过期时间 ([#4997](https://github.com/pingcap/tidb-operator/pull/4997), [@Tema](https://github.com/Tema)) +- 支持为 `TidbInitializer` 设置 tolerations ([#5047](https://github.com/pingcap/tidb-operator/pull/5047), [@csuzhangxc](https://github.com/csuzhangxc)) +- 支持为 PD 设置启动超时时间 ([#5071](https://github.com/pingcap/tidb-operator/pull/5071), [@oliviachenairbnb](https://github.com/oliviachenairbnb)) +- 当 TiKV 在扩展 PVC 的大小时,不再执行驱逐 leader 操作,避免因磁盘容量不足而造成驱逐卡住 ([#5101](https://github.com/pingcap/tidb-operator/pull/5101), [@csuzhangxc](https://github.com/csuzhangxc)) +- 支持更新 PD、TiKV、TiFlash、TiProxy、DM-Master 与 DM-worker 组件 Service 的 annotation 与 label ([#4973](https://github.com/pingcap/tidb-operator/pull/4973), [@wxiaomou](https://github.com/wxiaomou)) +- 默认启用 volume resize,支持对 PV 的扩容 ([#5167](https://github.com/pingcap/tidb-operator/pull/5167), [@liubog2008](https://github.com/liubog2008)) + +## Bug 修复 + +- 修复升级 TiKV 时由于部分 store 下线而造成 quorum 丢失的问题 ([#4979](https://github.com/pingcap/tidb-operator/pull/4979), [@Tema](https://github.com/Tema)) +- 修复升级 PD 时由于部分 member 下线而造成 quorum 丢失的问题 ([#4995](https://github.com/pingcap/tidb-operator/pull/4995), [@Tema](https://github.com/Tema)) +- 修复 TiDB Operator 在未配置任何 Kubernetes 集群级别权限时 panic 的问题 ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), [@liubog2008](https://github.com/liubog2008)) +- 修复在 `TidbCluster` CR 中设置 `AdditionalVolumeMounts` 时 TiDB Operator 可能 panic 的问题 ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), [@liubog2008](https://github.com/liubog2008)) +- 修复 
`TidbDashboard` CR 在使用自定义的 image registry 时解析 `baseImage` 错误的问题 ([#5014](https://github.com/pingcap/tidb-operator/pull/5014), [@linkinghack](https://github.com/linkinghack)) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/tidb-operator-overview.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/tidb-operator-overview.md new file mode 100644 index 00000000..fb34f208 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/tidb-operator-overview.md @@ -0,0 +1,71 @@ +--- +title: TiDB Operator 简介 +summary: 介绍 TiDB Operator 的整体架构及使用方式。 +aliases: ['/docs-cn/tidb-in-kubernetes/dev/tidb-operator-overview/'] +--- + +# TiDB Operator 简介 + +[TiDB Operator](https://github.com/pingcap/tidb-operator) 是 Kubernetes 上的 TiDB 集群自动运维系统,提供包括部署、升级、扩缩容、备份恢复、配置变更的 TiDB 全生命周期管理。借助 TiDB Operator,TiDB 可以无缝运行在公有云或自托管的 Kubernetes 集群上。 + +TiDB Operator 与适用的 TiDB 版本的对应关系如下: + +| TiDB 版本 | 适用的 TiDB Operator 版本 | +|:---|:---| +| dev | dev | +| TiDB >= 7.1 | 1.5(推荐),1.4 | +| 6.5 <= TiDB < 7.1 | 1.5, 1.4(推荐),1.3 | +| 5.4 <= TiDB < 6.5 | 1.4, 1.3(推荐) | +| 5.1 <= TiDB < 5.4 | 1.4,1.3(推荐),1.2 | +| 3.0 <= TiDB < 5.1 | 1.4,1.3(推荐),1.2,1.1 | +| 2.1 <= TiDB < v3.0| 1.0(停止维护) | + +## 使用 TiDB Operator 管理 TiDB 集群 + +TiDB Operator 提供了多种方式来部署 Kubernetes 上的 TiDB 集群: + ++ 测试环境: + + - [kind](get-started.md#方法一使用-kind-创建-kubernetes-集群) + - [Minikube](get-started.md#方法二使用-minikube-创建-kubernetes-集群) + - [Google Cloud Shell](https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https://github.com/pingcap/docs-tidb-operator&cloudshell_tutorial=zh/deploy-tidb-from-kubernetes-gke.md) + ++ 生产环境: + + - 在公有云上部署生产可用的 TiDB 集群并进行后续的运维管理; + + - [在 AWS EKS 上部署 TiDB 集群](deploy-on-aws-eks.md) + - [在 Google Cloud GKE 上部署 TiDB 集群](deploy-on-gcp-gke.md) + - [在 Azure AKS 上部署 TiDB 集群](deploy-on-azure-aks.md) + - [在阿里云 ACK 上部署 TiDB 集群](deploy-on-alibaba-cloud.md) + + - 在自托管的 Kubernetes 集群中部署 TiDB 集群: + + 首先按照[部署 TiDB 
Operator](deploy-tidb-operator.md)在集群中安装 TiDB Operator,再根据[在标准 Kubernetes 集群上部署 TiDB 集群](deploy-on-general-kubernetes.md)来部署你的 TiDB 集群。对于生产级 TiDB 集群,你还需要参考 [TiDB 集群环境要求](prerequisites.md)调整 Kubernetes 集群配置并根据[本地 PV 配置](configure-storage-class.md#本地-pv-配置)为你的 Kubernetes 集群配置本地 PV,以满足 TiKV 的低延迟本地存储需求。 + +在任何环境上部署前,都可以参考 [TiDB 集群配置](configure-a-tidb-cluster.md)来自定义 TiDB 配置。 + +部署完成后,你可以参考下面的文档进行 Kubernetes 上 TiDB 集群的使用和运维: + ++ [部署 TiDB 集群](deploy-on-general-kubernetes.md) ++ [访问 TiDB 集群](access-tidb.md) ++ [TiDB 集群扩缩容](scale-a-tidb-cluster.md) ++ [TiDB 集群升级](upgrade-a-tidb-cluster.md) ++ [TiDB 集群配置变更](configure-a-tidb-cluster.md) ++ [TiDB 集群备份与恢复](backup-restore-overview.md) ++ [配置 TiDB 集群故障自动转移](use-auto-failover.md) ++ [监控 TiDB 集群](monitor-a-tidb-cluster.md) ++ [查看 TiDB 日志](view-logs.md) ++ [维护 TiDB 所在的 Kubernetes 节点](maintain-a-kubernetes-node.md) + +当集群出现问题需要进行诊断时,你可以: + ++ 查阅 [Kubernetes 上的 TiDB FAQ](faq.md) 寻找是否存在现成的解决办法; ++ 参考 [Kubernetes 上的 TiDB 故障诊断](tips.md)解决故障。 + +Kubernetes 上的 TiDB 提供了专用的命令行工具 `tkctl` 用于集群管理和辅助诊断,同时,在 Kubernetes 上,TiDB 的部分生态工具的使用方法也有所不同,你可以: + ++ 参考 [`tkctl` 使用指南](use-tkctl.md) 来使用 `tkctl`; ++ 参考 [Kubernetes 上的 TiDB 相关工具使用指南](tidb-toolkit.md)来了解 TiDB 生态工具在 Kubernetes 上的使用方法。 + +最后,当 TiDB Operator 发布新版本时,你可以参考[升级 TiDB Operator](upgrade-tidb-operator.md) 进行版本更新。 diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/whats-new-in-v1.5.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/whats-new-in-v1.5.md new file mode 100644 index 00000000..595d35c8 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb-in-kubernetes/release-6.7/whats-new-in-v1.5.md @@ -0,0 +1,31 @@ +--- +title: TiDB Operator v1.5 新特性 +summary: 了解 TiDB Operator 1.5.0 版本引入的新特性。 +--- + +# TiDB Operator v1.5 新特性 + +TiDB Operator v1.5 引入了以下关键特性,从扩展性、易用性等方面帮助你更轻松地管理 TiDB 集群及其周边工具。 + +## 兼容性改动 + +如需使用在 [#4959](https://github.com/pingcap/tidb-operator/pull/4959) 中引入的 `PreferDualStack` 特性(通过 `spec.preferIPv6: 
true` 启用),Kubernetes 版本需要大于等于 v1.20。 + +## 滚动升级改动 + +由于 [#5075](https://github.com/pingcap/tidb-operator/pull/5075) 的改动,如果 TiDB v7.1.0 或以上版本的集群中部署了 TiFlash,升级 TiDB Operator 到 v1.5.0 之后 TiFlash 组件会滚动升级。 + +## 扩展性 + +- 支持通过 `bootstrapSQLConfigMapName` 字段指定 TiDB 首次启动时所执行的初始 SQL 文件。 +- 支持通过配置 `spec.preferIPv6: true` 为所有组件的 Service 的 `ipFamilyPolicy` 配置 `PreferDualStack`。 +- 支持使用 [Advanced StatefulSet](advanced-statefulset.md) 管理 TiCDC 和 TiProxy。 +- 新增 BR Federation Manager 组件,支持对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS snapshot 的备份恢复。 + +## 易用性 + +- 支持通过为 PD Pod 加上 `tidb.pingcap.com/pd-transfer-leader` annotation 来优雅重启 PD Pod。 +- 支持通过为 TiDB Pod 加上 `tidb.pingcap.com/tidb-graceful-shutdown` annotation 来优雅重启 TiDB Pod。 +- 允许用户自定义策略来重启失败的备份任务,以提高备份的稳定性。 +- 添加与 reconciler 和 worker queue 相关的监控指标以提高可观测性。 +- 添加统计协调流程失败计数的监控指标以提高可观测性。 diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/TOC.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/TOC.md new file mode 100644 index 00000000..f4091c4f --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/TOC.md @@ -0,0 +1,1188 @@ + + + +- [文档中心](https://docs.pingcap.com/zh) +- 关于 TiDB + - [TiDB 简介](/overview.md) + - [TiDB 7.2 Release Notes](/releases/release-7.2.0.md) + - [功能概览](/basic-features.md) + - [与 MySQL 的兼容性](/mysql-compatibility.md) + - [使用限制](/tidb-limitations.md) + - [荣誉列表](/credits.md) + - [路线图](/tidb-roadmap.md) +- 快速上手 + - [快速上手 TiDB](/quick-start-with-tidb.md) + - [快速上手 HTAP](/quick-start-with-htap.md) + - [SQL 基本操作](/basic-sql-operations.md) + - [深入探索 HTAP](/explore-htap.md) +- 应用开发 + - [概览](/develop/dev-guide-overview.md) + - 快速开始 + - [使用 TiDB Serverless 构建 TiDB 集群](/develop/dev-guide-build-cluster-in-cloud.md) + - [使用 TiDB 的增删改查 SQL](/develop/dev-guide-tidb-crud-sql.md) + - 示例程序 + - Java + - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) + - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) + - 
[Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) + - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + - Go + - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) + - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) + - Python + - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) + - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) + - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) + - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) + - [peewee](/develop/dev-guide-sample-application-python-peewee.md) + - [Django](/develop/dev-guide-sample-application-python-django.md) + - 连接到 TiDB + - [选择驱动或 ORM 框架](/develop/dev-guide-choose-driver-or-orm.md) + - [连接到 TiDB](/develop/dev-guide-connect-to-tidb.md) + - [连接池与连接参数](/develop/dev-guide-connection-parameters.md) + - 数据库模式设计 + - [概览](/develop/dev-guide-schema-design-overview.md) + - [创建数据库](/develop/dev-guide-create-database.md) + - [创建表](/develop/dev-guide-create-table.md) + - [创建二级索引](/develop/dev-guide-create-secondary-indexes.md) + - 数据写入 + - [插入数据](/develop/dev-guide-insert-data.md) + - [更新数据](/develop/dev-guide-update-data.md) + - [删除数据](/develop/dev-guide-delete-data.md) + - [使用 TTL (Time to Live) 定期删除过期数据](/time-to-live.md) + - [预处理语句](/develop/dev-guide-prepared-statement.md) + - 数据读取 + - [单表读取](/develop/dev-guide-get-data-from-single-table.md) + - [多表连接查询](/develop/dev-guide-join-tables.md) + - [子查询](/develop/dev-guide-use-subqueries.md) + - [查询结果分页](/develop/dev-guide-paginate-results.md) + - [视图](/develop/dev-guide-use-views.md) + - [临时表](/develop/dev-guide-use-temporary-tables.md) + - [公共表表达式](/develop/dev-guide-use-common-table-expression.md) + - 读取副本数据 + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP 
查询](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - 事务 + - [概览](/develop/dev-guide-transaction-overview.md) + - [乐观事务和悲观事务](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [事务限制](/develop/dev-guide-transaction-restraints.md) + - [事务错误处理](/develop/dev-guide-transaction-troubleshoot.md) + - 优化 SQL 性能 + - [概览](/develop/dev-guide-optimize-sql-overview.md) + - [SQL 性能调优](/develop/dev-guide-optimize-sql.md) + - [性能调优最佳实践](/develop/dev-guide-optimize-sql-best-practices.md) + - [索引的最佳实践](/develop/dev-guide-index-best-practice.md) + - 其他优化 + - [避免隐式类型转换](/develop/dev-guide-implicit-type-conversion.md) + - [唯一序列号生成方案](/develop/dev-guide-unique-serial-number-generation.md) + - 故障诊断 + - [SQL 或事务问题](/develop/dev-guide-troubleshoot-overview.md) + - [结果集不稳定](/develop/dev-guide-unstable-result-set.md) + - [超时](/develop/dev-guide-timeouts-in-tidb.md) + - 引用文档 + - [Bookshop 示例应用](/develop/dev-guide-bookshop-schema-design.md) + - 规范 + - [命名规范](/develop/dev-guide-object-naming-guidelines.md) + - [SQL 开发规范](/develop/dev-guide-sql-development-specification.md) + - 云原生开发环境 + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - 第三方工具支持 + - [TiDB 支持的第三方工具](/develop/dev-guide-third-party-support.md) + - [已知的第三方工具兼容问题](/develop/dev-guide-third-party-tools-compatibility.md) + - [TiDB 与 ProxySQL 集成](/develop/dev-guide-proxysql-integration.md) +- 部署标准集群 + - [软硬件环境需求](/hardware-and-software-requirements.md) + - [环境与系统配置检查](/check-before-deployment.md) + - 规划集群拓扑 + - [最小部署拓扑结构](/minimal-deployment-topology.md) + - [TiFlash 部署拓扑](/tiflash-deployment-topology.md) + - [TiCDC 部署拓扑](/ticdc-deployment-topology.md) + - [TiDB Binlog 部署拓扑](/tidb-binlog-deployment-topology.md) + - [TiSpark 部署拓扑](/tispark-deployment-topology.md) + - [跨机房部署拓扑结构](/geo-distributed-deployment-topology.md) + - [混合部署拓扑结构](/hybrid-deployment-topology.md) + - 安装与启动 + - [使用 TiUP 部署](/production-deployment-using-tiup.md) + - [在 Kubernetes 上部署](/tidb-in-kubernetes.md) + - 
[验证集群状态](/post-installation-check.md) + - 测试集群性能 + - [用 Sysbench 测试 TiDB](/benchmark/benchmark-tidb-using-sysbench.md) + - [对 TiDB 进行 TPC-C 测试](/benchmark/benchmark-tidb-using-tpcc.md) + - [对 TiDB 进行 CH-benCHmark 测试](/benchmark/benchmark-tidb-using-ch.md) +- 数据迁移 + - [数据迁移概述](/migration-overview.md) + - [数据迁移工具](/migration-tools.md) + - [数据导入最佳实践](/tidb-lightning/data-import-best-practices.md) + - 数据迁移场景 + - [从 Aurora 迁移数据到 TiDB](/migrate-aurora-to-tidb.md) + - [从小数据量 MySQL 迁移数据到 TiDB](/migrate-small-mysql-to-tidb.md) + - [从大数据量 MySQL 迁移数据到 TiDB](/migrate-large-mysql-to-tidb.md) + - [从小数据量分库分表 MySQL 合并迁移数据到 TiDB](/migrate-small-mysql-shards-to-tidb.md) + - [从大数据量分库分表 MySQL 合并迁移数据到 TiDB](/migrate-large-mysql-shards-to-tidb.md) + - [从 CSV 文件迁移数据到 TiDB](/migrate-from-csv-files-to-tidb.md) + - [从 SQL 文件迁移数据到 TiDB](/migrate-from-sql-files-to-tidb.md) + - [从 Parquet 文件迁移数据到 TiDB](/migrate-from-parquet-files-to-tidb.md) + - [从 TiDB 集群迁移数据至另一 TiDB 集群](/migrate-from-tidb-to-tidb.md) + - [从 TiDB 集群迁移数据至兼容 MySQL 的数据库](/migrate-from-tidb-to-mysql.md) + - 复杂迁移场景 + - [上游使用 pt/gh-ost 工具的持续同步场景](/migrate-with-pt-ghost.md) + - [下游存在更多列的迁移场景](/migrate-with-more-columns-downstream.md) + - [如何根据类型或 DDL 内容过滤 binlog 事件](/filter-binlog-event.md) + - [如何通过 SQL 表达式过滤 DML binlog 事件](/filter-dml-event.md) +- 数据集成 + - [数据集成概述](/integration-overview.md) + - 数据集成场景 + - [与 Confluent Cloud 和 Snowflake 进行数据集成](/ticdc/integrate-confluent-using-ticdc.md) + - [与 Apache Kafka 和 Apache Flink 进行数据集成](/replicate-data-to-kafka.md) +- 运维操作 + - 升级 TiDB 版本 + - [使用 TiUP 升级](/upgrade-tidb-using-tiup.md) + - [使用 TiDB Operator](https://docs.pingcap.com/zh/tidb-in-kubernetes/stable/upgrade-a-tidb-cluster) + - [平滑升级 TiDB](/smooth-upgrade-tidb.md) + - [TiFlash v6.2 升级帮助](/tiflash-620-upgrade-guide.md) + - 扩缩容 + - [使用 TiUP(推荐)](/scale-tidb-using-tiup.md) + - [使用 TiDB Operator](https://docs.pingcap.com/zh/tidb-in-kubernetes/stable/scale-a-tidb-cluster) + - 备份与恢复 + - [备份与恢复概述](/br/backup-and-restore-overview.md) + - 
架构设计 + - [架构概述](/br/backup-and-restore-design.md) + - [快照备份与恢复架构](/br/br-snapshot-architecture.md) + - [日志备份与 PITR 架构](/br/br-log-architecture.md) + - 使用 BR 进行备份与恢复 + - [使用概述](/br/br-use-overview.md) + - [快照备份与恢复](/br/br-snapshot-guide.md) + - [日志备份与 PITR](/br/br-pitr-guide.md) + - [实践示例](/br/backup-and-restore-use-cases.md) + - [备份存储](/br/backup-and-restore-storages.md) + - br cli 命令手册 + - [命令概述](/br/use-br-command-line-tool.md) + - [快照备份与恢复命令手册](/br/br-snapshot-manual.md) + - [日志备份与 PITR 命令手册](/br/br-pitr-manual.md) + - 参考指南 + - BR 特性 + - [自动调节](/br/br-auto-tune.md) + - [批量建表](/br/br-batch-create-table.md) + - [断点备份](/br/br-checkpoint-backup.md) + - [断点恢复](/br/br-checkpoint-restore.md) + - [使用 Dumpling 和 TiDB Lightning 备份与恢复](/backup-and-restore-using-dumpling-lightning.md) + - [备份与恢复 RawKV](/br/rawkv-backup-and-restore.md) + - [增量备份与恢复](/br/br-incremental-guide.md) + - 集群容灾 + - [容灾方案介绍](/dr-solution-introduction.md) + - [基于主备集群的容灾](/dr-secondary-cluster.md) + - [基于多副本的单集群容灾](/dr-multi-replica.md) + - [基于备份与恢复的容灾](/dr-backup-restore.md) + - [使用资源管控 (Resource Control) 实现资源隔离](/tidb-resource-control.md) + - [修改时区](/configure-time-zone.md) + - [日常巡检](/daily-check.md) + - [TiFlash 常用运维操作](/tiflash/maintain-tiflash.md) + - [使用 TiUP 运维集群](/maintain-tidb-using-tiup.md) + - [在线修改集群配置](/dynamic-config.md) + - [在线有损恢复](/online-unsafe-recovery.md) + - [搭建双集群主从复制](/replicate-between-primary-and-secondary-clusters.md) +- 监控与告警 + - [监控框架概述](/tidb-monitoring-framework.md) + - [监控 API](/tidb-monitoring-api.md) + - [手动部署监控](/deploy-monitoring-services.md) + - [将 Grafana 监控数据导出成快照](/exporting-grafana-snapshots.md) + - [TiDB 集群报警规则与处理方法](/alert-rules.md) + - [TiFlash 报警规则与处理方法](/tiflash/tiflash-alert-rules.md) + - [自定义监控组件的配置](/tiup/customized-montior-in-tiup-environment.md) + - [BR 监控告警](/br/br-monitoring-and-alert.md) +- 故障诊断 + - 故障诊断问题汇总 + - [TiDB 集群问题导图](/tidb-troubleshooting-map.md) + - [TiDB 集群常见问题](/troubleshoot-tidb-cluster.md) + - [TiFlash 
常见问题](/tiflash/troubleshoot-tiflash.md) + - 故障场景 + - 慢查询 + - [定位慢查询](/identify-slow-queries.md) + - [分析慢查询](/analyze-slow-queries.md) + - [TiDB OOM 故障排查](/troubleshoot-tidb-oom.md) + - [热点问题处理](/troubleshoot-hot-spot-issues.md) + - [CPU 占用过多导致读写延迟增加](/troubleshoot-cpu-issues.md) + - [写冲突与写性能下降](/troubleshoot-write-conflicts.md) + - [磁盘 I/O 过高](/troubleshoot-high-disk-io.md) + - [锁冲突与 TTL 超时](/troubleshoot-lock-conflicts.md) + - [数据索引不一致报错](/troubleshoot-data-inconsistency-errors.md) + - 故障诊断方法 + - [通过 SQL 诊断获取集群诊断信息](/information-schema/information-schema-sql-diagnostics.md) + - [通过 Statement Summary 排查 SQL 性能问题](/statement-summary-tables.md) + - [使用 Top SQL 定位系统资源消耗过多的查询](/dashboard/top-sql.md) + - [通过日志定位消耗系统资源多的查询](/identify-expensive-queries.md) + - [保存和恢复集群现场信息](/sql-plan-replayer.md) + - [获取支持](/support.md) +- 性能调优 + - 优化手册 + - [优化概述](/performance-tuning-overview.md) + - [优化方法](/performance-tuning-methods.md) + - [OLTP 负载性能优化实践](/performance-tuning-practices.md) + - [TiFlash 性能分析方法](/tiflash-performance-tuning-methods.md) + - [TiCDC 性能分析方法](/ticdc-performance-tuning-methods.md) + - [延迟的拆解分析](/latency-breakdown.md) + - 配置调优 + - [操作系统性能参数调优](/tune-operating-system.md) + - [TiDB 内存调优](/configure-memory-usage.md) + - [TiKV 线程调优](/tune-tikv-thread-performance.md) + - [TiKV 内存调优](/tune-tikv-memory-performance.md) + - [TiKV Follower Read](/follower-read.md) + - [Region 性能调优](/tune-region-performance.md) + - [TiFlash 调优](/tiflash/tune-tiflash-performance.md) + - [下推计算结果缓存](/coprocessor-cache.md) + - 垃圾回收 (GC) + - [GC 机制简介](/garbage-collection-overview.md) + - [GC 配置](/garbage-collection-configuration.md) + - SQL 性能调优 + - [SQL 性能调优概览](/sql-tuning-overview.md) + - 理解 TiDB 执行计划 + - [TiDB 执行计划概览](/explain-overview.md) + - [使用 `EXPLAIN` 解读执行计划](/explain-walkthrough.md) + - [MPP 模式查询的执行计划](/explain-mpp.md) + - [索引查询的执行计划](/explain-indexes.md) + - [Join 查询的执行计划](/explain-joins.md) + - [子查询的执行计划](/explain-subqueries.md) + - [聚合查询的执行计划](/explain-aggregation.md) + - 
[视图查询的执行计划](/explain-views.md) + - [分区查询的执行计划](/explain-partitions.md) + - [开启 IndexMerge 查询的执行计划](/explain-index-merge.md) + - SQL 优化流程 + - [SQL 优化流程概览](/sql-optimization-concepts.md) + - 逻辑优化 + - [逻辑优化概览](/sql-logical-optimization.md) + - [子查询相关的优化](/subquery-optimization.md) + - [列裁剪](/column-pruning.md) + - [关联子查询去关联](/correlated-subquery-optimization.md) + - [Max/Min 消除](/max-min-eliminate.md) + - [谓词下推](/predicate-push-down.md) + - [分区裁剪](/partition-pruning.md) + - [TopN 和 Limit 下推](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - [从窗口函数中推导 TopN 或 Limit](/derive-topn-from-window.md) + - 物理优化 + - [物理优化概览](/sql-physical-optimization.md) + - [索引的选择](/choose-index.md) + - [统计信息简介](/statistics.md) + - [错误索引的解决方案](/wrong-index-solution.md) + - [Distinct 优化](/agg-distinct-optimization.md) + - [代价模型](/cost-model.md) + - [Prepare 语句执行计划缓存](/sql-prepared-plan-cache.md) + - [非 Prepare 语句执行计划缓存](/sql-non-prepared-plan-cache.md) + - 控制执行计划 + - [控制执行计划概览](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [执行计划管理](/sql-plan-management.md) + - [优化规则及表达式下推的黑名单](/blocklist-control-plan.md) + - [Optimizer Fix Controls](/optimizer-fix-controls.md) +- 教程 + - [单区域多 AZ 部署](/multi-data-centers-in-one-city-deployment.md) + - [双区域多 AZ 部署](/three-data-centers-in-two-cities-deployment.md) + - [单区域双 AZ 部署](/two-data-centers-in-one-city-deployment.md) + - 读取历史数据 + - 使用 Stale Read 功能读取历史数据(推荐) + - [Stale Read 使用场景介绍](/stale-read.md) + - [使用 `AS OF TIMESTAMP` 语法读取历史数据](/as-of-timestamp.md) + - [使用系统变量 `tidb_read_staleness` 读取历史数据](/tidb-read-staleness.md) + - [使用系统变量 `tidb_external_ts` 读取历史数据](/tidb-external-ts.md) + - [使用系统变量 `tidb_snapshot` 读取历史数据](/read-historical-data.md) + - 最佳实践 + - [TiDB 最佳实践](/best-practices/tidb-best-practices.md) + - [Java 应用开发最佳实践](/best-practices/java-app-best-practices.md) + - [HAProxy 最佳实践](/best-practices/haproxy-best-practices.md) + - [高并发写入场景最佳实践](/best-practices/high-concurrency-best-practices.md) + - [Grafana 
监控最佳实践](/best-practices/grafana-monitor-best-practices.md) + - [PD 调度策略最佳实践](/best-practices/pd-scheduling-best-practices.md) + - [海量 Region 集群调优](/best-practices/massive-regions-best-practices.md) + - [三节点混合部署最佳实践](/best-practices/three-nodes-hybrid-deployment.md) + - [在三数据中心下就近读取数据](/best-practices/three-dc-local-read.md) + - [使用 UUID](/best-practices/uuid.md) + - [只读存储节点最佳实践](/best-practices/readonly-nodes.md) + - [Placement Rules 使用文档](/configure-placement-rules.md) + - [Load Base Split 使用文档](/configure-load-base-split.md) + - [Store Limit 使用文档](/configure-store-limit.md) + - [DDL 执行原理及最佳实践](/ddl-introduction.md) +- TiDB 工具 + - [功能概览](/ecosystem-tool-user-guide.md) + - [使用场景](/ecosystem-tool-user-case.md) + - [工具下载](/download-ecosystem-tools.md) + - TiUP + - [文档地图](/tiup/tiup-documentation-guide.md) + - [概览](/tiup/tiup-overview.md) + - [术语及核心概念](/tiup/tiup-terminology-and-concepts.md) + - [TiUP 组件管理](/tiup/tiup-component-management.md) + - [FAQ](/tiup/tiup-faq.md) + - [故障排查](/tiup/tiup-troubleshooting-guide.md) + - TiUP 命令参考手册 + - [命令概览](/tiup/tiup-reference.md) + - TiUP 命令 + - [tiup clean](/tiup/tiup-command-clean.md) + - [tiup completion](/tiup/tiup-command-completion.md) + - [tiup env](/tiup/tiup-command-env.md) + - [tiup help](/tiup/tiup-command-help.md) + - [tiup install](/tiup/tiup-command-install.md) + - [tiup list](/tiup/tiup-command-list.md) + - tiup mirror + - [tiup mirror 概览](/tiup/tiup-command-mirror.md) + - [tiup mirror clone](/tiup/tiup-command-mirror-clone.md) + - [tiup mirror genkey](/tiup/tiup-command-mirror-genkey.md) + - [tiup mirror grant](/tiup/tiup-command-mirror-grant.md) + - [tiup mirror init](/tiup/tiup-command-mirror-init.md) + - [tiup mirror merge](/tiup/tiup-command-mirror-merge.md) + - [tiup mirror modify](/tiup/tiup-command-mirror-modify.md) + - [tiup mirror publish](/tiup/tiup-command-mirror-publish.md) + - [tiup mirror rotate](/tiup/tiup-command-mirror-rotate.md) + - [tiup mirror set](/tiup/tiup-command-mirror-set.md) + - [tiup 
mirror sign](/tiup/tiup-command-mirror-sign.md) + - [tiup status](/tiup/tiup-command-status.md) + - [tiup telemetry](/tiup/tiup-command-telemetry.md) + - [tiup uninstall](/tiup/tiup-command-uninstall.md) + - [tiup update](/tiup/tiup-command-update.md) + - TiUP Cluster 命令 + - [TiUP Cluster 命令概览](/tiup/tiup-component-cluster.md) + - [tiup cluster audit](/tiup/tiup-component-cluster-audit.md) + - [tiup cluster audit cleanup](/tiup/tiup-component-cluster-audit-cleanup.md) + - [tiup cluster check](/tiup/tiup-component-cluster-check.md) + - [tiup cluster clean](/tiup/tiup-component-cluster-clean.md) + - [tiup cluster deploy](/tiup/tiup-component-cluster-deploy.md) + - [tiup cluster destroy](/tiup/tiup-component-cluster-destroy.md) + - [tiup cluster disable](/tiup/tiup-component-cluster-disable.md) + - [tiup cluster display](/tiup/tiup-component-cluster-display.md) + - [tiup cluster edit-config](/tiup/tiup-component-cluster-edit-config.md) + - [tiup cluster enable](/tiup/tiup-component-cluster-enable.md) + - [tiup cluster help](/tiup/tiup-component-cluster-help.md) + - [tiup cluster import](/tiup/tiup-component-cluster-import.md) + - [tiup cluster list](/tiup/tiup-component-cluster-list.md) + - [tiup cluster meta backup](/tiup/tiup-component-cluster-meta-backup.md) + - [tiup cluster meta restore](/tiup/tiup-component-cluster-meta-restore.md) + - [tiup cluster patch](/tiup/tiup-component-cluster-patch.md) + - [tiup cluster prune](/tiup/tiup-component-cluster-prune.md) + - [tiup cluster reload](/tiup/tiup-component-cluster-reload.md) + - [tiup cluster rename](/tiup/tiup-component-cluster-rename.md) + - [tiup cluster replay](/tiup/tiup-component-cluster-replay.md) + - [tiup cluster restart](/tiup/tiup-component-cluster-restart.md) + - [tiup cluster scale-in](/tiup/tiup-component-cluster-scale-in.md) + - [tiup cluster scale-out](/tiup/tiup-component-cluster-scale-out.md) + - [tiup cluster start](/tiup/tiup-component-cluster-start.md) + - [tiup cluster 
stop](/tiup/tiup-component-cluster-stop.md) + - [tiup cluster template](/tiup/tiup-component-cluster-template.md) + - [tiup cluster upgrade](/tiup/tiup-component-cluster-upgrade.md) + - TiUP DM 命令 + - [TiUP DM 命令概览](/tiup/tiup-component-dm.md) + - [tiup dm audit](/tiup/tiup-component-dm-audit.md) + - [tiup dm deploy](/tiup/tiup-component-dm-deploy.md) + - [tiup dm destroy](/tiup/tiup-component-dm-destroy.md) + - [tiup dm disable](/tiup/tiup-component-dm-disable.md) + - [tiup dm display](/tiup/tiup-component-dm-display.md) + - [tiup dm edit-config](/tiup/tiup-component-dm-edit-config.md) + - [tiup dm enable](/tiup/tiup-component-dm-enable.md) + - [tiup dm help](/tiup/tiup-component-dm-help.md) + - [tiup dm import](/tiup/tiup-component-dm-import.md) + - [tiup dm list](/tiup/tiup-component-dm-list.md) + - [tiup dm patch](/tiup/tiup-component-dm-patch.md) + - [tiup dm prune](/tiup/tiup-component-dm-prune.md) + - [tiup dm reload](/tiup/tiup-component-dm-reload.md) + - [tiup dm replay](/tiup/tiup-component-dm-replay.md) + - [tiup dm restart](/tiup/tiup-component-dm-restart.md) + - [tiup dm scale-in](/tiup/tiup-component-dm-scale-in.md) + - [tiup dm scale-out](/tiup/tiup-component-dm-scale-out.md) + - [tiup dm start](/tiup/tiup-component-dm-start.md) + - [tiup dm stop](/tiup/tiup-component-dm-stop.md) + - [tiup dm template](/tiup/tiup-component-dm-template.md) + - [tiup dm upgrade](/tiup/tiup-component-dm-upgrade.md) + - [TiDB 集群拓扑文件配置](/tiup/tiup-cluster-topology-reference.md) + - [DM 集群拓扑文件配置](/tiup/tiup-dm-topology-reference.md) + - [TiUP 镜像参考指南](/tiup/tiup-mirror-reference.md) + - TiUP 组件文档 + - [tiup-playground 运行本地测试集群](/tiup/tiup-playground.md) + - [tiup-cluster 部署运维生产集群](/tiup/tiup-cluster.md) + - [tiup-mirror 定制离线镜像](/tiup/tiup-mirror.md) + - [tiup-bench 进行 TPCC/TPCH 压力测试](/tiup/tiup-bench.md) + - [TiDB Operator](/tidb-operator-overview.md) + - TiDB Data Migration + - [关于 Data Migration](/dm/dm-overview.md) + - [架构简介](/dm/dm-arch.md) + - 
[快速开始](/dm/quick-start-with-dm.md) + - [最佳实践](/dm/dm-best-practices.md) + - 部署 DM 集群 + - [软硬件要求](/dm/dm-hardware-and-software-requirements.md) + - [使用 TiUP 联网部署(推荐)](/dm/deploy-a-dm-cluster-using-tiup.md) + - [使用 TiUP 离线部署](/dm/deploy-a-dm-cluster-using-tiup-offline.md) + - [使用 Binary 部署](/dm/deploy-a-dm-cluster-using-binary.md) + - [在 Kubernetes 环境中部署](https://docs.pingcap.com/zh/tidb-in-kubernetes/dev/deploy-tidb-dm) + - 入门指南 + - [创建数据源](/dm/quick-start-create-source.md) + - [数据源操作](/dm/dm-manage-source.md) + - [任务配置向导](/dm/dm-task-configuration-guide.md) + - [分库分表合并](/dm/dm-shard-merge.md) + - [表路由](/dm/dm-table-routing.md) + - [黑白名单](/dm/dm-block-allow-table-lists.md) + - [过滤 binlog 事件](/dm/dm-binlog-event-filter.md) + - [通过 SQL 表达式过滤 DML](/dm/feature-expression-filter.md) + - [Online DDL 工具支持](/dm/dm-online-ddl-tool-support.md) + - 迁移任务操作 + - [任务前置检查](/dm/dm-precheck.md) + - [创建任务](/dm/dm-create-task.md) + - [查询状态](/dm/dm-query-status.md) + - [暂停任务](/dm/dm-pause-task.md) + - [恢复任务](/dm/dm-resume-task.md) + - [停止任务](/dm/dm-stop-task.md) + - 进阶教程 + - 分库分表合并迁移 + - [概述](/dm/feature-shard-merge.md) + - [悲观模式](/dm/feature-shard-merge-pessimistic.md) + - [乐观模式](/dm/feature-shard-merge-optimistic.md) + - [手动处理 Sharding DDL Lock](/dm/manually-handling-sharding-ddl-locks.md) + - [迁移使用 GH-ost/PT-osc 的数据源](/dm/feature-online-ddl.md) + - [上下游列数量不一致的迁移](/migrate-with-more-columns-downstream.md) + - [增量数据校验](/dm/dm-continuous-data-validation.md) + - 运维管理 + - 集群版本升级 + - [使用 TiUP 运维集群(推荐)](/dm/maintain-dm-using-tiup.md) + - [1.0.x 到 2.0+ 手动升级](/dm/manually-upgrade-dm-1.0-to-2.0.md) + - [在线应用 Hotfix 到 DM 集群](/tiup/tiup-component-dm-patch.md) + - 集群运维工具 + - [使用 WebUI 管理迁移任务](/dm/dm-webui-guide.md) + - [使用 dmctl 管理迁移任务](/dm/dmctl-introduction.md) + - 性能调优 + - [性能数据](/dm/dm-benchmark-v5.4.0.md) + - [配置调优](/dm/dm-tune-configuration.md) + - [如何进行压力测试](/dm/dm-performance-test.md) + - [性能问题及处理方法](/dm/dm-handle-performance-issues.md) + - 数据源管理 + - 
[变更同步的数据源地址](/dm/usage-scenario-master-slave-switch.md) + - 任务管理 + - [处理出错的 DDL 语句](/dm/handle-failed-ddl-statements.md) + - [管理迁移表的表结构](/dm/dm-manage-schema.md) + - [导出和导入集群的数据源和任务配置](/dm/dm-export-import-config.md) + - [处理告警](/dm/dm-handle-alerts.md) + - [日常巡检](/dm/dm-daily-check.md) + - 参考手册 + - 架构组件 + - [DM-worker 说明](/dm/dm-worker-intro.md) + - [安全模式](/dm/dm-safe-mode.md) + - [Relay Log](/dm/relay-log.md) + - [DDL 特殊处理说明](/dm/dm-ddl-compatible.md) + - 运行机制 + - [DML 同步机制](/dm/dm-dml-replication-logic.md) + - 命令行 + - [DM-master & DM-worker](/dm/dm-command-line-flags.md) + - 配置文件 + - [概述](/dm/dm-config-overview.md) + - [数据源配置](/dm/dm-source-configuration-file.md) + - [迁移任务配置](/dm/task-configuration-file-full.md) + - [DM-master 配置](/dm/dm-master-configuration-file.md) + - [DM-worker 配置](/dm/dm-worker-configuration-file.md) + - [Table Selector](/dm/table-selector.md) + - [OpenAPI](/dm/dm-open-api.md) + - [兼容性目录](/dm/dm-compatibility-catalog.md) + - 安全 + - [为 DM 的连接开启加密传输](/dm/dm-enable-tls.md) + - [生成自签名证书](/dm/dm-generate-self-signed-certificates.md) + - 监控告警 + - [监控指标](/dm/monitor-a-dm-cluster.md) + - [告警信息](/dm/dm-alert-rules.md) + - [错误码](/dm/dm-error-handling.md#常见故障处理方法) + - [术语表](/dm/dm-glossary.md) + - 使用示例 + - [使用 DM 迁移数据](/dm/migrate-data-using-dm.md) + - [快速创建迁移任务](/dm/quick-start-create-task.md) + - [分表合并数据迁移最佳实践](/dm/shard-merge-best-practices.md) + - 异常解决 + - [常见问题](/dm/dm-faq.md) + - [错误处理及恢复](/dm/dm-error-handling.md) + - [版本发布历史](/dm/dm-release-notes.md) + - TiDB Lightning + - [概述](/tidb-lightning/tidb-lightning-overview.md) + - [快速上手](/get-started-with-tidb-lightning.md) + - [部署 TiDB Lightning](/tidb-lightning/deploy-tidb-lightning.md) + - [目标数据库要求](/tidb-lightning/tidb-lightning-requirements.md) + - 数据源 + - [文件匹配规则](/tidb-lightning/tidb-lightning-data-source.md) + - [CSV](/tidb-lightning/tidb-lightning-data-source.md#csv) + - [SQL](/tidb-lightning/tidb-lightning-data-source.md#sql) + - 
[Parquet](/tidb-lightning/tidb-lightning-data-source.md#parquet) + - [自定义文件匹配](/tidb-lightning/tidb-lightning-data-source.md#自定义文件匹配) + - 物理导入模式 + - [概述](/tidb-lightning/tidb-lightning-physical-import-mode.md) + - [必要条件及限制](/tidb-lightning/tidb-lightning-physical-import-mode.md#必要条件及限制) + - [配置及使用](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md) + - [冲突检测](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md#冲突数据检测) + - [性能调优](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md#性能调优) + - 逻辑导入模式 + - [概述](/tidb-lightning/tidb-lightning-logical-import-mode.md) + - [必要条件及限制](/tidb-lightning/tidb-lightning-logical-import-mode.md#必要条件) + - [配置及使用](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md) + - [冲突检测](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md#冲突数据检测) + - [性能调优](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md#性能调优) + - [前置检查](/tidb-lightning/tidb-lightning-prechecks.md) + - [表库过滤](/table-filter.md) + - [断点续传](/tidb-lightning/tidb-lightning-checkpoints.md) + - [并行导入](/tidb-lightning/tidb-lightning-distributed-import.md) + - [可容忍错误](/tidb-lightning/tidb-lightning-error-resolution.md) + - [故障处理](/tidb-lightning/troubleshoot-tidb-lightning.md) + - 参考手册 + - [完整配置文件](/tidb-lightning/tidb-lightning-configuration.md) + - [命令行参数](/tidb-lightning/tidb-lightning-command-line-full.md) + - [监控告警](/tidb-lightning/monitor-tidb-lightning.md) + - [Web 界面](/tidb-lightning/tidb-lightning-web-interface.md) + - [FAQ](/tidb-lightning/tidb-lightning-faq.md) + - [术语表](/tidb-lightning/tidb-lightning-glossary.md) + - [Dumpling](/dumpling-overview.md) + - TiCDC + - [概述](/ticdc/ticdc-overview.md) + - [安装部署与集群运维](/ticdc/deploy-ticdc.md) + - Changefeed + - [Changefeed 概述](/ticdc/ticdc-changefeed-overview.md) + - 创建 Changefeed + - [同步数据到 MySQL 兼容的数据库](/ticdc/ticdc-sink-to-mysql.md) + - [同步数据到 Kafka](/ticdc/ticdc-sink-to-kafka.md) + - [同步数据到存储服务](/ticdc/ticdc-sink-to-cloud-storage.md) + - [管理 
Changefeed](/ticdc/ticdc-manage-changefeed.md) + - [日志过滤器](/ticdc/ticdc-filter.md) + - [双向复制](/ticdc/ticdc-bidirectional-replication.md) + - [单行数据正确性校验](/ticdc/ticdc-integrity-check.md) + - 监控告警 + - [基本监控指标](/ticdc/ticdc-summary-monitor.md) + - [详细监控指标](/ticdc/monitor-ticdc.md) + - [报警规则](/ticdc/ticdc-alert-rules.md) + - 参考指南 + - [架构设计与原理](/ticdc/ticdc-architecture.md) + - [TiCDC Server 配置参数](/ticdc/ticdc-server-config.md) + - [TiCDC Changefeed 配置参数](/ticdc/ticdc-changefeed-config.md) + - 输出数据协议 + - [TiCDC Avro Protocol](/ticdc/ticdc-avro-protocol.md) + - [TiCDC Canal-JSON Protocol](/ticdc/ticdc-canal-json.md) + - [TiCDC Open Protocol](/ticdc/ticdc-open-protocol.md) + - [TiCDC CSV Protocol](/ticdc/ticdc-csv.md) + - [TiCDC Open API v2](/ticdc/ticdc-open-api-v2.md) + - [TiCDC Open API v1](/ticdc/ticdc-open-api.md) + - TiCDC 数据消费 + - [基于 Avro 的 TiCDC 行数据 Checksum 校验](/ticdc/ticdc-avro-checksum-verification.md) + - [Storage sink 消费程序编写指引](/ticdc/ticdc-storage-consumer-dev-guide.md) + - [兼容性](/ticdc/ticdc-compatibility.md) + - [故障处理](/ticdc/troubleshoot-ticdc.md) + - [常见问题解答](/ticdc/ticdc-faq.md) + - [术语表](/ticdc/ticdc-glossary.md) + - TiDB Binlog + - [概述](/tidb-binlog/tidb-binlog-overview.md) + - [快速上手](/tidb-binlog/get-started-with-tidb-binlog.md) + - [部署使用](/tidb-binlog/deploy-tidb-binlog.md) + - [运维管理](/tidb-binlog/maintain-tidb-binlog-cluster.md) + - [配置说明](/tidb-binlog/tidb-binlog-configuration-file.md) + - [Pump](/tidb-binlog/tidb-binlog-configuration-file.md#pump) + - [Drainer](/tidb-binlog/tidb-binlog-configuration-file.md#drainer) + - [版本升级](/tidb-binlog/upgrade-tidb-binlog.md) + - [监控告警](/tidb-binlog/monitor-tidb-binlog-cluster.md) + - [增量恢复](/tidb-binlog/tidb-binlog-reparo.md) + - [binlogctl 工具](/tidb-binlog/binlog-control.md) + - [Kafka 自定义开发](/tidb-binlog/binlog-consumer-client.md) + - [TiDB Binlog Relay Log](/tidb-binlog/tidb-binlog-relay-log.md) + - [集群间双向同步](/tidb-binlog/bidirectional-replication-between-tidb-clusters.md) + - 
[术语表](/tidb-binlog/tidb-binlog-glossary.md) + - 故障诊断 + - [故障诊断](/tidb-binlog/troubleshoot-tidb-binlog.md) + - [常见错误修复](/tidb-binlog/handle-tidb-binlog-errors.md) + - [FAQ](/tidb-binlog/tidb-binlog-faq.md) + - PingCAP Clinic 诊断服务 + - [概述](/clinic/clinic-introduction.md) + - [快速上手](/clinic/quick-start-with-clinic.md) + - [使用 PingCAP Clinic 诊断集群](/clinic/clinic-user-guide-for-tiup.md) + - [使用 PingCAP Clinic 生成诊断报告](/clinic/clinic-report.md) + - [采集 SQL 查询计划信息](/clinic/clinic-collect-sql-query-plan.md) + - [数据采集说明](/clinic/clinic-data-instruction-for-tiup.md) + - TiSpark + - [TiSpark 用户指南](/tispark-overview.md) + - sync-diff-inspector + - [概述](/sync-diff-inspector/sync-diff-inspector-overview.md) + - [不同库名或表名的数据校验](/sync-diff-inspector/route-diff.md) + - [分库分表场景下的数据校验](/sync-diff-inspector/shard-diff.md) + - [TiDB 主从集群的数据校验](/sync-diff-inspector/upstream-downstream-diff.md) + - [基于 DM 同步场景下的数据校验](/sync-diff-inspector/dm-diff.md) + - TiUniManager + - [概述](/tiunimanager/tiunimanager-overview.md) + - [安装和运维](/tiunimanager/tiunimanager-install-and-maintain.md) + - [快速操作](/tiunimanager/tiunimanager-quick-start.md) + - 操作指南 + - [登录与初始化](/tiunimanager/tiunimanager-login-and-initialize.md) + - [管理集群资源](/tiunimanager/tiunimanager-manage-host-resources.md) + - [管理集群](/tiunimanager/tiunimanager-manage-clusters.md) + - [导入与导出数据](/tiunimanager/tiunimanager-import-and-export-data.md) + - [管理任务](/tiunimanager/tiunimanager-manage-tasks.md) + - [管理系统](/tiunimanager/tiunimanager-manage-system.md) + - [FAQ](/tiunimanager/tiunimanager-faq.md) + - 发布版本历史 + - [发布版本汇总](/tiunimanager/tiunimanager-release-notes.md) + - [v1.0.2](/tiunimanager/tiunimanager-release-1.0.2.md) + - [v1.0.1](/tiunimanager/tiunimanager-release-1.0.1.md) + - [v1.0.0](/tiunimanager/tiunimanager-release-1.0.0.md) +- 参考指南 + - 架构 + - [概述](/tidb-architecture.md) + - [存储](/tidb-storage.md) + - [计算](/tidb-computing.md) + - [调度](/tidb-scheduling.md) + - 存储引擎 TiKV + - [TiKV 简介](/tikv-overview.md) + - [RocksDB 
简介](/storage-engine/rocksdb-overview.md) + - [Titan 简介](/storage-engine/titan-overview.md) + - [Titan 配置说明](/storage-engine/titan-configuration.md) + - [Partitioned Raft KV](/partitioned-raft-kv.md) + - 存储引擎 TiFlash + - [TiFlash 简介](/tiflash/tiflash-overview.md) + - [构建 TiFlash 副本](/tiflash/create-tiflash-replicas.md) + - [使用 TiDB 读取 TiFlash](/tiflash/use-tidb-to-read-tiflash.md) + - [使用 TiSpark 读取 TiFlash](/tiflash/use-tispark-to-read-tiflash.md) + - [使用 MPP 模式](/tiflash/use-tiflash-mpp-mode.md) + - [TiFlash 存算分离架构与 S3 支持](/tiflash/tiflash-disaggregated-and-s3.md) + - [使用 FastScan 功能](/tiflash/use-fastscan.md) + - [TiFlash 支持的计算下推](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash 查询结果物化](/tiflash/tiflash-results-materialization.md) + - [TiFlash 延迟物化](/tiflash/tiflash-late-materialization.md) + - [TiFlash 数据落盘](/tiflash/tiflash-spill-disk.md) + - [TiFlash 数据校验](/tiflash/tiflash-data-validation.md) + - [TiFlash 兼容性说明](/tiflash/tiflash-compatibility.md) + - [TiFlash Pipeline Model 执行模型](/tiflash/tiflash-pipeline-model.md) + - [系统变量](/system-variables.md) + - 配置文件参数 + - [tidb-server](/tidb-configuration-file.md) + - [tikv-server](/tikv-configuration-file.md) + - [tiflash-server](/tiflash/tiflash-configuration.md) + - [pd-server](/pd-configuration-file.md) + - CLI + - [tikv-ctl](/tikv-control.md) + - [pd-ctl](/pd-control.md) + - [tidb-ctl](/tidb-control.md) + - [pd-recover](/pd-recover.md) + - [binlog-ctl](/tidb-binlog/binlog-control.md) + - 命令行参数 + - [tidb-server](/command-line-flags-for-tidb-configuration.md) + - [tikv-server](/command-line-flags-for-tikv-configuration.md) + - [tiflash-server](/tiflash/tiflash-command-line-flags.md) + - [pd-server](/command-line-flags-for-pd-configuration.md) + - 监控指标 + - [Overview 面板](/grafana-overview-dashboard.md) + - [Performance Overview 面板](/grafana-performance-overview-dashboard.md) + - [TiDB 面板](/grafana-tidb-dashboard.md) + - [PD 面板](/grafana-pd-dashboard.md) + - [TiKV 面板](/grafana-tikv-dashboard.md) + - 
[TiFlash 监控指标](/tiflash/monitor-tiflash.md) + - [TiCDC 监控指标](/ticdc/monitor-ticdc.md) + - [Resource Control 监控指标](/grafana-resource-control-dashboard.md) + - 安全加固 + - [为 TiDB 客户端服务端间通信开启加密传输](/enable-tls-between-clients-and-servers.md) + - [为 TiDB 组件间通信开启加密传输](/enable-tls-between-components.md) + - [生成自签名证书](/generate-self-signed-certificates.md) + - [静态加密](/encryption-at-rest.md) + - [为 TiDB 落盘文件开启加密](/enable-disk-spill-encrypt.md) + - [日志脱敏](/log-redaction.md) + - 权限 + - [与 MySQL 安全特性差异](/security-compatibility-with-mysql.md) + - [权限管理](/privilege-management.md) + - [TiDB 用户账户管理](/user-account-management.md) + - [TiDB 密码管理](/password-management.md) + - [基于角色的访问控制](/role-based-access-control.md) + - [TiDB 证书鉴权使用指南](/certificate-authentication.md) + - SQL + - SQL 语言结构和语法 + - 属性 + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [字面值](/literal-values.md) + - [Schema 对象名](/schema-object-names.md) + - [关键字](/keywords.md) + - [用户自定义变量](/user-defined-variables.md) + - [表达式语法](/expression-syntax.md) + - [注释语法](/comment-syntax.md) + - SQL 语句 + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN CLEANUP`](/sql-statements/sql-statement-admin-cleanup.md) + - [`ADMIN PAUSE DDL`](/sql-statements/sql-statement-admin-pause-ddl.md) + - [`ADMIN RECOVER INDEX`](/sql-statements/sql-statement-admin-recover.md) + - [`ADMIN RESUME DDL`](/sql-statements/sql-statement-admin-resume-ddl.md) + - [`ADMIN SHOW DDL [JOBS|QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ADMIN SHOW 
TELEMETRY`](/sql-statements/sql-statement-admin-show-telemetry.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`ALTER INSTANCE`](/sql-statements/sql-statement-alter-instance.md) + - [`ALTER PLACEMENT POLICY`](/sql-statements/sql-statement-alter-placement-policy.md) + - [`ALTER RESOURCE GROUP`](/sql-statements/sql-statement-alter-resource-group.md) + - [`ALTER TABLE`](/sql-statements/sql-statement-alter-table.md) + - [`ALTER TABLE COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BACKUP`](/sql-statements/sql-statement-backup.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CALIBRATE RESOURCE`](/sql-statements/sql-statement-calibrate-resource.md) + - [`CANCEL IMPORT JOB`](/sql-statements/sql-statement-cancel-import-job.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`CHANGE DRAINER`](/sql-statements/sql-statement-change-drainer.md) + - [`CHANGE PUMP`](/sql-statements/sql-statement-change-pump.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-create-placement-policy.md) + - [`CREATE RESOURCE GROUP`](/sql-statements/sql-statement-create-resource-group.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE 
TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`DROP PLACEMENT POLICY`](/sql-statements/sql-statement-drop-placement-policy.md) + - [`DROP RESOURCE GROUP`](/sql-statements/sql-statement-drop-resource-group.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER TO TIMESTAMP`](/sql-statements/sql-statement-flashback-to-timestamp.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT 
<privileges>`](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT <role>`](/sql-statements/sql-statement-grant-role.md) + - [`IMPORT INTO`](/sql-statements/sql-statement-import-into.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOAD DATA`](/sql-statements/sql-statement-load-data.md) + - [`LOAD STATS`](/sql-statements/sql-statement-load-stats.md) + - [`LOCK STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`LOCK TABLES` 和 `UNLOCK TABLES`](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`RENAME USER`](/sql-statements/sql-statement-rename-user.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`RESTORE`](/sql-statements/sql-statement-restore.md) + - [`REVOKE <privileges>`](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE <role>`](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET RESOURCE GROUP`](/sql-statements/sql-statement-set-resource-group.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] <variable>`](/sql-statements/sql-statement-set-variable.md) + - [`SHOW 
[BACKUPS|RESTORES]`](/sql-statements/sql-statement-show-backups.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW [FULL] COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CONFIG`](/sql-statements/sql-statement-show-config.md) + - [`SHOW CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-show-create-placement-policy.md) + - [`SHOW CREATE RESOURCE GROUP`](/sql-statements/sql-statement-show-create-resource-group.md) + - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW DRAINER STATUS`](/sql-statements/sql-statement-show-drainer-status.md) + - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW [FULL] FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW IMPORT JOB`](/sql-statements/sql-statement-show-import-job.md) + - [`SHOW INDEX [FROM|IN]`](/sql-statements/sql-statement-show-index.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW KEYS [FROM|IN]`](/sql-statements/sql-statement-show-keys.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW 
PLACEMENT`](/sql-statements/sql-statement-show-placement.md) + - [`SHOW PLACEMENT FOR`](/sql-statements/sql-statement-show-placement-for.md) + - [`SHOW PLACEMENT LABELS`](/sql-statements/sql-statement-show-placement-labels.md) + - [`SHOW PLUGINS`](/sql-statements/sql-statement-show-plugins.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW [FULL] PROCESSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW PUMP STATUS`](/sql-statements/sql-statement-show-pump-status.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-histograms.md) + - [`SHOW STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW [FULL] TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`SHUTDOWN`](/sql-statements/sql-statement-shutdown.md) + - [`SPLIT REGION`](/sql-statements/sql-statement-split-region.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - 
[`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - 数据类型 + - [数据类型概述](/data-type-overview.md) + - [数据类型默认值](/data-type-default-values.md) + - [数值类型](/data-type-numeric.md) + - [日期和时间类型](/data-type-date-and-time.md) + - [字符串类型](/data-type-string.md) + - [JSON 类型](/data-type-json.md) + - 函数与操作符 + - [函数与操作符概述](/functions-and-operators/functions-and-operators-overview.md) + - [表达式求值的类型转换](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [操作符](/functions-and-operators/operators.md) + - [控制流程函数](/functions-and-operators/control-flow-functions.md) + - [字符串函数](/functions-and-operators/string-functions.md) + - [数值函数与操作符](/functions-and-operators/numeric-functions-and-operators.md) + - [日期和时间函数](/functions-and-operators/date-and-time-functions.md) + - [位函数和操作符](/functions-and-operators/bit-functions-and-operators.md) + - [Cast 函数和操作符](/functions-and-operators/cast-functions-and-operators.md) + - [加密和压缩函数](/functions-and-operators/encryption-and-compression-functions.md) + - [锁函数](/functions-and-operators/locking-functions.md) + - [信息函数](/functions-and-operators/information-functions.md) + - [JSON 函数](/functions-and-operators/json-functions.md) + - [GROUP BY 聚合函数](/functions-and-operators/aggregate-group-by-functions.md) + - [窗口函数](/functions-and-operators/window-functions.md) + - [其它函数](/functions-and-operators/miscellaneous-functions.md) + - [精度数学](/functions-and-operators/precision-math.md) + - [集合运算](/functions-and-operators/set-operators.md) + - [下推到 TiKV 的表达式列表](/functions-and-operators/expressions-pushed-down.md) + - [TiDB 特有的函数](/functions-and-operators/tidb-functions.md) + - [Oracle 与 TiDB 函数和语法差异对照](/oracle-functions-to-tidb.md) + - [聚簇索引](/clustered-indexes.md) + - [约束](/constraints.md) + - [生成列](/generated-columns.md) + - [SQL 模式](/sql-mode.md) + - [表属性](/table-attributes.md) + - 事务 + - [事务概览](/transaction-overview.md) + - 
[隔离级别](/transaction-isolation-levels.md) + - [乐观事务](/optimistic-transaction.md) + - [悲观事务](/pessimistic-transaction.md) + - [非事务 DML 语句](/non-transactional-dml.md) + - [视图](/views.md) + - [分区表](/partitioned-table.md) + - [临时表](/temporary-tables.md) + - [缓存表](/cached-tables.md) + - [外键约束](/foreign-key.md) + - 字符集和排序 + - [概述](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - [Placement Rules in SQL](/placement-rules-in-sql.md) + - 系统表 + - [`mysql`](/mysql-schema.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`CLUSTER_CONFIG`](/information-schema/information-schema-cluster-config.md) + - [`CLUSTER_HARDWARE`](/information-schema/information-schema-cluster-hardware.md) + - [`CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) + - [`CLUSTER_LOAD`](/information-schema/information-schema-cluster-load.md) + - [`CLUSTER_LOG`](/information-schema/information-schema-cluster-log.md) + - [`CLUSTER_SYSTEMINFO`](/information-schema/information-schema-cluster-systeminfo.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - 
[`ENGINES`](/information-schema/information-schema-engines.md) + - [`INSPECTION_RESULT`](/information-schema/information-schema-inspection-result.md) + - [`INSPECTION_RULES`](/information-schema/information-schema-inspection-rules.md) + - [`INSPECTION_SUMMARY`](/information-schema/information-schema-inspection-summary.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`METRICS_SUMMARY`](/information-schema/information-schema-metrics-summary.md) + - [`METRICS_TABLES`](/information-schema/information-schema-metrics-tables.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PLACEMENT_POLICIES`](/information-schema/information-schema-placement-policies.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`RESOURCE_GROUPS`](/information-schema/information-schema-resource-groups.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`SLOW_QUERY`](/information-schema/information-schema-slow-query.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_HOT_REGIONS`](/information-schema/information-schema-tidb-hot-regions.md) + - [`TIDB_HOT_REGIONS_HISTORY`](/information-schema/information-schema-tidb-hot-regions-history.md) + - 
[`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_SERVERS_INFO`](/information-schema/information-schema-tidb-servers-info.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIFLASH_SEGMENTS`](/information-schema/information-schema-tiflash-segments.md) + - [`TIFLASH_TABLES`](/information-schema/information-schema-tiflash-tables.md) + - [`TIKV_REGION_PEERS`](/information-schema/information-schema-tikv-region-peers.md) + - [`TIKV_REGION_STATUS`](/information-schema/information-schema-tikv-region-status.md) + - [`TIKV_STORE_STATUS`](/information-schema/information-schema-tikv-store-status.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - [`METRICS_SCHEMA`](/metrics-schema.md) + - [元数据锁](/metadata-lock.md) + - UI + - TiDB Dashboard + - [简介](/dashboard/dashboard-intro.md) + - 运维 + - [部署](/dashboard/dashboard-ops-deploy.md) + - [反向代理](/dashboard/dashboard-ops-reverse-proxy.md) + - [用户管理](/dashboard/dashboard-user.md) + - [安全](/dashboard/dashboard-ops-security.md) + - [访问](/dashboard/dashboard-access.md) + - [概况页面](/dashboard/dashboard-overview.md) + - [集群信息页面](/dashboard/dashboard-cluster-info.md) + - [Top SQL 页面](/dashboard/top-sql.md) + - [流量可视化页面](/dashboard/dashboard-key-visualizer.md) + - [监控关系图](/dashboard/dashboard-metrics-relation.md) + - SQL 语句分析 + - [列表页面](/dashboard/dashboard-statement-list.md) + - [执行详情页面](/dashboard/dashboard-statement-details.md) + - [慢查询页面](/dashboard/dashboard-slow-query.md) + - 集群诊断页面 + - [访问](/dashboard/dashboard-diagnostics-access.md) + - [查看报告](/dashboard/dashboard-diagnostics-report.md) + - 
[使用示例](/dashboard/dashboard-diagnostics-usage.md) + - [监控指标页面](/dashboard/dashboard-monitoring.md) + - [日志搜索页面](/dashboard/dashboard-log-search.md) + - [资源管控页面](/dashboard/dashboard-resource-manager.md) + - 实例性能分析 + - [手动分析页面](/dashboard/dashboard-profiling.md) + - [持续分析页面](/dashboard/continuous-profiling.md) + - 会话管理与配置 + - [分享会话](/dashboard/dashboard-session-share.md) + - [配置 SSO 登录](/dashboard/dashboard-session-sso.md) + - [常见问题](/dashboard/dashboard-faq.md) + - [遥测](/telemetry.md) + - [错误码](/error-codes.md) + - [通过拓扑 label 进行副本调度](/schedule-replicas-by-topology-labels.md) + - 内部组件介绍 + - [TiDB 后端任务分布式并行执行框架](/tidb-distributed-execution-framework.md) +- 常见问题解答 (FAQ) + - [FAQ 汇总](/faq/faq-overview.md) + - [产品 FAQ](/faq/tidb-faq.md) + - [SQL FAQ](/faq/sql-faq.md) + - [安装部署 FAQ](/faq/deploy-and-maintain-faq.md) + - [迁移 FAQ](/faq/migration-tidb-faq.md) + - [升级 FAQ](/faq/upgrade-faq.md) + - [监控 FAQ](/faq/monitor-faq.md) + - [集群管理 FAQ](/faq/manage-cluster-faq.md) + - [高可用 FAQ](/faq/high-availability-faq.md) + - [高可靠 FAQ](/faq/high-reliability-faq.md) + - [备份恢复 FAQ](/faq/backup-and-restore-faq.md) +- 版本发布历史 + - [发布版本汇总](/releases/release-notes.md) + - [版本发布时间线](/releases/release-timeline.md) + - [TiDB 版本规则](/releases/versioning.md) + - [TiDB 离线包](/binary-package.md) + - v7.2 + - [7.2.0-DMR](/releases/release-7.2.0.md) + - v7.1 + - [7.1.0](/releases/release-7.1.0.md) + - v7.0 + - [7.0.0-DMR](/releases/release-7.0.0.md) + - v6.6 + - [6.6.0-DMR](/releases/release-6.6.0.md) + - v6.5 + - [6.5.3](/releases/release-6.5.3.md) + - [6.5.2](/releases/release-6.5.2.md) + - [6.5.1](/releases/release-6.5.1.md) + - [6.5.0](/releases/release-6.5.0.md) + - v6.4 + - [6.4.0-DMR](/releases/release-6.4.0.md) + - v6.3 + - [6.3.0-DMR](/releases/release-6.3.0.md) + - v6.2 + - [6.2.0-DMR](/releases/release-6.2.0.md) + - v6.1 + - [6.1.7](/releases/release-6.1.7.md) + - [6.1.6](/releases/release-6.1.6.md) + - [6.1.5](/releases/release-6.1.5.md) + - [6.1.4](/releases/release-6.1.4.md) + - 
[6.1.3](/releases/release-6.1.3.md) + - [6.1.2](/releases/release-6.1.2.md) + - [6.1.1](/releases/release-6.1.1.md) + - [6.1.0](/releases/release-6.1.0.md) + - v6.0 + - [6.0.0-DMR](/releases/release-6.0.0-dmr.md) + - v5.4 + - [5.4.3](/releases/release-5.4.3.md) + - [5.4.2](/releases/release-5.4.2.md) + - [5.4.1](/releases/release-5.4.1.md) + - [5.4.0](/releases/release-5.4.0.md) + - v5.3 + - [5.3.4](/releases/release-5.3.4.md) + - [5.3.3](/releases/release-5.3.3.md) + - [5.3.2](/releases/release-5.3.2.md) + - [5.3.1](/releases/release-5.3.1.md) + - [5.3.0](/releases/release-5.3.0.md) + - v5.2 + - [5.2.4](/releases/release-5.2.4.md) + - [5.2.3](/releases/release-5.2.3.md) + - [5.2.2](/releases/release-5.2.2.md) + - [5.2.1](/releases/release-5.2.1.md) + - [5.2.0](/releases/release-5.2.0.md) + - v5.1 + - [5.1.5](/releases/release-5.1.5.md) + - [5.1.4](/releases/release-5.1.4.md) + - [5.1.3](/releases/release-5.1.3.md) + - [5.1.2](/releases/release-5.1.2.md) + - [5.1.1](/releases/release-5.1.1.md) + - [5.1.0](/releases/release-5.1.0.md) + - v5.0 + - [5.0.6](/releases/release-5.0.6.md) + - [5.0.5](/releases/release-5.0.5.md) + - [5.0.4](/releases/release-5.0.4.md) + - [5.0.3](/releases/release-5.0.3.md) + - [5.0.2](/releases/release-5.0.2.md) + - [5.0.1](/releases/release-5.0.1.md) + - [5.0 GA](/releases/release-5.0.0.md) + - [5.0.0-rc](/releases/release-5.0.0-rc.md) + - v4.0 + - [4.0.16](/releases/release-4.0.16.md) + - [4.0.15](/releases/release-4.0.15.md) + - [4.0.14](/releases/release-4.0.14.md) + - [4.0.13](/releases/release-4.0.13.md) + - [4.0.12](/releases/release-4.0.12.md) + - [4.0.11](/releases/release-4.0.11.md) + - [4.0.10](/releases/release-4.0.10.md) + - [4.0.9](/releases/release-4.0.9.md) + - [4.0.8](/releases/release-4.0.8.md) + - [4.0.7](/releases/release-4.0.7.md) + - [4.0.6](/releases/release-4.0.6.md) + - [4.0.5](/releases/release-4.0.5.md) + - [4.0.4](/releases/release-4.0.4.md) + - [4.0.3](/releases/release-4.0.3.md) + - 
[4.0.2](/releases/release-4.0.2.md) + - [4.0.1](/releases/release-4.0.1.md) + - [4.0 GA](/releases/release-4.0-ga.md) + - [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) + - [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) + - [4.0.0-rc](/releases/release-4.0.0-rc.md) + - [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) + - [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) + - [4.0.0-beta](/releases/release-4.0.0-beta.md) + - v3.1 + - [3.1.2](/releases/release-3.1.2.md) + - [3.1.1](/releases/release-3.1.1.md) + - [3.1.0 GA](/releases/release-3.1.0-ga.md) + - [3.1.0-rc](/releases/release-3.1.0-rc.md) + - [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) + - [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) + - [3.1.0-beta](/releases/release-3.1.0-beta.md) + - v3.0 + - [3.0.20](/releases/release-3.0.20.md) + - [3.0.19](/releases/release-3.0.19.md) + - [3.0.18](/releases/release-3.0.18.md) + - [3.0.17](/releases/release-3.0.17.md) + - [3.0.16](/releases/release-3.0.16.md) + - [3.0.15](/releases/release-3.0.15.md) + - [3.0.14](/releases/release-3.0.14.md) + - [3.0.13](/releases/release-3.0.13.md) + - [3.0.12](/releases/release-3.0.12.md) + - [3.0.11](/releases/release-3.0.11.md) + - [3.0.10](/releases/release-3.0.10.md) + - [3.0.9](/releases/release-3.0.9.md) + - [3.0.8](/releases/release-3.0.8.md) + - [3.0.7](/releases/release-3.0.7.md) + - [3.0.6](/releases/release-3.0.6.md) + - [3.0.5](/releases/release-3.0.5.md) + - [3.0.4](/releases/release-3.0.4.md) + - [3.0.3](/releases/release-3.0.3.md) + - [3.0.2](/releases/release-3.0.2.md) + - [3.0.1](/releases/release-3.0.1.md) + - [3.0 GA](/releases/release-3.0-ga.md) + - [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) + - [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) + - [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) + - [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) + - [3.0.0-beta](/releases/release-3.0-beta.md) + - v2.1 + - [2.1.19](/releases/release-2.1.19.md) + - [2.1.18](/releases/release-2.1.18.md) + - 
[2.1.17](/releases/release-2.1.17.md) + - [2.1.16](/releases/release-2.1.16.md) + - [2.1.15](/releases/release-2.1.15.md) + - [2.1.14](/releases/release-2.1.14.md) + - [2.1.13](/releases/release-2.1.13.md) + - [2.1.12](/releases/release-2.1.12.md) + - [2.1.11](/releases/release-2.1.11.md) + - [2.1.10](/releases/release-2.1.10.md) + - [2.1.9](/releases/release-2.1.9.md) + - [2.1.8](/releases/release-2.1.8.md) + - [2.1.7](/releases/release-2.1.7.md) + - [2.1.6](/releases/release-2.1.6.md) + - [2.1.5](/releases/release-2.1.5.md) + - [2.1.4](/releases/release-2.1.4.md) + - [2.1.3](/releases/release-2.1.3.md) + - [2.1.2](/releases/release-2.1.2.md) + - [2.1.1](/releases/release-2.1.1.md) + - [2.1 GA](/releases/release-2.1-ga.md) + - [2.1 RC5](/releases/release-2.1-rc.5.md) + - [2.1 RC4](/releases/release-2.1-rc.4.md) + - [2.1 RC3](/releases/release-2.1-rc.3.md) + - [2.1 RC2](/releases/release-2.1-rc.2.md) + - [2.1 RC1](/releases/release-2.1-rc.1.md) + - [2.1 Beta](/releases/release-2.1-beta.md) + - v2.0 + - [2.0.11](/releases/release-2.0.11.md) + - [2.0.10](/releases/release-2.0.10.md) + - [2.0.9](/releases/release-2.0.9.md) + - [2.0.8](/releases/release-2.0.8.md) + - [2.0.7](/releases/release-2.0.7.md) + - [2.0.6](/releases/release-2.0.6.md) + - [2.0.5](/releases/release-2.0.5.md) + - [2.0.4](/releases/release-2.0.4.md) + - [2.0.3](/releases/release-2.0.3.md) + - [2.0.2](/releases/release-2.0.2.md) + - [2.0.1](/releases/release-2.0.1.md) + - [2.0](/releases/release-2.0-ga.md) + - [2.0 RC5](/releases/release-2.0-rc.5.md) + - [2.0 RC4](/releases/release-2.0-rc.4.md) + - [2.0 RC3](/releases/release-2.0-rc.3.md) + - [2.0 RC1](/releases/release-2.0-rc.1.md) + - [1.1 Beta](/releases/release-1.1-beta.md) + - [1.1 Alpha](/releases/release-1.1-alpha.md) + - v1.0 + - [1.0](/releases/release-1.0-ga.md) + - [Pre-GA](/releases/release-pre-ga.md) + - [RC4](/releases/release-rc.4.md) + - [RC3](/releases/release-rc.3.md) + - [RC2](/releases/release-rc.2.md) + - 
[RC1](/releases/release-rc.1.md) +- [术语表](/glossary.md) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-choose-driver-or-orm.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-choose-driver-or-orm.md new file mode 100644 index 00000000..369cec61 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-choose-driver-or-orm.md @@ -0,0 +1,303 @@ +--- +title: 选择驱动或 ORM 框架 +summary: 选择驱动或 ORM 框架连接 TiDB。 +aliases: ['/zh/tidb/dev/choose-driver-or-orm'] +--- + +# 选择驱动或 ORM 框架 + +> **注意:** +> +> TiDB 支持等级说明: +> +> - **Full**:表明 TiDB 已经兼容该工具的绝大多数功能,并且在该工具的新版本中对其保持兼容。PingCAP 将定期地对 [TiDB 支持的第三方工具](/develop/dev-guide-third-party-support.md)中的新版本进行兼容性测试。 +> - **Compatible**:表明由于该工具已适配 MySQL,而 TiDB 高度兼容 MySQL 协议,因此 TiDB 可以兼容该工具的大部分功能。但 PingCAP 并未对该工具作出完整的兼容性验证,有可能出现一些意外的行为。 +> +> 关于更多 TiDB 支持的第三方工具,你可以查看 [TiDB 支持的第三方工具](/develop/dev-guide-third-party-support.md)。 + +TiDB 兼容 MySQL 的协议,但存在部分与 MySQL 不兼容或有差异的特性,具体信息可查看[与 MySQL 兼容性对比](/mysql-compatibility.md)。 + +## Java + +本节介绍 Java 语言的 Driver 及 ORM 的使用方式。 + +### Java Drivers + + +
+ +支持等级:**Full** + +按照 [MySQL 文档](https://dev.mysql.com/doc/connector-j/8.0/en/)中的说明下载并配置 Java JDBC 驱动程序即可使用。对于 TiDB v6.3.0 及以上版本,建议使用 MySQL Connector/J 8.0.33 及以上版本。 + +> **建议:** +> +> 在 8.0.32 之前的 MySQL Connector/J 8.0 版本中存在一个 [bug](https://bugs.mysql.com/bug.php?id=106252),当与 TiDB v6.3.0 之前的版本一起使用时,可能会导致线程卡死。为了避免此问题,建议使用 MySQL Connector/J 8.0.32 或更高版本,或者使用 TiDB JDBC(见 *TiDB-JDBC* 标签)。 + +有关一个完整的实例应用程序,可参阅 [TiDB 和 JDBC 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-jdbc.md)。 + +
+
+ +支持等级:**Full** + +[TiDB-JDBC](https://github.com/pingcap/mysql-connector-j) 是基于 MySQL 8.0.29 的定制版本。TiDB-JDBC 基于 MySQL 官方 8.0.29 版本编译,修复了原 JDBC 在 prepare 模式下多参数、多字段 EOF 的错误,并新增 TiCDC snapshot 自动维护和 SM3 认证插件等功能。 + +基于 SM3 的认证仅在 TiDB 版本的 MySQL Connector/J 中支持。 + +如果你使用的是 **Maven**,请将以下内容添加到你的 ``: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + +``` + +如果你需要使用 SM3 认证,请将以下内容添加到你的 ``: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + + + org.bouncycastle + bcprov-jdk15on + 1.67 + + + org.bouncycastle + bcpkix-jdk15on + 1.67 + +``` + +如果你使用的是 Gradle,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation group: 'io.github.lastincisor', name: 'mysql-connector-java', version: '8.0.29-tidb-1.0.0' +implementation group: 'org.bouncycastle', name: 'bcprov-jdk15on', version: '1.67' +implementation group: 'org.bouncycastle', name: 'bcpkix-jdk15on', version: '1.67' +``` + +
+
+ +### Java ORM 框架 + +> **注意:** +> +> - Hibernate 当前[不支持嵌套事务](https://stackoverflow.com/questions/37927208/nested-transaction-in-spring-app-with-jpa-postgres)。 +> - TiDB 从 v6.2.0 版本开始支持 [Savepoint](/sql-statements/sql-statement-savepoint.md)。如需在 `@Transactional` 中使用 `Propagation.NESTED` 事务传播选项,即 `@Transactional(propagation = Propagation.NESTED)`,请确认你的 TiDB 版本为 v6.2.0 或以上。 + + +
+ +支持等级:**Full** + +你可以使用 [Gradle](https://gradle.org/install) 或 [Maven](https://maven.apache.org/install.html) 获取你的应用程序的所有依赖项,且会帮你下载依赖项的间接依赖,而无需你手动管理复杂的依赖关系。注意,只有 Hibernate `6.0.0.Beta2` 及以上版本才支持 TiDB 方言。 + +如果你使用的是 **Maven**,请将以下内容添加到你的 ``: + +```xml + + org.hibernate.orm + hibernate-core + 6.0.0.CR2 + + + + mysql + mysql-connector-java + 5.1.49 + +``` + +如果你使用的是 `Gradle`,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation 'org.hibernate:hibernate-core:6.0.0.CR2' +implementation 'mysql:mysql-connector-java:5.1.49' +``` + +- 有关原生 Java 使用 Hibernate 进行 TiDB 应用程序构建的例子,可参阅 [TiDB 和 Hibernate 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-hibernate.md)。 +- 有关 Spring 使用 Spring Data JPA、Hibernate 进行 TiDB 应用程序构建的例子,可参阅[使用 Spring Boot 构建 TiDB 应用程序](/develop/dev-guide-sample-application-java-spring-boot.md)。 + +额外的,你需要在 [Hibernate 配置文件](https://www.tutorialspoint.com/hibernate/hibernate_configuration.htm)中指定 TiDB 方言 `org.hibernate.dialect.TiDBDialect`,此方言在 Hibernate `6.0.0.Beta2` 以上才可支持。若你无法升级 Hibernate 版本,那么请你直接使用 MySQL 5.7 的方言 `org.hibernate.dialect.MySQL57Dialect`。但这可能造成不可预料的使用结果,及部分 TiDB 特有特性的缺失,如:[序列](/sql-statements/sql-statement-create-sequence.md)等。 + +
+ +
+ +支持等级:**Full** + +你可以使用 [Gradle](https://gradle.org/install) 或 [Maven](https://maven.apache.org/install.html) 获取应用程序的所有依赖项包括间接依赖,无需手动管理复杂的依赖关系。 + +如果你使用的是 Maven,请将以下内容添加到你的 ``: + +```xml + + org.mybatis + mybatis + 3.5.9 + + + + mysql + mysql-connector-java + 5.1.49 + +``` + +如果你使用的是 Gradle,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation 'org.mybatis:mybatis:3.5.9' +implementation 'mysql:mysql-connector-java:5.1.49' +``` + +使用 MyBatis 进行 TiDB 应用程序构建的例子,可参阅 [TiDB 和 MyBatis 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-mybatis.md)。 + +
+ +
+ +### Java 客户端负载均衡 + +**tidb-loadbalance** + +支持等级:**Full** + +[tidb-loadbalance](https://github.com/pingcap/tidb-loadbalance) 是应用端的负载均衡组件。通过 tidb-loadbalance,你可以实现自动维护 TiDB server 的节点信息,根据节点信息使用 tidb-loadbalance 策略在客户端分发 JDBC 连接。客户端应用与 TiDB server 之间使用 JDBC 直连,性能高于使用负载均衡组件。 + +目前 tidb-loadbalance 已实现轮询、随机、权重等负载均衡策略。 + +> **注意:** +> +> tidb-loadbalance 需配合 mysql-connector-j 一起使用。 + +如果你使用的是 **Maven**,请将以下内容添加到你的 ``: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + + + io.github.lastincisor + tidb-loadbalance + 0.0.5 + +``` + +如果你使用的是 Gradle,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation group: 'io.github.lastincisor', name: 'mysql-connector-java', version: '8.0.29-tidb-1.0.0' +implementation group: 'io.github.lastincisor', name: 'tidb-loadbalance', version: '0.0.5' +``` + +## Golang + +本节介绍 Golang 语言的 Driver 及 ORM 的使用方式。 + +### Golang Drivers + +**go-sql-driver/mysql** + +支持等级:**Full** + +按照 [go-sql-driver/mysql 文档](https://github.com/go-sql-driver/mysql)中的说明获取并配置 Golang 驱动程序即可使用。 + +有关一个完整的实例应用程序,可参阅使用 [TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-sql-driver.md)。 + +### Golang ORM 框架 + +**GORM** + +支持等级:**Full** + +GORM 是一个流行的 Golang 的 ORM 框架,你可以使用 `go get` 获取你的应用程序的所有依赖项。 + +```shell +go get -u gorm.io/gorm +go get -u gorm.io/driver/mysql +``` + +使用 GORM 进行 TiDB 应用程序构建的例子,可参阅 [TiDB 和 GORM 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-gorm.md)。 + +## Python + +本节介绍 Python 语言的 Driver 及 ORM 的使用方式。 + +### Python Drivers + + +
+ +支持等级:**Compatible** + +按照 [PyMySQL 文档](https://pypi.org/project/PyMySQL/)中的说明下载并配置驱动程序即可使用。建议使用 **1.0.2** 及以上版本。 + +使用 PyMySQL 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 PyMySQL 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-pymysql.md)。 + +
+
+ +支持等级:**Compatible** + +按照 [mysqlclient 文档](https://pypi.org/project/mysqlclient/)中的说明下载并配置驱动程序即可使用。建议使用 **2.1.1** 及以上版本。 + +使用 mysqlclient 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 mysqlclient 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysqlclient.md)。 + +
+
+ +支持等级:**Compatible** + +按照 [MySQL Connector/Python 文档](https://dev.mysql.com/doc/connector-python/en/connector-python-installation-binary.html)中的说明下载并配置驱动程序即可使用。建议使用 **8.0.31** 及以上版本。 + +使用 MySQL Connector/Python 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysql-connector.md)。 + +
+
+ +### Python ORM 框架 + + + +
+ +支持等级:**Full** + +[Django](https://docs.djangoproject.com/) 是一个流行的 Python 的开发框架,你可以使用 `pip install Django==3.2.16 django-tidb>=3.0.0` 获取你的应用程序的所有依赖项。建议使用 Django **3.2.16** 及以上版本。 + +使用 Django 构建 TiDB 应用程序的例子,可参阅[使用 Django 构建 TiDB 应用程序](/develop/dev-guide-sample-application-python-django.md)。 + +
+ +
+ +支持等级:**Full** + +[SQLAlchemy](https://www.sqlalchemy.org/) 是一个流行的 Python 的 ORM 框架,你可以使用 `pip install SQLAlchemy==1.4.44` 获取你的应用程序的所有依赖项。建议使用 **1.4.44** 及以上版本。 + +使用 SQLAlchemy 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 SQLAlchemy 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-sqlalchemy.md)。 + +
+ +
+ +支持等级:**Compatible** + +[peewee](http://docs.peewee-orm.com/en/latest/) 是一个流行的 Python 的 ORM 框架,你可以使用 `pip install peewee==3.15.4` 获取你的应用程序的所有依赖项。建议使用 **3.15.4** 及以上版本。 + +使用 peewee 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 peewee 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-peewee.md)。 + +
+ +
diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-insert-data.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-insert-data.md new file mode 100644 index 00000000..aef7b5a7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-insert-data.md @@ -0,0 +1,293 @@ +--- +title: 插入数据 +summary: 插入数据、批量导入数据的方法、最佳实践及例子。 +aliases: ['/zh/tidb/dev/insert-data'] +--- + + + +# 插入数据 + +此页面将展示使用 SQL 语言,配合各种编程语言将数据插入到 TiDB 中。 + +## 在开始之前 + +在阅读本页面之前,你需要准备以下事项: + +- [使用 TiDB Serverless 构建 TiDB 集群](/develop/dev-guide-build-cluster-in-cloud.md)。 +- 阅读[数据库模式概览](/develop/dev-guide-schema-design-overview.md),并[创建数据库](/develop/dev-guide-create-database.md)、[创建表](/develop/dev-guide-create-table.md)、[创建二级索引](/develop/dev-guide-create-secondary-indexes.md)。 + +## 插入行 + +假设你需要插入多行数据,那么会有两种插入的办法,假设需要插入 3 个玩家数据: + +- 一个**多行插入语句**: + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1), (2, 230, 2), (3, 300, 5); + ``` + +- 多个**单行插入语句**: + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1); + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (2, 230, 2); + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (3, 300, 5); + ``` + +一般来说使用一个`多行插入语句`,会比多个`单行插入语句`快。 + + +
+ +在 SQL 中插入多行数据的示例: + +```sql +CREATE TABLE `player` (`id` INT, `coins` INT, `goods` INT); +INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1), (2, 230, 2); +``` + +有关如何使用此 SQL,可查阅[连接到 TiDB 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-2-步连接到集群)文档部分,按文档步骤使用客户端连接到 TiDB 集群后,输入 SQL 语句即可。 + +
+ +
+ +在 Java 中插入多行数据的示例: + +```java +// ds is an entity of com.mysql.cj.jdbc.MysqlDataSource +try (Connection connection = ds.getConnection()) { + connection.setAutoCommit(false); + + PreparedStatement pstmt = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)")) + + // first player + pstmt.setInt(1, 1); + pstmt.setInt(2, 1000); + pstmt.setInt(3, 1); + pstmt.addBatch(); + + // second player + pstmt.setInt(1, 2); + pstmt.setInt(2, 230); + pstmt.setInt(3, 2); + pstmt.addBatch(); + + pstmt.executeBatch(); + connection.commit(); +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +另外,由于 MySQL JDBC Driver 默认设置问题,你需更改部分参数,以获得更好的批量插入性能: + +| 参数 | 作用 | 推荐场景 | 推荐配置 | +| :------------------------: | :-----------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------: | +| `useServerPrepStmts` | 是否使用服务端开启预处理语句支持 | 在需要多次使用预处理语句时 | `true` | +| `cachePrepStmts` | 客户端是否缓存预处理语句 | `useServerPrepStmts=true` 时 | `true` | +| `prepStmtCacheSqlLimit` | 预处理语句最大大小(默认 256 字符) | 预处理语句大于 256 字符时 | 按实际预处理语句大小配置 | +| `prepStmtCacheSize` | 预处理语句最大缓存数量 (默认 25 条) | 预处理语句数量大于 25 条时 | 按实际预处理语句数量配置 | +| `rewriteBatchedStatements` | 是否重写 Batch 语句 | 需要批量操作时 | `true` | +| `allowMultiQueries` | 开启批量操作 | 因为一个[客户端 Bug](https://bugs.mysql.com/bug.php?id=96623) 在 `rewriteBatchedStatements = true` 和 `useServerPrepStmts = true` 时,需设置此项 | `true` | + +MySQL JDBC Driver 还提供了一个集成配置项:`useConfigs`。当它配置为 `maxPerformance` 时,相当于配置了一组配置,以 `mysql:mysql-connector-java:8.0.28` 为例,`useConfigs=maxPerformance` 包含: + +```properties +cachePrepStmts=true +cacheCallableStmts=true +cacheServerConfiguration=true +useLocalSessionState=true +elideSetAutoCommits=true +alwaysSendSetIsolation=false +enableQueryTimeouts=false +connectionAttributes=none +useInformationSchema=true +``` + +你可以自行查看 
`mysql-connector-java-{version}.jar!/com/mysql/cj/configurations/maxPerformance.properties` 来获得对应版本 MySQL JDBC Driver 的 `useConfigs=maxPerformance` 包含配置。
+
+在此处给出一个较为通用场景的 JDBC 连接字符串配置,以 Host: `127.0.0.1`,Port: `4000`,用户名: `root`,密码: 空,默认数据库: `test`为例:
+
+```
+jdbc:mysql://127.0.0.1:4000/test?user=root&useConfigs=maxPerformance&useServerPrepStmts=true&prepStmtCacheSqlLimit=2048&prepStmtCacheSize=256&rewriteBatchedStatements=true&allowMultiQueries=true
+```
+
+有关 Java 的完整示例,可参阅:
+
+- [TiDB 和 JDBC 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-jdbc.md)
+- [TiDB 和 Hibernate 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-hibernate.md)
+- [使用 Spring Boot 构建 TiDB 应用程序](/develop/dev-guide-sample-application-java-spring-boot.md)
+
+ +
+ +在 Golang 中插入多行数据的示例: + +```go +package main + +import ( + "database/sql" + "strings" + + _ "github.com/go-sql-driver/mysql" +) + +type Player struct { + ID string + Coins int + Goods int +} + +func bulkInsertPlayers(db *sql.DB, players []Player, batchSize int) error { + tx, err := db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(buildBulkInsertSQL(batchSize)) + if err != nil { + return err + } + + defer stmt.Close() + + for len(players) > batchSize { + if _, err := stmt.Exec(playerToArgs(players[:batchSize])...); err != nil { + tx.Rollback() + return err + } + + players = players[batchSize:] + } + + if len(players) != 0 { + if _, err := tx.Exec(buildBulkInsertSQL(len(players)), playerToArgs(players)...); err != nil { + tx.Rollback() + return err + } + } + + if err := tx.Commit(); err != nil { + tx.Rollback() + return err + } + + return nil +} + +func playerToArgs(players []Player) []interface{} { + var args []interface{} + for _, player := range players { + args = append(args, player.ID, player.Coins, player.Goods) + } + return args +} + +func buildBulkInsertSQL(amount int) string { + return "INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)" + strings.Repeat(",(?,?,?)", amount-1) +} +``` + +有关 Golang 的完整示例,可参阅: + +- [TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-sql-driver.md) +- [TiDB 和 GORM 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-gorm.md) + +
+ +
+ +在 Python 中插入多行数据的示例: + +```python +import MySQLdb + +connection = MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="bookshop", + autocommit=True +) +with get_connection(autocommit=True) as connection: + + with connection.cursor() as cur: + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + cur.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player_list[idx:idx + 114]) +``` + +有关 Python 的完整示例,可参阅: + +- [TiDB 和 PyMySQL 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-pymysql.md) +- [TiDB 和 mysqlclient 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysqlclient.md) +- [TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysql-connector.md) +- [TiDB 和 SQLAlchemy 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-sqlalchemy.md) +- [TiDB 和 peewee 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-peewee.md) + +
+ +
+ +## 批量插入 + +如果你需要快速地将大量数据导入 TiDB 集群,最好的方式并不是使用 `INSERT` 语句,这并不是最高效的方法,而且需要你自行处理异常等问题。推荐使用 PingCAP 提供的一系列工具进行数据迁移: + +- 数据导出工具:[Dumpling](/dumpling-overview.md)。可以导出 MySQL 或 TiDB 的数据到本地或 Amazon S3 中。 +- 数据导入工具:[TiDB Lightning](/tidb-lightning/tidb-lightning-overview.md)。可以导入 `Dumpling` 导出的数据、CSV 文件,或者 [Amazon Aurora 生成的 Apache Parquet 文件](/migrate-aurora-to-tidb.md)。同时支持在本地盘或 Amazon S3 云盘读取数据。 +- 数据同步工具:[TiDB Data Migration](/dm/dm-overview.md)。可同步 MySQL、MariaDB、Amazon Aurora 数据库到 TiDB 中。且支持分库分表数据库的迁移。 +- 数据备份恢复工具:[Backup & Restore (BR)](/br/backup-and-restore-overview.md)。相对于 `Dumpling`,BR 更适合**_大数据量_**的场景。 + +## 避免热点 + +在设计表时需要考虑是否存在大量插入行为,若有,需在表设计期间对热点进行规避。请查看[创建表 - 选择主键](/develop/dev-guide-create-table.md#选择主键)部分,并遵从[选择主键时应遵守的规则](/develop/dev-guide-create-table.md#选择主键时应遵守的规则)。 + +更多有关热点问题的处理办法,请参考 [TiDB 热点问题处理](/troubleshoot-hot-spot-issues.md)文档。 + +## 主键为 `AUTO_RANDOM` 表插入数据 + +在插入的表主键为 `AUTO_RANDOM` 时,这时默认情况下,不能指定主键。例如 [bookshop](/develop/dev-guide-bookshop-schema-design.md) 数据库中,可以看到 [users 表](/develop/dev-guide-bookshop-schema-design.md#users-表) 的 `id` 字段含有 `AUTO_RANDOM` 属性。 + +此时,不可使用类似以下 SQL 进行插入: + +```sql +INSERT INTO `bookshop`.`users` (`id`, `balance`, `nickname`) VALUES (1, 0.00, 'nicky'); +``` + +将会产生错误: + +``` +ERROR 8216 (HY000): Invalid auto random: Explicit insertion on auto_random column is disabled. Try to set @@allow_auto_random_explicit_insert = true. 
+``` + +这是旨在提示你,不建议在插入时手动指定 `AUTO_RANDOM` 的列。这时,你有两种解决办法处理此错误: + +- (推荐) 插入语句中去除此列,使用 TiDB 帮你初始化的 `AUTO_RANDOM` 值。这样符合 `AUTO_RANDOM` 的语义。 + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `bookshop`.`users` (`balance`, `nickname`) VALUES (0.00, 'nicky'); + ``` + +- 如果你确认一定需要指定此列,那么可以使用 [SET 语句](/sql-statements/sql-statement-set-variable.md)通过更改用户变量的方式,允许在插入时,指定 `AUTO_RANDOM` 的列。 + + {{< copyable "sql" >}} + + ```sql + SET @@allow_auto_random_explicit_insert = true; + INSERT INTO `bookshop`.`users` (`id`, `balance`, `nickname`) VALUES (1, 0.00, 'nicky'); + ``` + +## 使用 HTAP + +在 TiDB 中,使用 HTAP 能力无需你在插入数据时进行额外操作。不会有任何额外的插入逻辑,由 TiDB 自动进行数据的一致性保证。你只需要在创建表后,[开启列存副本同步](/develop/dev-guide-create-table.md#使用-htap-能力),就可以直接使用列存副本来加速你的查询。 diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-overview.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-overview.md new file mode 100644 index 00000000..ae76dfec --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-overview.md @@ -0,0 +1,49 @@ +--- +title: 开发者手册概览 +summary: 整体叙述了开发者手册,罗列了开发者手册的大致脉络。 +aliases: ['/zh/tidb/dev/developer-guide-overview'] +--- + +# 开发者手册概览 + +本文是为应用程序开发者所编写的,如果你对 TiDB 的内部原理感兴趣,或希望参与到 TiDB 的开发中来,那么可前往阅读 [TiDB Kernel Development Guide](https://pingcap.github.io/tidb-dev-guide/) 来获取更多 TiDB 的相关信息。 + +本手册将展示如何使用 TiDB 来快速构建一个应用,并且阐述使用 TiDB 期间可能出现的场景以及可能会遇到的问题。因此,在阅读此页面之前,建议你先行阅读 [TiDB 数据库快速上手指南](/quick-start-with-tidb.md)。 + +此外,你还可以通过视频的形式学习免费的 [TiDB SQL 开发在线课程](https://pingcap.com/zh/courses-catalog/back-end-developer/?utm_source=docs-cn-dev-guide)。 + +## TiDB 基础 + +在你开始使用 TiDB 之前,你需要了解一些关于 TiDB 数据库的一些重要工作机制: + +- 阅读 [TiDB 事务概览](/transaction-overview.md)来了解 TiDB 的事务运作方式或查看[为应用开发程序员准备的事务说明](/develop/dev-guide-transaction-overview.md)查看应用开发程序员需要了解的事务部分。 +- 学习免费在线课程 [TiDB 架构与特点](https://learn.pingcap.com/learner/course/600003/?utm_source=docs-cn-dev-guide),了解构建 TiDB 分布式数据库集群的核心组件及其概念。 +- 了解[应用程序与 TiDB 
交互的方式](#应用程序与-tidb-交互的方式)。 + +## TiDB 事务机制 + +TiDB 支持分布式事务,而且提供[乐观事务](/optimistic-transaction.md)与[悲观事务](/pessimistic-transaction.md)两种事务模式。TiDB 当前版本中默认采用 **悲观事务** 模式,这让你在 TiDB 事务时可以像使用传统的单体数据库 (如: MySQL) 事务一样。 + +你可以使用 [BEGIN](/sql-statements/sql-statement-begin.md) 开启一个事务,或者使用 `BEGIN PESSIMISTIC` 显式的指定开启一个**悲观事务**,使用 `BEGIN OPTIMISTIC` 显式的指定开启一个**乐观事务**。随后,使用 [COMMIT](/sql-statements/sql-statement-commit.md) 提交事务,或使用 [ROLLBACK](/sql-statements/sql-statement-rollback.md) 回滚事务。 + +TiDB 会为你保证 `BEGIN` 开始到 `COMMIT` 或 `ROLLBACK` 结束间的所有语句的原子性,即在这期间的所有语句全部成功,或者全部失败。用以保证你在应用开发时所需的数据一致性。 + +若你不清楚**乐观事务**是什么,请暂时不要使用它。因为使用**乐观事务**的前提是需要应用程序可以正确的处理 `COMMIT` 语句所返回的[所有错误](/error-codes.md)。如果不确定应用程序如何处理,请直接使用**悲观事务**。 + +## 应用程序与 TiDB 交互的方式 + +TiDB 高度兼容 MySQL 协议,TiDB 支持[大多数 MySQL 的语法及特性](/mysql-compatibility.md),因此大部分的 MySQL 的连接库都与 TiDB 兼容。如果你的应用程序框架或语言无 PingCAP 的官方适配,那么建议你使用 MySQL 的客户端库。同时,也有越来越多的三方数据库主动支持 TiDB 的差异特性。 + +因为 TiDB 兼容 MySQL 协议,且兼容 MySQL 语法,因此大多数支持 MySQL 的 ORM 也兼容 TiDB。 + +## 扩展阅读 + +- [快速开始](/develop/dev-guide-build-cluster-in-cloud.md) +- [选择驱动或 ORM 框架](/develop/dev-guide-choose-driver-or-orm.md) +- [连接到 TiDB](/develop/dev-guide-connect-to-tidb.md) +- [数据库模式设计](/develop/dev-guide-schema-design-overview.md) +- [数据写入](/develop/dev-guide-insert-data.md) +- [数据读取](/develop/dev-guide-get-data-from-single-table.md) +- [事务](/develop/dev-guide-transaction-overview.md) +- [优化 SQL 性能](/develop/dev-guide-optimize-sql-overview.md) +- [示例程序](/develop/dev-guide-sample-application-java-spring-boot.md) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-playground-gitpod.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-playground-gitpod.md new file mode 100644 index 00000000..531143c2 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-playground-gitpod.md @@ -0,0 +1,169 @@ +--- +title: Gitpod +--- + + + +# Gitpod + +使用 
[Gitpod](https://www.gitpod.io/),只需单击一个按钮或链接即可在浏览器中获得完整的开发环境,并且可以立即编写代码。 + +Gitpod 是一个开源 Kubernetes 应用程序(GitHub 仓库地址 ),适用于可直接编写代码的开发环境,可为云中的每个任务提供全新的自动化开发环境,非常迅速。此外,Gitpod 能够将你的开发环境描述为代码,并直接从你的浏览器或桌面 IDE 启动即时、远程和基于云的开发环境。 + +## 快速开始 + +1. Fork 出 TiDB 应用开发的示例代码仓库 [pingcap-inc/tidb-example-java](https://github.com/pingcap-inc/tidb-example-java)。 + +2. 通过浏览器的地址栏,在示例代码仓库的 URL 前加上 `https://gitpod.io/#` 来启动你的 gitpod 工作区。 + + - 例如,`https://gitpod.io/#https://github.com/pingcap-inc/tidb-example-java`。 + + - 支持在 URL 中配置环境变量。例如,`https://gitpod.io/#targetFile=spring-jpa-hibernate_Makefile,targetMode=spring-jpa-hibernate/https://github.com/pingcap-inc/tidb-example-java`。 + +3. 使用列出的提供商之一登录并启动工作区,例如,`Github`。 + +## 使用默认的 Gitpod 配置和环境 + +完成[快速开始](#快速开始) 的步骤之后,Gitpod 会需要一段时间来设置你的工作区。 + +以 [Spring Boot Web](/develop/dev-guide-sample-application-java-spring-boot.md) 应用程序为例,通过 URL `https://gitpod.io/#targetFile=spring-jpa-hibernate_Makefile,targetMode=spring-jpa-hibernate/https://github.com/pingcap-inc/tidb-example-java` 可以创建一个新工作区。 + +完成后,你将看到如下所示的页面。 + +![playground gitpod workspace init](/media/develop/playground-gitpod-workspace-init.png) + +页面中的这个场景使用了 [TiUP](https://docs.pingcap.com/zh/tidb/stable/tiup-overview) 来搭建一个 TiDB Playground。你可以在终端的左侧查看进度。 + +一旦 TiDB Playground 准备就绪,另一个 `Spring JPA Hibernate` 任务将运行。 你可以在终端的右侧查看进度。 + +完成所有任务后,你可以看到如下所示的页面,并在左侧导航栏的 `REMOTE EXPLORER` 中找到你的端口 `8080` URL(Gitpod 支持基于 URL 的端口转发)。 + +![playground gitpod workspace ready](/media/develop/playground-gitpod-workspace-ready.png) + +你可以按照[该指南](/develop/dev-guide-sample-application-java-spring-boot.md#第-6-步http-请求)测试 API。注意请将 URL `http://localhost:8080` 替换为你在 `REMOTE EXPLORER` 中找到的那个。 + +## 使用自定义的 Gitpod 配置和 Docker 镜像 + +### 自定义 Gitpod 配置 + +在项目的根目录中,参考[示例 .gitpod.yml](https://github.com/pingcap-inc/tidb-example-java/blob/main/.gitpod.yml),创建一个 `.gitpod.yml` 文件用于配置 Gitpod 工作空间。 + +```yml +# This configuration file was automatically generated by Gitpod. 
+# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) +# and commit this file to your remote git repository to share the goodness with others. + +# image: +# file: .gitpod.Dockerfile + +tasks: + - name: Open Target File + command: | + if [ -n "$targetFile" ]; then code ${targetFile//[_]//}; fi + - name: TiUP init playground + command: | + $HOME/.tiup/bin/tiup playground + - name: Test Case + openMode: split-right + init: echo "*** Waiting for TiUP Playground Ready! ***" + command: | + gp await-port 3930 + if [ "$targetMode" == "plain-java-jdbc" ] + then + cd plain-java-jdbc + code src/main/resources/dbinit.sql + code src/main/java/com/pingcap/JDBCExample.java + make mysql + elif [ "$targetMode" == "plain-java-hibernate" ] + then + cd plain-java-hibernate + make + elif [ "$targetMode" == "spring-jpa-hibernate" ] + then + cd spring-jpa-hibernate + make + fi +ports: + - port: 8080 + visibility: public + - port: 4000 + visibility: public + - port: 2379-36663 + onOpen: ignore +``` + +### 自定义 Gitpod Docker 镜像 + +默认情况下,Gitpod 使用名为 Workspace-Full 的标准 Docker 镜像作为工作空间的基础。 基于此默认镜像启动的工作区预装了 Docker、Go、Java、Node.js、C/C++、Python、Ruby、Rust、PHP 以及 Homebrew、Tailscale、Nginx 等工具。 + +你可以提供公共 Docker 镜像或 Dockerfile。 并为你的项目安装所需的任何依赖项。 + +这是一个 Dockerfile 示例:[示例 .gitpod.Dockerfile](https://github.com/pingcap-inc/tidb-example-java/blob/main/.gitpod.Dockerfile) + +```dockerfile +FROM gitpod/workspace-java-17 + +RUN sudo apt install mysql-client -y +RUN curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh +``` + +然后需要更新`.gitpod.yml`: + +```yml +# This configuration file was automatically generated by Gitpod. +# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) +# and commit this file to your remote git repository to share the goodness with others. 
+ +image: + # 在这里导入你的 Dockerfile + file: .gitpod.Dockerfile + +tasks: + - name: Open Target File + command: | + if [ -n "$targetFile" ]; then code ${targetFile//[_]//}; fi + - name: TiUP init playground + command: | + $HOME/.tiup/bin/tiup playground + - name: Test Case + openMode: split-right + init: echo "*** Waiting for TiUP Playground Ready! ***" + command: | + gp await-port 3930 + if [ "$targetMode" == "plain-java-jdbc" ] + then + cd plain-java-jdbc + code src/main/resources/dbinit.sql + code src/main/java/com/pingcap/JDBCExample.java + make mysql + elif [ "$targetMode" == "plain-java-hibernate" ] + then + cd plain-java-hibernate + make + elif [ "$targetMode" == "spring-jpa-hibernate" ] + then + cd spring-jpa-hibernate + make + fi +ports: + - port: 8080 + visibility: public + - port: 4000 + visibility: public + - port: 2379-36663 + onOpen: ignore +``` + +### 应用更改 + +完成对 `.gitpod.yml` 文件配置后,请保证最新的代码已在你对应的 GitHub 代码仓库中可用。 + +访问 `https://gitpod.io/#` 以建立新的 Gitpod 工作区,新工作区会应用最新的代码。 + +访问 `https://gitpod.io/workspaces` 以获取所有建立的工作区。 + +## 总结 + +Gitpod 提供了完整的、自动化的、预配置的云原生开发环境。无需本地配置,你可以直接在浏览器中开发、运行、测试代码。 + +![playground gitpod summary](/media/develop/playground-gitpod-summary.png) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-prepared-statement.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-prepared-statement.md new file mode 100644 index 00000000..dada8dfd --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-prepared-statement.md @@ -0,0 +1,233 @@ +--- +title: 预处理语句 +summary: 介绍 TiDB 的预处理语句功能。 +aliases: ['/zh/tidb/dev/prepared-statement'] +--- + +# 预处理语句 + +[预处理语句](/sql-statements/sql-statement-prepare.md)是一种将多个仅有参数不同的 SQL 语句进行模板化的语句,它让 SQL 语句与参数进行了分离。可以用它提升 SQL 语句的: + +- 安全性:因为参数和语句已经分离,所以避免了 [SQL 注入攻击](https://en.wikipedia.org/wiki/SQL_injection)的风险。 +- 性能:因为语句在 TiDB 端被预先解析,后续执行只需要传递参数,节省了完整 SQL 解析、拼接 SQL 语句字符串以及网络传输的代价。 + +在大部分的应用程序中,SQL 语句是可以被枚举的,可以使用有限个 
SQL 语句来完成整个应用程序的数据查询,所以使用预处理语句是最佳实践之一。 + +## SQL 语法 + +本节将介绍创建、使用及删除预处理语句的 SQL 语法。 + +### 创建预处理语句 + +```sql +PREPARE {prepared_statement_name} FROM '{prepared_statement_sql}'; +``` + +| 参数 | 描述 | +| :-------------------------: | :------------------------------------: | +| `{prepared_statement_name}` | 预处理语句名称 | +| `{prepared_statement_sql}` | 预处理语句 SQL,以英文半角问号做占位符 | + +你可查看 [PREPARE 语句](/sql-statements/sql-statement-prepare.md) 获得更多信息。 + +### 使用预处理语句 + +预处理语句仅可使用用户变量作为参数,因此,需先使用 [SET 语句](/sql-statements/sql-statement-set-variable.md) 设置变量后,供 [EXECUTE 语句](/sql-statements/sql-statement-execute.md) 调用预处理语句。 + +```sql +SET @{parameter_name} = {parameter_value}; +EXECUTE {prepared_statement_name} USING @{parameter_name}; +``` + +| 参数 | 描述 | +| :-------------------------: | :-------------------------------------------------------------------: | +| `{parameter_name}` | 用户参数名 | +| `{parameter_value}` | 用户参数值 | +| `{prepared_statement_name}` | 预处理语句名称,需和[创建预处理语句](#创建预处理语句)中定义的名称一致 | + +你可查看 [EXECUTE 语句](/sql-statements/sql-statement-execute.md) 获得更多信息。 + +### 删除预处理语句 + +```sql +DEALLOCATE PREPARE {prepared_statement_name}; +``` + +| 参数 | 描述 | +| :-------------------------: | :-------------------------------------------------------------------: | +| `{prepared_statement_name}` | 预处理语句名称,需和[创建预处理语句](#创建预处理语句)中定义的名称一致 | + +你可查看 [DEALLOCATE 语句](/sql-statements/sql-statement-deallocate.md) 获得更多信息。 + +## 例子 + +本节以使用预处理语句,完成查询数据和插入数据两个场景的示例。 + +### 查询示例 + +例如,需要查询 [Bookshop 应用](/develop/dev-guide-bookshop-schema-design.md#books-表) 中,`id` 为 1 的书籍信息。 + + + +
+ +使用 SQL 查询示例: + +```sql +PREPARE `books_query` FROM 'SELECT * FROM `books` WHERE `id` = ?'; +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.01 sec) +``` + +```sql +SET @id = 1; +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.04 sec) +``` + +```sql +EXECUTE `books_query` USING @id; +``` + +运行结果为: + +``` ++---------+---------------------------------+--------+---------------------+-------+--------+ +| id | title | type | published_at | stock | price | ++---------+---------------------------------+--------+---------------------+-------+--------+ +| 1 | The Adventures of Pierce Wehner | Comics | 1904-06-06 20:46:25 | 586 | 411.66 | ++---------+---------------------------------+--------+---------------------+-------+--------+ +1 row in set (0.05 sec) +``` + +
+ +
+ +使用 Java 查询示例: + +```java +// ds is an entity of com.mysql.cj.jdbc.MysqlDataSource +try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM `books` WHERE `id` = ?"); + preparedStatement.setLong(1, 1); + + ResultSet res = preparedStatement.executeQuery(); + if(!res.next()) { + System.out.println("No books in the table with id 1"); + } else { + // got book's info, which id is 1 + System.out.println(res.getLong("id")); + System.out.println(res.getString("title")); + System.out.println(res.getString("type")); + } +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +
+ +
+ +### 插入示例 + +还是使用 [books 表](/develop/dev-guide-bookshop-schema-design.md#books-表) 为例,需要插入一个 `title` 为 `TiDB Developer Guide`, `type` 为 `Science & Technology`, `stock` 为 `100`, `price` 为 `0.0`, `published_at` 为 `插入的当前时间` 的书籍信息。需要注意的是,`books` 表的主键包含 `AUTO_RANDOM` 属性,无需指定它。如果你对插入数据还不了解,可以在[插入数据](/develop/dev-guide-insert-data.md)一节了解更多数据插入的相关信息。 + + + +
+ +使用 SQL 插入数据示例如下: + +```sql +PREPARE `books_insert` FROM 'INSERT INTO `books` (`title`, `type`, `stock`, `price`, `published_at`) VALUES (?, ?, ?, ?, ?);'; +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.03 sec) +``` + +```sql +SET @title = 'TiDB Developer Guide'; +SET @type = 'Science & Technology'; +SET @stock = 100; +SET @price = 0.0; +SET @published_at = NOW(); +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.04 sec) +``` + +```sql +EXECUTE `books_insert` USING @title, @type, @stock, @price, @published_at; +``` + +运行结果为: + +``` +Query OK, 1 row affected (0.03 sec) +``` + +
+ +
+ +使用 Java 插入数据示例如下: + +```java +try (Connection connection = ds.getConnection()) { + String sql = "INSERT INTO `books` (`title`, `type`, `stock`, `price`, `published_at`) VALUES (?, ?, ?, ?, ?);"; + PreparedStatement preparedStatement = connection.prepareStatement(sql); + + preparedStatement.setString(1, "TiDB Developer Guide"); + preparedStatement.setString(2, "Science & Technology"); + preparedStatement.setInt(3, 100); + preparedStatement.setBigDecimal(4, new BigDecimal("0.0")); + preparedStatement.setTimestamp(5, new Timestamp(Calendar.getInstance().getTimeInMillis())); + + preparedStatement.executeUpdate(); +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +可以看到,JDBC 帮你管控了预处理语句的生命周期,而无需你在应用程序里手动使用预处理语句的创建、使用、删除等。但值得注意的是,因为 TiDB 兼容 MySQL 协议,在客户端使用 MySQL JDBC Driver 的过程中,其默认配置并非开启 **_服务端_** 的预处理语句选项,而是使用客户端的预处理语句。你需要关注以下配置项,来获得在 JDBC 下 TiDB 服务端预处理语句的支持,及在你的使用场景下的最佳配置: + +| 参数 | 作用 | 推荐场景 | 推荐配置 | +| :---------------------: | :-----------------------------------: | :--------------------------: | :----------------------: | +| `useServerPrepStmts` | 是否使用服务端开启预处理语句支持 | 在需要多次使用预处理语句时 | `true` | +| `cachePrepStmts` | 客户端是否缓存预处理语句 | `useServerPrepStmts=true` 时 | `true` | +| `prepStmtCacheSqlLimit` | 预处理语句最大大小(默认 256 字符) | 预处理语句大于 256 字符时 | 按实际预处理语句大小配置 | +| `prepStmtCacheSize` | 预处理语句最大缓存数量 (默认 25 条) | 预处理语句数量大于 25 条时 | 按实际预处理语句数量配置 | + +在此处给出一个较为的通用场景的 JDBC 连接字符串配置,以 Host: `127.0.0.1`,Port: `4000`,用户: `root`,密码: 空,默认数据库: `test`为例: + +``` +jdbc:mysql://127.0.0.1:4000/test?user=root&useConfigs=maxPerformance&useServerPrepStmts=true&prepStmtCacheSqlLimit=2048&prepStmtCacheSize=256&rewriteBatchedStatements=true&allowMultiQueries=true +``` + +你也可以查看[插入行](/develop/dev-guide-insert-data.md#插入行)一章,来查看是否需要在插入数据场景下更改其他 JDBC 的参数。 + +有关 Java 的完整示例,可参阅: + +- [TiDB 和 JDBC 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-jdbc.md) +- [TiDB 和 Hibernate 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-hibernate.md) +- [使用 Spring Boot 构建 TiDB 
应用程序](/develop/dev-guide-sample-application-java-spring-boot.md) + +
+ +
diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-gorm.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-gorm.md new file mode 100644 index 00000000..49f8d1c7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-gorm.md @@ -0,0 +1,291 @@ +--- +title: TiDB 和 GORM 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 GORM 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 GORM 的简单 CRUD 应用程序 + +[GORM](https://gorm.io/) 为当前比较流行的 Golang 开源 ORM 之一。 + +本文档将展示如何使用 TiDB 和 GORM 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Golang 1.16 以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-golang.git +``` + +此处将以 GORM v1.23.5 版本进行说明。 + +封装一个用于适配 TiDB 事务的工具包 [util](https://github.com/pingcap-inc/tidb-example-golang/tree/main/util),编写以下代码备用: + +```go +package util + +import ( + "gorm.io/gorm" +) + +// TiDBGormBegin start a TiDB and Gorm transaction as a block. If no error is returned, the transaction will be committed. Otherwise, the transaction will be rolled back. +func TiDBGormBegin(db *gorm.DB, pessimistic bool, fc func(tx *gorm.DB) error) (err error) { + session := db.Session(&gorm.Session{}) + if session.Error != nil { + return session.Error + } + + if pessimistic { + session = session.Exec("set @@tidb_txn_mode=pessimistic") + } else { + session = session.Exec("set @@tidb_txn_mode=optimistic") + } + + if session.Error != nil { + return session.Error + } + return session.Transaction(fc) +} +``` + +进入目录 `gorm`: + +```shell +cd gorm +``` + +目录结构如下所示: + +``` +. 
+├── Makefile +├── go.mod +├── go.sum +└── gorm.go +``` + +其中,`gorm.go` 是 `gorm` 这个示例程序的主体。使用 gorm 时,相较于 go-sql-driver/mysql,gorm 屏蔽了创建数据库连接时,不同数据库差异的细节,其还封装了大量的操作,如 AutoMigrate、基本对象的 CRUD 等,极大地简化了代码量。 + +`Player` 是数据结构体,为数据库表在程序内的映射。`Player` 的每个属性都对应着 `player` 表的一个字段。相较于 go-sql-driver/mysql,gorm 的 `Player` 数据结构体为了给 gorm 提供更多的信息,加入了形如 `gorm:"primaryKey;type:VARCHAR(36);column:id"` 的注解,用来指示映射关系。 + +```go + +package main + +import ( + "fmt" + "math/rand" + + "github.com/google/uuid" + "github.com/pingcap-inc/tidb-example-golang/util" + + "gorm.io/driver/mysql" + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/logger" +) + +type Player struct { + ID string `gorm:"primaryKey;type:VARCHAR(36);column:id"` + Coins int `gorm:"column:coins"` + Goods int `gorm:"column:goods"` +} + +func (*Player) TableName() string { + return "player" +} + +func main() { + // 1. Configure the example database connection. + db := createDB() + + // AutoMigrate for player table + db.AutoMigrate(&Player{}) + + // 2. Run some simple examples. + simpleExample(db) + + // 3. Explore more. + tradeExample(db) +} + +func tradeExample(db *gorm.DB) { + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + player1 := &Player{ID: "1", Coins: 100} + player2 := &Player{ID: "2", Coins: 114514, Goods: 20} + + // Create two players "by hand", using the INSERT statement on the backend. + db.Clauses(clause.OnConflict{UpdateAll: true}).Create(player1) + db.Clauses(clause.OnConflict{UpdateAll: true}).Create(player2) + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + fmt.Println("\nbuyGoods:\n => this trade will fail") + if err := buyGoods(db, player2.ID, player1.ID, 10, 500); err == nil { + panic("there shouldn't be success") + } + + // So player 1 has to reduce the incoming quantity to two. 
+ fmt.Println("\nbuyGoods:\n => this trade will success") + if err := buyGoods(db, player2.ID, player1.ID, 2, 100); err != nil { + panic(err) + } +} + +func simpleExample(db *gorm.DB) { + // Create a player, who has a coin and a goods. + if err := db.Clauses(clause.OnConflict{UpdateAll: true}). + Create(&Player{ID: "test", Coins: 1, Goods: 1}).Error; err != nil { + panic(err) + } + + // Get a player. + var testPlayer Player + db.Find(&testPlayer, "id = ?", "test") + fmt.Printf("getPlayer: %+v\n", testPlayer) + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + bulkInsertPlayers := make([]Player, 1919, 1919) + total, batch := 1919, 114 + for i := 0; i < total; i++ { + bulkInsertPlayers[i] = Player{ + ID: uuid.New().String(), + Coins: rand.Intn(10000), + Goods: rand.Intn(10000), + } + } + + if err := db.Session(&gorm.Session{Logger: db.Logger.LogMode(logger.Error)}). + CreateInBatches(bulkInsertPlayers, batch).Error; err != nil { + panic(err) + } + + // Count players amount. + playersCount := int64(0) + db.Model(&Player{}).Count(&playersCount) + fmt.Printf("countPlayers: %d\n", playersCount) + + // Print 3 players. + threePlayers := make([]Player, 3, 3) + db.Limit(3).Find(&threePlayers) + for index, player := range threePlayers { + fmt.Printf("print %d player: %+v\n", index+1, player) + } +} + +func createDB() *gorm.DB { + dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" + db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{ + Logger: logger.Default.LogMode(logger.Info), + }) + if err != nil { + panic(err) + } + + return db +} + +func buyGoods(db *gorm.DB, sellID, buyID string, amount, price int) error { + return util.TiDBGormBegin(db, true, func(tx *gorm.DB) error { + var sellPlayer, buyPlayer Player + if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}). 
+ Find(&sellPlayer, "id = ?", sellID).Error; err != nil { + return err + } + + if sellPlayer.ID != sellID || sellPlayer.Goods < amount { + return fmt.Errorf("sell player %s goods not enough", sellID) + } + + if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}). + Find(&buyPlayer, "id = ?", buyID).Error; err != nil { + return err + } + + if buyPlayer.ID != buyID || buyPlayer.Coins < price { + return fmt.Errorf("buy player %s coins not enough", buyID) + } + + updateSQL := "UPDATE player set goods = goods + ?, coins = coins + ? WHERE id = ?" + if err := tx.Exec(updateSQL, -amount, price, sellID).Error; err != nil { + return err + } + + if err := tx.Exec(updateSQL, amount, -price, buyID).Error; err != nil { + return err + } + + fmt.Println("\n[buyGoods]:\n 'trade success'") + return nil + }) +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `gorm.go` 内 `dsn` 参数值: + +```go +dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `mysql.RegisterTLSConfig` 和 `dsn` 更改为: + +```go +mysql.RegisterTLSConfig("register-tidb-tls", &tls.Config { + MinVersion: tls.VersionTLS12, + ServerName: "xxx.tidbcloud.com", +}) + +dsn := "2aEp24QWEDLqRFs.root:123456@tcp(xxx.tidbcloud.com:4000)/test?charset=utf8mb4&tls=register-tidb-tls" +``` + +### 第 3 步第 2 部分:运行 + +你可以分别运行 `make build` 和 `make run` 以运行此代码: + +```shell +make build # this command executes `go build -o bin/gorm-example` +make run # this command executes `./bin/gorm-example` +``` + +或者你也可以直接使用原生的命令: + +```shell +go build -o bin/gorm-example +./bin/gorm-example +``` + +再或者直接运行 `make all` 命令,这是 `make build` 和 `make run` 的组合。 + +## 第 4 步:预期输出 + +[GORM 预期输出](https://github.com/pingcap-inc/tidb-example-golang/blob/main/Expected-Output.md#gorm) diff --git 
a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-sql-driver.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-sql-driver.md new file mode 100644 index 00000000..2adddfc7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-sql-driver.md @@ -0,0 +1,537 @@ +--- +title: TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序示例。 +aliases: ['/zh/tidb/dev/dev-guide-sample-application-golang'] +--- + + + + +# TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序 + +本文档将展示如何使用 TiDB 和 [Go-MySQL-Driver](https://github.com/go-sql-driver/mysql) 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Golang 1.16 以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-golang.git +``` + +进入目录 `sqldriver`: + +```shell +cd sqldriver +``` + +目录结构如下所示: + +``` +. 
+├── Makefile +├── dao.go +├── go.mod +├── go.sum +├── sql +│   └── dbinit.sql +├── sql.go +└── sqldriver.go +``` + +其中,`dbinit.sql` 为数据表初始化语句: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +`sqldriver.go` 是 `sqldriver` 这个示例程序的主体。与 GORM 对比,go-sql-driver/mysql 的实现方式并非最优体验。你需要自行编写错误处理逻辑,手动关闭 `*sql.Rows`,并且代码无法简单复用。这会使你的代码有些冗余。因为 TiDB 与 MySQL 协议兼容,因此,需要初始化一个 MySQL 协议的数据源 `db, err := sql.Open("mysql", dsn)`,以此连接到 TiDB。并在其后,调用 `dao.go` 中的一系列方法,用来管理数据对象,进行增删改查等操作。 + +```go +package main + +import ( + "database/sql" + "fmt" + + _ "github.com/go-sql-driver/mysql" +) + +func main() { + // 1. Configure the example database connection. + dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" + openDB("mysql", dsn, func(db *sql.DB) { + // 2. Run some simple examples. + simpleExample(db) + + // 3. Explore more. + tradeExample(db) + }) +} + +func simpleExample(db *sql.DB) { + // Create a player, who has a coin and a goods. + err := createPlayer(db, Player{ID: "test", Coins: 1, Goods: 1}) + if err != nil { + panic(err) + } + + // Get a player. + testPlayer, err := getPlayer(db, "test") + if err != nil { + panic(err) + } + fmt.Printf("getPlayer: %+v\n", testPlayer) + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + + err = bulkInsertPlayers(db, randomPlayers(1919), 114) + if err != nil { + panic(err) + } + + // Count players amount. + playersCount, err := getCount(db) + if err != nil { + panic(err) + } + fmt.Printf("countPlayers: %d\n", playersCount) + + // Print 3 players. + threePlayers, err := getPlayerByLimit(db, 3) + if err != nil { + panic(err) + } + for index, player := range threePlayers { + fmt.Printf("print %d player: %+v\n", index+1, player) + } +} + +func tradeExample(db *sql.DB) { + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. 
+ player1 := Player{ID: "1", Coins: 100} + player2 := Player{ID: "2", Coins: 114514, Goods: 20} + + // Create two players "by hand", using the INSERT statement on the backend. + if err := createPlayer(db, player1); err != nil { + panic(err) + } + if err := createPlayer(db, player2); err != nil { + panic(err) + } + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + fmt.Println("\nbuyGoods:\n => this trade will fail") + if err := buyGoods(db, player2.ID, player1.ID, 10, 500); err == nil { + panic("there shouldn't be success") + } + + // So player 1 has to reduce the incoming quantity to two. + fmt.Println("\nbuyGoods:\n => this trade will success") + if err := buyGoods(db, player2.ID, player1.ID, 2, 100); err != nil { + panic(err) + } +} + +func openDB(driverName, dataSourceName string, runnable func(db *sql.DB)) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + defer db.Close() + + runnable(db) +} +``` + +随后,封装一个用于适配 TiDB 事务的工具包 [util](https://github.com/pingcap-inc/tidb-example-golang/tree/main/util),编写以下代码备用: + +```go +package util + +import ( + "context" + "database/sql" +) + +type TiDBSqlTx struct { + *sql.Tx + conn *sql.Conn + pessimistic bool +} + +func TiDBSqlBegin(db *sql.DB, pessimistic bool) (*TiDBSqlTx, error) { + ctx := context.Background() + conn, err := db.Conn(ctx) + if err != nil { + return nil, err + } + if pessimistic { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "pessimistic") + } else { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "optimistic") + } + if err != nil { + return nil, err + } + tx, err := conn.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + return &TiDBSqlTx{ + conn: conn, + Tx: tx, + pessimistic: pessimistic, + }, nil +} + +func (tx *TiDBSqlTx) Commit() error { + defer tx.conn.Close() + return tx.Tx.Commit() +} + +func (tx *TiDBSqlTx) Rollback() error { + defer tx.conn.Close() + return 
tx.Tx.Rollback() +} +``` + +在 `dao.go` 中定义一系列数据的操作方法,用来对提供数据的写入能力。这也是本例子中核心部分。 + +```go +package main + +import ( + "database/sql" + "fmt" + "math/rand" + "strings" + + "github.com/google/uuid" + "github.com/pingcap-inc/tidb-example-golang/util" +) + +type Player struct { + ID string + Coins int + Goods int +} + +// createPlayer create a player +func createPlayer(db *sql.DB, player Player) error { + _, err := db.Exec(CreatePlayerSQL, player.ID, player.Coins, player.Goods) + return err +} + +// getPlayer get a player +func getPlayer(db *sql.DB, id string) (Player, error) { + var player Player + + rows, err := db.Query(GetPlayerSQL, id) + if err != nil { + return player, err + } + defer rows.Close() + + if rows.Next() { + err = rows.Scan(&player.ID, &player.Coins, &player.Goods) + if err == nil { + return player, nil + } else { + return player, err + } + } + + return player, fmt.Errorf("can not found player") +} + +// getPlayerByLimit get players by limit +func getPlayerByLimit(db *sql.DB, limit int) ([]Player, error) { + var players []Player + + rows, err := db.Query(GetPlayerByLimitSQL, limit) + if err != nil { + return players, err + } + defer rows.Close() + + for rows.Next() { + player := Player{} + err = rows.Scan(&player.ID, &player.Coins, &player.Goods) + if err == nil { + players = append(players, player) + } else { + return players, err + } + } + + return players, nil +} + +// bulk-insert players +func bulkInsertPlayers(db *sql.DB, players []Player, batchSize int) error { + tx, err := util.TiDBSqlBegin(db, true) + if err != nil { + return err + } + + stmt, err := tx.Prepare(buildBulkInsertSQL(batchSize)) + if err != nil { + return err + } + + defer stmt.Close() + + for len(players) > batchSize { + if _, err := stmt.Exec(playerToArgs(players[:batchSize])...); err != nil { + tx.Rollback() + return err + } + + players = players[batchSize:] + } + + if len(players) != 0 { + if _, err := tx.Exec(buildBulkInsertSQL(len(players)), playerToArgs(players)...); err != 
nil { + tx.Rollback() + return err + } + } + + if err := tx.Commit(); err != nil { + tx.Rollback() + return err + } + + return nil +} + +func getCount(db *sql.DB) (int, error) { + count := 0 + + rows, err := db.Query(GetCountSQL) + if err != nil { + return count, err + } + + defer rows.Close() + + if rows.Next() { + if err := rows.Scan(&count); err != nil { + return count, err + } + } + + return count, nil +} + +func buyGoods(db *sql.DB, sellID, buyID string, amount, price int) error { + var sellPlayer, buyPlayer Player + + tx, err := util.TiDBSqlBegin(db, true) + if err != nil { + return err + } + + buyExec := func() error { + stmt, err := tx.Prepare(GetPlayerWithLockSQL) + if err != nil { + return err + } + defer stmt.Close() + + sellRows, err := stmt.Query(sellID) + if err != nil { + return err + } + defer sellRows.Close() + + if sellRows.Next() { + if err := sellRows.Scan(&sellPlayer.ID, &sellPlayer.Coins, &sellPlayer.Goods); err != nil { + return err + } + } + sellRows.Close() + + if sellPlayer.ID != sellID || sellPlayer.Goods < amount { + return fmt.Errorf("sell player %s goods not enough", sellID) + } + + buyRows, err := stmt.Query(buyID) + if err != nil { + return err + } + defer buyRows.Close() + + if buyRows.Next() { + if err := buyRows.Scan(&buyPlayer.ID, &buyPlayer.Coins, &buyPlayer.Goods); err != nil { + return err + } + } + buyRows.Close() + + if buyPlayer.ID != buyID || buyPlayer.Coins < price { + return fmt.Errorf("buy player %s coins not enough", buyID) + } + + updateStmt, err := tx.Prepare(UpdatePlayerSQL) + if err != nil { + return err + } + defer updateStmt.Close() + + if _, err := updateStmt.Exec(-amount, price, sellID); err != nil { + return err + } + + if _, err := updateStmt.Exec(amount, -price, buyID); err != nil { + return err + } + + return nil + } + + err = buyExec() + if err == nil { + fmt.Println("\n[buyGoods]:\n 'trade success'") + tx.Commit() + } else { + tx.Rollback() + } + + return err +} + +func playerToArgs(players []Player) 
[]interface{} { + var args []interface{} + for _, player := range players { + args = append(args, player.ID, player.Coins, player.Goods) + } + return args +} + +func buildBulkInsertSQL(amount int) string { + return CreatePlayerSQL + strings.Repeat(",(?,?,?)", amount-1) +} + +func randomPlayers(amount int) []Player { + players := make([]Player, amount, amount) + for i := 0; i < amount; i++ { + players[i] = Player{ + ID: uuid.New().String(), + Coins: rand.Intn(10000), + Goods: rand.Intn(10000), + } + } + + return players +} +``` + +`sql.go` 中存放了 SQL 语句的常量。 + +```go +package main + +const ( + CreatePlayerSQL = "INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)" + GetPlayerSQL = "SELECT id, coins, goods FROM player WHERE id = ?" + GetCountSQL = "SELECT count(*) FROM player" + GetPlayerWithLockSQL = GetPlayerSQL + " FOR UPDATE" + UpdatePlayerSQL = "UPDATE player set goods = goods + ?, coins = coins + ? WHERE id = ?" + GetPlayerByLimitSQL = "SELECT id, coins, goods FROM player LIMIT ?" +) +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +使用 go-sql-driver/mysql 时,需手动初始化数据库表,若你本地已经安装了 `mysql-client`,且使用本地集群,可直接在 `sqldriver` 目录下运行: + +```shell +make mysql +``` + +或直接执行: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root + + +# TiDB 和 Hibernate 的简单 CRUD 应用程序 + +[Hibernate](https://hibernate.org/) 是当前比较流行的开源 Java 应用持久层框架,且 Hibernate 在版本 `6.0.0.Beta2` 及以后支持了 TiDB 方言,完美适配了 TiDB 的特性。 + +本文档将展示如何使用 TiDB 和 Java 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Java 8 及以上版本进行 TiDB 的应用程序的编写。 + +## 拓展学习视频 + +- [使用 Connector/J - TiDB v6](https://learn.pingcap.com/learner/course/840002/?utm_source=docs-cn-dev-guide) +- [在 TiDB 上开发应用的最佳实践 - TiDB v6](https://learn.pingcap.com/learner/course/780002/?utm_source=docs-cn-dev-guide) + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 
集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +与 [Hibernate](https://hibernate.org/orm/) 对比,JDBC 的实现方式并非最优体验。你需要自行编写错误处理逻辑,并且代码无法简单复用。这会使你的代码有些冗余。 + +此处将以 `6.0.0.Beta2` 版本进行说明。 + +进入目录 `plain-java-hibernate`: + +```shell +cd plain-java-hibernate +``` + +目录结构如下所示: + +``` +. +├── Makefile +├── plain-java-hibernate.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── HibernateExample.java + └── resources + └── hibernate.cfg.xml +``` + +其中,`hibernate.cfg.xml` 为 Hibernate 配置文件,定义了: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +`HibernateExample.java` 是 `plain-java-hibernate` 这个示例程序的主体。使用 Hibernate 时,相较于 JDBC,这里仅需写入配置文件地址,Hibernate 屏蔽了创建数据库连接时,不同数据库差异的细节。 + +`PlayerDAO` 是程序用来管理数据对象的类。其中 `DAO` 是 [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object) 的缩写。其中定义了一系列数据的操作方法,用来提供数据的写入能力。相较于 JDBC,Hibernate 封装了大量的操作,如对象映射、基本对象的 CRUD 等,极大地简化了代码量。 + +`PlayerBean` 是数据实体类,为数据库表在程序内的映射。`PlayerBean` 的每个属性都对应着 `player` 表的一个字段。相较于 JDBC,Hibernate 的 `PlayerBean` 实体类为了给 Hibernate 提供更多的信息,加入了注解,用来指示映射关系。 + +```java +package com.pingcap; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.persistence.Table; +import org.hibernate.JDBCException; +import org.hibernate.Session; +import org.hibernate.SessionFactory; +import org.hibernate.Transaction; +import org.hibernate.cfg.Configuration; +import org.hibernate.query.NativeQuery; +import org.hibernate.query.Query; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; + +@Entity +@Table(name = "player_hibernate") +class PlayerBean { + @Id + private String id; + @Column(name = "coins") + 
private Integer coins; + @Column(name = "goods") + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } +} + +/** + * Main class for the basic Hibernate example. + **/ +public class HibernateExample +{ + public static class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic so we don't have to duplicate it in + // various places. 
+ public Object runTransaction(Session session, Function fn) { + Object resultObject = null; + + Transaction txn = session.beginTransaction(); + try { + resultObject = fn.apply(session); + txn.commit(); + System.out.println("APP: COMMIT;"); + } catch (JDBCException e) { + System.out.println("APP: ROLLBACK BY JDBC ERROR;"); + txn.rollback(); + } catch (NotEnoughException e) { + System.out.printf("APP: ROLLBACK BY LOGIC; %s", e.getMessage()); + txn.rollback(); + } + return resultObject; + } + + public Function createPlayers(List players) throws JDBCException { + return session -> { + Integer addedPlayerAmount = 0; + for (PlayerBean player: players) { + session.persist(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, Integer amount, Integer price) throws JDBCException { + return session -> { + PlayerBean sellPlayer = session.get(PlayerBean.class, sellId); + PlayerBean buyPlayer = session.get(PlayerBean.class, buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + session.persist(buyPlayer); + + sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + session.persist(sellPlayer); + + System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return 0; + }; + } + + public Function getPlayerByID(String id) throws JDBCException { + return session -> session.get(PlayerBean.class, id); + } + + public Function printPlayers(Integer limit) throws JDBCException { + return session -> { + 
NativeQuery limitQuery = session.createNativeQuery("SELECT * FROM player_hibernate LIMIT :limit", PlayerBean.class); + limitQuery.setParameter("limit", limit); + List players = limitQuery.getResultList(); + + for (PlayerBean player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() throws JDBCException { + return session -> { + Query countQuery = session.createQuery("SELECT count(player_hibernate) FROM PlayerBean player_hibernate", Long.class); + return countQuery.getSingleResult(); + }; + } + } + + public static void main(String[] args) { + // 1. Create a SessionFactory based on our hibernate.cfg.xml configuration + // file, which defines how to connect to the database. + SessionFactory sessionFactory + = new Configuration() + .configure("hibernate.cfg.xml") + .addAnnotatedClass(PlayerBean.class) + .buildSessionFactory(); + + try (Session session = sessionFactory.openSession()) { + // 2. And then, create DAO to manager your data. + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple examples. + + // Create a player who has 1 coin and 1 goods. + playerDAO.runTransaction(session, playerDAO.createPlayers(Collections.singletonList( + new PlayerBean("test", 1, 1)))); + + // Get a player. + PlayerBean testPlayer = (PlayerBean)playerDAO.runTransaction(session, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. + Long count = (Long)playerDAO.runTransaction(session, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(session, playerDAO.printPlayers(3)); + + // 4. Explore more. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. 
+ PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } finally { + sessionFactory.close(); + } + } +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `hibernate.cfg.xml` 内关于 `hibernate.connection.url`、`hibernate.connection.username`、`hibernate.connection.password` 的参数: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将配置文件更改为: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://xxx.tidbcloud.com:4000/test?sslMode=VERIFY_IDENTITY&enabledTLSProtocols=TLSv1.2,TLSv1.3 + 2aEp24QWEDLqRFs.root + 
123456 + false + + + create-drop + + + true + true + + +``` + +### 第 3 步第 2 部分:运行 + +你可以分别运行 `make build` 和 `make run` 以运行此代码: + +```shell +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-hibernate-0.0.1-jar-with-dependencies.jar` +``` + +或者你也可以直接使用原生的命令: + +```shell +mvn clean package +java -jar target/plain-java-hibernate-0.0.1-jar-with-dependencies.jar +``` + +再或者直接运行 `make` 命令,这是 `make build` 和 `make run` 的组合。 + +## 第 4 步:预期输出 + +[Hibernate 预期输出](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-hibernate) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-jdbc.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-jdbc.md new file mode 100644 index 00000000..250943e0 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-jdbc.md @@ -0,0 +1,576 @@ +--- +title: TiDB 和 JDBC 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 JDBC 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 JDBC 的简单 CRUD 应用程序 + +本文档将展示如何使用 TiDB 和 JDBC 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Java 8 及以上版本进行 TiDB 的应用程序的编写。 + +## 拓展学习视频 + +- [使用 Connector/J - TiDB v6](https://learn.pingcap.com/learner/course/840002/?utm_source=docs-cn-dev-guide) +- [在 TiDB 上开发应用的最佳实践 - TiDB v6](https://learn.pingcap.com/learner/course/780002/?utm_source=docs-cn-dev-guide) + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +进入目录 `plain-java-jdbc`: + +```shell +cd plain-java-jdbc +``` + +目录结构如下所示: + +``` 
+. +├── Makefile +├── plain-java-jdbc.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── JDBCExample.java + └── resources + └── dbinit.sql +``` + +其中,`dbinit.sql` 为数据表初始化语句: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +`JDBCExample.java` 是 `plain-java-jdbc` 这个示例程序的主体。因为 TiDB 与 MySQL 协议兼容,因此,需要初始化一个 MySQL 协议的数据源 `MysqlDataSource`,以此连接到 TiDB。并在其后,初始化 `PlayerDAO`,用来管理数据对象,进行增删改查等操作。 + +`PlayerDAO` 是程序用来管理数据对象的类。其中 `DAO` 是 [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object) 的缩写。在其中定义了一系列数据的操作方法,用来对提供数据的写入能力。 + +`PlayerBean` 是数据实体类,为数据库表在程序内的映射。`PlayerBean` 的每个属性都对应着 `player` 表的一个字段。 + +```java +package com.pingcap; + +import com.mysql.cj.jdbc.MysqlDataSource; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.*; + +/** + * Main class for the basic JDBC example. + **/ +public class JDBCExample +{ + public static class PlayerBean { + private String id; + private Integer coins; + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } + } + + /** + * Data access object used by 'ExampleDataSource'. + * Example for CURD and bulk insert. 
+ */ + public static class PlayerDAO { + private final MysqlDataSource ds; + private final Random rand = new Random(); + + PlayerDAO(MysqlDataSource ds) { + this.ds = ds; + } + + /** + * Create players by passing in a List of PlayerBean. + * + * @param players Will create players list + * @return The number of create accounts + */ + public int createPlayers(List players){ + int rows = 0; + + Connection connection = null; + PreparedStatement preparedStatement = null; + try { + connection = ds.getConnection(); + preparedStatement = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)"); + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + + return -1; + } + + try { + for (PlayerBean player : players) { + preparedStatement.setString(1, player.getId()); + preparedStatement.setInt(2, player.getCoins()); + preparedStatement.setInt(3, player.getGoods()); + + preparedStatement.execute(); + rows += preparedStatement.getUpdateCount(); + } + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + } finally { + try { + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + System.out.printf("\n[createPlayers]:\n '%s'\n", preparedStatement); + return rows; + } + + /** + * Buy goods and transfer funds between one player and another in one transaction. + * @param sellId Sell player id. + * @param buyId Buy player id. + * @param amount Goods amount, if sell player has not enough goods, the trade will break. + * @param price Price should pay, if buy player has not enough coins, the trade will break. + * + * @return The number of effected players. 
+ */ + public int buyGoods(String sellId, String buyId, Integer amount, Integer price) { + int effectPlayers = 0; + + Connection connection = null; + try { + connection = ds.getConnection(); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + return effectPlayers; + } + + try { + connection.setAutoCommit(false); + + PreparedStatement playerQuery = connection.prepareStatement("SELECT * FROM player WHERE id=? OR id=? FOR UPDATE"); + playerQuery.setString(1, sellId); + playerQuery.setString(2, buyId); + playerQuery.execute(); + + PlayerBean sellPlayer = null; + PlayerBean buyPlayer = null; + + ResultSet playerQueryResultSet = playerQuery.getResultSet(); + while (playerQueryResultSet.next()) { + PlayerBean player = new PlayerBean( + playerQueryResultSet.getString("id"), + playerQueryResultSet.getInt("coins"), + playerQueryResultSet.getInt("goods") + ); + + System.out.println("\n[buyGoods]:\n 'check goods and coins enough'"); + System.out.println(player); + + if (sellId.equals(player.getId())) { + sellPlayer = player; + } else { + buyPlayer = player; + } + } + + if (sellPlayer == null || buyPlayer == null) { + throw new SQLException("player not exist."); + } + + if (sellPlayer.getGoods().compareTo(amount) < 0) { + throw new SQLException(String.format("sell player %s goods not enough.", sellId)); + } + + if (buyPlayer.getCoins().compareTo(price) < 0) { + throw new SQLException(String.format("buy player %s coins not enough.", buyId)); + } + + PreparedStatement transfer = connection.prepareStatement("UPDATE player set goods = goods + ?, coins = coins + ? 
WHERE id=?"); + transfer.setInt(1, -amount); + transfer.setInt(2, price); + transfer.setString(3, sellId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + transfer.setInt(1, amount); + transfer.setInt(2, -price); + transfer.setString(3, buyId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + connection.commit(); + + System.out.println("\n[buyGoods]:\n 'trade success'"); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + + try { + System.out.println("[buyGoods] Rollback"); + + connection.rollback(); + } catch (SQLException ex) { + // do nothing + } + } finally { + try { + connection.close(); + } catch (SQLException e) { + // do nothing + } + } + + return effectPlayers; + } + + /** + * Get the player info by id. + * + * @param id Player id. + * @return The player of this id. + */ + public PlayerBean getPlayer(String id) { + PlayerBean player = null; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player WHERE id = ?"); + preparedStatement.setString(1, id); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(!res.next()) { + System.out.printf("No players in the table with id %s", id); + } else { + player = new PlayerBean(res.getString("id"), res.getInt("coins"), res.getInt("goods")); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.getPlayer ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return player; + } + + /** + * Insert randomized account data (id, coins, goods) using the JDBC fast path for + * bulk inserts. The fastest way to get data into TiDB is using the + * TiDB Lightning(https://docs.pingcap.com/tidb/stable/tidb-lightning-overview). 
+ * However, if you must bulk insert from the application using INSERT SQL, the best + * option is the method shown here. It will require the following: + * + * Add `rewriteBatchedStatements=true` to your JDBC connection settings. + * Setting rewriteBatchedStatements to true now causes CallableStatements + * with batched arguments to be re-written in the form "CALL (...); CALL (...); ..." + * to send the batch in as few client/server round trips as possible. + * https://dev.mysql.com/doc/relnotes/connector-j/5.1/en/news-5-1-3.html + * + * You can see the `rewriteBatchedStatements` param effect logic at + * implement function: `com.mysql.cj.jdbc.StatementImpl.executeBatchUsingMultiQueries` + * + * @param total Add players amount. + * @param batchSize Bulk insert size for per batch. + * + * @return The number of new accounts inserted. + */ + public int bulkInsertRandomPlayers(Integer total, Integer batchSize) { + int totalNewPlayers = 0; + + try (Connection connection = ds.getConnection()) { + // We're managing the commit lifecycle ourselves, so we can + // control the size of our batch inserts. + connection.setAutoCommit(false); + + // In this example we are adding 500 rows to the database, + // but it could be any number. What's important is that + // the batch size is 128. 
+ try (PreparedStatement pstmt = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)")) { + for (int i=0; i<=(total/batchSize);i++) { + for (int j=0; j %s row(s) updated in this batch\n", count.length); + } + connection.commit(); + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + return totalNewPlayers; + } + + + /** + * Print a subset of players from the data store by limit. + * + * @param limit Print max size. + */ + public void printPlayers(Integer limit) { + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player LIMIT ?"); + preparedStatement.setInt(1, limit); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + while (!res.next()) { + PlayerBean player = new PlayerBean(res.getString("id"), + res.getInt("coins"), res.getInt("goods")); + System.out.println("\n[printPlayers]:\n" + player); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.printPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } + + + /** + * Count players from the data store. 
+ * + * @return All players count + */ + public int countPlayers() { + int count = 0; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT count(*) FROM player"); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(res.next()) { + count = res.getInt(1); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.countPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return count; + } + } + + public static void main(String[] args) { + // 1. Configure the example database connection. + + // 1.1 Create a mysql data source instance. + MysqlDataSource mysqlDataSource = new MysqlDataSource(); + + // 1.2 Set server name, port, database name, username and password. + mysqlDataSource.setServerName("localhost"); + mysqlDataSource.setPortNumber(4000); + mysqlDataSource.setDatabaseName("test"); + mysqlDataSource.setUser("root"); + mysqlDataSource.setPassword(""); + + // Or you can use jdbc string instead. + // mysqlDataSource.setURL("jdbc:mysql://{host}:{port}/test?user={user}&password={password}"); + + // 2. And then, create DAO to manager your data. + PlayerDAO dao = new PlayerDAO(mysqlDataSource); + + // 3. Run some simple examples. + + // Create a player, who has a coin and a goods.. + dao.createPlayers(Collections.singletonList(new PlayerBean("test", 1, 1))); + + // Get a player. + PlayerBean testPlayer = dao.getPlayer("test"); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + int addedCount = dao.bulkInsertRandomPlayers(1919, 114); + System.out.printf("PlayerDAO.bulkInsertRandomPlayers:\n => %d total inserted players\n", addedCount); + + // Count players amount. 
+ int count = dao.countPlayers(); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + dao.printPlayers(3); + + // 4. Explore more. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + addedCount = dao.createPlayers(Arrays.asList(player1, player2)); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + int updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 10, 500); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. 
+ System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 2, 100); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:JDBC 表初始化 + +使用 JDBC 时,需手动初始化数据库表,若你本地已经安装了 `mysql-client`,且使用本地集群,可直接在 `plain-java-jdbc` 目录下运行: + +```shell +make mysql +``` + +或直接执行: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root + + +# TiDB 和 MyBatis 的简单 CRUD 应用程序 + +[Mybatis](https://mybatis.org/mybatis-3/index.html) 是当前比较流行的开源 Java 应用持久层框架。 + +本文档将展示如何使用 TiDB 和 MyBatis 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Java 8 及以上版本进行 TiDB 的应用程序的编写。 + +## 拓展学习视频 + +- [使用 Connector/J - TiDB v6](https://learn.pingcap.com/learner/course/840002/?utm_source=docs-cn-dev-guide) +- [在 TiDB 上开发应用的最佳实践 - TiDB v6](https://learn.pingcap.com/learner/course/780002/?utm_source=docs-cn-dev-guide) + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +与 [MyBatis](https://mybatis.org/mybatis-3/index.html) 对比,JDBC 的实现方式并非最优体验。你需要自行编写错误处理逻辑,并且代码无法简单复用。这会使你的代码有些冗余。 + +本文将以 Maven 插件的方式使用 [MyBatis Generator](https://mybatis.org/generator/quickstart.html) 生成部分持久层代码。 + +进入目录 `plain-java-mybatis`: + +```shell +cd plain-java-mybatis +``` + +目录结构如下所示: + +``` +. 
+├── Makefile +├── pom.xml +└── src + └── main + ├── java + │   └── com + │   └── pingcap + │   ├── MybatisExample.java + │   ├── dao + │   │   └── PlayerDAO.java + │   └── model + │   ├── Player.java + │   ├── PlayerMapper.java + │   └── PlayerMapperEx.java + └── resources + ├── dbinit.sql + ├── log4j.properties + ├── mapper + │   ├── PlayerMapper.xml + │   └── PlayerMapperEx.xml + ├── mybatis-config.xml + └── mybatis-generator.xml +``` + +其中,自动生成的文件有: + +- `src/main/java/com/pingcap/model/Player.java`:Player 实体类文件 +- `src/main/java/com/pingcap/model/PlayerMapper.java`:Player Mapper 的接口文件 +- `src/main/resources/mapper/PlayerMapper.xml`:Player Mapper 的 XML 映射,它是 MyBatis 用于生成 Player Mapper 接口的实现类的配置 + +这些文件的生成策略被写在了 `mybatis-generator.xml` 配置文件内,它是 [MyBatis Generator](https://mybatis.org/generator/quickstart.html) 的配置文件,下面配置文件中添加了使用方法的说明: + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +`mybatis-generator.xml` 在 `pom.xml` 中,以 `mybatis-generator-maven-plugin` 插件配置的方式被引入: + +```xml + + org.mybatis.generator + mybatis-generator-maven-plugin + 1.4.1 + + src/main/resources/mybatis-generator.xml + true + true + + + + + + mysql + mysql-connector-java + 5.1.49 + + + +``` + +在 Maven 插件内引入后,可删除旧的生成文件后,通过命令 `mvn mybatis-generate` 生成新的文件。或者你也可以使用已经编写好的 `make` 命令,通过 `make gen` 来同时删除旧文件,并生成新文件。 + +> **注意:** +> +> `mybatis-generator.xml` 中的属性 `configuration.overwrite` 仅可控制新生成的 Java 代码文件使用覆盖方式被写入,但 XML 映射文件仍会以追加方式写入。因此,推荐在 MyBatis Generator 生成新的文件前,先删除掉旧的文件。 + +`Player.java` 是使用 MyBatis Generator 生成出的数据实体类文件,为数据库表在程序内的映射。`Player` 类的每个属性都对应着 `player` 表的一个字段。 + +```java +package com.pingcap.model; + +public class Player { + private String id; + + private Integer coins; + + private Integer goods; + + public Player(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public Player() { + super(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + 
public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } +} +``` + +`PlayerMapper.java` 是使用 MyBatis Generator 生成出的映射接口文件,它仅规定了接口,接口的实现类是由 MyBatis 来通过 XML 或注解自动生成的: + +```java +package com.pingcap.model; + +import com.pingcap.model.Player; + +public interface PlayerMapper { + int deleteByPrimaryKey(String id); + + int insert(Player row); + + int insertSelective(Player row); + + Player selectByPrimaryKey(String id); + + int updateByPrimaryKeySelective(Player row); + + int updateByPrimaryKey(Player row); +} +``` + +`PlayerMapper.xml` 是使用 MyBatis Generator 生成出的映射 XML 文件,MyBatis 将使用这个文件自动生成 `PlayerMapper` 接口的实现类: + +```xml + + + + + + + + + + + + id, coins, goods + + + + delete from player + where id = #{id,jdbcType=VARCHAR} + + + insert into player (id, coins, goods + ) + values (#{id,jdbcType=VARCHAR}, #{coins,jdbcType=INTEGER}, #{goods,jdbcType=INTEGER} + ) + + + insert into player + + + id, + + + coins, + + + goods, + + + + + #{id,jdbcType=VARCHAR}, + + + #{coins,jdbcType=INTEGER}, + + + #{goods,jdbcType=INTEGER}, + + + + + update player + + + coins = #{coins,jdbcType=INTEGER}, + + + goods = #{goods,jdbcType=INTEGER}, + + + where id = #{id,jdbcType=VARCHAR} + + + update player + set coins = #{coins,jdbcType=INTEGER}, + goods = #{goods,jdbcType=INTEGER} + where id = #{id,jdbcType=VARCHAR} + + +``` + +由于 MyBatis Generator 需要逆向生成源码,因此,数据库中需先行有此表结构,可使用 `dbinit.sql` 生成表结构: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +额外拆分接口 `PlayerMapperEx` 继承 `PlayerMapper`,并且编写与之匹配的 `PlayerMapperEx.xml`。避免直接更改 `PlayerMapper.java` 和 `PlayerMapper.xml`。这是为了规避 MyBatis Generator 的反复生成,影响到自行编写的代码。 + +在 `PlayerMapperEx.java` 中定义自行增加的接口: + +```java +package com.pingcap.model; + +import 
java.util.List; + +public interface PlayerMapperEx extends PlayerMapper { + Player selectByPrimaryKeyWithLock(String id); + + List selectByLimit(Integer limit); + + Integer count(); +} +``` + +在 `PlayerMapperEx.xml` 中定义映射规则: + +```xml + + + + + + + + + + + + id, coins, goods + + + + + + + + + +``` + +`PlayerDAO.java` 是程序用来管理数据对象的类。其中 `DAO` 是 [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object) 的缩写。在其中定义了一系列数据的操作方法,用于数据的写入。 + +```java +package com.pingcap.dao; + +import com.pingcap.model.Player; +import com.pingcap.model.PlayerMapperEx; +import org.apache.ibatis.session.SqlSession; +import org.apache.ibatis.session.SqlSessionFactory; + +import java.util.List; +import java.util.function.Function; + +public class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic, so we don't have to duplicate it in + // various places. 
+ public Object runTransaction(SqlSessionFactory sessionFactory, Function fn) { + Object resultObject = null; + SqlSession session = null; + + try { + // open a session with autoCommit is false + session = sessionFactory.openSession(false); + + // get player mapper + PlayerMapperEx playerMapperEx = session.getMapper(PlayerMapperEx.class); + + resultObject = fn.apply(playerMapperEx); + session.commit(); + System.out.println("APP: COMMIT;"); + } catch (Exception e) { + if (e instanceof NotEnoughException) { + System.out.printf("APP: ROLLBACK BY LOGIC; \n%s\n", e.getMessage()); + } else { + System.out.printf("APP: ROLLBACK BY ERROR; \n%s\n", e.getMessage()); + } + + if (session != null) { + session.rollback(); + } + } finally { + if (session != null) { + session.close(); + } + } + + return resultObject; + } + + public Function createPlayers(List players) { + return playerMapperEx -> { + Integer addedPlayerAmount = 0; + for (Player player: players) { + playerMapperEx.insert(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, Integer amount, Integer price) { + return playerMapperEx -> { + Player sellPlayer = playerMapperEx.selectByPrimaryKeyWithLock(sellId); + Player buyPlayer = playerMapperEx.selectByPrimaryKeyWithLock(buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + int affectRows = 0; + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + affectRows += playerMapperEx.updateByPrimaryKey(buyPlayer); + + sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + affectRows += 
playerMapperEx.updateByPrimaryKey(sellPlayer); + + System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return affectRows; + }; + } + + public Function getPlayerByID(String id) { + return playerMapperEx -> playerMapperEx.selectByPrimaryKey(id); + } + + public Function printPlayers(Integer limit) { + return playerMapperEx -> { + List players = playerMapperEx.selectByLimit(limit); + + for (Player player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() { + return PlayerMapperEx::count; + } +} +``` + +`MybatisExample` 是 `plain-java-mybatis` 这个示例程序的主类。其中定义了入口函数: + +```java +package com.pingcap; + +import com.pingcap.dao.PlayerDAO; +import com.pingcap.model.Player; +import org.apache.ibatis.io.Resources; +import org.apache.ibatis.session.SqlSessionFactory; +import org.apache.ibatis.session.SqlSessionFactoryBuilder; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; + +public class MybatisExample { + public static void main( String[] args ) throws IOException { + // 1. Create a SqlSessionFactory based on our mybatis-config.xml configuration + // file, which defines how to connect to the database. + InputStream inputStream = Resources.getResourceAsStream("mybatis-config.xml"); + SqlSessionFactory sessionFactory = new SqlSessionFactoryBuilder().build(inputStream); + + // 2. And then, create DAO to manager your data + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple examples. + + // Create a player who has 1 coin and 1 goods. + playerDAO.runTransaction(sessionFactory, playerDAO.createPlayers( + Collections.singletonList(new Player("test", 1, 1)))); + + // Get a player. 
+ Player testPlayer = (Player)playerDAO.runTransaction(sessionFactory, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. + Integer count = (Integer)playerDAO.runTransaction(sessionFactory, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(sessionFactory, playerDAO.printPlayers(3)); + + // 4. Getting further. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + Player player1 = new Player("1", 100, 0); + Player player2 = new Player("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. 
+ System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +使用 MyBatis 时,需手动初始化数据库表。若你本地已经安装了 `mysql-client`,且使用本地集群,可直接在 `plain-java-mybatis` 目录下通过 `make prepare` 运行: + +```shell +make prepare +``` + +或直接执行: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +``` + +若你不使用本地集群,或未安装 `mysql-client`,请直接登录你的集群,并运行 `src/main/resources/dbinit.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `mybatis-config.xml` 内关于 `dataSource.url`、`dataSource.username`、`dataSource.password` 的参数: + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将配置文件中 `dataSource` 节点内更改为: + +```xml + + + + + ... + + + + + + + + ... 
+ + +``` + +### 第 3 步第 3 部分:运行 + +你可以分别运行 `make prepare`, `make gen`, `make build` 和 `make run` 以运行此代码: + +```shell +make prepare +# this command executes : +# - `mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql` +# - `mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player"` + +make gen +# this command executes : +# - `rm -f src/main/java/com/pingcap/model/Player.java` +# - `rm -f src/main/java/com/pingcap/model/PlayerMapper.java` +# - `rm -f src/main/resources/mapper/PlayerMapper.xml` +# - `mvn mybatis-generator:generate` + +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar` +``` + +或者你也可以直接使用原生的命令: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player" +rm -f src/main/java/com/pingcap/model/Player.java +rm -f src/main/java/com/pingcap/model/PlayerMapper.java +rm -f src/main/resources/mapper/PlayerMapper.xml +mvn mybatis-generator:generate +mvn clean package +java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar +``` + +再或者直接运行 `make` 命令,这是 `make prepare`, `make gen`, `make build` 和 `make run` 的组合。 + +## 第 4 步:预期输出 + +[MyBatis 预期输出](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-mybatis) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-spring-boot.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-spring-boot.md new file mode 100644 index 00000000..f823f4e8 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-spring-boot.md @@ -0,0 +1,1019 @@ +--- +title: 使用 Spring Boot 构建 TiDB 应用程序 +summary: 给出一个 Spring Boot 构建 TiDB 应用程序示例。 +aliases: ['/zh/tidb/dev/dev-guide-sample-application-spring-boot', 
'/zh/tidb/dev/sample-application-spring-boot'] +--- + + + +# 使用 Spring Boot 构建 TiDB 应用程序 + +本教程向你展示如何使用 TiDB 构建 [Spring Boot](https://spring.io/projects/spring-boot) Web 应用程序。使用 [Spring Data JPA](https://spring.io/projects/spring-data-jpa) 模块作为数据访问能力的框架。此示例应用程序的代码仓库可在 [Github](https://github.com/pingcap-inc/tidb-example-java) 下载。 + +这是一个较为完整的构建 Restful API 的示例应用程序,展示了一个使用 **TiDB** 作为数据库的通用 **Spring Boot** 后端服务。设计了以下过程,用于还原一个现实场景: + +这是一个关于游戏的例子,每个玩家有两个属性:金币数 `coins` 和货物数 `goods`。且每个玩家都拥有一个字段 `id`,作为玩家的唯一标识。玩家在金币数和货物数充足的情况下,可以自由的交易。 + +你可以以此示例为基础,构建自己的应用程序。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:安装 JDK + +请在你的计算机上下载并安装 **Java Development Kit** (JDK),这是 Java 开发的必备工具。**Spring Boot** 支持 Java 版本 8 以上的 JDK,由于 **Hibernate** 版本的缘故,推荐使用 Java 版本 11 以上的 JDK。 + +示例应用程序同时支持 **Oracle JDK** 和 **OpenJDK**,请自行选择,本教程将使用版本 17 的 **OpenJDK**。 + +## 第 3 步:安装 Maven + +此示例应用程序使用 **Maven** 来管理应用程序的依赖项。Spring 支持的 **Maven** 版本为 3.2 以上,作为依赖管理软件,推荐使用当前最新稳定版本的 **Maven**。 + +这里给出命令行安装 **Maven** 的办法: + +- macOS 安装: + + {{< copyable "shell-regular" >}} + + ``` + brew install maven + ``` + +- 基于 Debian 的 Linux 发行版上安装(如 Ubuntu 等): + + {{< copyable "shell-regular" >}} + + ``` + apt-get install maven + ``` + +- 基于 Red Hat 的 Linux 发行版上安装(如 Fedora、CentOS 等): + +- dnf 包管理器 + + {{< copyable "shell-regular" >}} + + ``` + dnf install maven + ``` + +- yum 包管理器 + + {{< copyable "shell-regular" >}} + + ``` + yum install maven + ``` + +其他安装方法,请参考 [Maven 官方文档](https://maven.apache.org/install.html)。 + +## 第 4 步:获取应用程序代码 + +> **建议:** +> +> 如果你希望得到一个与本示例相同依赖的空白程序,而无需示例代码,可参考[创建相同依赖空白程序(可选)](#创建相同依赖空白程序可选)一节。 + +请下载或克隆示例代码库 [pingcap-inc/tidb-example-java](https://github.com/pingcap-inc/tidb-example-java),并进入到目录 
`spring-jpa-hibernate` 中。 + +## 第 5 步:运行应用程序 + +接下来运行应用程序代码,将会生成一个 Web 应用程序。Hibernate 将在数据库 `test` 中创建一个表 `player_jpa`。如果你向应用程序的 Restful API 发送请求,这些请求将会在 TiDB 集群上运行[数据库事务](/develop/dev-guide-transaction-overview.md)。 + +如果你想了解有关此应用程序的代码的详细信息,可参阅[实现细节](#实现细节)部分。 + +### 第 5 步第 1 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `application.yml`(位于 `src/main/resources` 内)关于 `spring.datasource.url`、`spring.datasource.username`、`spring.datasource.password` 的参数: + +```yaml +spring: + datasource: + url: jdbc:mysql://localhost:4000/test + username: root + # password: xxx + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将参数更改为: + +```yaml +spring: + datasource: + url: jdbc:mysql://xxx.tidbcloud.com:4000/test?sslMode=VERIFY_IDENTITY&enabledTLSProtocols=TLSv1.2,TLSv1.3 + username: 2aEp24QWEDLqRFs.root + password: 123456 + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +### 第 5 步第 2 部分:运行 + +打开终端,进入 `tidb-example-java/spring-jpa-hibernate` 代码示例目录: + +```shell +cd /tidb-example-java/spring-jpa-hibernate +``` + +#### 使用 Make 构建并运行(推荐) + +```shell +make +``` + +#### 手动构建并运行 + +推荐你使用 Make 方式进行构建并运行,当然,若你希望手动进行构建,请依照以下步骤逐步运行,可以得到相同的结果: + +清除缓存并打包: + +```shell +mvn clean package +``` + +运行应用程序的 JAR 文件: + +```shell +java -jar target/spring-jpa-hibernate-0.0.1.jar +``` + +### 第 5 步第 3 部分:输出 + +输出的最后部分应如下所示: + +``` + . 
____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v3.0.1) + +2023-01-05T14:06:54.427+08:00 INFO 22005 --- [ main] com.pingcap.App : Starting App using Java 17.0.2 with PID 22005 (/Users/cheese/IdeaProjects/tidb-example-java/spring-jpa-hibernate/target/classes started by cheese in /Users/cheese/IdeaProjects/tidb-example-java) +2023-01-05T14:06:54.428+08:00 INFO 22005 --- [ main] com.pingcap.App : No active profile set, falling back to 1 default profile: "default" +2023-01-05T14:06:54.642+08:00 INFO 22005 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Bootstrapping Spring Data JPA repositories in DEFAULT mode. +2023-01-05T14:06:54.662+08:00 INFO 22005 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Finished Spring Data repository scanning in 17 ms. Found 1 JPA repository interfaces. +2023-01-05T14:06:54.830+08:00 INFO 22005 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8080 (http) +2023-01-05T14:06:54.833+08:00 INFO 22005 --- [ main] o.apache.catalina.core.StandardService : Starting service [Tomcat] +2023-01-05T14:06:54.833+08:00 INFO 22005 --- [ main] o.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/10.1.4] +2023-01-05T14:06:54.865+08:00 INFO 22005 --- [ main] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext +2023-01-05T14:06:54.865+08:00 INFO 22005 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 421 ms +2023-01-05T14:06:54.916+08:00 INFO 22005 --- [ main] o.hibernate.jpa.internal.util.LogHelper : HHH000204: Processing PersistenceUnitInfo [name: default] +2023-01-05T14:06:54.929+08:00 INFO 22005 --- [ main] org.hibernate.Version : HHH000412: Hibernate ORM core version 6.1.6.Final 
+2023-01-05T14:06:54.969+08:00 WARN 22005 --- [ main] org.hibernate.orm.deprecation : HHH90000021: Encountered deprecated setting [javax.persistence.sharedCache.mode], use [jakarta.persistence.sharedCache.mode] instead +2023-01-05T14:06:55.005+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Starting... +2023-01-05T14:06:55.074+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.pool.HikariPool : HikariPool-1 - Added connection com.mysql.cj.jdbc.ConnectionImpl@5e905f2c +2023-01-05T14:06:55.075+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Start completed. +2023-01-05T14:06:55.089+08:00 INFO 22005 --- [ main] SQL dialect : HHH000400: Using dialect: org.hibernate.dialect.TiDBDialect +Hibernate: drop table if exists player_jpa +Hibernate: drop sequence player_jpa_id_seq +Hibernate: create sequence player_jpa_id_seq start with 1 increment by 1 +Hibernate: create table player_jpa (id bigint not null, coins integer, goods integer, primary key (id)) engine=InnoDB +2023-01-05T14:06:55.332+08:00 INFO 22005 --- [ main] o.h.e.t.j.p.i.JtaPlatformInitiator : HHH000490: Using JtaPlatform implementation: [org.hibernate.engine.transaction.jta.platform.internal.NoJtaPlatform] +2023-01-05T14:06:55.335+08:00 INFO 22005 --- [ main] j.LocalContainerEntityManagerFactoryBean : Initialized JPA EntityManagerFactory for persistence unit 'default' +2023-01-05T14:06:55.579+08:00 WARN 22005 --- [ main] JpaBaseConfiguration$JpaWebConfiguration : spring.jpa.open-in-view is enabled by default. Therefore, database queries may be performed during view rendering. 
Explicitly configure spring.jpa.open-in-view to disable this warning
+2023-01-05T14:06:55.710+08:00 INFO 22005 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http) with context path ''
+2023-01-05T14:06:55.714+08:00 INFO 22005 --- [ main] com.pingcap.App : Started App in 1.432 seconds (process running for 1.654)
+```
+
+输出日志中,提示应用程序在启动过程中做了什么,这里显示应用程序使用 [Tomcat](https://tomcat.apache.org/) 启动了一个 **Servlet**,使用 Hibernate 作为 ORM,[HikariCP](https://github.com/brettwooldridge/HikariCP) 作为数据库连接池的实现,使用了 `org.hibernate.dialect.TiDBDialect` 作为数据库方言。启动后,Hibernate 删除并重新创建了表 `player_jpa`,及序列 `player_jpa_id_seq`。在启动的最后,监听了 8080 端口,对外提供 HTTP 服务。
+
+如果你想了解有关此应用程序的代码的详细信息,可参阅本教程下方的[实现细节](#实现细节)。
+
+## 第 6 步:HTTP 请求
+
+在运行应用程序后,你可以通过访问根地址 `http://localhost:8080` 向后端程序发送 HTTP 请求。下面将给出一些示例请求来演示如何使用该服务。
+
+
+
+ +1. 将配置文件 [`Player.postman_collection.json`](https://raw.githubusercontent.com/pingcap-inc/tidb-example-python/main/django_example/Player.postman_collection.json) 导入 [Postman](https://www.postman.com/)。 + +2. 导入后 **Collections** > **Player** 如图所示: + + ![postman import](/media/develop/postman_player_import.png) + +3. 发送请求: + + - 增加玩家 + + 点击 **Create** 标签,点击 **Send** 按钮,发送 `POST` 形式的 `http://localhost:8000/player/` 请求。返回值为增加的玩家个数,预期为 1。 + + ![Postman-Create](/media/develop/postman_player_create.png) + + - 使用 ID 获取玩家信息 + + 点击 **GetByID** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/1` 请求。返回值为 ID 为 1 的玩家信息。 + + ![Postman-GetByID](/media/develop/postman_player_getbyid.png) + + - 使用 Limit 批量获取玩家信息 + + 点击 **GetByLimit** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/limit/3` 请求。返回值为最多 3 个玩家的信息列表。 + + ![Postman-GetByLimit](/media/develop/postman_player_getbylimit.png) + + - 分页获取玩家信息 + + 点击 **GetByPage** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8080/player/page?index=0&size=2` 请求。返回值为 index 为 0 的页,每页有 2 个玩家信息列表。此外,还包含了分页信息,如偏移量、总页数、是否排序等。 + + ![Postman-GetByPage](/media/develop//postman_player_getbypage.png) + + - 获取玩家个数 + + 点击 **Count** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/count` 请求。返回值为玩家个数。 + + ![Postman-Count](/media/develop/postman_player_count.png) + + - 玩家交易 + + 点击 **Trade** 标签,点击 **Send** 按钮,发送 `PUT` 形式的 `http://localhost:8000/player/trade` 请求。请求参数为售卖玩家 ID `sellID`、购买玩家 ID `buyID`、购买货物数量 `amount` 以及购买消耗金币数 `price`。返回值为交易是否成功。当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + + ![Postman-Trade](/media/develop/postman_player_trade.png) + +
+ +
+ +下面使用 curl 请求服务端。 + +- 增加玩家 + + 使用 `POST` 方法向 `/player` 端点发送请求来增加玩家,例如: + + ```shell + curl --location --request POST 'http://localhost:8080/player/' --header 'Content-Type: application/json' --data-raw '[{"coins":100,"goods":20}]' + ``` + + 这里使用 JSON 作为信息的载荷。表示需要创建一个金币数 `coins` 为 100,货物数 `goods` 为 20 的玩家。返回值为创建的玩家信息: + + ```json + 1 + ``` + +- 使用 ID 获取玩家信息 + + 使用 `GET` 方法向 `/player` 端点发送请求来获取玩家信息。此外,还需要在路径上给出玩家的 ID 参数,即 `/player/{id}`。例如,在请求 ID 为 1 的玩家时: + + ```shell + curl --location --request GET 'http://localhost:8080/player/1' + ``` + + 返回值为 ID 为 1 的玩家的信息: + + ```json + { + "coins": 200, + "goods": 10, + "id": 1 + } + ``` + +- 使用 Limit 批量获取玩家信息 + + 使用 `GET` 方法向 `/player/limit` 端点发送请求来获取玩家信息。此外,还需要在路径上给出限制查询的玩家信息的总数,即 `/player/limit/{limit}`。例如,在请求最多 3 个玩家的信息时: + + ```shell + curl --location --request GET 'http://localhost:8080/player/limit/3' + ``` + + 返回值为玩家信息的列表: + + ```json + [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + }, + { + "coins": 100, + "goods": 20, + "id": 3 + } + ] + ``` + +- 分页获取玩家信息 + + 使用 `GET` 方法向 `/player/page` 端点发送请求来分页获取玩家信息。额外地需要使用 URL 参数,例如在请求页面序号 `index` 为 0,每页最大请求量 `size` 为 2 时: + + ```shell + curl --location --request GET 'http://localhost:8080/player/page?index=0&size=2' + ``` + + 返回值为 `index` 为 0 的页,每页有 2 个玩家信息列表。此外,还包含了分页信息,如偏移量、总页数、是否排序等。 + + ```json + { + "content": [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + } + ], + "empty": false, + "first": true, + "last": false, + "number": 0, + "numberOfElements": 2, + "pageable": { + "offset": 0, + "pageNumber": 0, + "pageSize": 2, + "paged": true, + "sort": { + "empty": true, + "sorted": false, + "unsorted": true + }, + "unpaged": false + }, + "size": 2, + "sort": { + "empty": true, + "sorted": false, + "unsorted": true + }, + "totalElements": 4, + "totalPages": 2 + } + ``` + +- 获取玩家个数 + + 使用 `GET` 方法向 `/player/count` 端点发送请求来获取玩家个数: + + ```shell + curl --location --request GET 
'http://localhost:8080/player/count' + ``` + + 返回值为玩家个数: + + ```json + 4 + ``` + +- 玩家交易 + + 使用 `PUT` 方法向 `/player/trade` 端点发送请求来发起玩家间的交易,例如: + + ```shell + curl --location --request PUT 'http://localhost:8080/player/trade' \ + --header 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'sellID=1' \ + --data-urlencode 'buyID=2' \ + --data-urlencode 'amount=10' \ + --data-urlencode 'price=100' + ``` + + 这里使用 Form Data 作为信息的载荷。表示售卖玩家 ID `sellID` 为 1、购买玩家 ID `buyID` 为 2、购买货物数量 `amount` 为 10、购买消耗金币数 `price` 为 100。 + + 返回值为交易是否成功: + + ``` + true + ``` + + 当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + +
+ +
+ +为方便测试,你可以使用 [`request.sh`](https://github.com/pingcap-inc/tidb-example-java/blob/main/spring-jpa-hibernate/request.sh) 脚本依次发送以下请求: + +1. 循环创建 10 名玩家 +2. 获取 ID 为 1 的玩家信息 +3. 获取至多 3 名玩家信息列表 +4. 获取 `index` 为 0,`size` 为 2 的一页玩家信息 +5. 获取玩家总数 +6. ID 为 1 的玩家作为售出方,ID 为 2 的玩家作为购买方,购买 10 个货物,耗费 100 金币 + +使用 `make request` 或 `./request.sh` 命令运行此脚本,运行结果如下所示: + +```shell +> make request +./request.sh +loop to create 10 players: +1111111111 + +get player 1: +{"id":1,"coins":200,"goods":10} + +get players by limit 3: +[{"id":1,"coins":200,"goods":10},{"id":2,"coins":0,"goods":30},{"id":3,"coins":100,"goods":20}] + +get first players: +{"content":[{"id":1,"coins":200,"goods":10},{"id":2,"coins":0,"goods":30}],"pageable":{"sort":{"empty":true,"unsorted":true,"sorted":false},"offset":0,"pageNumber":0,"pageSize":2,"paged":true,"unpaged":false},"last":false,"totalPages":7,"totalElements":14,"first":true,"size":2,"number":0,"sort":{"empty":true,"unsorted":true,"sorted":false},"numberOfElements":2,"empty":false} + +get players count: +14 + +trade by two players: +false +``` + +
+ +
+ +## 实现细节 + +本小节介绍示例应用程序项目中的组件。 + +### 总览 + +本示例项目的大致目录树如下所示(删除了有碍理解的部分): + +``` +. +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ ├── App.java + │ ├── controller + │ │ └── PlayerController.java + │ ├── dao + │ │ ├── PlayerBean.java + │ │ └── PlayerRepository.java + │ └── service + │ ├── PlayerService.java + │ └── impl + │ └── PlayerServiceImpl.java + └── resources + └── application.yml +``` + +其中: + +- `pom.xml` 内声明了项目的 Maven 配置,如依赖,打包等 +- `application.yml` 内声明了项目的用户配置,如数据库地址、密码、使用的数据库方言等 +- `App.java` 是项目的入口 +- `controller` 是项目对外暴露 HTTP 接口的包 +- `service` 是项目实现接口与逻辑的包 +- `dao` 是项目实现与数据库连接并完成数据持久化的包 + +### 配置 + +本节将简要介绍 `pom.xml` 文件中的 Maven 配置,及 `application.yml` 文件中的用户配置。 + +#### Maven 配置 + +`pom.xml` 文件为 Maven 配置,在文件内声明了项目的 Maven 依赖,打包方法,打包信息等,你可以通过[创建相同依赖空白程序](#创建相同依赖空白程序可选) 这一节来复刻此配置文件的生成流程,当然,也可直接复制至你的项目来使用。 + +```xml + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 3.0.1 + + + + com.pingcap + spring-jpa-hibernate + 0.0.1 + spring-jpa-hibernate + an example for spring boot, jpa, hibernate and TiDB + + + 17 + 17 + 17 + + + + + org.springframework.boot + spring-boot-starter-data-jpa + + + + org.springframework.boot + spring-boot-starter-web + + + + mysql + mysql-connector-java + runtime + + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + +``` + +#### 用户配置 + +`application.yml` 此配置文件声明了用户配置,如数据库地址、密码、使用的数据库方言等。 + +```yaml +spring: + datasource: + url: jdbc:mysql://localhost:4000/test + username: root + # password: xxx + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +此配置格式为 [YAML](https://yaml.org/) 格式。其中: + +- `spring.datasource.url`:数据库连接的 URL。 +- `spring.datasource.url`:数据库用户名。 +- `spring.datasource.password`:数据库密码,此项为空,需注释或删除。 +- `spring.datasource.driver-class-name`:数据库驱动,因为 TiDB 与 MySQL 兼容,则此处使用与 
mysql-connector-java 适配的驱动类 `com.mysql.cj.jdbc.Driver`。 +- `jpa.show-sql`:为 true 时将打印 JPA 运行的 SQL。 +- `jpa.database-platform`:选用的数据库方言,此处连接了 TiDB,自然选择 TiDB 方言,注意,此方言在 6.0.0.Beta2 版本后的 Hibernate 中才可选择,请注意依赖版本。 +- `jpa.hibernate.ddl-auto`:此处选择的 create-drop 将会在程序开始时创建表,退出时删除表。请勿在正式环境使用,但此处为示例程序,希望尽量不影响数据库数据,因此选择了此选项。 + +### 入口文件 + +入口文件 `App.java`: + +```java +package com.pingcap; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.context.ApplicationPidFileWriter; + +@SpringBootApplication +public class App { + public static void main(String[] args) { + SpringApplication springApplication = new SpringApplication(App.class); + springApplication.addListeners(new ApplicationPidFileWriter("spring-jpa-hibernate.pid")); + springApplication.run(args); + } +} +``` + +入口类比较简单,首先,有一个 Spring Boot 应用程序的标准配置注解 [@SpringBootApplication](https://docs.spring.io/spring-boot/docs/current/api/org/springframework/boot/autoconfigure/SpringBootApplication.html)。有关详细信息,请参阅 Spring Boot 官方文档中的 [Using the @SpringBootApplication Annotation](https://docs.spring.io/spring-boot/docs/current/reference/html/using-spring-boot.html#using-boot-using-springbootapplication-annotation)。随后,使用 `ApplicationPidFileWriter` 在程序启动过程中,写下一个名为 `spring-jpa-hibernate.pid` 的 PID (process identification number) 文件,可从外部使用此 PID 文件关闭此应用程序。 + +### 数据库持久层 + +数据库持久层,即 `dao` 包内,实现了数据对象的持久化。 + +#### 实体对象 + +`PlayerBean.java` 文件为实体对象,这个对象对应了数据库的一张表。 + +```java +package com.pingcap.dao; + +import jakarta.persistence.*; + +/** + * it's core entity in hibernate + * @Table appoint to table name + */ +@Entity +@Table(name = "player_jpa") +public class PlayerBean { + /** + * @ID primary key + * @GeneratedValue generated way. 
this field will use generator named "player_id" + * @SequenceGenerator using `sequence` feature to create a generator, + * and it named "player_jpa_id_seq" in database, initial form 1 (by `initialValue` + * parameter default), and every operator will increase 1 (by `allocationSize`) + */ + @Id + @GeneratedValue(generator="player_id") + @SequenceGenerator(name="player_id", sequenceName="player_jpa_id_seq", allocationSize=1) + private Long id; + + /** + * @Column field + */ + @Column(name = "coins") + private Integer coins; + @Column(name = "goods") + private Integer goods; + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } +} +``` + +这里可以看到,实体类中有很多注解,这些注解给了 Hibernate 额外的信息,用以绑定实体类和表: + +- `@Entity` 声明 `PlayerBean` 是一个实体类。 +- `@Table` 使用注解属性 `name` 将此实体类和表 `player_jpa` 关联。 +- `@Id` 声明此属性关联表的主键列。 +- `@GeneratedValue` 表示自动生成该列的值,而不应手动设置,使用属性 `generator` 指定生成器的名称为 `player_id`。 +- `@SequenceGenerator` 声明一个使用[序列](/sql-statements/sql-statement-create-sequence.md)的生成器,使用注解属性 `name` 声明生成器的名称为 `player_id` (与 `@GeneratedValue` 中指定的名称需保持一致)。随后使用注解属性 `sequenceName` 指定数据库中序列的名称。最后,使用注解属性 `allocationSize` 声明序列的步长为 1。 +- `@Column` 将每个私有属性声明为表 `player_jpa` 的一列,使用注解属性 `name` 确定属性对应的列名。 + +#### 存储库 + +为了抽象数据库层,Spring 应用程序使用 [Repository](https://docs.spring.io/spring-data/jpa/docs/current/reference/html/#repositories) 接口,或者 Repository 的子接口。 这个接口映射到一个数据库对象,常见的,比如会映射到一个表上。JPA 会实现一些预制的方法,比如 [INSERT](/sql-statements/sql-statement-insert.md),或使用主键的 [SELECT](/sql-statements/sql-statement-select.md) 等。 + +```java +package com.pingcap.dao; + +import jakarta.persistence.LockModeType; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.Pageable; +import 
org.springframework.data.jpa.repository.JpaRepository; +import org.springframework.data.jpa.repository.Lock; +import org.springframework.data.jpa.repository.Query; +import org.springframework.data.repository.query.Param; +import org.springframework.stereotype.Repository; + +import java.util.List; + +@Repository +public interface PlayerRepository extends JpaRepository { + /** + * use HQL to query by page + * @param pageable a pageable parameter required by hibernate + * @return player list package by page message + */ + @Query(value = "SELECT player_jpa FROM PlayerBean player_jpa") + Page getPlayersByPage(Pageable pageable); + + /** + * use SQL to query by limit, using named parameter + * @param limit sql parameter + * @return player list (max size by limit) + */ + @Query(value = "SELECT * FROM player_jpa LIMIT :limit", nativeQuery = true) + List getPlayersByLimit(@Param("limit") Integer limit); + + /** + * query player and add a lock for update + * @param id player id + * @return player + */ + @Lock(value = LockModeType.PESSIMISTIC_WRITE) + @Query(value = "SELECT player FROM PlayerBean player WHERE player.id = :id") + // @Query(value = "SELECT * FROM player_jpa WHERE id = :id FOR UPDATE", nativeQuery = true) + PlayerBean getPlayerAndLock(@Param("id") Long id); +} +``` + +`PlayerRepository` 拓展了 Spring 用于 JPA 数据访问所使用的接口 `JpaRepository`。使用 `@Query` 注解,告诉 Hibernate 此接口如何实现查询。在此处使用了两种查询语句的语法,其中,在接口 `getPlayersByPage` 中的查询语句使用的是一种被 Hibernate 称为 [HQL](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#hql) (Hibernate Query Language) 的语法。而接口 `getPlayersByLimit` 中使用的是普通的 SQL,在使用 SQL 语法时,需要将 `@Query` 的注解参数 `nativeQuery` 设置为 true。 + +在 `getPlayersByLimit` 注解的 SQL 中,`:limit` 在 Hibernate 中被称为[命名参数](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#jpql-query-parameters),Hibernate 将按名称自动寻找并拼接注解所在接口内的参数。你也可以使用 `@Param` 来指定与参数不同的名称用于注入。 + +在 `getPlayerAndLock` 中,使用了一个注解 
[@Lock](https://docs.spring.io/spring-data/jpa/docs/current/api/org/springframework/data/jpa/repository/Lock.html),此注解声明此处使用悲观锁进行锁定,如需了解更多其他锁定方式,可查看[实体锁定](https://openjpa.apache.org/builds/2.2.2/apache-openjpa/docs/jpa_overview_em_locking.html)文档。此处的 `@Lock` 仅可与 HQL 搭配使用,否则将会产生错误。当然,如果你希望直接使用 SQL 进行锁定,可直接使用注释部分的注解: + +```java +@Query(value = "SELECT * FROM player_jpa WHERE id = :id FOR UPDATE", nativeQuery = true) +``` + +直接使用 SQL 的 `FOR UPDATE` 来增加锁。你也可通过 TiDB [SELECT 文档](/sql-statements/sql-statement-select.md) 进行更深层次的原理学习。 + +### 逻辑实现 + +逻辑实现层,即 `service` 包,内含了项目实现的接口与逻辑 + +#### 接口 + +`PlayerService.java` 文件内定义了逻辑接口,实现接口,而不是直接编写一个类的原因,是尽量使例子贴近实际使用,体现设计的开闭原则。你也可以省略掉此接口,在依赖类中直接注入实现类,但并不推荐这样做。 + +```java +package com.pingcap.service; + +import com.pingcap.dao.PlayerBean; +import org.springframework.data.domain.Page; + +import java.util.List; + +public interface PlayerService { + /** + * create players by passing in a List of PlayerBean + * + * @param players will create players list + * @return The number of create accounts + */ + Integer createPlayers(List players); + + /** + * buy goods and transfer funds between one player and another in one transaction + * @param sellId sell player id + * @param buyId buy player id + * @param amount goods amount, if sell player has not enough goods, the trade will break + * @param price price should pay, if buy player has not enough coins, the trade will break + */ + void buyGoods(Long sellId, Long buyId, Integer amount, Integer price) throws RuntimeException; + + /** + * get the player info by id. + * + * @param id player id + * @return the player of this id + */ + PlayerBean getPlayerByID(Long id); + + /** + * get a subset of players from the data store by limit. + * + * @param limit return max size + * @return player list + */ + List getPlayers(Integer limit); + + /** + * get a page of players from the data store. 
+ * + * @param index page index + * @param size page size + * @return player list + */ + Page getPlayersByPage(Integer index, Integer size); + + /** + * count players from the data store. + * + * @return all players count + */ + Long countPlayers(); +} +``` + +#### 实现(重要) + +`PlayerService.java` 文件内实现了 `PlayerService` 接口,所有数据操作逻辑都编写在这里。 + +```java +package com.pingcap.service.impl; + +import com.pingcap.dao.PlayerBean; +import com.pingcap.dao.PlayerRepository; +import com.pingcap.service.PlayerService; +import jakarta.transaction.Transactional; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.PageRequest; +import org.springframework.stereotype.Service; + +import java.util.List; + +/** + * PlayerServiceImpl implements PlayerService interface + * @Transactional it means every method in this class, will package by a pair of + * transaction.begin() and transaction.commit(). and it will be call + * transaction.rollback() when method throw an exception + */ +@Service +@Transactional +public class PlayerServiceImpl implements PlayerService { + @Autowired + private PlayerRepository playerRepository; + + @Override + public Integer createPlayers(List players) { + return playerRepository.saveAll(players).size(); + } + + @Override + public void buyGoods(Long sellId, Long buyId, Integer amount, Integer price) throws RuntimeException { + PlayerBean buyPlayer = playerRepository.getPlayerAndLock(buyId); + PlayerBean sellPlayer = playerRepository.getPlayerAndLock(sellId); + if (buyPlayer == null || sellPlayer == null) { + throw new RuntimeException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new RuntimeException("coins or goods not enough, rollback"); + } + + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + playerRepository.save(buyPlayer); + + 
sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + playerRepository.save(sellPlayer); + } + + @Override + public PlayerBean getPlayerByID(Long id) { + return playerRepository.findById(id).orElse(null); + } + + @Override + public List getPlayers(Integer limit) { + return playerRepository.getPlayersByLimit(limit); + } + + @Override + public Page getPlayersByPage(Integer index, Integer size) { + return playerRepository.getPlayersByPage(PageRequest.of(index, size)); + } + + @Override + public Long countPlayers() { + return playerRepository.count(); + } +} +``` + +这里使用了 `@Service` 这个注解,声明此对象的生命周期交由 Spring 管理。 + +注意,除了有 `@Service` 注解之外,PlayerServiceImpl 实现类还有一个 [@Transactional](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#transaction-declarative-annotations) 注解。当在应用程序中启用事务管理时 (可使用 [@EnableTransactionManagement](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/transaction/annotation/EnableTransactionManagement.html) 打开,但 Spring Boot 默认开启,无需再次手动配置),Spring 会自动将所有带有 `@Transactional` 注释的对象包装在一个代理中,使用该代理对对象的调用进行处理。 + +你可以简单的认为,代理在带有 `@Transactional` 注释的对象内的函数调用时:在函数顶部将使用 `transaction.begin()` 开启事务,函数返回后,调用 `transaction.commit()` 进行事务提交,而出现任何运行时错误时,代理将会调用 `transaction.rollback()` 来回滚。 + +你可参阅[数据库事务](/develop/dev-guide-transaction-overview.md)来获取更多有关事务的信息,或者阅读 Spring 官网中的文章[理解 Spring 框架的声明式事务实现](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#tx-decl-explained)。 + +整个实现类中,`buyGoods` 函数需重点关注,其在不符合逻辑时将抛出异常,引导 Hibernate 进行事务回滚,防止出现错误数据。 + +### 外部接口 + +`controller` 包对外暴露 HTTP 接口,可以通过 [REST API](https://www.redhat.com/en/topics/api/what-is-a-rest-api#) 来访问服务。 + +```java +package com.pingcap.controller; + +import com.pingcap.dao.PlayerBean; +import com.pingcap.service.PlayerService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.domain.Page; +import 
org.springframework.lang.NonNull; +import org.springframework.web.bind.annotation.*; + +import java.util.List; + +@RestController +@RequestMapping("/player") +public class PlayerController { + @Autowired + private PlayerService playerService; + + @PostMapping + public Integer createPlayer(@RequestBody @NonNull List playerList) { + return playerService.createPlayers(playerList); + } + + @GetMapping("/{id}") + public PlayerBean getPlayerByID(@PathVariable Long id) { + return playerService.getPlayerByID(id); + } + + @GetMapping("/limit/{limit_size}") + public List getPlayerByLimit(@PathVariable("limit_size") Integer limit) { + return playerService.getPlayers(limit); + } + + @GetMapping("/page") + public Page getPlayerByPage(@RequestParam Integer index, @RequestParam("size") Integer size) { + return playerService.getPlayersByPage(index, size); + } + + @GetMapping("/count") + public Long getPlayersCount() { + return playerService.countPlayers(); + } + + @PutMapping("/trade") + public Boolean trade(@RequestParam Long sellID, @RequestParam Long buyID, @RequestParam Integer amount, @RequestParam Integer price) { + try { + playerService.buyGoods(sellID, buyID, amount, price); + } catch (RuntimeException e) { + return false; + } + + return true; + } +} +``` + +`PlayerController` 中使用了尽可能多的注解方式来作为示例展示功能,在实际项目中,请尽量保持风格的统一,同时遵循你公司或团体的规则。`PlayerController` 有许多注解,下方将进行逐一解释: + +- [@RestController](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RestController.html) 将 `PlayerController` 声明为一个 [Web Controller](https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93controller),且将返回值序列化为 JSON 输出。 +- [@RequestMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RequestMapping.html) 映射 URL 端点为 `/player`,即此 `Web Controller` 仅监听 `/player` URL 下的请求。 +- `@Autowired` 用于 Spring 的自动装配,可以看到,此处声明需要一个 `PlayerService` 对象,此对象为接口,并未指定使用哪一个实现类,这是由 Spring 自动装配的,有关此装配规则,可查看 Spirng 官网中的 
[The IoC container](https://docs.spring.io/spring-framework/docs/3.2.x/spring-framework-reference/html/beans.html) 一文。 +- [@PostMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PostMapping.html) 声明此函数将响应 HTTP 中的 [POST](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/POST) 类型请求。 + - `@RequestBody` 声明此处将 HTTP 的整个载荷解析到参数 `playerList` 中。 + - `@NonNull` 声明参数不可为空,否则将校验并返回错误。 +- [@GetMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/GetMapping.html) 声明此函数将响应 HTTP 中的 [GET](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/GET) 类型请求。 + - [@PathVariable](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PathVariable.html) 可以看到注解中有形如 `{id}` 、`{limit_size}` 这样的占位符,这种占位符将被绑定到 `@PathVariable` 注释的变量中,绑定的依据是注解中的注解属性 `name`(变量名可省略,即 `@PathVariable(name="limit_size")` 可写成 `@PathVariable("limit_size")` ),不特殊指定时,与变量名名称相同。 +- [@PutMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PutMapping.html) 声明此函数将响应 HTTP 中的 [PUT](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/PUT) 类型请求。 +- [@RequestParam](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RequestParam.html) 此声明将解析请求中的 URL 参数、表单参数等参数,绑定至注解的变量中。 + +## 创建相同依赖空白程序(可选) + +本程序使用 [Spring Initializr](https://start.spring.io/) 构建。你可以在这个网页上通过点选以下选项并更改少量配置,来快速得到一个与本示例程序相同依赖的空白应用程序,配置项如下: + +**Project** + +- Maven Project + +**Language** + +- Java + +**Spring Boot** + +- 最新稳定版本 + +**Project Metadata** + +- Group: com.pingcap +- Artifact: spring-jpa-hibernate +- Name: spring-jpa-hibernate +- Package name: com.pingcap +- Packaging: Jar +- Java: 17 + +**Dependencies** + +- Spring Web +- Spring Data JPA +- MySQL Driver + +> **注意:** +> +> 尽管 SQL 相对标准化,但每个数据库供应商都使用 ANSI SQL 定义语法的子集和超集。这被称为数据库的方言。 Hibernate 通过其 
org.hibernate.dialect.Dialect 类和每个数据库供应商的各种子类来处理这些方言的变化。 +> +> 在大多数情况下,Hibernate 将能够通过在启动期间通过 JDBC 连接的一些返回值来确定要使用的正确方言。有关 Hibernate 确定要使用的正确方言的能力(以及你影响该解析的能力)的信息,请参阅[方言解析](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#portability-dialectresolver)。 +> +> 如果由于某种原因无法确定正确的方言,或者你想使用自定义方言,则需要设置 hibernate.dialect 配置项。 +> +> _—— 节选自 Hibernate 官方文档: [Database Dialect](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#database-dialect)_ + +随后,即可获取一个拥有与示例程序相同依赖的空白 **Spring Boot** 应用程序。 \ No newline at end of file diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-django.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-django.md new file mode 100644 index 00000000..80e143b7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-django.md @@ -0,0 +1,783 @@ +--- +title: 使用 Django 构建 TiDB 应用程序 +summary: 给出一个 Django 构建 TiDB 应用程序示例。 +aliases: ['/zh/tidb/dev/dev-guide-sample-application-django'] +--- + + + +# 使用 Django 构建 TiDB 应用程序 + +本文档将展示如何使用 [Django](https://www.djangoproject.com/) 构建一个 TiDB Web 应用程序。使用 [django-tidb](https://github.com/pingcap/django-tidb) 模块作为数据访问能力的框架。示例应用程序的代码可从 [Github](https://github.com/pingcap-inc/tidb-example-python) 下载。 + +这是一个较为完整的构建 Restful API 的示例应用程序,展示了一个使用 TiDB 作为数据库的通用 Django 后端服务。该示例设计了以下过程,用于还原一个现实场景: + +这是一个关于游戏的例子,每个玩家有两个属性:金币数 `coins` 和货物数 `goods`。且每个玩家都拥有一个字段 `id`,作为玩家的唯一标识。玩家在金币数和货物数充足的情况下,可以自由地交易。 + +你可以以此示例为基础,构建自己的应用程序。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:安装 Python + +请在你的计算机上下载并安装 
**Python**。本文的示例使用 [Django 3.2.16](https://docs.djangoproject.com/zh-hans/3.2/) 版本。根据 [Django 文档](https://docs.djangoproject.com/zh-hans/3.2/faq/install/#what-python-version-can-i-use-with-django),Django 3.2.16 版本支持 Python 3.6、3.7、3.8、3.9 和 3.10 版本,推荐使用 Python 3.10 版本。 + +## 第 3 步:获取应用程序代码 + +> **建议:** +> +> 如果你希望得到一个与本示例相同依赖的空白程序,而无需示例代码,可参考[创建相同依赖空白程序(可选)](#创建相同依赖空白程序可选)一节。 + +请下载或克隆示例代码库 [pingcap-inc/tidb-example-python](https://github.com/pingcap-inc/tidb-example-python),并进入到目录 `django_example` 中。 + +## 第 4 步:运行应用程序 + +接下来运行应用程序代码,将会生成一个 Web 应用程序。你可以使用 `python manage.py migrate` 命令,要求 Django 在数据库 `django` 中创建一个表 `player`。如果你向应用程序的 Restful API 发送请求,这些请求将会在 TiDB 集群上运行[数据库事务](/develop/dev-guide-transaction-overview.md)。 + +如果你想了解有关此应用程序的代码的详细信息,可参阅[实现细节](#实现细节)部分。 + +### 第 4 步第 1 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +更改 `example_project/settings.py` 中的 `DATABASES` 参数: + +```python +DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': 'root', + 'PASSWORD': '', + 'HOST': '127.0.0.1', + 'PORT': 4000, + }, +} +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +下面以 macOS 为例,应将参数更改为: + +```python +DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': '2aEp24QWEDLqRFs.root', + 'PASSWORD': '123456', + 'HOST': 'xxx.tidbcloud.com', + 'PORT': 4000, + 'OPTIONS': { + 'ssl': { + "ca": "" + }, + }, + }, +} +``` + +### 第 4 步第 2 部分:运行 + +1. 打开终端,进入 `tidb-example-python` 代码示例目录: + + ```bash + cd /tidb-example-python + ``` + +2. 安装项目依赖并进入 `django_example` 目录: + + ```bash + pip install -r requirement.txt + cd django_example + ``` + +3. 
运行数据模型迁移: + + > **注意:** + > + > - 此步骤假定已经存在 `django` 数据库。 + > - 若未创建 `django` 数据库,可通过 `CREATE DATABASE django` 语句进行创建。关于创建数据库语句的详细信息,参考 [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md#create-database)。 + > - 数据库名称 `NAME` 可在 `example_project/settings.py` 的 `DATABASES` 属性中更改。 + + 这将在你连接的数据库内生成 Django 所需的相应数据表。 + + ```bash + python manage.py migrate + ``` + +4. 运行应用程序: + + ```bash + python manage.py runserver + ``` + +### 第 4 步第 3 部分:输出 + +输出的最后部分应如下所示: + +``` +Watching for file changes with StatReloader +Performing system checks... + +System check identified no issues (0 silenced). +December 12, 2022 - 08:21:50 +Django version 3.2.16, using settings 'example_project.settings' +Starting development server at http://127.0.0.1:8000/ +Quit the server with CONTROL-C. +``` + +如果你想了解有关此应用程序的代码的详细信息,可参阅[实现细节](#实现细节)部分。 + +## 第 5 步:HTTP 请求 + +在运行应用程序后,你可以通过访问根地址 `http://localhost:8000` 向后端程序发送 HTTP 请求。下面将给出一些示例请求来演示如何使用该服务。 + + + +
+ +1. 将配置文件 [`Player.postman_collection.json`](https://raw.githubusercontent.com/pingcap-inc/tidb-example-python/main/django_example/Player.postman_collection.json) 导入 [Postman](https://www.postman.com/)。 + +2. 导入后 **Collections** > **Player** 如图所示: + + ![postman import](/media/develop/postman_player_import.png) + +3. 发送请求: + + - 增加玩家 + + 点击 **Create** 标签,点击 **Send** 按钮,发送 `POST` 形式的 `http://localhost:8000/player/` 请求。返回值为增加的玩家个数,预期为 1。 + + - 使用 ID 获取玩家信息 + + 点击 **GetByID** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/1` 请求。返回值为 ID 为 1 的玩家信息。 + + - 使用 Limit 批量获取玩家信息 + + 点击 **GetByLimit** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/limit/3` 请求。返回值为最多 3 个玩家的信息列表。 + + - 获取玩家个数 + + 点击 **Count** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/count` 请求。返回值为玩家个数。 + + - 玩家交易 + + 点击 **Trade** 标签,点击 **Send** 按钮,发送 `POST` 形式的 `http://localhost:8000/player/trade` 请求。请求参数为售卖玩家 ID `sellID`、购买玩家 ID `buyID`、购买货物数量 `amount` 以及购买消耗金币数 `price`。返回值为交易是否成功。当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + +
+ +
+ +下面使用 curl 请求服务端。 + +- 增加玩家 + + 使用 `POST` 方法向 `/player` 端点发送请求来增加玩家,例如: + + ```shell + curl --location --request POST 'http://localhost:8000/player/' --header 'Content-Type: application/json' --data-raw '[{"coins":100,"goods":20}]' + ``` + + 这里使用 JSON 作为信息的载荷。表示需要创建一个金币数 `coins` 为 100,货物数 `goods` 为 20 的玩家。返回值为创建的玩家信息: + + ``` + create 1 players. + ``` + +- 使用 ID 获取玩家信息 + + 使用 `GET` 方法向 `/player` 端点发送请求来获取玩家信息。此外,还需要在路径上给出玩家的 ID 参数,即 `/player/{id}`。例如,在请求 ID 为 1 的玩家时: + + ```shell + curl --location --request GET 'http://localhost:8000/player/1' + ``` + + 返回值为 ID 为 1 的玩家的信息: + + ```json + { + "coins": 200, + "goods": 10, + "id": 1 + } + ``` + +- 使用 Limit 批量获取玩家信息 + + 使用 `GET` 方法向 `/player/limit` 端点发送请求来获取玩家信息。此外,还需要在路径上给出限制查询的玩家信息的总数,即 `/player/limit/{limit}`。例如,在请求最多 3 个玩家的信息时: + + ```shell + curl --location --request GET 'http://localhost:8000/player/limit/3' + ``` + + 返回值为玩家信息的列表: + + ```json + [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + }, + { + "coins": 100, + "goods": 20, + "id": 3 + } + ] + ``` + +- 获取玩家个数 + + 使用 `GET` 方法向 `/player/count` 端点发送请求来获取玩家个数: + + ```shell + curl --location --request GET 'http://localhost:8000/player/count' + ``` + + 返回值为玩家个数: + + ``` + 4 + ``` + +- 玩家交易 + + 使用 `POST` 方法向 `/player/trade` 端点发送请求来发起玩家间的交易,例如: + + ```shell + curl --location --request POST 'http://localhost:8000/player/trade' \ + --header 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'sellID=1' \ + --data-urlencode 'buyID=2' \ + --data-urlencode 'amount=10' \ + --data-urlencode 'price=100' + ``` + + 这里使用 Form Data 作为信息的载荷。表示售卖玩家 ID `sellID` 为 1、购买玩家 ID `buyID` 为 2、购买货物数量 `amount` 为 10、购买消耗金币数 `price` 为 100。 + + 返回值为交易是否成功: + + ``` + true + ``` + + 当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + +
+ +
+ +为方便测试,你可以使用 [`request.sh`](https://github.com/pingcap-inc/tidb-example-python/blob/main/django_example/request.sh) 脚本依次发送以下请求: + +1. 循环创建 10 名玩家 +2. 获取 ID 为 1 的玩家信息 +3. 获取至多 3 名玩家信息列表 +4. 获取玩家总数 +5. ID 为 1 的玩家作为售出方,ID 为 2 的玩家作为购买方,购买 10 个货物,耗费 100 金币 + +使用 `./request.sh` 命令运行此脚本,运行结果如下所示: + +```shell +> ./request.sh +loop to create 10 players: +create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players. + +get player 1: +{"id": 1, "coins": 100, "goods": 20} + +get players by limit 3: +[{"id": 1, "coins": 100, "goods": 20}, {"id": 2, "coins": 100, "goods": 20}, {"id": 3, "coins": 100, "goods": 20}] + +get players count: +10 + +trade by two players: +trade successful +``` + +
+ +
+ +## 实现细节 + +本小节介绍示例应用程序项目中的组件。 + +### 总览 + +本示例项目的目录树大致如下所示: + +``` +. +├── example_project +│ ├── __init__.py +│ ├── asgi.py +│ ├── settings.py +│ ├── urls.py +│ └── wsgi.py +├── player +│ ├── __init__.py +│ ├── admin.py +│ ├── apps.py +│ ├── migrations +│ │ ├── 0001_initial.py +│ │ └── __init__.py +│ ├── models.py +│ ├── tests.py +│ ├── urls.py +│ └── views.py +└── manage.py +``` + +其中: + +- 每一个文件夹中的 `__init__.py` 文件声明了该文件夹是一个 Python 包。 +- `manage.py` 为 Django 自动生成的用于管理项目的脚本。 +- `example_project` 包含项目级别的代码: + + - `settings.py` 声明了项目的配置,如数据库地址、密码、使用的数据库方言等。 + - `urls.py` 配置了项目的根路由。 + +- `player` 是项目中提供对 `Player` 数据模型管理、数据查询的包,这在 Django 中被称为应用。你可以使用 `python manage.py startapp player` 来创建一个空白的 `player` 应用。 + + - `models.py` 定义了 `Player` 数据模型。 + - `migrations` 是一组数据模型迁移脚本。你可以使用 `python manage.py makemigrations player` 命令自动分析 `models.py` 文件中定义的数据对象,并生成迁移脚本。 + - `urls.py` 定义了应用的路由。 + - `views.py` 提供了应用的逻辑代码。 + +> **注意:** +> +> 由于 Django 的设计采用了可插拔模式,因此,你需要在创建应用后,在项目中进行注册。在本示例中,注册过程就是在 `example_project/settings.py` 文件中,在 `INSTALLED_APPS` 对象内添加 `'player.apps.PlayerConfig'` 条目。你可以参考示例代码 [`settings.py`](https://github.com/pingcap-inc/tidb-example-python/blob/main/django_example/example_project/settings.py#L33-L41) 以获得更多信息。 + +### 项目配置 + +本节将简要介绍 `example_project` 包内 `settings.py` 的重要配置。这个文件包含了 Django 项目的配置,声明了项目包含的应用、中间件、连接的数据库等信息。你可以通过[创建相同依赖空白程序](#创建相同依赖空白程序可选)这一节来了解此配置文件的生成流程,也可直接在项目中使用 `settings.py` 文件。关于 Django 配置的更多信息,参考 [Django 配置](https://docs.djangoproject.com/zh-hans/3.2/topics/settings/)文档。 + +```python +... 
+ +# Application definition + +INSTALLED_APPS = [ + 'player.apps.PlayerConfig', + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + # 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +... + +# Database +# https://docs.djangoproject.com/en/3.2/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': 'root', + 'PASSWORD': '', + 'HOST': '127.0.0.1', + 'PORT': 4000, + }, +} +DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' + +... +``` + +其中: + +- `INSTALLED_APPS`:启用的应用全限定名称列表。 +- `MIDDLEWARE`:启用的中间件列表。由于本示例无需 `CsrfViewMiddleware` 中间件,因此其被注释。 +- `DATABASES`:数据库配置。其中,`ENGINE` 一项被配置为 `django_tidb`,这遵循了 [django-tidb](https://github.com/pingcap/django-tidb) 的配置要求。 + +### 根路由 + +在 `example_project` 包中的 `urls.py` 文件中编写了根路由: + +```python +from django.contrib import admin +from django.urls import include, path + +urlpatterns = [ + path('player/', include('player.urls')), + path('admin/', admin.site.urls), +] +``` + +在上面的示例中,根路由将 `player/` 路径指向 `player.urls`。即,`player` 包下的 `urls.py` 将负责处理所有以 `player/` 开头的 URL 请求。关于更多 Django URL 调度器的信息,请参考 [Django URL 调度器](https://docs.djangoproject.com/zh-hans/3.2/topics/http/urls/)文档。 + +### player 应用 + +`player` 应用实现了对 `Player` 对象的数据模型迁移、对象持久化、接口实现等功能。 + +#### 数据模型 + +`models.py` 文件内包含 `Player` 数据模型,这个模型对应了数据库的一张表。 + +```python +from django.db import models + +# Create your models here. 
+ + +class Player(models.Model): + id = models.AutoField(primary_key=True) + coins = models.IntegerField() + goods = models.IntegerField() + + objects = models.Manager() + + class Meta: + db_table = "player" + + def as_dict(self): + return { + "id": self.id, + "coins": self.coins, + "goods": self.goods, + } +``` + +在上面的示例中,数据模型中有一个子类 `Meta`,这些子类给了 Django 额外的信息,用以指定数据模型的元信息。其中,`db_table` 声明此数据模型对应的表名为 `player`。关于模型元信息的全部选项可查看 [Django 模型 Meta 选项](https://docs.djangoproject.com/zh-hans/3.2/ref/models/options/)文档。 + +此外,数据模型中定义了 `id`、`coins` 及 `goods` 三个属性: + +- `id`:`models.AutoField(primary_key=True)` 表示其为一个自动递增的主键。 +- `coins`:`models.IntegerField()` 表示其为一个 Integer 类型的字段。 +- `goods`:`models.IntegerField()` 表示其为一个 Integer 类型的字段。 + +关于数据模型的详细信息,可查看 [Django 模型](https://docs.djangoproject.com/zh-hans/3.2/topics/db/models/)文档。 + +#### 数据模型迁移 + +Django 以 Python 数据模型定义代码为依赖,对数据库模型进行迁移。因此,它会生成一系列数据库模型迁移脚本,用于解决代码与数据库之间的差异。在 `models.py` 中定义完 `Player` 数据模型后,你可以使用 `python manage.py makemigrations player` 生成迁移脚本。在本文示例中,`migrations` 包内的 `0001_initial.py` 就是自动生成的迁移脚本。 + +```python +# Generated by Django 3.2.16 on 2022-11-16 11:09 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Player', + fields=[ + ('id', models.AutoField(primary_key=True, serialize=False)), + ('coins', models.IntegerField()), + ('goods', models.IntegerField()), + ], + options={ + 'db_table': 'player', + }, + ), + ] +``` + +你可以使用 `python manage.py sqlmigrate ...` 来预览迁移脚本最终将运行的 SQL 语句。这将极大地减少迁移脚本运行你意料之外的 SQL 语句的可能性。在生成迁移脚本后,推荐至少使用一次此命令预览并仔细检查生成的 SQL 语句。在本示例中,你可以运行 `python manage.py sqlmigrate player 0001`,其输出为可读的 SQL 语句,有助于开发者对语句进行审核: + +```sql +-- +-- Create model Player +-- +CREATE TABLE `player` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `coins` integer NOT NULL, `goods` integer NOT NULL); +``` + +生成迁移脚本后,你可以使用 `python manage.py migrate` 
实施数据迁移。此命令拥有幂等性,其运行后将在数据库内保存一条运行记录以完成幂等保证。因此,你可以多次运行此命令,而无需担心重复运行 SQL 语句。 + +#### 应用路由 + +在[根路由](#根路由)一节中,示例程序将 `player/` 路径指向了 `player.urls`。本节将展开叙述 `player` 包下的 `urls.py` 应用路由: + +```python +from django.urls import path + +from . import views + +urlpatterns = [ + path('', views.create, name='create'), + path('count', views.count, name='count'), + path('limit/', views.limit_list, name='limit_list'), + path('', views.get_by_id, name='get_by_id'), + path('trade', views.trade, name='trade'), +] +``` + +应用路由注册了 5 个路径: + +- `''`:被指向了 `views.create` 函数。 +- `'count'`:被指向了 `views.count` 函数。 +- `'limit/'`:被指向了 `views.limit_list` 函数。此处路径包含一个 `` 路径变量,其中: + + - `int` 是指这个参数其将被验证是否为 `int` 类型。 + - `limit` 是指此参数的值将被映射至名为 `limit` 的函数入参中。 + +- `''`:被指向了 `views.get_by_id` 函数,此处路径包含一个 `` 路径变量。 +- `'trade'`:被指向了 `views.trade` 函数。 + +此外,应用路由是根路由转发而来的,因此将在 URL 匹配时包含根路由配置的路径。如上面示例所示,根路由配置为 `player/` 转发至此应用路由,那么,应用路由中的: + +- `''` 在实际的请求中为 `http(s)://(:)/player`。 +- `'count'` 在实际的请求中为 `http(s)://(:)/player/count`。 +- `'limit/'` 以 `limit` 为 `3` 为例,在实际的请求中为 `http(s)://(:)/player/limit/3`。 + +#### 逻辑实现 + +逻辑实现代码,在 `player` 包下的 `views.py` 内,这在 Django 中被称为视图。关于 Django 视图的更多信息,参考 [Django 视图](https://docs.djangoproject.com/zh-hans/3.2/topics/http/views/)文档。 + +```python +from django.db import transaction +from django.db.models import F +from django.shortcuts import get_object_or_404 + +from django.http import HttpResponse, JsonResponse +from django.views.decorators.http import * +from .models import Player +import json + + +@require_POST +def create(request): + dict_players = json.loads(request.body.decode('utf-8')) + players = list(map( + lambda p: Player( + coins=p['coins'], + goods=p['goods'] + ), dict_players)) + result = Player.objects.bulk_create(objs=players) + return HttpResponse(f'create {len(result)} players.') + + +@require_GET +def count(request): + return HttpResponse(Player.objects.count()) + + +@require_GET +def limit_list(request, limit: int = 0): + if limit == 0: + return 
HttpResponse("") + players = set(Player.objects.all()[:limit]) + dict_players = list(map(lambda p: p.as_dict(), players)) + return JsonResponse(dict_players, safe=False) + + +@require_GET +def get_by_id(request, player_id: int): + result = get_object_or_404(Player, pk=player_id).as_dict() + return JsonResponse(result) + + +@require_POST +@transaction.atomic +def trade(request): + sell_id, buy_id, amount, price = int(request.POST['sellID']), int(request.POST['buyID']), \ + int(request.POST['amount']), int(request.POST['price']) + sell_player = Player.objects.select_for_update().get(id=sell_id) + if sell_player.goods < amount: + raise Exception(f'sell player {sell_player.id} goods not enough') + + buy_player = Player.objects.select_for_update().get(id=buy_id) + if buy_player.coins < price: + raise Exception(f'buy player {buy_player.id} coins not enough') + + Player.objects.filter(id=sell_id).update(goods=F('goods') - amount, coins=F('coins') + price) + Player.objects.filter(id=buy_id).update(goods=F('goods') + amount, coins=F('coins') - price) + + return HttpResponse("trade successful") +``` + +下面将逐一解释代码中的重点部分: + +- 装饰器: + + - `@require_GET`:代表此函数仅接受 `GET` 类型的 HTTP 请求。 + - `@require_POST`:代表此函数仅接受 `POST` 类型的 HTTP 请求。 + - `@transaction.atomic`:代表此函数内的所有数据库操作将被包含于同一个事务中运行。关于在 Django 中使用事务的更多信息,可参考 [Django 数据库事务](https://docs.djangoproject.com/zh-hans/3.2/topics/db/transactions/)文档。关于 TiDB 中事物的详细信息,可参考 [TiDB 事务概览](/develop/dev-guide-transaction-overview.md)。 + +- `create` 函数: + + - 获取 `request` 对象中 `body` 的 Payload,并用 `utf-8` 解码: + + ```python + dict_players = json.loads(request.body.decode('utf-8')) + ``` + + - 使用 lambda 中的 `map` 函数,将 dict 类型的 `dict_players` 对象转换为 `Player` 数据模型的列表: + + ```python + players = list(map( + lambda p: Player( + coins=p['coins'], + goods=p['goods'] + ), dict_players)) + ``` + + - 调用 `Player` 数据模型的 `bulk_create` 函数,批量添加 `players` 列表,并返回添加的数据条目: + + ```python + result = Player.objects.bulk_create(objs=players) + return HttpResponse(f'create 
{len(result)} players.') + ``` + +- `count` 函数:调用 `Player` 数据模型的 `count` 函数,并返回所有的数据条目。 +- `limit_list` 函数: + + - 短路逻辑,`limit` 为 `0` 时不发送数据库请求: + + ```python + if limit == 0: + return HttpResponse("") + ``` + + - 调用 `Player` 数据模型的 `all` 函数,并使用切片操作符获取前 `limit` 个数据。需要注意的是,Django 不是获取所有数据并在内存中切分前 `limit` 个数据,而是在使用时请求数据库的前 `limit` 个数据。这是由于 Django 重写了切片操作符,并且 QuerySet 对象是**惰性**的。这意味着对一个未执行的 QuerySet 进行切片,将继续返回一个未执行的 QuerySet,直到你第一次真正的请求 QuerySet 内的数据。例如此处使用 `set` 函数对其进行迭代并返回整个集合。关于 Django QuerySet 的更多信息,你可以参考 [Django QuerySet API](https://docs.djangoproject.com/zh-hans/3.2/ref/models/querysets/) 文档。 + + ```python + players = set(Player.objects.all()[:limit]) + ``` + + - 将返回的 `Player` 数据模型的列表,转为对象为 dict 的列表,并使用 `JsonResponse` 输出。 + + ```python + dict_players = list(map(lambda p: p.as_dict(), players)) + return JsonResponse(dict_players, safe=False) + ``` + +- `get_by_id` 函数: + + - 使用 `get_object_or_404` 语法糖传入 `player_id`,并将 `Player` 对象转为 dict。如数据不存在,将由此函数返回 `404` 状态码: + + ```python + result = get_object_or_404(Player, pk=player_id).as_dict() + ``` + + - 使用 `JsonResponse` 返回数据: + + ```python + return JsonResponse(result) + ``` + +- `trade` 函数: + + - 从 `POST` Payload 中接收 Form 形式的数据: + + ```python + sell_id, buy_id, amount, price = int(request.POST['sellID']), int(request.POST['buyID']), \ + int(request.POST['amount']), int(request.POST['price']) + ``` + + - 调用 `Player` 数据模型的 `select_for_update` 函数对卖家和买家的数据进行加锁,并检查卖家的货物数量和买家的货币数量是否足够。该函数使用了 `@transaction.atomic` 装饰器,任意异常都会导致事务回滚。可以利用这个机制,在任意检查失败的时候,抛出异常,由 Django 进行事务回滚。 + + ```python + sell_player = Player.objects.select_for_update().get(id=sell_id) + if sell_player.goods < amount: + raise Exception(f'sell player {sell_player.id} goods not enough') + + buy_player = Player.objects.select_for_update().get(id=buy_id) + if buy_player.coins < price: + raise Exception(f'buy player {buy_player.id} coins not enough') + ``` + + - 更新卖家与买家的数据。由于这里使用了 `@transaction.atomic` 装饰器,任何异常都将由 Django 回滚事务。因此,请不要在此处使用 `try-except` 
语句进行异常处理。如果一定需要处理,请在 except 块中将异常继续抛向上层,以防止因 Django 误认为函数运行正常而提交事务,导致数据错误。 + + ```python + Player.objects.filter(id=sell_id).update(goods=F('goods') - amount, coins=F('coins') + price) + Player.objects.filter(id=buy_id).update(goods=F('goods') + amount, coins=F('coins') - price) + ``` + + - 返回交易成功字符串,因为其他情况将导致异常抛出返回: + + ```python + return HttpResponse("trade successful") + ``` + +## 创建相同依赖空白程序(可选) + +本程序使用 Django Admin CLI [django-admin](https://django-admin-cli.readthedocs.io/en/stable/index.html) 构建。你可以安装并使用 `django-admin` 来快速完成 Django 项目的初始化。如果需要快速获得与示例程序 `django_example` 相同的可运行空白应用程序,可以按照以下步骤操作: + +1. 初始化 Django 项目 `copy_django_example`: + + ```bash + pip install -r requirement.txt + django-admin startproject copy_django_example + cd copy_django_example + ``` + +2. 更改 `DATABASES` 配置: + + 1. 打开 `copy_django_example/settings.py` 配置文件 + 2. 将 `DATABASES` 部分从指向本地 SQLite 的配置更改为 TiDB 集群的信息: + + ```python + DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': 'root', + 'PASSWORD': '', + 'HOST': '127.0.0.1', + 'PORT': 4000, + }, + } + DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' + ``` + + 3. 
由于本示例不需要 CSRF(跨站请求伪造)校验
步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 MySQL Connector/Python **8.0.31** 版本进行说明。虽然 Python 的 Driver 相较其他语言,使用也极其方便。但因其不可屏蔽底层实现,需手动管控事务的特性,如果没有大量必须使用 SQL 的场景,仍然推荐使用 ORM 进行程序编写。这可以降低程序的耦合性。 + +```python +import uuid +from typing import List + +from mysql.connector import connect, MySQLConnection +from mysql.connector.cursor import MySQLCursor + + +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test') + connection.autocommit = autocommit + return connection + + +def create_player(cursor: MySQLCursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: MySQLCursor, player_id: str) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: MySQLCursor, limit: int) -> List[tuple]: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((str(uuid.uuid4()), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: MySQLCursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: MySQLCursor) -> int: + cursor.execute("SELECT count(*) FROM player") + return cursor.fetchone()[0] + + +def trade_check(cursor: MySQLCursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + _, sell_goods = cursor.fetchone() + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + 
return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buy_coins, _ = cursor.fetchone() + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: MySQLCursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: MySQLConnection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as connection: + with connection.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(f'id:{test_player[0]}, coins:{test_player[1]}, goods:{test_player[2]}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # all players have random uuid + print(f'start to insert one by one, it will take a long time') + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + print(f'inserted {idx} players') + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. 
+ three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(f'id:{player[0]}, coins:{player[1]}, goods:{player[2]}') + + +def trade_example() -> None: + with get_connection(autocommit=False) as conn: + with conn.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + conn.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(conn, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(conn, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with conn.cursor() as cur: + _, player1_coin, player1_goods = get_player(cur, "1") + print(f'id:1, coins:{player1_coin}, goods:{player1_goods}') + _, player2_coin, player2_goods = get_player(cur, "2") + print(f'id:2, coins:{player2_coin}, goods:{player2_goods}') + + +simple_example() +trade_example() +``` + +Driver 有着更低的封装程度,因此我们可以在程序内见到大量的 SQL。程序内查询到的 `Player`,与 ORM 不同,因为没有数据对象的存在,`Player` 将以 tuple 进行表示。 + +关于 MySQL Connector/Python 的更多使用方法,你可以参考 [MySQL Connector/Python 官方文档](https://dev.mysql.com/doc/connector-python/en/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +更改 `mysql_connector_python_example.py` 内 `get_connection` 函数: + +```python +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test') + connection.autocommit = autocommit + return connection +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `get_connection` 更改为: + +```python +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect( + host="xxx.tidbcloud.com", + port=4000, + user="2aEp24QWEDLqRFs.root", + password="123456", + database="test", + autocommit=autocommit, + ssl_ca='', + ssl_verify_identity=True + ) + connection.autocommit = autocommit + return connection +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 mysql_connector_python_example.py +``` + +## 第 4 步:预期输出 + +[MySQL Connector/Python 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#mysql-connector-python) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysqlclient.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysqlclient.md new file mode 100644 index 00000000..6665889c --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysqlclient.md @@ 
-0,0 +1,282 @@ +--- +title: TiDB 和 mysqlclient 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 mysqlclient 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 mysqlclient 的简单 CRUD 应用程序 + +[mysqlclient](https://pypi.org/project/mysqlclient/) 为当前比较流行的开源 Python Driver 之一。 + +本文档将展示如何使用 TiDB 和 mysqlclient 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 mysqlclient **2.1.1** 版本进行说明。虽然 Python 的 Driver 相较其他语言,使用也极其方便。但因其不可屏蔽底层实现,需手动管控事务的特性,如果没有大量必须使用 SQL 的场景,仍然推荐使用 ORM 进行程序编写。这可以降低程序的耦合性。 + +```python +import uuid +from typing import List + +import MySQLdb +from MySQLdb import Connection +from MySQLdb.cursors import Cursor + +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="test", + autocommit=autocommit + ) + + +def create_player(cursor: Cursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: Cursor, player_id: str) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: Cursor, limit: int) -> List[tuple]: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((uuid.uuid4(), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: Cursor, players: List[tuple]) -> None: 
+ cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: Cursor) -> int: + cursor.execute("SELECT count(*) FROM player") + return cursor.fetchone()[0] + + +def trade_check(cursor: Cursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + _, sell_goods = cursor.fetchone() + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buy_coins, _ = cursor.fetchone() + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: Cursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: Connection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as conn: + with conn.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. 
+ test_player = get_player(cur, "test") + print(f'id:{test_player[0]}, coins:{test_player[1]}, goods:{test_player[2]}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. + three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(f'id:{player[0]}, coins:{player[1]}, goods:{player[2]}') + + +def trade_example() -> None: + with get_connection(autocommit=False) as conn: + with conn.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + conn.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(conn, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. 
+ # this trade will be successful + trade(conn, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with conn.cursor() as cur: + _, player1_coin, player1_goods = get_player(cur, "1") + print(f'id:1, coins:{player1_coin}, goods:{player1_goods}') + _, player2_coin, player2_goods = get_player(cur, "2") + print(f'id:2, coins:{player2_coin}, goods:{player2_goods}') + + +simple_example() +trade_example() +``` + +Driver 有着更低的封装程度,因此我们可以在程序内见到大量的 SQL。程序内查询到的 `Player`,与 ORM 不同,因为没有数据对象的存在,`Player` 将以元组 (tuple) 进行表示。 + +关于 mysqlclient 的更多使用方法,你可以参考 [mysqlclient 官方文档](https://mysqlclient.readthedocs.io/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `mysqlclient_example.py` 内 `get_connection` 函数: + +```python +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="test", + autocommit=autocommit + ) +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `get_connection` 更改为: + +```python +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="xxx.tidbcloud.com", + port=4000, + user="2aEp24QWEDLqRFs.root", + password="123456", + database="test", + autocommit=autocommit, + ssl_mode="VERIFY_IDENTITY", + ssl={ + "ca": "" + } + ) +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 mysqlclient_example.py +``` + +## 第 4 步:预期输出 + +[mysqlclient 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#mysqlclient) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-peewee.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-peewee.md new file mode 100644 index 00000000..b3bd4519 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-peewee.md @@ -0,0 +1,245 @@ +--- +title: TiDB 和 peewee 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 peewee 的简单 CRUD 
应用程序示例。 +--- + + + + +# TiDB 和 peewee 的简单 CRUD 应用程序 + +[peewee](http://docs.peewee-orm.com/en/latest/) 为当前比较流行的开源 Python ORM 之一。 + +本文档将展示如何使用 TiDB 和 peewee 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 peewee **3.15.4** 版本进行说明。 + +```python +import os +import uuid +from typing import List + +from peewee import * + +from playhouse.db_url import connect + +db = connect('mysql://root:@127.0.0.1:4000/test') + + +class Player(Model): + id = CharField(max_length=36, primary_key=True) + coins = IntegerField() + goods = IntegerField() + + class Meta: + database = db + table_name = "player" + + +def random_player(amount: int) -> List[Player]: + players = [] + for _ in range(amount): + players.append(Player(id=uuid.uuid4(), coins=10000, goods=10000)) + + return players + + +def simple_example() -> None: + # create a player, who has a coin and a goods. + Player.create(id="test", coins=1, goods=1) + + # get this player, and print it. + test_player = Player.select().where(Player.id == "test").get() + print(f'id:{test_player.id}, coins:{test_player.coins}, goods:{test_player.goods}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # each player has a random UUID + player_list = random_player(1919) + Player.bulk_create(player_list, 114) + + # print the number of players + count = Player.select().count() + print(f'number of players: {count}') + + # print 3 players. 
+ three_players = Player.select().limit(3) + for player in three_players: + print(f'id:{player.id}, coins:{player.coins}, goods:{player.goods}') + + +def trade_check(sell_id: str, buy_id: str, amount: int, price: int) -> bool: + sell_goods = Player.select(Player.goods).where(Player.id == sell_id).get().goods + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + buy_coins = Player.select(Player.coins).where(Player.id == buy_id).get().coins + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + return True + + +def trade(sell_id: str, buy_id: str, amount: int, price: int) -> None: + with db.atomic() as txn: + try: + if trade_check(sell_id, buy_id, amount, price) is False: + txn.rollback() + return + + # deduct the goods of seller, and raise his/her the coins + Player.update(goods=Player.goods - amount, coins=Player.coins + price).where(Player.id == sell_id).execute() + # deduct the coins of buyer, and raise his/her the goods + Player.update(goods=Player.goods + amount, coins=Player.coins - price).where(Player.id == buy_id).execute() + + except Exception as err: + txn.rollback() + print(f'something went wrong: {err}') + else: + txn.commit() + print("trade success") + + +def trade_example() -> None: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + Player.create(id="1", coins=100, goods=0) + Player.create(id="2", coins=114514, goods=20) + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. 
+ # this trade will be successful + trade(sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + after_trade_players = Player.select().where(Player.id.in_(["1", "2"])) + for player in after_trade_players: + print(f'id:{player.id}, coins:{player.coins}, goods:{player.goods}') + + +db.connect() + +# recreate the player table +db.drop_tables([Player]) +db.create_tables([Player]) + +simple_example() +trade_example() +``` + +相较于直接使用 Driver,peewee 屏蔽了创建数据库连接时,不同数据库差异的细节。peewee 还封装了大量的操作,如会话管理、基本对象的 CRUD 等,极大地简化了代码量。 + +`Player` 类为数据库表在程序内的映射。`Player` 的每个属性都对应着 `player` 表的一个字段。peewee 使用 `Player` 类为了给 peewee 提供更多的信息,使用了形如以上示例中的 `id = CharField(max_length=36, primary_key=True)` 的类型定义,用来指示字段类型和其附加属性。`id = CharField(max_length=36, primary_key=True)` 表示 `id` 字段为 `CharField` 类型,对应数据库类型为 `VARCHAR`,长度为 `36`,且为主键。 + +关于 peewee 的更多使用方法,你可以参考 [peewee 官网](http://docs.peewee-orm.com/en/latest/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `peewee_example.py` 内 `connect` 函数的入参: + +```python +db = connect('mysql://root:@127.0.0.1:4000/test') +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `connect` 更改为: + +- peewee 将 PyMySQL 作为 Driver 时: + + ```python + db = connect('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', + ssl_verify_cert=True, ssl_ca="") + ``` + +- peewee 将 mysqlclient 作为 Driver 时: + + ```python + db = connect('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', + ssl_mode="VERIFY_IDENTITY", ssl={"ca": ""}) + ``` + +由于 peewee 会将参数透传至 Driver 中,使用 peewee 时请注意 Driver 的使用类型。 + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 peewee_example.py +``` + +## 第 4 步:预期输出 + +[peewee 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#peewee) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-pymysql.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-pymysql.md new file mode 100644 index 00000000..5657c625 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-pymysql.md @@ -0,0 +1,277 @@ +--- +title: TiDB 和 PyMySQL 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 PyMySQL 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 PyMySQL 的简单 CRUD 应用程序 + 
+[PyMySQL](https://pypi.org/project/PyMySQL/) 为当前比较流行的开源 Python Driver 之一。 + +本文档将展示如何使用 TiDB 和 PyMySQL 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 PyMySQL **1.0.2** 版本进行说明。虽然 Python 的 Driver 相较其他语言,使用也极其方便。但因其不可屏蔽底层实现,需手动管控事务的特性,如果没有大量必须使用 SQL 的场景,仍然推荐使用 ORM 进行程序编写。这可以降低程序的耦合性。 + +```python +import uuid +from typing import List + +import pymysql.cursors +from pymysql import Connection +from pymysql.cursors import DictCursor + + +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test', + cursorclass=DictCursor, + autocommit=autocommit) + + +def create_player(cursor: DictCursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: DictCursor, player_id: str) -> dict: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: DictCursor, limit: int) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((uuid.uuid4(), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: DictCursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: DictCursor) -> int: + 
cursor.execute("SELECT count(*) as count FROM player") + return cursor.fetchone()['count'] + + +def trade_check(cursor: DictCursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + seller = cursor.fetchone() + if seller['goods'] < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buyer = cursor.fetchone() + if buyer['coins'] < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: DictCursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: Connection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as connection: + with connection.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(test_player) + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. 
+ # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. + three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(player) + + +def trade_example() -> None: + with get_connection(autocommit=False) as connection: + with connection.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + connection.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(connection, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(connection, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with connection.cursor() as cur: + print(get_player(cur, "1")) + print(get_player(cur, "2")) + + +simple_example() +trade_example() +``` + +Driver 有着更低的封装程度,因此我们可以在程序内见到大量的 SQL。程序内查询到的 `Player`,与 ORM 不同,因为没有数据对象的存在,`Player` 将以 dict 进行表示。 + +关于 PyMySQL 的更多使用方法,你可以参考 [PyMySQL 官方文档](https://pymysql.readthedocs.io/en/latest/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `pymysql_example.py` 内 `get_connection` 函数: + +```python +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test', + cursorclass=DictCursor, + autocommit=autocommit) +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `get_connection` 更改为: + +```python +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='xxx.tidbcloud.com', + port=4000, + user='2aEp24QWEDLqRFs.root', + password='123456', + database='test', + cursorclass=DictCursor, + autocommit=autocommit, + ssl_ca='', + ssl_verify_cert=True, + ssl_verify_identity=True) +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 pymysql_example.py +``` + +## 第 4 步:预期输出 + +[PyMySQL 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#PyMySQL) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-sqlalchemy.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-sqlalchemy.md new file mode 100644 index 00000000..0f32bc59 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-sqlalchemy.md @@ -0,0 +1,238 @@ +--- +title: TiDB 和 SQLAlchemy 的简单 CRUD 应用程序 
+summary: 给出一个 TiDB 和 SQLAlchemy 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 SQLAlchemy 的简单 CRUD 应用程序 + +[SQLAlchemy](https://www.sqlalchemy.org/) 为当前比较流行的开源 Python ORM 之一。 + +本文档将展示如何使用 TiDB 和 SQLAlchemy 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 SQLAlchemy **1.4.44** 版本进行说明。 + +```python +import uuid +from typing import List + +from sqlalchemy import create_engine, String, Column, Integer, select, func +from sqlalchemy.orm import declarative_base, sessionmaker + +engine = create_engine('mysql://root:@127.0.0.1:4000/test') +Base = declarative_base() +Base.metadata.create_all(engine) +Session = sessionmaker(bind=engine) + + +class Player(Base): + __tablename__ = "player" + + id = Column(String(36), primary_key=True) + coins = Column(Integer) + goods = Column(Integer) + + def __repr__(self): + return f'Player(id={self.id!r}, coins={self.coins!r}, goods={self.goods!r})' + + +def random_player(amount: int) -> List[Player]: + players = [] + for _ in range(amount): + players.append(Player(id=uuid.uuid4(), coins=10000, goods=10000)) + + return players + + +def simple_example() -> None: + with Session() as session: + # create a player, who has a coin and a goods. + session.add(Player(id="test", coins=1, goods=1)) + + # get this player, and print it. + get_test_stmt = select(Player).where(Player.id == "test") + for player in session.scalars(get_test_stmt): + print(player) + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. 
+ # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + session.bulk_save_objects(player_list[idx:idx + 114]) + + # print the number of players + count = session.query(func.count(Player.id)).scalar() + print(f'number of players: {count}') + + # print 3 players. + three_players = session.query(Player).limit(3).all() + for player in three_players: + print(player) + + session.commit() + + +def trade_check(session: Session, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + # sell player goods check + sell_player = session.query(Player.goods).filter(Player.id == sell_id).with_for_update().one() + if sell_player.goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + buy_player = session.query(Player.coins).filter(Player.id == buy_id).with_for_update().one() + if buy_player.coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade(sell_id: str, buy_id: str, amount: int, price: int) -> None: + with Session() as session: + if trade_check(session, sell_id, buy_id, amount, price) is False: + return + + # deduct the goods of seller, and raise his/her the coins + session.query(Player).filter(Player.id == sell_id). \ + update({'goods': Player.goods - amount, 'coins': Player.coins + price}) + # deduct the coins of buyer, and raise his/her the goods + session.query(Player).filter(Player.id == buy_id). \ + update({'goods': Player.goods + amount, 'coins': Player.coins - price}) + + session.commit() + print("trade success") + + +def trade_example() -> None: + with Session() as session: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + session.add(Player(id="1", coins=100, goods=0)) + session.add(Player(id="2", coins=114514, goods=20)) + session.commit() + + # player 1 wants to buy 10 goods from player 2. 
+ # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(sell_id="2", buy_id="1", amount=2, price=100) + + with Session() as session: + traders = session.query(Player).filter(Player.id.in_(("1", "2"))).all() + for player in traders: + print(player) + session.commit() + + +simple_example() +trade_example() +``` + +相较于直接使用 Driver,SQLAlchemy 屏蔽了创建数据库连接时,不同数据库差异的细节。SQLAlchemy 还封装了大量的操作,如会话管理、基本对象的 CRUD 等,极大地简化了代码量。 + +`Player` 类为数据库表在程序内的映射。`Player` 的每个属性都对应着 `player` 表的一个字段。SQLAlchemy 使用 `Player` 类为了给 SQLAlchemy 提供更多的信息,使用了形如以上示例中的 `id = Column(String(36), primary_key=True)` 的类型定义,用来指示字段类型和其附加属性。`id = Column(String(36), primary_key=True)` 表示 `id` 字段为 `String` 类型,对应数据库类型为 `VARCHAR`,长度为 `36`,且为主键。 + +关于 SQLAlchemy 的更多使用方法,你可以参考 [SQLAlchemy 官网](https://www.sqlalchemy.org/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `sqlalchemy_example.py` 内 `create_engine` 函数的入参: + +```python +engine = create_engine('mysql://root:@127.0.0.1:4000/test') +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `create_engine` 更改为: + +```python +engine = create_engine('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', connect_args={ + "ssl_mode": "VERIFY_IDENTITY", + "ssl": { + "ca": "" + } +}) +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 sqlalchemy_example.py +``` + +## 第 4 步:预期输出 + +[SQLAlchemy 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#SQLAlchemy) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/TOC.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/TOC.md new file mode 100644 index 00000000..f4091c4f --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/TOC.md @@ -0,0 +1,1188 @@ + + + +- [文档中心](https://docs.pingcap.com/zh) +- 关于 TiDB + - [TiDB 简介](/overview.md) + - [TiDB 7.2 Release Notes](/releases/release-7.2.0.md) + - [功能概览](/basic-features.md) + - [与 MySQL 的兼容性](/mysql-compatibility.md) + - [使用限制](/tidb-limitations.md) + - [荣誉列表](/credits.md) + - [路线图](/tidb-roadmap.md) +- 快速上手 + - [快速上手 TiDB](/quick-start-with-tidb.md) + - [快速上手 HTAP](/quick-start-with-htap.md) + - [SQL 基本操作](/basic-sql-operations.md) + - [深入探索 HTAP](/explore-htap.md) 
+- 应用开发 + - [概览](/develop/dev-guide-overview.md) + - 快速开始 + - [使用 TiDB Serverless 构建 TiDB 集群](/develop/dev-guide-build-cluster-in-cloud.md) + - [使用 TiDB 的增删改查 SQL](/develop/dev-guide-tidb-crud-sql.md) + - 示例程序 + - Java + - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) + - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) + - [Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) + - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + - Go + - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) + - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) + - Python + - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) + - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) + - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) + - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) + - [peewee](/develop/dev-guide-sample-application-python-peewee.md) + - [Django](/develop/dev-guide-sample-application-python-django.md) + - 连接到 TiDB + - [选择驱动或 ORM 框架](/develop/dev-guide-choose-driver-or-orm.md) + - [连接到 TiDB](/develop/dev-guide-connect-to-tidb.md) + - [连接池与连接参数](/develop/dev-guide-connection-parameters.md) + - 数据库模式设计 + - [概览](/develop/dev-guide-schema-design-overview.md) + - [创建数据库](/develop/dev-guide-create-database.md) + - [创建表](/develop/dev-guide-create-table.md) + - [创建二级索引](/develop/dev-guide-create-secondary-indexes.md) + - 数据写入 + - [插入数据](/develop/dev-guide-insert-data.md) + - [更新数据](/develop/dev-guide-update-data.md) + - [删除数据](/develop/dev-guide-delete-data.md) + - [使用 TTL (Time to Live) 定期删除过期数据](/time-to-live.md) + - [预处理语句](/develop/dev-guide-prepared-statement.md) + - 数据读取 + - [单表读取](/develop/dev-guide-get-data-from-single-table.md) + - [多表连接查询](/develop/dev-guide-join-tables.md) + - [子查询](/develop/dev-guide-use-subqueries.md) + - 
[查询结果分页](/develop/dev-guide-paginate-results.md) + - [视图](/develop/dev-guide-use-views.md) + - [临时表](/develop/dev-guide-use-temporary-tables.md) + - [公共表表达式](/develop/dev-guide-use-common-table-expression.md) + - 读取副本数据 + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP 查询](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - 事务 + - [概览](/develop/dev-guide-transaction-overview.md) + - [乐观事务和悲观事务](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [事务限制](/develop/dev-guide-transaction-restraints.md) + - [事务错误处理](/develop/dev-guide-transaction-troubleshoot.md) + - 优化 SQL 性能 + - [概览](/develop/dev-guide-optimize-sql-overview.md) + - [SQL 性能调优](/develop/dev-guide-optimize-sql.md) + - [性能调优最佳实践](/develop/dev-guide-optimize-sql-best-practices.md) + - [索引的最佳实践](/develop/dev-guide-index-best-practice.md) + - 其他优化 + - [避免隐式类型转换](/develop/dev-guide-implicit-type-conversion.md) + - [唯一序列号生成方案](/develop/dev-guide-unique-serial-number-generation.md) + - 故障诊断 + - [SQL 或事务问题](/develop/dev-guide-troubleshoot-overview.md) + - [结果集不稳定](/develop/dev-guide-unstable-result-set.md) + - [超时](/develop/dev-guide-timeouts-in-tidb.md) + - 引用文档 + - [Bookshop 示例应用](/develop/dev-guide-bookshop-schema-design.md) + - 规范 + - [命名规范](/develop/dev-guide-object-naming-guidelines.md) + - [SQL 开发规范](/develop/dev-guide-sql-development-specification.md) + - 云原生开发环境 + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - 第三方工具支持 + - [TiDB 支持的第三方工具](/develop/dev-guide-third-party-support.md) + - [已知的第三方工具兼容问题](/develop/dev-guide-third-party-tools-compatibility.md) + - [TiDB 与 ProxySQL 集成](/develop/dev-guide-proxysql-integration.md) +- 部署标准集群 + - [软硬件环境需求](/hardware-and-software-requirements.md) + - [环境与系统配置检查](/check-before-deployment.md) + - 规划集群拓扑 + - [最小部署拓扑结构](/minimal-deployment-topology.md) + - [TiFlash 部署拓扑](/tiflash-deployment-topology.md) + - [TiCDC 部署拓扑](/ticdc-deployment-topology.md) + - [TiDB Binlog 
部署拓扑](/tidb-binlog-deployment-topology.md) + - [TiSpark 部署拓扑](/tispark-deployment-topology.md) + - [跨机房部署拓扑结构](/geo-distributed-deployment-topology.md) + - [混合部署拓扑结构](/hybrid-deployment-topology.md) + - 安装与启动 + - [使用 TiUP 部署](/production-deployment-using-tiup.md) + - [在 Kubernetes 上部署](/tidb-in-kubernetes.md) + - [验证集群状态](/post-installation-check.md) + - 测试集群性能 + - [用 Sysbench 测试 TiDB](/benchmark/benchmark-tidb-using-sysbench.md) + - [对 TiDB 进行 TPC-C 测试](/benchmark/benchmark-tidb-using-tpcc.md) + - [对 TiDB 进行 CH-benCHmark 测试](/benchmark/benchmark-tidb-using-ch.md) +- 数据迁移 + - [数据迁移概述](/migration-overview.md) + - [数据迁移工具](/migration-tools.md) + - [数据导入最佳实践](/tidb-lightning/data-import-best-practices.md) + - 数据迁移场景 + - [从 Aurora 迁移数据到 TiDB](/migrate-aurora-to-tidb.md) + - [从小数据量 MySQL 迁移数据到 TiDB](/migrate-small-mysql-to-tidb.md) + - [从大数据量 MySQL 迁移数据到 TiDB](/migrate-large-mysql-to-tidb.md) + - [从小数据量分库分表 MySQL 合并迁移数据到 TiDB](/migrate-small-mysql-shards-to-tidb.md) + - [从大数据量分库分表 MySQL 合并迁移数据到 TiDB](/migrate-large-mysql-shards-to-tidb.md) + - [从 CSV 文件迁移数据到 TiDB](/migrate-from-csv-files-to-tidb.md) + - [从 SQL 文件迁移数据到 TiDB](/migrate-from-sql-files-to-tidb.md) + - [从 Parquet 文件迁移数据到 TiDB](/migrate-from-parquet-files-to-tidb.md) + - [从 TiDB 集群迁移数据至另一 TiDB 集群](/migrate-from-tidb-to-tidb.md) + - [从 TiDB 集群迁移数据至兼容 MySQL 的数据库](/migrate-from-tidb-to-mysql.md) + - 复杂迁移场景 + - [上游使用 pt/gh-ost 工具的持续同步场景](/migrate-with-pt-ghost.md) + - [下游存在更多列的迁移场景](/migrate-with-more-columns-downstream.md) + - [如何根据类型或 DDL 内容过滤 binlog 事件](/filter-binlog-event.md) + - [如何通过 SQL 表达式过滤 DML binlog 事件](/filter-dml-event.md) +- 数据集成 + - [数据集成概述](/integration-overview.md) + - 数据集成场景 + - [与 Confluent Cloud 和 Snowflake 进行数据集成](/ticdc/integrate-confluent-using-ticdc.md) + - [与 Apache Kafka 和 Apache Flink 进行数据集成](/replicate-data-to-kafka.md) +- 运维操作 + - 升级 TiDB 版本 + - [使用 TiUP 升级](/upgrade-tidb-using-tiup.md) + - [使用 TiDB 
Operator](https://docs.pingcap.com/zh/tidb-in-kubernetes/stable/upgrade-a-tidb-cluster) + - [平滑升级 TiDB](/smooth-upgrade-tidb.md) + - [TiFlash v6.2 升级帮助](/tiflash-620-upgrade-guide.md) + - 扩缩容 + - [使用 TiUP(推荐)](/scale-tidb-using-tiup.md) + - [使用 TiDB Operator](https://docs.pingcap.com/zh/tidb-in-kubernetes/stable/scale-a-tidb-cluster) + - 备份与恢复 + - [备份与恢复概述](/br/backup-and-restore-overview.md) + - 架构设计 + - [架构概述](/br/backup-and-restore-design.md) + - [快照备份与恢复架构](/br/br-snapshot-architecture.md) + - [日志备份与 PITR 架构](/br/br-log-architecture.md) + - 使用 BR 进行备份与恢复 + - [使用概述](/br/br-use-overview.md) + - [快照备份与恢复](/br/br-snapshot-guide.md) + - [日志备份与 PITR](/br/br-pitr-guide.md) + - [实践示例](/br/backup-and-restore-use-cases.md) + - [备份存储](/br/backup-and-restore-storages.md) + - br cli 命令手册 + - [命令概述](/br/use-br-command-line-tool.md) + - [快照备份与恢复命令手册](/br/br-snapshot-manual.md) + - [日志备份与 PITR 命令手册](/br/br-pitr-manual.md) + - 参考指南 + - BR 特性 + - [自动调节](/br/br-auto-tune.md) + - [批量建表](/br/br-batch-create-table.md) + - [断点备份](/br/br-checkpoint-backup.md) + - [断点恢复](/br/br-checkpoint-restore.md) + - [使用 Dumpling 和 TiDB Lightning 备份与恢复](/backup-and-restore-using-dumpling-lightning.md) + - [备份与恢复 RawKV](/br/rawkv-backup-and-restore.md) + - [增量备份与恢复](/br/br-incremental-guide.md) + - 集群容灾 + - [容灾方案介绍](/dr-solution-introduction.md) + - [基于主备集群的容灾](/dr-secondary-cluster.md) + - [基于多副本的单集群容灾](/dr-multi-replica.md) + - [基于备份与恢复的容灾](/dr-backup-restore.md) + - [使用资源管控 (Resource Control) 实现资源隔离](/tidb-resource-control.md) + - [修改时区](/configure-time-zone.md) + - [日常巡检](/daily-check.md) + - [TiFlash 常用运维操作](/tiflash/maintain-tiflash.md) + - [使用 TiUP 运维集群](/maintain-tidb-using-tiup.md) + - [在线修改集群配置](/dynamic-config.md) + - [在线有损恢复](/online-unsafe-recovery.md) + - [搭建双集群主从复制](/replicate-between-primary-and-secondary-clusters.md) +- 监控与告警 + - [监控框架概述](/tidb-monitoring-framework.md) + - [监控 API](/tidb-monitoring-api.md) + - [手动部署监控](/deploy-monitoring-services.md) + - [将 Grafana 
监控数据导出成快照](/exporting-grafana-snapshots.md) + - [TiDB 集群报警规则与处理方法](/alert-rules.md) + - [TiFlash 报警规则与处理方法](/tiflash/tiflash-alert-rules.md) + - [自定义监控组件的配置](/tiup/customized-montior-in-tiup-environment.md) + - [BR 监控告警](/br/br-monitoring-and-alert.md) +- 故障诊断 + - 故障诊断问题汇总 + - [TiDB 集群问题导图](/tidb-troubleshooting-map.md) + - [TiDB 集群常见问题](/troubleshoot-tidb-cluster.md) + - [TiFlash 常见问题](/tiflash/troubleshoot-tiflash.md) + - 故障场景 + - 慢查询 + - [定位慢查询](/identify-slow-queries.md) + - [分析慢查询](/analyze-slow-queries.md) + - [TiDB OOM 故障排查](/troubleshoot-tidb-oom.md) + - [热点问题处理](/troubleshoot-hot-spot-issues.md) + - [CPU 占用过多导致读写延迟增加](/troubleshoot-cpu-issues.md) + - [写冲突与写性能下降](/troubleshoot-write-conflicts.md) + - [磁盘 I/O 过高](/troubleshoot-high-disk-io.md) + - [锁冲突与 TTL 超时](/troubleshoot-lock-conflicts.md) + - [数据索引不一致报错](/troubleshoot-data-inconsistency-errors.md) + - 故障诊断方法 + - [通过 SQL 诊断获取集群诊断信息](/information-schema/information-schema-sql-diagnostics.md) + - [通过 Statement Summary 排查 SQL 性能问题](/statement-summary-tables.md) + - [使用 Top SQL 定位系统资源消耗过多的查询](/dashboard/top-sql.md) + - [通过日志定位消耗系统资源多的查询](/identify-expensive-queries.md) + - [保存和恢复集群现场信息](/sql-plan-replayer.md) + - [获取支持](/support.md) +- 性能调优 + - 优化手册 + - [优化概述](/performance-tuning-overview.md) + - [优化方法](/performance-tuning-methods.md) + - [OLTP 负载性能优化实践](/performance-tuning-practices.md) + - [TiFlash 性能分析方法](/tiflash-performance-tuning-methods.md) + - [TiCDC 性能分析方法](/ticdc-performance-tuning-methods.md) + - [延迟的拆解分析](/latency-breakdown.md) + - 配置调优 + - [操作系统性能参数调优](/tune-operating-system.md) + - [TiDB 内存调优](/configure-memory-usage.md) + - [TiKV 线程调优](/tune-tikv-thread-performance.md) + - [TiKV 内存调优](/tune-tikv-memory-performance.md) + - [TiKV Follower Read](/follower-read.md) + - [Region 性能调优](/tune-region-performance.md) + - [TiFlash 调优](/tiflash/tune-tiflash-performance.md) + - [下推计算结果缓存](/coprocessor-cache.md) + - 垃圾回收 (GC) + - [GC 机制简介](/garbage-collection-overview.md) + - [GC 
配置](/garbage-collection-configuration.md) + - SQL 性能调优 + - [SQL 性能调优概览](/sql-tuning-overview.md) + - 理解 TiDB 执行计划 + - [TiDB 执行计划概览](/explain-overview.md) + - [使用 `EXPLAIN` 解读执行计划](/explain-walkthrough.md) + - [MPP 模式查询的执行计划](/explain-mpp.md) + - [索引查询的执行计划](/explain-indexes.md) + - [Join 查询的执行计划](/explain-joins.md) + - [子查询的执行计划](/explain-subqueries.md) + - [聚合查询的执行计划](/explain-aggregation.md) + - [视图查询的执行计划](/explain-views.md) + - [分区查询的执行计划](/explain-partitions.md) + - [开启 IndexMerge 查询的执行计划](/explain-index-merge.md) + - SQL 优化流程 + - [SQL 优化流程概览](/sql-optimization-concepts.md) + - 逻辑优化 + - [逻辑优化概览](/sql-logical-optimization.md) + - [子查询相关的优化](/subquery-optimization.md) + - [列裁剪](/column-pruning.md) + - [关联子查询去关联](/correlated-subquery-optimization.md) + - [Max/Min 消除](/max-min-eliminate.md) + - [谓词下推](/predicate-push-down.md) + - [分区裁剪](/partition-pruning.md) + - [TopN 和 Limit 下推](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - [从窗口函数中推导 TopN 或 Limit](/derive-topn-from-window.md) + - 物理优化 + - [物理优化概览](/sql-physical-optimization.md) + - [索引的选择](/choose-index.md) + - [统计信息简介](/statistics.md) + - [错误索引的解决方案](/wrong-index-solution.md) + - [Distinct 优化](/agg-distinct-optimization.md) + - [代价模型](/cost-model.md) + - [Prepare 语句执行计划缓存](/sql-prepared-plan-cache.md) + - [非 Prepare 语句执行计划缓存](/sql-non-prepared-plan-cache.md) + - 控制执行计划 + - [控制执行计划概览](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [执行计划管理](/sql-plan-management.md) + - [优化规则及表达式下推的黑名单](/blocklist-control-plan.md) + - [Optimizer Fix Controls](/optimizer-fix-controls.md) +- 教程 + - [单区域多 AZ 部署](/multi-data-centers-in-one-city-deployment.md) + - [双区域多 AZ 部署](/three-data-centers-in-two-cities-deployment.md) + - [单区域双 AZ 部署](/two-data-centers-in-one-city-deployment.md) + - 读取历史数据 + - 使用 Stale Read 功能读取历史数据(推荐) + - [Stale Read 使用场景介绍](/stale-read.md) + - [使用 `AS OF TIMESTAMP` 语法读取历史数据](/as-of-timestamp.md) + - [使用系统变量 `tidb_read_staleness` 读取历史数据](/tidb-read-staleness.md) 
+ - [使用系统变量 `tidb_external_ts` 读取历史数据](/tidb-external-ts.md) + - [使用系统变量 `tidb_snapshot` 读取历史数据](/read-historical-data.md) + - 最佳实践 + - [TiDB 最佳实践](/best-practices/tidb-best-practices.md) + - [Java 应用开发最佳实践](/best-practices/java-app-best-practices.md) + - [HAProxy 最佳实践](/best-practices/haproxy-best-practices.md) + - [高并发写入场景最佳实践](/best-practices/high-concurrency-best-practices.md) + - [Grafana 监控最佳实践](/best-practices/grafana-monitor-best-practices.md) + - [PD 调度策略最佳实践](/best-practices/pd-scheduling-best-practices.md) + - [海量 Region 集群调优](/best-practices/massive-regions-best-practices.md) + - [三节点混合部署最佳实践](/best-practices/three-nodes-hybrid-deployment.md) + - [在三数据中心下就近读取数据](/best-practices/three-dc-local-read.md) + - [使用 UUID](/best-practices/uuid.md) + - [只读存储节点最佳实践](/best-practices/readonly-nodes.md) + - [Placement Rules 使用文档](/configure-placement-rules.md) + - [Load Base Split 使用文档](/configure-load-base-split.md) + - [Store Limit 使用文档](/configure-store-limit.md) + - [DDL 执行原理及最佳实践](/ddl-introduction.md) +- TiDB 工具 + - [功能概览](/ecosystem-tool-user-guide.md) + - [使用场景](/ecosystem-tool-user-case.md) + - [工具下载](/download-ecosystem-tools.md) + - TiUP + - [文档地图](/tiup/tiup-documentation-guide.md) + - [概览](/tiup/tiup-overview.md) + - [术语及核心概念](/tiup/tiup-terminology-and-concepts.md) + - [TiUP 组件管理](/tiup/tiup-component-management.md) + - [FAQ](/tiup/tiup-faq.md) + - [故障排查](/tiup/tiup-troubleshooting-guide.md) + - TiUP 命令参考手册 + - [命令概览](/tiup/tiup-reference.md) + - TiUP 命令 + - [tiup clean](/tiup/tiup-command-clean.md) + - [tiup completion](/tiup/tiup-command-completion.md) + - [tiup env](/tiup/tiup-command-env.md) + - [tiup help](/tiup/tiup-command-help.md) + - [tiup install](/tiup/tiup-command-install.md) + - [tiup list](/tiup/tiup-command-list.md) + - tiup mirror + - [tiup mirror 概览](/tiup/tiup-command-mirror.md) + - [tiup mirror clone](/tiup/tiup-command-mirror-clone.md) + - [tiup mirror genkey](/tiup/tiup-command-mirror-genkey.md) + - [tiup mirror 
grant](/tiup/tiup-command-mirror-grant.md) + - [tiup mirror init](/tiup/tiup-command-mirror-init.md) + - [tiup mirror merge](/tiup/tiup-command-mirror-merge.md) + - [tiup mirror modify](/tiup/tiup-command-mirror-modify.md) + - [tiup mirror publish](/tiup/tiup-command-mirror-publish.md) + - [tiup mirror rotate](/tiup/tiup-command-mirror-rotate.md) + - [tiup mirror set](/tiup/tiup-command-mirror-set.md) + - [tiup mirror sign](/tiup/tiup-command-mirror-sign.md) + - [tiup status](/tiup/tiup-command-status.md) + - [tiup telemetry](/tiup/tiup-command-telemetry.md) + - [tiup uninstall](/tiup/tiup-command-uninstall.md) + - [tiup update](/tiup/tiup-command-update.md) + - TiUP Cluster 命令 + - [TiUP Cluster 命令概览](/tiup/tiup-component-cluster.md) + - [tiup cluster audit](/tiup/tiup-component-cluster-audit.md) + - [tiup cluster audit cleanup](/tiup/tiup-component-cluster-audit-cleanup.md) + - [tiup cluster check](/tiup/tiup-component-cluster-check.md) + - [tiup cluster clean](/tiup/tiup-component-cluster-clean.md) + - [tiup cluster deploy](/tiup/tiup-component-cluster-deploy.md) + - [tiup cluster destroy](/tiup/tiup-component-cluster-destroy.md) + - [tiup cluster disable](/tiup/tiup-component-cluster-disable.md) + - [tiup cluster display](/tiup/tiup-component-cluster-display.md) + - [tiup cluster edit-config](/tiup/tiup-component-cluster-edit-config.md) + - [tiup cluster enable](/tiup/tiup-component-cluster-enable.md) + - [tiup cluster help](/tiup/tiup-component-cluster-help.md) + - [tiup cluster import](/tiup/tiup-component-cluster-import.md) + - [tiup cluster list](/tiup/tiup-component-cluster-list.md) + - [tiup cluster meta backup](/tiup/tiup-component-cluster-meta-backup.md) + - [tiup cluster meta restore](/tiup/tiup-component-cluster-meta-restore.md) + - [tiup cluster patch](/tiup/tiup-component-cluster-patch.md) + - [tiup cluster prune](/tiup/tiup-component-cluster-prune.md) + - [tiup cluster reload](/tiup/tiup-component-cluster-reload.md) + - [tiup cluster 
rename](/tiup/tiup-component-cluster-rename.md) + - [tiup cluster replay](/tiup/tiup-component-cluster-replay.md) + - [tiup cluster restart](/tiup/tiup-component-cluster-restart.md) + - [tiup cluster scale-in](/tiup/tiup-component-cluster-scale-in.md) + - [tiup cluster scale-out](/tiup/tiup-component-cluster-scale-out.md) + - [tiup cluster start](/tiup/tiup-component-cluster-start.md) + - [tiup cluster stop](/tiup/tiup-component-cluster-stop.md) + - [tiup cluster template](/tiup/tiup-component-cluster-template.md) + - [tiup cluster upgrade](/tiup/tiup-component-cluster-upgrade.md) + - TiUP DM 命令 + - [TiUP DM 命令概览](/tiup/tiup-component-dm.md) + - [tiup dm audit](/tiup/tiup-component-dm-audit.md) + - [tiup dm deploy](/tiup/tiup-component-dm-deploy.md) + - [tiup dm destroy](/tiup/tiup-component-dm-destroy.md) + - [tiup dm disable](/tiup/tiup-component-dm-disable.md) + - [tiup dm display](/tiup/tiup-component-dm-display.md) + - [tiup dm edit-config](/tiup/tiup-component-dm-edit-config.md) + - [tiup dm enable](/tiup/tiup-component-dm-enable.md) + - [tiup dm help](/tiup/tiup-component-dm-help.md) + - [tiup dm import](/tiup/tiup-component-dm-import.md) + - [tiup dm list](/tiup/tiup-component-dm-list.md) + - [tiup dm patch](/tiup/tiup-component-dm-patch.md) + - [tiup dm prune](/tiup/tiup-component-dm-prune.md) + - [tiup dm reload](/tiup/tiup-component-dm-reload.md) + - [tiup dm replay](/tiup/tiup-component-dm-replay.md) + - [tiup dm restart](/tiup/tiup-component-dm-restart.md) + - [tiup dm scale-in](/tiup/tiup-component-dm-scale-in.md) + - [tiup dm scale-out](/tiup/tiup-component-dm-scale-out.md) + - [tiup dm start](/tiup/tiup-component-dm-start.md) + - [tiup dm stop](/tiup/tiup-component-dm-stop.md) + - [tiup dm template](/tiup/tiup-component-dm-template.md) + - [tiup dm upgrade](/tiup/tiup-component-dm-upgrade.md) + - [TiDB 集群拓扑文件配置](/tiup/tiup-cluster-topology-reference.md) + - [DM 集群拓扑文件配置](/tiup/tiup-dm-topology-reference.md) + - [TiUP 
镜像参考指南](/tiup/tiup-mirror-reference.md) + - TiUP 组件文档 + - [tiup-playground 运行本地测试集群](/tiup/tiup-playground.md) + - [tiup-cluster 部署运维生产集群](/tiup/tiup-cluster.md) + - [tiup-mirror 定制离线镜像](/tiup/tiup-mirror.md) + - [tiup-bench 进行 TPCC/TPCH 压力测试](/tiup/tiup-bench.md) + - [TiDB Operator](/tidb-operator-overview.md) + - TiDB Data Migration + - [关于 Data Migration](/dm/dm-overview.md) + - [架构简介](/dm/dm-arch.md) + - [快速开始](/dm/quick-start-with-dm.md) + - [最佳实践](/dm/dm-best-practices.md) + - 部署 DM 集群 + - [软硬件要求](/dm/dm-hardware-and-software-requirements.md) + - [使用 TiUP 联网部署(推荐)](/dm/deploy-a-dm-cluster-using-tiup.md) + - [使用 TiUP 离线部署](/dm/deploy-a-dm-cluster-using-tiup-offline.md) + - [使用 Binary 部署](/dm/deploy-a-dm-cluster-using-binary.md) + - [在 Kubernetes 环境中部署](https://docs.pingcap.com/zh/tidb-in-kubernetes/dev/deploy-tidb-dm) + - 入门指南 + - [创建数据源](/dm/quick-start-create-source.md) + - [数据源操作](/dm/dm-manage-source.md) + - [任务配置向导](/dm/dm-task-configuration-guide.md) + - [分库分表合并](/dm/dm-shard-merge.md) + - [表路由](/dm/dm-table-routing.md) + - [黑白名单](/dm/dm-block-allow-table-lists.md) + - [过滤 binlog 事件](/dm/dm-binlog-event-filter.md) + - [通过 SQL 表达式过滤 DML](/dm/feature-expression-filter.md) + - [Online DDL 工具支持](/dm/dm-online-ddl-tool-support.md) + - 迁移任务操作 + - [任务前置检查](/dm/dm-precheck.md) + - [创建任务](/dm/dm-create-task.md) + - [查询状态](/dm/dm-query-status.md) + - [暂停任务](/dm/dm-pause-task.md) + - [恢复任务](/dm/dm-resume-task.md) + - [停止任务](/dm/dm-stop-task.md) + - 进阶教程 + - 分库分表合并迁移 + - [概述](/dm/feature-shard-merge.md) + - [悲观模式](/dm/feature-shard-merge-pessimistic.md) + - [乐观模式](/dm/feature-shard-merge-optimistic.md) + - [手动处理 Sharding DDL Lock](/dm/manually-handling-sharding-ddl-locks.md) + - [迁移使用 GH-ost/PT-osc 的数据源](/dm/feature-online-ddl.md) + - [上下游列数量不一致的迁移](/migrate-with-more-columns-downstream.md) + - [增量数据校验](/dm/dm-continuous-data-validation.md) + - 运维管理 + - 集群版本升级 + - [使用 TiUP 运维集群(推荐)](/dm/maintain-dm-using-tiup.md) + - [1.0.x 到 2.0+ 
手动升级](/dm/manually-upgrade-dm-1.0-to-2.0.md) + - [在线应用 Hotfix 到 DM 集群](/tiup/tiup-component-dm-patch.md) + - 集群运维工具 + - [使用 WebUI 管理迁移任务](/dm/dm-webui-guide.md) + - [使用 dmctl 管理迁移任务](/dm/dmctl-introduction.md) + - 性能调优 + - [性能数据](/dm/dm-benchmark-v5.4.0.md) + - [配置调优](/dm/dm-tune-configuration.md) + - [如何进行压力测试](/dm/dm-performance-test.md) + - [性能问题及处理方法](/dm/dm-handle-performance-issues.md) + - 数据源管理 + - [变更同步的数据源地址](/dm/usage-scenario-master-slave-switch.md) + - 任务管理 + - [处理出错的 DDL 语句](/dm/handle-failed-ddl-statements.md) + - [管理迁移表的表结构](/dm/dm-manage-schema.md) + - [导出和导入集群的数据源和任务配置](/dm/dm-export-import-config.md) + - [处理告警](/dm/dm-handle-alerts.md) + - [日常巡检](/dm/dm-daily-check.md) + - 参考手册 + - 架构组件 + - [DM-worker 说明](/dm/dm-worker-intro.md) + - [安全模式](/dm/dm-safe-mode.md) + - [Relay Log](/dm/relay-log.md) + - [DDL 特殊处理说明](/dm/dm-ddl-compatible.md) + - 运行机制 + - [DML 同步机制](/dm/dm-dml-replication-logic.md) + - 命令行 + - [DM-master & DM-worker](/dm/dm-command-line-flags.md) + - 配置文件 + - [概述](/dm/dm-config-overview.md) + - [数据源配置](/dm/dm-source-configuration-file.md) + - [迁移任务配置](/dm/task-configuration-file-full.md) + - [DM-master 配置](/dm/dm-master-configuration-file.md) + - [DM-worker 配置](/dm/dm-worker-configuration-file.md) + - [Table Selector](/dm/table-selector.md) + - [OpenAPI](/dm/dm-open-api.md) + - [兼容性目录](/dm/dm-compatibility-catalog.md) + - 安全 + - [为 DM 的连接开启加密传输](/dm/dm-enable-tls.md) + - [生成自签名证书](/dm/dm-generate-self-signed-certificates.md) + - 监控告警 + - [监控指标](/dm/monitor-a-dm-cluster.md) + - [告警信息](/dm/dm-alert-rules.md) + - [错误码](/dm/dm-error-handling.md#常见故障处理方法) + - [术语表](/dm/dm-glossary.md) + - 使用示例 + - [使用 DM 迁移数据](/dm/migrate-data-using-dm.md) + - [快速创建迁移任务](/dm/quick-start-create-task.md) + - [分表合并数据迁移最佳实践](/dm/shard-merge-best-practices.md) + - 异常解决 + - [常见问题](/dm/dm-faq.md) + - [错误处理及恢复](/dm/dm-error-handling.md) + - [版本发布历史](/dm/dm-release-notes.md) + - TiDB Lightning + - [概述](/tidb-lightning/tidb-lightning-overview.md) + - 
[快速上手](/get-started-with-tidb-lightning.md) + - [部署 TiDB Lightning](/tidb-lightning/deploy-tidb-lightning.md) + - [目标数据库要求](/tidb-lightning/tidb-lightning-requirements.md) + - 数据源 + - [文件匹配规则](/tidb-lightning/tidb-lightning-data-source.md) + - [CSV](/tidb-lightning/tidb-lightning-data-source.md#csv) + - [SQL](/tidb-lightning/tidb-lightning-data-source.md#sql) + - [Parquet](/tidb-lightning/tidb-lightning-data-source.md#parquet) + - [自定义文件匹配](/tidb-lightning/tidb-lightning-data-source.md#自定义文件匹配) + - 物理导入模式 + - [概述](/tidb-lightning/tidb-lightning-physical-import-mode.md) + - [必要条件及限制](/tidb-lightning/tidb-lightning-physical-import-mode.md#必要条件及限制) + - [配置及使用](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md) + - [冲突检测](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md#冲突数据检测) + - [性能调优](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md#性能调优) + - 逻辑导入模式 + - [概述](/tidb-lightning/tidb-lightning-logical-import-mode.md) + - [必要条件及限制](/tidb-lightning/tidb-lightning-logical-import-mode.md#必要条件) + - [配置及使用](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md) + - [冲突检测](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md#冲突数据检测) + - [性能调优](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md#性能调优) + - [前置检查](/tidb-lightning/tidb-lightning-prechecks.md) + - [表库过滤](/table-filter.md) + - [断点续传](/tidb-lightning/tidb-lightning-checkpoints.md) + - [并行导入](/tidb-lightning/tidb-lightning-distributed-import.md) + - [可容忍错误](/tidb-lightning/tidb-lightning-error-resolution.md) + - [故障处理](/tidb-lightning/troubleshoot-tidb-lightning.md) + - 参考手册 + - [完整配置文件](/tidb-lightning/tidb-lightning-configuration.md) + - [命令行参数](/tidb-lightning/tidb-lightning-command-line-full.md) + - [监控告警](/tidb-lightning/monitor-tidb-lightning.md) + - [Web 界面](/tidb-lightning/tidb-lightning-web-interface.md) + - [FAQ](/tidb-lightning/tidb-lightning-faq.md) + - [术语表](/tidb-lightning/tidb-lightning-glossary.md) + - [Dumpling](/dumpling-overview.md) + - TiCDC + 
- [概述](/ticdc/ticdc-overview.md) + - [安装部署与集群运维](/ticdc/deploy-ticdc.md) + - Changefeed + - [Changefeed 概述](/ticdc/ticdc-changefeed-overview.md) + - 创建 Changefeed + - [同步数据到 MySQL 兼容的数据库](/ticdc/ticdc-sink-to-mysql.md) + - [同步数据到 Kafka](/ticdc/ticdc-sink-to-kafka.md) + - [同步数据到存储服务](/ticdc/ticdc-sink-to-cloud-storage.md) + - [管理 Changefeed](/ticdc/ticdc-manage-changefeed.md) + - [日志过滤器](/ticdc/ticdc-filter.md) + - [双向复制](/ticdc/ticdc-bidirectional-replication.md) + - [单行数据正确性校验](/ticdc/ticdc-integrity-check.md) + - 监控告警 + - [基本监控指标](/ticdc/ticdc-summary-monitor.md) + - [详细监控指标](/ticdc/monitor-ticdc.md) + - [报警规则](/ticdc/ticdc-alert-rules.md) + - 参考指南 + - [架构设计与原理](/ticdc/ticdc-architecture.md) + - [TiCDC Server 配置参数](/ticdc/ticdc-server-config.md) + - [TiCDC Changefeed 配置参数](/ticdc/ticdc-changefeed-config.md) + - 输出数据协议 + - [TiCDC Avro Protocol](/ticdc/ticdc-avro-protocol.md) + - [TiCDC Canal-JSON Protocol](/ticdc/ticdc-canal-json.md) + - [TiCDC Open Protocol](/ticdc/ticdc-open-protocol.md) + - [TiCDC CSV Protocol](/ticdc/ticdc-csv.md) + - [TiCDC Open API v2](/ticdc/ticdc-open-api-v2.md) + - [TiCDC Open API v1](/ticdc/ticdc-open-api.md) + - TiCDC 数据消费 + - [基于 Avro 的 TiCDC 行数据 Checksum 校验](/ticdc/ticdc-avro-checksum-verification.md) + - [Storage sink 消费程序编写指引](/ticdc/ticdc-storage-consumer-dev-guide.md) + - [兼容性](/ticdc/ticdc-compatibility.md) + - [故障处理](/ticdc/troubleshoot-ticdc.md) + - [常见问题解答](/ticdc/ticdc-faq.md) + - [术语表](/ticdc/ticdc-glossary.md) + - TiDB Binlog + - [概述](/tidb-binlog/tidb-binlog-overview.md) + - [快速上手](/tidb-binlog/get-started-with-tidb-binlog.md) + - [部署使用](/tidb-binlog/deploy-tidb-binlog.md) + - [运维管理](/tidb-binlog/maintain-tidb-binlog-cluster.md) + - [配置说明](/tidb-binlog/tidb-binlog-configuration-file.md) + - [Pump](/tidb-binlog/tidb-binlog-configuration-file.md#pump) + - [Drainer](/tidb-binlog/tidb-binlog-configuration-file.md#drainer) + - [版本升级](/tidb-binlog/upgrade-tidb-binlog.md) + - [监控告警](/tidb-binlog/monitor-tidb-binlog-cluster.md) + 
- [增量恢复](/tidb-binlog/tidb-binlog-reparo.md) + - [binlogctl 工具](/tidb-binlog/binlog-control.md) + - [Kafka 自定义开发](/tidb-binlog/binlog-consumer-client.md) + - [TiDB Binlog Relay Log](/tidb-binlog/tidb-binlog-relay-log.md) + - [集群间双向同步](/tidb-binlog/bidirectional-replication-between-tidb-clusters.md) + - [术语表](/tidb-binlog/tidb-binlog-glossary.md) + - 故障诊断 + - [故障诊断](/tidb-binlog/troubleshoot-tidb-binlog.md) + - [常见错误修复](/tidb-binlog/handle-tidb-binlog-errors.md) + - [FAQ](/tidb-binlog/tidb-binlog-faq.md) + - PingCAP Clinic 诊断服务 + - [概述](/clinic/clinic-introduction.md) + - [快速上手](/clinic/quick-start-with-clinic.md) + - [使用 PingCAP Clinic 诊断集群](/clinic/clinic-user-guide-for-tiup.md) + - [使用 PingCAP Clinic 生成诊断报告](/clinic/clinic-report.md) + - [采集 SQL 查询计划信息](/clinic/clinic-collect-sql-query-plan.md) + - [数据采集说明](/clinic/clinic-data-instruction-for-tiup.md) + - TiSpark + - [TiSpark 用户指南](/tispark-overview.md) + - sync-diff-inspector + - [概述](/sync-diff-inspector/sync-diff-inspector-overview.md) + - [不同库名或表名的数据校验](/sync-diff-inspector/route-diff.md) + - [分库分表场景下的数据校验](/sync-diff-inspector/shard-diff.md) + - [TiDB 主从集群的数据校验](/sync-diff-inspector/upstream-downstream-diff.md) + - [基于 DM 同步场景下的数据校验](/sync-diff-inspector/dm-diff.md) + - TiUniManager + - [概述](/tiunimanager/tiunimanager-overview.md) + - [安装和运维](/tiunimanager/tiunimanager-install-and-maintain.md) + - [快速操作](/tiunimanager/tiunimanager-quick-start.md) + - 操作指南 + - [登录与初始化](/tiunimanager/tiunimanager-login-and-initialize.md) + - [管理集群资源](/tiunimanager/tiunimanager-manage-host-resources.md) + - [管理集群](/tiunimanager/tiunimanager-manage-clusters.md) + - [导入与导出数据](/tiunimanager/tiunimanager-import-and-export-data.md) + - [管理任务](/tiunimanager/tiunimanager-manage-tasks.md) + - [管理系统](/tiunimanager/tiunimanager-manage-system.md) + - [FAQ](/tiunimanager/tiunimanager-faq.md) + - 发布版本历史 + - [发布版本汇总](/tiunimanager/tiunimanager-release-notes.md) + - [v1.0.2](/tiunimanager/tiunimanager-release-1.0.2.md) + - 
[v1.0.1](/tiunimanager/tiunimanager-release-1.0.1.md) + - [v1.0.0](/tiunimanager/tiunimanager-release-1.0.0.md) +- 参考指南 + - 架构 + - [概述](/tidb-architecture.md) + - [存储](/tidb-storage.md) + - [计算](/tidb-computing.md) + - [调度](/tidb-scheduling.md) + - 存储引擎 TiKV + - [TiKV 简介](/tikv-overview.md) + - [RocksDB 简介](/storage-engine/rocksdb-overview.md) + - [Titan 简介](/storage-engine/titan-overview.md) + - [Titan 配置说明](/storage-engine/titan-configuration.md) + - [Partitioned Raft KV](/partitioned-raft-kv.md) + - 存储引擎 TiFlash + - [TiFlash 简介](/tiflash/tiflash-overview.md) + - [构建 TiFlash 副本](/tiflash/create-tiflash-replicas.md) + - [使用 TiDB 读取 TiFlash](/tiflash/use-tidb-to-read-tiflash.md) + - [使用 TiSpark 读取 TiFlash](/tiflash/use-tispark-to-read-tiflash.md) + - [使用 MPP 模式](/tiflash/use-tiflash-mpp-mode.md) + - [TiFlash 存算分离架构与 S3 支持](/tiflash/tiflash-disaggregated-and-s3.md) + - [使用 FastScan 功能](/tiflash/use-fastscan.md) + - [TiFlash 支持的计算下推](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash 查询结果物化](/tiflash/tiflash-results-materialization.md) + - [TiFlash 延迟物化](/tiflash/tiflash-late-materialization.md) + - [TiFlash 数据落盘](/tiflash/tiflash-spill-disk.md) + - [TiFlash 数据校验](/tiflash/tiflash-data-validation.md) + - [TiFlash 兼容性说明](/tiflash/tiflash-compatibility.md) + - [TiFlash Pipeline Model 执行模型](/tiflash/tiflash-pipeline-model.md) + - [系统变量](/system-variables.md) + - 配置文件参数 + - [tidb-server](/tidb-configuration-file.md) + - [tikv-server](/tikv-configuration-file.md) + - [tiflash-server](/tiflash/tiflash-configuration.md) + - [pd-server](/pd-configuration-file.md) + - CLI + - [tikv-ctl](/tikv-control.md) + - [pd-ctl](/pd-control.md) + - [tidb-ctl](/tidb-control.md) + - [pd-recover](/pd-recover.md) + - [binlog-ctl](/tidb-binlog/binlog-control.md) + - 命令行参数 + - [tidb-server](/command-line-flags-for-tidb-configuration.md) + - [tikv-server](/command-line-flags-for-tikv-configuration.md) + - [tiflash-server](/tiflash/tiflash-command-line-flags.md) + - 
[pd-server](/command-line-flags-for-pd-configuration.md) + - 监控指标 + - [Overview 面板](/grafana-overview-dashboard.md) + - [Performance Overview 面板](/grafana-performance-overview-dashboard.md) + - [TiDB 面板](/grafana-tidb-dashboard.md) + - [PD 面板](/grafana-pd-dashboard.md) + - [TiKV 面板](/grafana-tikv-dashboard.md) + - [TiFlash 监控指标](/tiflash/monitor-tiflash.md) + - [TiCDC 监控指标](/ticdc/monitor-ticdc.md) + - [Resource Control 监控指标](/grafana-resource-control-dashboard.md) + - 安全加固 + - [为 TiDB 客户端服务端间通信开启加密传输](/enable-tls-between-clients-and-servers.md) + - [为 TiDB 组件间通信开启加密传输](/enable-tls-between-components.md) + - [生成自签名证书](/generate-self-signed-certificates.md) + - [静态加密](/encryption-at-rest.md) + - [为 TiDB 落盘文件开启加密](/enable-disk-spill-encrypt.md) + - [日志脱敏](/log-redaction.md) + - 权限 + - [与 MySQL 安全特性差异](/security-compatibility-with-mysql.md) + - [权限管理](/privilege-management.md) + - [TiDB 用户账户管理](/user-account-management.md) + - [TiDB 密码管理](/password-management.md) + - [基于角色的访问控制](/role-based-access-control.md) + - [TiDB 证书鉴权使用指南](/certificate-authentication.md) + - SQL + - SQL 语言结构和语法 + - 属性 + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [字面值](/literal-values.md) + - [Schema 对象名](/schema-object-names.md) + - [关键字](/keywords.md) + - [用户自定义变量](/user-defined-variables.md) + - [表达式语法](/expression-syntax.md) + - [注释语法](/comment-syntax.md) + - SQL 语句 + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN CLEANUP`](/sql-statements/sql-statement-admin-cleanup.md) + - [`ADMIN PAUSE 
DDL`](/sql-statements/sql-statement-admin-pause-ddl.md) + - [`ADMIN RECOVER INDEX`](/sql-statements/sql-statement-admin-recover.md) + - [`ADMIN RESUME DDL`](/sql-statements/sql-statement-admin-resume-ddl.md) + - [`ADMIN SHOW DDL [JOBS|QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ADMIN SHOW TELEMETRY`](/sql-statements/sql-statement-admin-show-telemetry.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`ALTER INSTANCE`](/sql-statements/sql-statement-alter-instance.md) + - [`ALTER PLACEMENT POLICY`](/sql-statements/sql-statement-alter-placement-policy.md) + - [`ALTER RESOURCE GROUP`](/sql-statements/sql-statement-alter-resource-group.md) + - [`ALTER TABLE`](/sql-statements/sql-statement-alter-table.md) + - [`ALTER TABLE COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BACKUP`](/sql-statements/sql-statement-backup.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CALIBRATE RESOURCE`](/sql-statements/sql-statement-calibrate-resource.md) + - [`CANCEL IMPORT JOB`](/sql-statements/sql-statement-cancel-import-job.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`CHANGE DRAINER`](/sql-statements/sql-statement-change-drainer.md) + - [`CHANGE PUMP`](/sql-statements/sql-statement-change-pump.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-create-placement-policy.md) + - [`CREATE RESOURCE 
GROUP`](/sql-statements/sql-statement-create-resource-group.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`DROP PLACEMENT POLICY`](/sql-statements/sql-statement-drop-placement-policy.md) + - [`DROP RESOURCE GROUP`](/sql-statements/sql-statement-drop-resource-group.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER TO TIMESTAMP`](/sql-statements/sql-statement-flashback-to-timestamp.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK 
TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT <privileges>`](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT <role>`](/sql-statements/sql-statement-grant-role.md) + - [`IMPORT INTO`](/sql-statements/sql-statement-import-into.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOAD DATA`](/sql-statements/sql-statement-load-data.md) + - [`LOAD STATS`](/sql-statements/sql-statement-load-stats.md) + - [`LOCK STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`LOCK TABLES` 和 `UNLOCK TABLES`](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`RENAME USER`](/sql-statements/sql-statement-rename-user.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`RESTORE`](/sql-statements/sql-statement-restore.md) + - [`REVOKE <privileges>`](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE <role>`](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET RESOURCE GROUP`](/sql-statements/sql-statement-set-resource-group.md) + - 
[`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] <variable>`](/sql-statements/sql-statement-set-variable.md) + - [`SHOW [BACKUPS|RESTORES]`](/sql-statements/sql-statement-show-backups.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW [FULL] COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CONFIG`](/sql-statements/sql-statement-show-config.md) + - [`SHOW CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-show-create-placement-policy.md) + - [`SHOW CREATE RESOURCE GROUP`](/sql-statements/sql-statement-show-create-resource-group.md) + - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW DRAINER STATUS`](/sql-statements/sql-statement-show-drainer-status.md) + - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW [FULL] FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW IMPORT JOB`](/sql-statements/sql-statement-show-import-job.md) + - [`SHOW INDEX [FROM|IN]`](/sql-statements/sql-statement-show-index.md) + - [`SHOW INDEXES 
[FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW KEYS [FROM|IN]`](/sql-statements/sql-statement-show-keys.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PLACEMENT`](/sql-statements/sql-statement-show-placement.md) + - [`SHOW PLACEMENT FOR`](/sql-statements/sql-statement-show-placement-for.md) + - [`SHOW PLACEMENT LABELS`](/sql-statements/sql-statement-show-placement-labels.md) + - [`SHOW PLUGINS`](/sql-statements/sql-statement-show-plugins.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW [FULL] PROCESSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW PUMP STATUS`](/sql-statements/sql-statement-show-pump-status.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-histograms.md) + - [`SHOW STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW [FULL] TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`SHUTDOWN`](/sql-statements/sql-statement-shutdown.md) + - [`SPLIT REGION`](/sql-statements/sql-statement-split-region.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - 
[`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - 数据类型 + - [数据类型概述](/data-type-overview.md) + - [数据类型默认值](/data-type-default-values.md) + - [数值类型](/data-type-numeric.md) + - [日期和时间类型](/data-type-date-and-time.md) + - [字符串类型](/data-type-string.md) + - [JSON 类型](/data-type-json.md) + - 函数与操作符 + - [函数与操作符概述](/functions-and-operators/functions-and-operators-overview.md) + - [表达式求值的类型转换](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [操作符](/functions-and-operators/operators.md) + - [控制流程函数](/functions-and-operators/control-flow-functions.md) + - [字符串函数](/functions-and-operators/string-functions.md) + - [数值函数与操作符](/functions-and-operators/numeric-functions-and-operators.md) + - [日期和时间函数](/functions-and-operators/date-and-time-functions.md) + - [位函数和操作符](/functions-and-operators/bit-functions-and-operators.md) + - [Cast 函数和操作符](/functions-and-operators/cast-functions-and-operators.md) + - [加密和压缩函数](/functions-and-operators/encryption-and-compression-functions.md) + - [锁函数](/functions-and-operators/locking-functions.md) + - [信息函数](/functions-and-operators/information-functions.md) + - [JSON 函数](/functions-and-operators/json-functions.md) + - [GROUP BY 聚合函数](/functions-and-operators/aggregate-group-by-functions.md) + - [窗口函数](/functions-and-operators/window-functions.md) + - [其它函数](/functions-and-operators/miscellaneous-functions.md) + - [精度数学](/functions-and-operators/precision-math.md) + - [集合运算](/functions-and-operators/set-operators.md) + - [下推到 TiKV 的表达式列表](/functions-and-operators/expressions-pushed-down.md) + - [TiDB 特有的函数](/functions-and-operators/tidb-functions.md) + - [Oracle 与 TiDB 
函数和语法差异对照](/oracle-functions-to-tidb.md) + - [聚簇索引](/clustered-indexes.md) + - [约束](/constraints.md) + - [生成列](/generated-columns.md) + - [SQL 模式](/sql-mode.md) + - [表属性](/table-attributes.md) + - 事务 + - [事务概览](/transaction-overview.md) + - [隔离级别](/transaction-isolation-levels.md) + - [乐观事务](/optimistic-transaction.md) + - [悲观事务](/pessimistic-transaction.md) + - [非事务 DML 语句](/non-transactional-dml.md) + - [视图](/views.md) + - [分区表](/partitioned-table.md) + - [临时表](/temporary-tables.md) + - [缓存表](/cached-tables.md) + - [外键约束](/foreign-key.md) + - 字符集和排序 + - [概述](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - [Placement Rules in SQL](/placement-rules-in-sql.md) + - 系统表 + - [`mysql`](/mysql-schema.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`CLUSTER_CONFIG`](/information-schema/information-schema-cluster-config.md) + - [`CLUSTER_HARDWARE`](/information-schema/information-schema-cluster-hardware.md) + - [`CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) + - [`CLUSTER_LOAD`](/information-schema/information-schema-cluster-load.md) + - [`CLUSTER_LOG`](/information-schema/information-schema-cluster-log.md) + - [`CLUSTER_SYSTEMINFO`](/information-schema/information-schema-cluster-systeminfo.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + 
- [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`INSPECTION_RESULT`](/information-schema/information-schema-inspection-result.md) + - [`INSPECTION_RULES`](/information-schema/information-schema-inspection-rules.md) + - [`INSPECTION_SUMMARY`](/information-schema/information-schema-inspection-summary.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`METRICS_SUMMARY`](/information-schema/information-schema-metrics-summary.md) + - [`METRICS_TABLES`](/information-schema/information-schema-metrics-tables.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PLACEMENT_POLICIES`](/information-schema/information-schema-placement-policies.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`RESOURCE_GROUPS`](/information-schema/information-schema-resource-groups.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`SLOW_QUERY`](/information-schema/information-schema-slow-query.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - 
[`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_HOT_REGIONS`](/information-schema/information-schema-tidb-hot-regions.md) + - [`TIDB_HOT_REGIONS_HISTORY`](/information-schema/information-schema-tidb-hot-regions-history.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_SERVERS_INFO`](/information-schema/information-schema-tidb-servers-info.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIFLASH_SEGMENTS`](/information-schema/information-schema-tiflash-segments.md) + - [`TIFLASH_TABLES`](/information-schema/information-schema-tiflash-tables.md) + - [`TIKV_REGION_PEERS`](/information-schema/information-schema-tikv-region-peers.md) + - [`TIKV_REGION_STATUS`](/information-schema/information-schema-tikv-region-status.md) + - [`TIKV_STORE_STATUS`](/information-schema/information-schema-tikv-store-status.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - [`METRICS_SCHEMA`](/metrics-schema.md) + - [元数据锁](/metadata-lock.md) + - UI + - TiDB Dashboard + - [简介](/dashboard/dashboard-intro.md) + - 运维 + - [部署](/dashboard/dashboard-ops-deploy.md) + - [反向代理](/dashboard/dashboard-ops-reverse-proxy.md) + - [用户管理](/dashboard/dashboard-user.md) + - [安全](/dashboard/dashboard-ops-security.md) + - [访问](/dashboard/dashboard-access.md) + - [概况页面](/dashboard/dashboard-overview.md) + - [集群信息页面](/dashboard/dashboard-cluster-info.md) + - [Top SQL 页面](/dashboard/top-sql.md) + - [流量可视化页面](/dashboard/dashboard-key-visualizer.md) + - [监控关系图](/dashboard/dashboard-metrics-relation.md) + - SQL 语句分析 + - 
[列表页面](/dashboard/dashboard-statement-list.md) + - [执行详情页面](/dashboard/dashboard-statement-details.md) + - [慢查询页面](/dashboard/dashboard-slow-query.md) + - 集群诊断页面 + - [访问](/dashboard/dashboard-diagnostics-access.md) + - [查看报告](/dashboard/dashboard-diagnostics-report.md) + - [使用示例](/dashboard/dashboard-diagnostics-usage.md) + - [监控指标页面](/dashboard/dashboard-monitoring.md) + - [日志搜索页面](/dashboard/dashboard-log-search.md) + - [资源管控页面](/dashboard/dashboard-resource-manager.md) + - 实例性能分析 + - [手动分析页面](/dashboard/dashboard-profiling.md) + - [持续分析页面](/dashboard/continuous-profiling.md) + - 会话管理与配置 + - [分享会话](/dashboard/dashboard-session-share.md) + - [配置 SSO 登录](/dashboard/dashboard-session-sso.md) + - [常见问题](/dashboard/dashboard-faq.md) + - [遥测](/telemetry.md) + - [错误码](/error-codes.md) + - [通过拓扑 label 进行副本调度](/schedule-replicas-by-topology-labels.md) + - 内部组件介绍 + - [TiDB 后端任务分布式并行执行框架](/tidb-distributed-execution-framework.md) +- 常见问题解答 (FAQ) + - [FAQ 汇总](/faq/faq-overview.md) + - [产品 FAQ](/faq/tidb-faq.md) + - [SQL FAQ](/faq/sql-faq.md) + - [安装部署 FAQ](/faq/deploy-and-maintain-faq.md) + - [迁移 FAQ](/faq/migration-tidb-faq.md) + - [升级 FAQ](/faq/upgrade-faq.md) + - [监控 FAQ](/faq/monitor-faq.md) + - [集群管理 FAQ](/faq/manage-cluster-faq.md) + - [高可用 FAQ](/faq/high-availability-faq.md) + - [高可靠 FAQ](/faq/high-reliability-faq.md) + - [备份恢复 FAQ](/faq/backup-and-restore-faq.md) +- 版本发布历史 + - [发布版本汇总](/releases/release-notes.md) + - [版本发布时间线](/releases/release-timeline.md) + - [TiDB 版本规则](/releases/versioning.md) + - [TiDB 离线包](/binary-package.md) + - v7.2 + - [7.2.0-DMR](/releases/release-7.2.0.md) + - v7.1 + - [7.1.0](/releases/release-7.1.0.md) + - v7.0 + - [7.0.0-DMR](/releases/release-7.0.0.md) + - v6.6 + - [6.6.0-DMR](/releases/release-6.6.0.md) + - v6.5 + - [6.5.3](/releases/release-6.5.3.md) + - [6.5.2](/releases/release-6.5.2.md) + - [6.5.1](/releases/release-6.5.1.md) + - [6.5.0](/releases/release-6.5.0.md) + - v6.4 + - [6.4.0-DMR](/releases/release-6.4.0.md) + - v6.3 + - 
[6.3.0-DMR](/releases/release-6.3.0.md) + - v6.2 + - [6.2.0-DMR](/releases/release-6.2.0.md) + - v6.1 + - [6.1.7](/releases/release-6.1.7.md) + - [6.1.6](/releases/release-6.1.6.md) + - [6.1.5](/releases/release-6.1.5.md) + - [6.1.4](/releases/release-6.1.4.md) + - [6.1.3](/releases/release-6.1.3.md) + - [6.1.2](/releases/release-6.1.2.md) + - [6.1.1](/releases/release-6.1.1.md) + - [6.1.0](/releases/release-6.1.0.md) + - v6.0 + - [6.0.0-DMR](/releases/release-6.0.0-dmr.md) + - v5.4 + - [5.4.3](/releases/release-5.4.3.md) + - [5.4.2](/releases/release-5.4.2.md) + - [5.4.1](/releases/release-5.4.1.md) + - [5.4.0](/releases/release-5.4.0.md) + - v5.3 + - [5.3.4](/releases/release-5.3.4.md) + - [5.3.3](/releases/release-5.3.3.md) + - [5.3.2](/releases/release-5.3.2.md) + - [5.3.1](/releases/release-5.3.1.md) + - [5.3.0](/releases/release-5.3.0.md) + - v5.2 + - [5.2.4](/releases/release-5.2.4.md) + - [5.2.3](/releases/release-5.2.3.md) + - [5.2.2](/releases/release-5.2.2.md) + - [5.2.1](/releases/release-5.2.1.md) + - [5.2.0](/releases/release-5.2.0.md) + - v5.1 + - [5.1.5](/releases/release-5.1.5.md) + - [5.1.4](/releases/release-5.1.4.md) + - [5.1.3](/releases/release-5.1.3.md) + - [5.1.2](/releases/release-5.1.2.md) + - [5.1.1](/releases/release-5.1.1.md) + - [5.1.0](/releases/release-5.1.0.md) + - v5.0 + - [5.0.6](/releases/release-5.0.6.md) + - [5.0.5](/releases/release-5.0.5.md) + - [5.0.4](/releases/release-5.0.4.md) + - [5.0.3](/releases/release-5.0.3.md) + - [5.0.2](/releases/release-5.0.2.md) + - [5.0.1](/releases/release-5.0.1.md) + - [5.0 GA](/releases/release-5.0.0.md) + - [5.0.0-rc](/releases/release-5.0.0-rc.md) + - v4.0 + - [4.0.16](/releases/release-4.0.16.md) + - [4.0.15](/releases/release-4.0.15.md) + - [4.0.14](/releases/release-4.0.14.md) + - [4.0.13](/releases/release-4.0.13.md) + - [4.0.12](/releases/release-4.0.12.md) + - [4.0.11](/releases/release-4.0.11.md) + - [4.0.10](/releases/release-4.0.10.md) + - [4.0.9](/releases/release-4.0.9.md) + - 
[4.0.8](/releases/release-4.0.8.md) + - [4.0.7](/releases/release-4.0.7.md) + - [4.0.6](/releases/release-4.0.6.md) + - [4.0.5](/releases/release-4.0.5.md) + - [4.0.4](/releases/release-4.0.4.md) + - [4.0.3](/releases/release-4.0.3.md) + - [4.0.2](/releases/release-4.0.2.md) + - [4.0.1](/releases/release-4.0.1.md) + - [4.0 GA](/releases/release-4.0-ga.md) + - [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) + - [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) + - [4.0.0-rc](/releases/release-4.0.0-rc.md) + - [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) + - [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) + - [4.0.0-beta](/releases/release-4.0.0-beta.md) + - v3.1 + - [3.1.2](/releases/release-3.1.2.md) + - [3.1.1](/releases/release-3.1.1.md) + - [3.1.0 GA](/releases/release-3.1.0-ga.md) + - [3.1.0-rc](/releases/release-3.1.0-rc.md) + - [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) + - [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) + - [3.1.0-beta](/releases/release-3.1.0-beta.md) + - v3.0 + - [3.0.20](/releases/release-3.0.20.md) + - [3.0.19](/releases/release-3.0.19.md) + - [3.0.18](/releases/release-3.0.18.md) + - [3.0.17](/releases/release-3.0.17.md) + - [3.0.16](/releases/release-3.0.16.md) + - [3.0.15](/releases/release-3.0.15.md) + - [3.0.14](/releases/release-3.0.14.md) + - [3.0.13](/releases/release-3.0.13.md) + - [3.0.12](/releases/release-3.0.12.md) + - [3.0.11](/releases/release-3.0.11.md) + - [3.0.10](/releases/release-3.0.10.md) + - [3.0.9](/releases/release-3.0.9.md) + - [3.0.8](/releases/release-3.0.8.md) + - [3.0.7](/releases/release-3.0.7.md) + - [3.0.6](/releases/release-3.0.6.md) + - [3.0.5](/releases/release-3.0.5.md) + - [3.0.4](/releases/release-3.0.4.md) + - [3.0.3](/releases/release-3.0.3.md) + - [3.0.2](/releases/release-3.0.2.md) + - [3.0.1](/releases/release-3.0.1.md) + - [3.0 GA](/releases/release-3.0-ga.md) + - [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) + - [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) + - 
[3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) + - [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) + - [3.0.0-beta](/releases/release-3.0-beta.md) + - v2.1 + - [2.1.19](/releases/release-2.1.19.md) + - [2.1.18](/releases/release-2.1.18.md) + - [2.1.17](/releases/release-2.1.17.md) + - [2.1.16](/releases/release-2.1.16.md) + - [2.1.15](/releases/release-2.1.15.md) + - [2.1.14](/releases/release-2.1.14.md) + - [2.1.13](/releases/release-2.1.13.md) + - [2.1.12](/releases/release-2.1.12.md) + - [2.1.11](/releases/release-2.1.11.md) + - [2.1.10](/releases/release-2.1.10.md) + - [2.1.9](/releases/release-2.1.9.md) + - [2.1.8](/releases/release-2.1.8.md) + - [2.1.7](/releases/release-2.1.7.md) + - [2.1.6](/releases/release-2.1.6.md) + - [2.1.5](/releases/release-2.1.5.md) + - [2.1.4](/releases/release-2.1.4.md) + - [2.1.3](/releases/release-2.1.3.md) + - [2.1.2](/releases/release-2.1.2.md) + - [2.1.1](/releases/release-2.1.1.md) + - [2.1 GA](/releases/release-2.1-ga.md) + - [2.1 RC5](/releases/release-2.1-rc.5.md) + - [2.1 RC4](/releases/release-2.1-rc.4.md) + - [2.1 RC3](/releases/release-2.1-rc.3.md) + - [2.1 RC2](/releases/release-2.1-rc.2.md) + - [2.1 RC1](/releases/release-2.1-rc.1.md) + - [2.1 Beta](/releases/release-2.1-beta.md) + - v2.0 + - [2.0.11](/releases/release-2.0.11.md) + - [2.0.10](/releases/release-2.0.10.md) + - [2.0.9](/releases/release-2.0.9.md) + - [2.0.8](/releases/release-2.0.8.md) + - [2.0.7](/releases/release-2.0.7.md) + - [2.0.6](/releases/release-2.0.6.md) + - [2.0.5](/releases/release-2.0.5.md) + - [2.0.4](/releases/release-2.0.4.md) + - [2.0.3](/releases/release-2.0.3.md) + - [2.0.2](/releases/release-2.0.2.md) + - [2.0.1](/releases/release-2.0.1.md) + - [2.0](/releases/release-2.0-ga.md) + - [2.0 RC5](/releases/release-2.0-rc.5.md) + - [2.0 RC4](/releases/release-2.0-rc.4.md) + - [2.0 RC3](/releases/release-2.0-rc.3.md) + - [2.0 RC1](/releases/release-2.0-rc.1.md) + - [1.1 Beta](/releases/release-1.1-beta.md) + - [1.1 
Alpha](/releases/release-1.1-alpha.md) + - v1.0 + - [1.0](/releases/release-1.0-ga.md) + - [Pre-GA](/releases/release-pre-ga.md) + - [RC4](/releases/release-rc.4.md) + - [RC3](/releases/release-rc.3.md) + - [RC2](/releases/release-rc.2.md) + - [RC1](/releases/release-rc.1.md) +- [术语表](/glossary.md) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-choose-driver-or-orm.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-choose-driver-or-orm.md new file mode 100644 index 00000000..369cec61 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-choose-driver-or-orm.md @@ -0,0 +1,303 @@ +--- +title: 选择驱动或 ORM 框架 +summary: 选择驱动或 ORM 框架连接 TiDB。 +aliases: ['/zh/tidb/dev/choose-driver-or-orm'] +--- + +# 选择驱动或 ORM 框架 + +> **注意:** +> +> TiDB 支持等级说明: +> +> - **Full**:表明 TiDB 已经兼容该工具的绝大多数功能,并且在该工具的新版本中对其保持兼容。PingCAP 将定期地对 [TiDB 支持的第三方工具](/develop/dev-guide-third-party-support.md)中的新版本进行兼容性测试。 +> - **Compatible**:表明由于该工具已适配 MySQL,而 TiDB 高度兼容 MySQL 协议,因此 TiDB 可以兼容该工具的大部分功能。但 PingCAP 并未对该工具作出完整的兼容性验证,有可能出现一些意外的行为。 +> +> 关于更多 TiDB 支持的第三方工具,你可以查看 [TiDB 支持的第三方工具](/develop/dev-guide-third-party-support.md)。 + +TiDB 兼容 MySQL 的协议,但存在部分与 MySQL 不兼容或有差异的特性,具体信息可查看[与 MySQL 兼容性对比](/mysql-compatibility.md)。 + +## Java + +本节介绍 Java 语言的 Driver 及 ORM 的使用方式。 + +### Java Drivers + + +
+ +支持等级:**Full** + +按照 [MySQL 文档](https://dev.mysql.com/doc/connector-j/8.0/en/)中的说明下载并配置 Java JDBC 驱动程序即可使用。对于 TiDB v6.3.0 及以上版本,建议使用 MySQL Connector/J 8.0.33 及以上版本。 + +> **建议:** +> +> 在 8.0.32 之前的 MySQL Connector/J 8.0 版本中存在一个 [bug](https://bugs.mysql.com/bug.php?id=106252),当与 TiDB v6.3.0 之前的版本一起使用时,可能会导致线程卡死。为了避免此问题,建议使用 MySQL Connector/J 8.0.32 或更高版本,或者使用 TiDB JDBC(见 *TiDB-JDBC* 标签)。 + +有关一个完整的实例应用程序,可参阅 [TiDB 和 JDBC 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-jdbc.md)。 + +
+
+ +支持等级:**Full** + +[TiDB-JDBC](https://github.com/pingcap/mysql-connector-j) 是基于 MySQL 8.0.29 的定制版本。TiDB-JDBC 基于 MySQL 官方 8.0.29 版本编译,修复了原 JDBC 在 prepare 模式下多参数、多字段 EOF 的错误,并新增 TiCDC snapshot 自动维护和 SM3 认证插件等功能。 + +基于 SM3 的认证仅在 TiDB 版本的 MySQL Connector/J 中支持。 + +如果你使用的是 **Maven**,请将以下内容添加到你的 ``: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + +``` + +如果你需要使用 SM3 认证,请将以下内容添加到你的 ``: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + + + org.bouncycastle + bcprov-jdk15on + 1.67 + + + org.bouncycastle + bcpkix-jdk15on + 1.67 + +``` + +如果你使用的是 Gradle,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation group: 'io.github.lastincisor', name: 'mysql-connector-java', version: '8.0.29-tidb-1.0.0' +implementation group: 'org.bouncycastle', name: 'bcprov-jdk15on', version: '1.67' +implementation group: 'org.bouncycastle', name: 'bcpkix-jdk15on', version: '1.67' +``` + +
+
+ +### Java ORM 框架 + +> **注意:** +> +> - Hibernate 当前[不支持嵌套事务](https://stackoverflow.com/questions/37927208/nested-transaction-in-spring-app-with-jpa-postgres)。 +> - TiDB 从 v6.2.0 版本开始支持 [Savepoint](/sql-statements/sql-statement-savepoint.md)。如需在 `@Transactional` 中使用 `Propagation.NESTED` 事务传播选项,即 `@Transactional(propagation = Propagation.NESTED)`,请确认你的 TiDB 版本为 v6.2.0 或以上。 + + +
+ +支持等级:**Full** + +你可以使用 [Gradle](https://gradle.org/install) 或 [Maven](https://maven.apache.org/install.html) 获取你的应用程序的所有依赖项,且会帮你下载依赖项的间接依赖,而无需你手动管理复杂的依赖关系。注意,只有 Hibernate `6.0.0.Beta2` 及以上版本才支持 TiDB 方言。 + +如果你使用的是 **Maven**,请将以下内容添加到你的 ``: + +```xml + + org.hibernate.orm + hibernate-core + 6.0.0.CR2 + + + + mysql + mysql-connector-java + 5.1.49 + +``` + +如果你使用的是 `Gradle`,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation 'org.hibernate:hibernate-core:6.0.0.CR2' +implementation 'mysql:mysql-connector-java:5.1.49' +``` + +- 有关原生 Java 使用 Hibernate 进行 TiDB 应用程序构建的例子,可参阅 [TiDB 和 Hibernate 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-hibernate.md)。 +- 有关 Spring 使用 Spring Data JPA、Hibernate 进行 TiDB 应用程序构建的例子,可参阅[使用 Spring Boot 构建 TiDB 应用程序](/develop/dev-guide-sample-application-java-spring-boot.md)。 + +额外的,你需要在 [Hibernate 配置文件](https://www.tutorialspoint.com/hibernate/hibernate_configuration.htm)中指定 TiDB 方言 `org.hibernate.dialect.TiDBDialect`,此方言在 Hibernate `6.0.0.Beta2` 以上才可支持。若你无法升级 Hibernate 版本,那么请你直接使用 MySQL 5.7 的方言 `org.hibernate.dialect.MySQL57Dialect`。但这可能造成不可预料的使用结果,及部分 TiDB 特有特性的缺失,如:[序列](/sql-statements/sql-statement-create-sequence.md)等。 + +
+ +
+ +支持等级:**Full** + +你可以使用 [Gradle](https://gradle.org/install) 或 [Maven](https://maven.apache.org/install.html) 获取应用程序的所有依赖项包括间接依赖,无需手动管理复杂的依赖关系。 + +如果你使用的是 Maven,请将以下内容添加到你的 ``: + +```xml + + org.mybatis + mybatis + 3.5.9 + + + + mysql + mysql-connector-java + 5.1.49 + +``` + +如果你使用的是 Gradle,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation 'org.mybatis:mybatis:3.5.9' +implementation 'mysql:mysql-connector-java:5.1.49' +``` + +使用 MyBatis 进行 TiDB 应用程序构建的例子,可参阅 [TiDB 和 MyBatis 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-mybatis.md)。 + +
+ +
+ +### Java 客户端负载均衡 + +**tidb-loadbalance** + +支持等级:**Full** + +[tidb-loadbalance](https://github.com/pingcap/tidb-loadbalance) 是应用端的负载均衡组件。通过 tidb-loadbalance,你可以实现自动维护 TiDB server 的节点信息,根据节点信息使用 tidb-loadbalance 策略在客户端分发 JDBC 连接。客户端应用与 TiDB server 之间使用 JDBC 直连,性能高于使用负载均衡组件。 + +目前 tidb-loadbalance 已实现轮询、随机、权重等负载均衡策略。 + +> **注意:** +> +> tidb-loadbalance 需配合 mysql-connector-j 一起使用。 + +如果你使用的是 **Maven**,请将以下内容添加到你的 ``: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + + + io.github.lastincisor + tidb-loadbalance + 0.0.5 + +``` + +如果你使用的是 Gradle,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation group: 'io.github.lastincisor', name: 'mysql-connector-java', version: '8.0.29-tidb-1.0.0' +implementation group: 'io.github.lastincisor', name: 'tidb-loadbalance', version: '0.0.5' +``` + +## Golang + +本节介绍 Golang 语言的 Driver 及 ORM 的使用方式。 + +### Golang Drivers + +**go-sql-driver/mysql** + +支持等级:**Full** + +按照 [go-sql-driver/mysql 文档](https://github.com/go-sql-driver/mysql)中的说明获取并配置 Golang 驱动程序即可使用。 + +有关一个完整的实例应用程序,可参阅使用 [TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-sql-driver.md)。 + +### Golang ORM 框架 + +**GORM** + +支持等级:**Full** + +GORM 是一个流行的 Golang 的 ORM 框架,你可以使用 `go get` 获取你的应用程序的所有依赖项。 + +```shell +go get -u gorm.io/gorm +go get -u gorm.io/driver/mysql +``` + +使用 GORM 进行 TiDB 应用程序构建的例子,可参阅 [TiDB 和 GORM 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-gorm.md)。 + +## Python + +本节介绍 Python 语言的 Driver 及 ORM 的使用方式。 + +### Python Drivers + + +
+ +支持等级:**Compatible** + +按照 [PyMySQL 文档](https://pypi.org/project/PyMySQL/)中的说明下载并配置驱动程序即可使用。建议使用 **1.0.2** 及以上版本。 + +使用 PyMySQL 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 PyMySQL 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-pymysql.md)。 + +
+
+ +支持等级:**Compatible** + +按照 [mysqlclient 文档](https://pypi.org/project/mysqlclient/)中的说明下载并配置驱动程序即可使用。建议使用 **2.1.1** 及以上版本。 + +使用 mysqlclient 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 mysqlclient 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysqlclient.md)。 + +
+
+ +支持等级:**Compatible** + +按照 [MySQL Connector/Python 文档](https://dev.mysql.com/doc/connector-python/en/connector-python-installation-binary.html)中的说明下载并配置驱动程序即可使用。建议使用 **8.0.31** 及以上版本。 + +使用 MySQL Connector/Python 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysql-connector.md)。 + +
+
+ +### Python ORM 框架 + + + +
+ +支持等级:**Full** + +[Django](https://docs.djangoproject.com/) 是一个流行的 Python 的开发框架,你可以使用 `pip install Django==3.2.16 django-tidb>=3.0.0` 获取你的应用程序的所有依赖项。建议使用 Django **3.2.16** 及以上版本。 + +使用 Django 构建 TiDB 应用程序的例子,可参阅[使用 Django 构建 TiDB 应用程序](/develop/dev-guide-sample-application-python-django.md)。 + +
+ +
+ +支持等级:**Full** + +[SQLAlchemy](https://www.sqlalchemy.org/) 是一个流行的 Python 的 ORM 框架,你可以使用 `pip install SQLAlchemy==1.4.44` 获取你的应用程序的所有依赖项。建议使用 **1.4.44** 及以上版本。 + +使用 SQLAlchemy 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 SQLAlchemy 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-sqlalchemy.md)。 + +
+ +
+ +支持等级:**Compatible** + +[peewee](http://docs.peewee-orm.com/en/latest/) 是一个流行的 Python 的 ORM 框架,你可以使用 `pip install peewee==3.15.4` 获取你的应用程序的所有依赖项。建议使用 **3.15.4** 及以上版本。 + +使用 peewee 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 peewee 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-peewee.md)。 + +
+ +
diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-insert-data.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-insert-data.md new file mode 100644 index 00000000..aef7b5a7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-insert-data.md @@ -0,0 +1,293 @@ +--- +title: 插入数据 +summary: 插入数据、批量导入数据的方法、最佳实践及例子。 +aliases: ['/zh/tidb/dev/insert-data'] +--- + + + +# 插入数据 + +此页面将展示使用 SQL 语言,配合各种编程语言将数据插入到 TiDB 中。 + +## 在开始之前 + +在阅读本页面之前,你需要准备以下事项: + +- [使用 TiDB Serverless 构建 TiDB 集群](/develop/dev-guide-build-cluster-in-cloud.md)。 +- 阅读[数据库模式概览](/develop/dev-guide-schema-design-overview.md),并[创建数据库](/develop/dev-guide-create-database.md)、[创建表](/develop/dev-guide-create-table.md)、[创建二级索引](/develop/dev-guide-create-secondary-indexes.md)。 + +## 插入行 + +假设你需要插入多行数据,那么会有两种插入的办法,假设需要插入 3 个玩家数据: + +- 一个**多行插入语句**: + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1), (2, 230, 2), (3, 300, 5); + ``` + +- 多个**单行插入语句**: + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1); + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (2, 230, 2); + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (3, 300, 5); + ``` + +一般来说使用一个`多行插入语句`,会比多个`单行插入语句`快。 + + +
+ +在 SQL 中插入多行数据的示例: + +```sql +CREATE TABLE `player` (`id` INT, `coins` INT, `goods` INT); +INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1), (2, 230, 2); +``` + +有关如何使用此 SQL,可查阅[连接到 TiDB 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-2-步连接到集群)文档部分,按文档步骤使用客户端连接到 TiDB 集群后,输入 SQL 语句即可。 + +
+ +
 + +在 Java 中插入多行数据的示例: + +```java +// ds is an entity of com.mysql.cj.jdbc.MysqlDataSource +try (Connection connection = ds.getConnection()) { + connection.setAutoCommit(false); + + PreparedStatement pstmt = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)"); + + // first player + pstmt.setInt(1, 1); + pstmt.setInt(2, 1000); + pstmt.setInt(3, 1); + pstmt.addBatch(); + + // second player + pstmt.setInt(1, 2); + pstmt.setInt(2, 230); + pstmt.setInt(3, 2); + pstmt.addBatch(); + + pstmt.executeBatch(); + connection.commit(); +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +另外,由于 MySQL JDBC Driver 默认设置问题,你需更改部分参数,以获得更好的批量插入性能: + +| 参数 | 作用 | 推荐场景 | 推荐配置 | +| :------------------------: | :-----------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------: | +| `useServerPrepStmts` | 是否使用服务端开启预处理语句支持 | 在需要多次使用预处理语句时 | `true` | +| `cachePrepStmts` | 客户端是否缓存预处理语句 | `useServerPrepStmts=true` 时 | `true` | +| `prepStmtCacheSqlLimit` | 预处理语句最大大小(默认 256 字符) | 预处理语句大于 256 字符时 | 按实际预处理语句大小配置 | +| `prepStmtCacheSize` | 预处理语句最大缓存数量 (默认 25 条) | 预处理语句数量大于 25 条时 | 按实际预处理语句数量配置 | +| `rewriteBatchedStatements` | 是否重写 Batch 语句 | 需要批量操作时 | `true` | +| `allowMultiQueries` | 开启批量操作 | 因为一个[客户端 Bug](https://bugs.mysql.com/bug.php?id=96623) 在 `rewriteBatchedStatements = true` 和 `useServerPrepStmts = true` 时,需设置此项 | `true` | + +MySQL JDBC Driver 还提供了一个集成配置项:`useConfigs`。当它配置为 `maxPerformance` 时,相当于配置了一组配置,以 `mysql:mysql-connector-java:8.0.28` 为例,`useConfigs=maxPerformance` 包含: + +```properties +cachePrepStmts=true +cacheCallableStmts=true +cacheServerConfiguration=true +useLocalSessionState=true +elideSetAutoCommits=true +alwaysSendSetIsolation=false +enableQueryTimeouts=false +connectionAttributes=none +useInformationSchema=true +``` + +你可以自行查看 
`mysql-connector-java-{version}.jar!/com/mysql/cj/configurations/maxPerformance.properties` 来获得对应版本 MySQL JDBC Driver 的 `useConfigs=maxPerformance` 包含配置。 + +在此处给出一个较为通用的 JDBC 连接字符串配置,以 Host: `127.0.0.1`,Port: `4000`,用户名: `root`,密码: 空,默认数据库: `test`为例: + +``` +jdbc:mysql://127.0.0.1:4000/test?user=root&useConfigs=maxPerformance&useServerPrepStmts=true&prepStmtCacheSqlLimit=2048&prepStmtCacheSize=256&rewriteBatchedStatements=true&allowMultiQueries=true +``` + +有关 Java 的完整示例,可参阅: + +- [TiDB 和 JDBC 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-jdbc.md) +- [TiDB 和 Hibernate 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-hibernate.md) +- [使用 Spring Boot 构建 TiDB 应用程序](/develop/dev-guide-sample-application-java-spring-boot.md) + +
+ +
+ +在 Golang 中插入多行数据的示例: + +```go +package main + +import ( + "database/sql" + "strings" + + _ "github.com/go-sql-driver/mysql" +) + +type Player struct { + ID string + Coins int + Goods int +} + +func bulkInsertPlayers(db *sql.DB, players []Player, batchSize int) error { + tx, err := db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(buildBulkInsertSQL(batchSize)) + if err != nil { + return err + } + + defer stmt.Close() + + for len(players) > batchSize { + if _, err := stmt.Exec(playerToArgs(players[:batchSize])...); err != nil { + tx.Rollback() + return err + } + + players = players[batchSize:] + } + + if len(players) != 0 { + if _, err := tx.Exec(buildBulkInsertSQL(len(players)), playerToArgs(players)...); err != nil { + tx.Rollback() + return err + } + } + + if err := tx.Commit(); err != nil { + tx.Rollback() + return err + } + + return nil +} + +func playerToArgs(players []Player) []interface{} { + var args []interface{} + for _, player := range players { + args = append(args, player.ID, player.Coins, player.Goods) + } + return args +} + +func buildBulkInsertSQL(amount int) string { + return "INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)" + strings.Repeat(",(?,?,?)", amount-1) +} +``` + +有关 Golang 的完整示例,可参阅: + +- [TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-sql-driver.md) +- [TiDB 和 GORM 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-gorm.md) + +
+ +
 + +在 Python 中插入多行数据的示例: + +```python +import MySQLdb + +connection = MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="bookshop", + autocommit=True +) + +with connection.cursor() as cur: + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + cur.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player_list[idx:idx + 114]) +``` + +有关 Python 的完整示例,可参阅: + +- [TiDB 和 PyMySQL 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-pymysql.md) +- [TiDB 和 mysqlclient 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysqlclient.md) +- [TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysql-connector.md) +- [TiDB 和 SQLAlchemy 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-sqlalchemy.md) +- [TiDB 和 peewee 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-peewee.md) + +
+ +
+ +## 批量插入 + +如果你需要快速地将大量数据导入 TiDB 集群,最好的方式并不是使用 `INSERT` 语句,这并不是最高效的方法,而且需要你自行处理异常等问题。推荐使用 PingCAP 提供的一系列工具进行数据迁移: + +- 数据导出工具:[Dumpling](/dumpling-overview.md)。可以导出 MySQL 或 TiDB 的数据到本地或 Amazon S3 中。 +- 数据导入工具:[TiDB Lightning](/tidb-lightning/tidb-lightning-overview.md)。可以导入 `Dumpling` 导出的数据、CSV 文件,或者 [Amazon Aurora 生成的 Apache Parquet 文件](/migrate-aurora-to-tidb.md)。同时支持在本地盘或 Amazon S3 云盘读取数据。 +- 数据同步工具:[TiDB Data Migration](/dm/dm-overview.md)。可同步 MySQL、MariaDB、Amazon Aurora 数据库到 TiDB 中。且支持分库分表数据库的迁移。 +- 数据备份恢复工具:[Backup & Restore (BR)](/br/backup-and-restore-overview.md)。相对于 `Dumpling`,BR 更适合**_大数据量_**的场景。 + +## 避免热点 + +在设计表时需要考虑是否存在大量插入行为,若有,需在表设计期间对热点进行规避。请查看[创建表 - 选择主键](/develop/dev-guide-create-table.md#选择主键)部分,并遵从[选择主键时应遵守的规则](/develop/dev-guide-create-table.md#选择主键时应遵守的规则)。 + +更多有关热点问题的处理办法,请参考 [TiDB 热点问题处理](/troubleshoot-hot-spot-issues.md)文档。 + +## 主键为 `AUTO_RANDOM` 表插入数据 + +在插入的表主键为 `AUTO_RANDOM` 时,这时默认情况下,不能指定主键。例如 [bookshop](/develop/dev-guide-bookshop-schema-design.md) 数据库中,可以看到 [users 表](/develop/dev-guide-bookshop-schema-design.md#users-表) 的 `id` 字段含有 `AUTO_RANDOM` 属性。 + +此时,不可使用类似以下 SQL 进行插入: + +```sql +INSERT INTO `bookshop`.`users` (`id`, `balance`, `nickname`) VALUES (1, 0.00, 'nicky'); +``` + +将会产生错误: + +``` +ERROR 8216 (HY000): Invalid auto random: Explicit insertion on auto_random column is disabled. Try to set @@allow_auto_random_explicit_insert = true. 
+``` + +这是旨在提示你,不建议在插入时手动指定 `AUTO_RANDOM` 的列。这时,你有两种解决办法处理此错误: + +- (推荐) 插入语句中去除此列,使用 TiDB 帮你初始化的 `AUTO_RANDOM` 值。这样符合 `AUTO_RANDOM` 的语义。 + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `bookshop`.`users` (`balance`, `nickname`) VALUES (0.00, 'nicky'); + ``` + +- 如果你确认一定需要指定此列,那么可以使用 [SET 语句](/sql-statements/sql-statement-set-variable.md)通过更改用户变量的方式,允许在插入时,指定 `AUTO_RANDOM` 的列。 + + {{< copyable "sql" >}} + + ```sql + SET @@allow_auto_random_explicit_insert = true; + INSERT INTO `bookshop`.`users` (`id`, `balance`, `nickname`) VALUES (1, 0.00, 'nicky'); + ``` + +## 使用 HTAP + +在 TiDB 中,使用 HTAP 能力无需你在插入数据时进行额外操作。不会有任何额外的插入逻辑,由 TiDB 自动进行数据的一致性保证。你只需要在创建表后,[开启列存副本同步](/develop/dev-guide-create-table.md#使用-htap-能力),就可以直接使用列存副本来加速你的查询。 diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-overview.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-overview.md new file mode 100644 index 00000000..ae76dfec --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-overview.md @@ -0,0 +1,49 @@ +--- +title: 开发者手册概览 +summary: 整体叙述了开发者手册,罗列了开发者手册的大致脉络。 +aliases: ['/zh/tidb/dev/developer-guide-overview'] +--- + +# 开发者手册概览 + +本文是为应用程序开发者所编写的,如果你对 TiDB 的内部原理感兴趣,或希望参与到 TiDB 的开发中来,那么可前往阅读 [TiDB Kernel Development Guide](https://pingcap.github.io/tidb-dev-guide/) 来获取更多 TiDB 的相关信息。 + +本手册将展示如何使用 TiDB 来快速构建一个应用,并且阐述使用 TiDB 期间可能出现的场景以及可能会遇到的问题。因此,在阅读此页面之前,建议你先行阅读 [TiDB 数据库快速上手指南](/quick-start-with-tidb.md)。 + +此外,你还可以通过视频的形式学习免费的 [TiDB SQL 开发在线课程](https://pingcap.com/zh/courses-catalog/back-end-developer/?utm_source=docs-cn-dev-guide)。 + +## TiDB 基础 + +在你开始使用 TiDB 之前,你需要了解一些关于 TiDB 数据库的一些重要工作机制: + +- 阅读 [TiDB 事务概览](/transaction-overview.md)来了解 TiDB 的事务运作方式或查看[为应用开发程序员准备的事务说明](/develop/dev-guide-transaction-overview.md)查看应用开发程序员需要了解的事务部分。 +- 学习免费在线课程 [TiDB 架构与特点](https://learn.pingcap.com/learner/course/600003/?utm_source=docs-cn-dev-guide),了解构建 TiDB 分布式数据库集群的核心组件及其概念。 +- 了解[应用程序与 
TiDB 交互的方式](#应用程序与-tidb-交互的方式)。 + +## TiDB 事务机制 + +TiDB 支持分布式事务,而且提供[乐观事务](/optimistic-transaction.md)与[悲观事务](/pessimistic-transaction.md)两种事务模式。TiDB 当前版本中默认采用 **悲观事务** 模式,这让你在 TiDB 事务时可以像使用传统的单体数据库 (如: MySQL) 事务一样。 + +你可以使用 [BEGIN](/sql-statements/sql-statement-begin.md) 开启一个事务,或者使用 `BEGIN PESSIMISTIC` 显式的指定开启一个**悲观事务**,使用 `BEGIN OPTIMISTIC` 显式的指定开启一个**乐观事务**。随后,使用 [COMMIT](/sql-statements/sql-statement-commit.md) 提交事务,或使用 [ROLLBACK](/sql-statements/sql-statement-rollback.md) 回滚事务。 + +TiDB 会为你保证 `BEGIN` 开始到 `COMMIT` 或 `ROLLBACK` 结束间的所有语句的原子性,即在这期间的所有语句全部成功,或者全部失败。用以保证你在应用开发时所需的数据一致性。 + +若你不清楚**乐观事务**是什么,请暂时不要使用它。因为使用**乐观事务**的前提是需要应用程序可以正确的处理 `COMMIT` 语句所返回的[所有错误](/error-codes.md)。如果不确定应用程序如何处理,请直接使用**悲观事务**。 + +## 应用程序与 TiDB 交互的方式 + +TiDB 高度兼容 MySQL 协议,TiDB 支持[大多数 MySQL 的语法及特性](/mysql-compatibility.md),因此大部分的 MySQL 的连接库都与 TiDB 兼容。如果你的应用程序框架或语言无 PingCAP 的官方适配,那么建议你使用 MySQL 的客户端库。同时,也有越来越多的三方数据库主动支持 TiDB 的差异特性。 + +因为 TiDB 兼容 MySQL 协议,且兼容 MySQL 语法,因此大多数支持 MySQL 的 ORM 也兼容 TiDB。 + +## 扩展阅读 + +- [快速开始](/develop/dev-guide-build-cluster-in-cloud.md) +- [选择驱动或 ORM 框架](/develop/dev-guide-choose-driver-or-orm.md) +- [连接到 TiDB](/develop/dev-guide-connect-to-tidb.md) +- [数据库模式设计](/develop/dev-guide-schema-design-overview.md) +- [数据写入](/develop/dev-guide-insert-data.md) +- [数据读取](/develop/dev-guide-get-data-from-single-table.md) +- [事务](/develop/dev-guide-transaction-overview.md) +- [优化 SQL 性能](/develop/dev-guide-optimize-sql-overview.md) +- [示例程序](/develop/dev-guide-sample-application-java-spring-boot.md) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-playground-gitpod.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-playground-gitpod.md new file mode 100644 index 00000000..531143c2 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-playground-gitpod.md @@ -0,0 +1,169 @@ +--- +title: Gitpod +--- + + + +# Gitpod + +使用 
[Gitpod](https://www.gitpod.io/),只需单击一个按钮或链接即可在浏览器中获得完整的开发环境,并且可以立即编写代码。 + +Gitpod 是一个开源 Kubernetes 应用程序(GitHub 仓库地址 ),适用于可直接编写代码的开发环境,可为云中的每个任务提供全新的自动化开发环境,非常迅速。此外,Gitpod 能够将你的开发环境描述为代码,并直接从你的浏览器或桌面 IDE 启动即时、远程和基于云的开发环境。 + +## 快速开始 + +1. Fork 出 TiDB 应用开发的示例代码仓库 [pingcap-inc/tidb-example-java](https://github.com/pingcap-inc/tidb-example-java)。 + +2. 通过浏览器的地址栏,在示例代码仓库的 URL 前加上 `https://gitpod.io/#` 来启动你的 gitpod 工作区。 + + - 例如,`https://gitpod.io/#https://github.com/pingcap-inc/tidb-example-java`。 + + - 支持在 URL 中配置环境变量。例如,`https://gitpod.io/#targetFile=spring-jpa-hibernate_Makefile,targetMode=spring-jpa-hibernate/https://github.com/pingcap-inc/tidb-example-java`。 + +3. 使用列出的提供商之一登录并启动工作区,例如,`Github`。 + +## 使用默认的 Gitpod 配置和环境 + +完成[快速开始](#快速开始) 的步骤之后,Gitpod 会需要一段时间来设置你的工作区。 + +以 [Spring Boot Web](/develop/dev-guide-sample-application-java-spring-boot.md) 应用程序为例,通过 URL `https://gitpod.io/#targetFile=spring-jpa-hibernate_Makefile,targetMode=spring-jpa-hibernate/https://github.com/pingcap-inc/tidb-example-java` 可以创建一个新工作区。 + +完成后,你将看到如下所示的页面。 + +![playground gitpod workspace init](/media/develop/playground-gitpod-workspace-init.png) + +页面中的这个场景使用了 [TiUP](https://docs.pingcap.com/zh/tidb/stable/tiup-overview) 来搭建一个 TiDB Playground。你可以在终端的左侧查看进度。 + +一旦 TiDB Playground 准备就绪,另一个 `Spring JPA Hibernate` 任务将运行。 你可以在终端的右侧查看进度。 + +完成所有任务后,你可以看到如下所示的页面,并在左侧导航栏的 `REMOTE EXPLORER` 中找到你的端口 `8080` URL(Gitpod 支持基于 URL 的端口转发)。 + +![playground gitpod workspace ready](/media/develop/playground-gitpod-workspace-ready.png) + +你可以按照[该指南](/develop/dev-guide-sample-application-java-spring-boot.md#第-6-步http-请求)测试 API。注意请将 URL `http://localhost:8080` 替换为你在 `REMOTE EXPLORER` 中找到的那个。 + +## 使用自定义的 Gitpod 配置和 Docker 镜像 + +### 自定义 Gitpod 配置 + +在项目的根目录中,参考[示例 .gitpod.yml](https://github.com/pingcap-inc/tidb-example-java/blob/main/.gitpod.yml),创建一个 `.gitpod.yml` 文件用于配置 Gitpod 工作空间。 + +```yml +# This configuration file was automatically generated by Gitpod. 
+# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) +# and commit this file to your remote git repository to share the goodness with others. + +# image: +# file: .gitpod.Dockerfile + +tasks: + - name: Open Target File + command: | + if [ -n "$targetFile" ]; then code ${targetFile//[_]//}; fi + - name: TiUP init playground + command: | + $HOME/.tiup/bin/tiup playground + - name: Test Case + openMode: split-right + init: echo "*** Waiting for TiUP Playground Ready! ***" + command: | + gp await-port 3930 + if [ "$targetMode" == "plain-java-jdbc" ] + then + cd plain-java-jdbc + code src/main/resources/dbinit.sql + code src/main/java/com/pingcap/JDBCExample.java + make mysql + elif [ "$targetMode" == "plain-java-hibernate" ] + then + cd plain-java-hibernate + make + elif [ "$targetMode" == "spring-jpa-hibernate" ] + then + cd spring-jpa-hibernate + make + fi +ports: + - port: 8080 + visibility: public + - port: 4000 + visibility: public + - port: 2379-36663 + onOpen: ignore +``` + +### 自定义 Gitpod Docker 镜像 + +默认情况下,Gitpod 使用名为 Workspace-Full 的标准 Docker 镜像作为工作空间的基础。 基于此默认镜像启动的工作区预装了 Docker、Go、Java、Node.js、C/C++、Python、Ruby、Rust、PHP 以及 Homebrew、Tailscale、Nginx 等工具。 + +你可以提供公共 Docker 镜像或 Dockerfile。 并为你的项目安装所需的任何依赖项。 + +这是一个 Dockerfile 示例:[示例 .gitpod.Dockerfile](https://github.com/pingcap-inc/tidb-example-java/blob/main/.gitpod.Dockerfile) + +```dockerfile +FROM gitpod/workspace-java-17 + +RUN sudo apt install mysql-client -y +RUN curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh +``` + +然后需要更新`.gitpod.yml`: + +```yml +# This configuration file was automatically generated by Gitpod. +# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) +# and commit this file to your remote git repository to share the goodness with others. 
+ +image: + # 在这里导入你的 Dockerfile + file: .gitpod.Dockerfile + +tasks: + - name: Open Target File + command: | + if [ -n "$targetFile" ]; then code ${targetFile//[_]//}; fi + - name: TiUP init playground + command: | + $HOME/.tiup/bin/tiup playground + - name: Test Case + openMode: split-right + init: echo "*** Waiting for TiUP Playground Ready! ***" + command: | + gp await-port 3930 + if [ "$targetMode" == "plain-java-jdbc" ] + then + cd plain-java-jdbc + code src/main/resources/dbinit.sql + code src/main/java/com/pingcap/JDBCExample.java + make mysql + elif [ "$targetMode" == "plain-java-hibernate" ] + then + cd plain-java-hibernate + make + elif [ "$targetMode" == "spring-jpa-hibernate" ] + then + cd spring-jpa-hibernate + make + fi +ports: + - port: 8080 + visibility: public + - port: 4000 + visibility: public + - port: 2379-36663 + onOpen: ignore +``` + +### 应用更改 + +完成对 `.gitpod.yml` 文件配置后,请保证最新的代码已在你对应的 GitHub 代码仓库中可用。 + +访问 `https://gitpod.io/#` 以建立新的 Gitpod 工作区,新工作区会应用最新的代码。 + +访问 `https://gitpod.io/workspaces` 以获取所有建立的工作区。 + +## 总结 + +Gitpod 提供了完整的、自动化的、预配置的云原生开发环境。无需本地配置,你可以直接在浏览器中开发、运行、测试代码。 + +![playground gitpod summary](/media/develop/playground-gitpod-summary.png) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-prepared-statement.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-prepared-statement.md new file mode 100644 index 00000000..dada8dfd --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-prepared-statement.md @@ -0,0 +1,233 @@ +--- +title: 预处理语句 +summary: 介绍 TiDB 的预处理语句功能。 +aliases: ['/zh/tidb/dev/prepared-statement'] +--- + +# 预处理语句 + +[预处理语句](/sql-statements/sql-statement-prepare.md)是一种将多个仅有参数不同的 SQL 语句进行模板化的语句,它让 SQL 语句与参数进行了分离。可以用它提升 SQL 语句的: + +- 安全性:因为参数和语句已经分离,所以避免了 [SQL 注入攻击](https://en.wikipedia.org/wiki/SQL_injection)的风险。 +- 性能:因为语句在 TiDB 端被预先解析,后续执行只需要传递参数,节省了完整 SQL 解析、拼接 SQL 语句字符串以及网络传输的代价。 + +在大部分的应用程序中,SQL 
语句是可以被枚举的,可以使用有限个 SQL 语句来完成整个应用程序的数据查询,所以使用预处理语句是最佳实践之一。 + +## SQL 语法 + +本节将介绍创建、使用及删除预处理语句的 SQL 语法。 + +### 创建预处理语句 + +```sql +PREPARE {prepared_statement_name} FROM '{prepared_statement_sql}'; +``` + +| 参数 | 描述 | +| :-------------------------: | :------------------------------------: | +| `{prepared_statement_name}` | 预处理语句名称 | +| `{prepared_statement_sql}` | 预处理语句 SQL,以英文半角问号做占位符 | + +你可查看 [PREPARE 语句](/sql-statements/sql-statement-prepare.md) 获得更多信息。 + +### 使用预处理语句 + +预处理语句仅可使用用户变量作为参数,因此,需先使用 [SET 语句](/sql-statements/sql-statement-set-variable.md) 设置变量后,供 [EXECUTE 语句](/sql-statements/sql-statement-execute.md) 调用预处理语句。 + +```sql +SET @{parameter_name} = {parameter_value}; +EXECUTE {prepared_statement_name} USING @{parameter_name}; +``` + +| 参数 | 描述 | +| :-------------------------: | :-------------------------------------------------------------------: | +| `{parameter_name}` | 用户参数名 | +| `{parameter_value}` | 用户参数值 | +| `{prepared_statement_name}` | 预处理语句名称,需和[创建预处理语句](#创建预处理语句)中定义的名称一致 | + +你可查看 [EXECUTE 语句](/sql-statements/sql-statement-execute.md) 获得更多信息。 + +### 删除预处理语句 + +```sql +DEALLOCATE PREPARE {prepared_statement_name}; +``` + +| 参数 | 描述 | +| :-------------------------: | :-------------------------------------------------------------------: | +| `{prepared_statement_name}` | 预处理语句名称,需和[创建预处理语句](#创建预处理语句)中定义的名称一致 | + +你可查看 [DEALLOCATE 语句](/sql-statements/sql-statement-deallocate.md) 获得更多信息。 + +## 例子 + +本节以使用预处理语句,完成查询数据和插入数据两个场景的示例。 + +### 查询示例 + +例如,需要查询 [Bookshop 应用](/develop/dev-guide-bookshop-schema-design.md#books-表) 中,`id` 为 1 的书籍信息。 + + + +
+ +使用 SQL 查询示例: + +```sql +PREPARE `books_query` FROM 'SELECT * FROM `books` WHERE `id` = ?'; +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.01 sec) +``` + +```sql +SET @id = 1; +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.04 sec) +``` + +```sql +EXECUTE `books_query` USING @id; +``` + +运行结果为: + +``` ++---------+---------------------------------+--------+---------------------+-------+--------+ +| id | title | type | published_at | stock | price | ++---------+---------------------------------+--------+---------------------+-------+--------+ +| 1 | The Adventures of Pierce Wehner | Comics | 1904-06-06 20:46:25 | 586 | 411.66 | ++---------+---------------------------------+--------+---------------------+-------+--------+ +1 row in set (0.05 sec) +``` + +
+ +
+ +使用 Java 查询示例: + +```java +// ds is an entity of com.mysql.cj.jdbc.MysqlDataSource +try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM `books` WHERE `id` = ?"); + preparedStatement.setLong(1, 1); + + ResultSet res = preparedStatement.executeQuery(); + if(!res.next()) { + System.out.println("No books in the table with id 1"); + } else { + // got book's info, which id is 1 + System.out.println(res.getLong("id")); + System.out.println(res.getString("title")); + System.out.println(res.getString("type")); + } +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +
+ +
+ +### 插入示例 + +还是使用 [books 表](/develop/dev-guide-bookshop-schema-design.md#books-表) 为例,需要插入一个 `title` 为 `TiDB Developer Guide`, `type` 为 `Science & Technology`, `stock` 为 `100`, `price` 为 `0.0`, `published_at` 为 `插入的当前时间` 的书籍信息。需要注意的是,`books` 表的主键包含 `AUTO_RANDOM` 属性,无需指定它。如果你对插入数据还不了解,可以在[插入数据](/develop/dev-guide-insert-data.md)一节了解更多数据插入的相关信息。 + + + +
+ +使用 SQL 插入数据示例如下: + +```sql +PREPARE `books_insert` FROM 'INSERT INTO `books` (`title`, `type`, `stock`, `price`, `published_at`) VALUES (?, ?, ?, ?, ?);'; +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.03 sec) +``` + +```sql +SET @title = 'TiDB Developer Guide'; +SET @type = 'Science & Technology'; +SET @stock = 100; +SET @price = 0.0; +SET @published_at = NOW(); +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.04 sec) +``` + +```sql +EXECUTE `books_insert` USING @title, @type, @stock, @price, @published_at; +``` + +运行结果为: + +``` +Query OK, 1 row affected (0.03 sec) +``` + +
+ +
 + +使用 Java 插入数据示例如下: + +```java +try (Connection connection = ds.getConnection()) { + String sql = "INSERT INTO `books` (`title`, `type`, `stock`, `price`, `published_at`) VALUES (?, ?, ?, ?, ?);"; + PreparedStatement preparedStatement = connection.prepareStatement(sql); + + preparedStatement.setString(1, "TiDB Developer Guide"); + preparedStatement.setString(2, "Science & Technology"); + preparedStatement.setInt(3, 100); + preparedStatement.setBigDecimal(4, new BigDecimal("0.0")); + preparedStatement.setTimestamp(5, new Timestamp(Calendar.getInstance().getTimeInMillis())); + + preparedStatement.executeUpdate(); +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +可以看到,JDBC 帮你管控了预处理语句的生命周期,而无需你在应用程序里手动使用预处理语句的创建、使用、删除等。但值得注意的是,因为 TiDB 兼容 MySQL 协议,在客户端使用 MySQL JDBC Driver 的过程中,其默认配置并非开启 **_服务端_** 的预处理语句选项,而是使用客户端的预处理语句。你需要关注以下配置项,来获得在 JDBC 下 TiDB 服务端预处理语句的支持,及在你的使用场景下的最佳配置: + +| 参数 | 作用 | 推荐场景 | 推荐配置 | +| :---------------------: | :-----------------------------------: | :--------------------------: | :----------------------: | +| `useServerPrepStmts` | 是否使用服务端开启预处理语句支持 | 在需要多次使用预处理语句时 | `true` | +| `cachePrepStmts` | 客户端是否缓存预处理语句 | `useServerPrepStmts=true` 时 | `true` | +| `prepStmtCacheSqlLimit` | 预处理语句最大大小(默认 256 字符) | 预处理语句大于 256 字符时 | 按实际预处理语句大小配置 | +| `prepStmtCacheSize` | 预处理语句最大缓存数量 (默认 25 条) | 预处理语句数量大于 25 条时 | 按实际预处理语句数量配置 | + +在此处给出一个较为通用的 JDBC 连接字符串配置,以 Host: `127.0.0.1`,Port: `4000`,用户: `root`,密码: 空,默认数据库: `test`为例: + +``` +jdbc:mysql://127.0.0.1:4000/test?user=root&useConfigs=maxPerformance&useServerPrepStmts=true&prepStmtCacheSqlLimit=2048&prepStmtCacheSize=256&rewriteBatchedStatements=true&allowMultiQueries=true +``` + +你也可以查看[插入行](/develop/dev-guide-insert-data.md#插入行)一章,来查看是否需要在插入数据场景下更改其他 JDBC 的参数。 + +有关 Java 的完整示例,可参阅: + +- [TiDB 和 JDBC 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-jdbc.md) +- [TiDB 和 Hibernate 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-hibernate.md) +- [使用 Spring Boot 构建 TiDB 
应用程序](/develop/dev-guide-sample-application-java-spring-boot.md) + +
+ +
diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-golang-gorm.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-golang-gorm.md new file mode 100644 index 00000000..49f8d1c7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-golang-gorm.md @@ -0,0 +1,291 @@ +--- +title: TiDB 和 GORM 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 GORM 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 GORM 的简单 CRUD 应用程序 + +[GORM](https://gorm.io/) 为当前比较流行的 Golang 开源 ORM 之一。 + +本文档将展示如何使用 TiDB 和 GORM 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Golang 1.16 以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-golang.git +``` + +此处将以 GORM v1.23.5 版本进行说明。 + +封装一个用于适配 TiDB 事务的工具包 [util](https://github.com/pingcap-inc/tidb-example-golang/tree/main/util),编写以下代码备用: + +```go +package util + +import ( + "gorm.io/gorm" +) + +// TiDBGormBegin start a TiDB and Gorm transaction as a block. If no error is returned, the transaction will be committed. Otherwise, the transaction will be rolled back. 
+func TiDBGormBegin(db *gorm.DB, pessimistic bool, fc func(tx *gorm.DB) error) (err error) { + session := db.Session(&gorm.Session{}) + if session.Error != nil { + return session.Error + } + + if pessimistic { + session = session.Exec("set @@tidb_txn_mode=pessimistic") + } else { + session = session.Exec("set @@tidb_txn_mode=optimistic") + } + + if session.Error != nil { + return session.Error + } + return session.Transaction(fc) +} +``` + +进入目录 `gorm`: + +```shell +cd gorm +``` + +目录结构如下所示: + +``` +. +├── Makefile +├── go.mod +├── go.sum +└── gorm.go +``` + +其中,`gorm.go` 是 `gorm` 这个示例程序的主体。使用 gorm 时,相较于 go-sql-driver/mysql,gorm 屏蔽了创建数据库连接时,不同数据库差异的细节,其还封装了大量的操作,如 AutoMigrate、基本对象的 CRUD 等,极大地简化了代码量。 + +`Player` 是数据结构体,为数据库表在程序内的映射。`Player` 的每个属性都对应着 `player` 表的一个字段。相较于 go-sql-driver/mysql,gorm 的 `Player` 数据结构体为了给 gorm 提供更多的信息,加入了形如 `gorm:"primaryKey;type:VARCHAR(36);column:id"` 的注解,用来指示映射关系。 + +```go + +package main + +import ( + "fmt" + "math/rand" + + "github.com/google/uuid" + "github.com/pingcap-inc/tidb-example-golang/util" + + "gorm.io/driver/mysql" + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/logger" +) + +type Player struct { + ID string `gorm:"primaryKey;type:VARCHAR(36);column:id"` + Coins int `gorm:"column:coins"` + Goods int `gorm:"column:goods"` +} + +func (*Player) TableName() string { + return "player" +} + +func main() { + // 1. Configure the example database connection. + db := createDB() + + // AutoMigrate for player table + db.AutoMigrate(&Player{}) + + // 2. Run some simple examples. + simpleExample(db) + + // 3. Explore more. + tradeExample(db) +} + +func tradeExample(db *gorm.DB) { + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + player1 := &Player{ID: "1", Coins: 100} + player2 := &Player{ID: "2", Coins: 114514, Goods: 20} + + // Create two players "by hand", using the INSERT statement on the backend. 
+ db.Clauses(clause.OnConflict{UpdateAll: true}).Create(player1) + db.Clauses(clause.OnConflict{UpdateAll: true}).Create(player2) + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + fmt.Println("\nbuyGoods:\n => this trade will fail") + if err := buyGoods(db, player2.ID, player1.ID, 10, 500); err == nil { + panic("there shouldn't be success") + } + + // So player 1 has to reduce the incoming quantity to two. + fmt.Println("\nbuyGoods:\n => this trade will success") + if err := buyGoods(db, player2.ID, player1.ID, 2, 100); err != nil { + panic(err) + } +} + +func simpleExample(db *gorm.DB) { + // Create a player, who has a coin and a goods. + if err := db.Clauses(clause.OnConflict{UpdateAll: true}). + Create(&Player{ID: "test", Coins: 1, Goods: 1}).Error; err != nil { + panic(err) + } + + // Get a player. + var testPlayer Player + db.Find(&testPlayer, "id = ?", "test") + fmt.Printf("getPlayer: %+v\n", testPlayer) + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + bulkInsertPlayers := make([]Player, 1919, 1919) + total, batch := 1919, 114 + for i := 0; i < total; i++ { + bulkInsertPlayers[i] = Player{ + ID: uuid.New().String(), + Coins: rand.Intn(10000), + Goods: rand.Intn(10000), + } + } + + if err := db.Session(&gorm.Session{Logger: db.Logger.LogMode(logger.Error)}). + CreateInBatches(bulkInsertPlayers, batch).Error; err != nil { + panic(err) + } + + // Count players amount. + playersCount := int64(0) + db.Model(&Player{}).Count(&playersCount) + fmt.Printf("countPlayers: %d\n", playersCount) + + // Print 3 players. 
+ threePlayers := make([]Player, 3, 3) + db.Limit(3).Find(&threePlayers) + for index, player := range threePlayers { + fmt.Printf("print %d player: %+v\n", index+1, player) + } +} + +func createDB() *gorm.DB { + dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" + db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{ + Logger: logger.Default.LogMode(logger.Info), + }) + if err != nil { + panic(err) + } + + return db +} + +func buyGoods(db *gorm.DB, sellID, buyID string, amount, price int) error { + return util.TiDBGormBegin(db, true, func(tx *gorm.DB) error { + var sellPlayer, buyPlayer Player + if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}). + Find(&sellPlayer, "id = ?", sellID).Error; err != nil { + return err + } + + if sellPlayer.ID != sellID || sellPlayer.Goods < amount { + return fmt.Errorf("sell player %s goods not enough", sellID) + } + + if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}). + Find(&buyPlayer, "id = ?", buyID).Error; err != nil { + return err + } + + if buyPlayer.ID != buyID || buyPlayer.Coins < price { + return fmt.Errorf("buy player %s coins not enough", buyID) + } + + updateSQL := "UPDATE player set goods = goods + ?, coins = coins + ? WHERE id = ?" 
+ if err := tx.Exec(updateSQL, -amount, price, sellID).Error; err != nil { + return err + } + + if err := tx.Exec(updateSQL, amount, -price, buyID).Error; err != nil { + return err + } + + fmt.Println("\n[buyGoods]:\n 'trade success'") + return nil + }) +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `gorm.go` 内 `dsn` 参数值: + +```go +dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `mysql.RegisterTLSConfig` 和 `dsn` 更改为: + +```go +mysql.RegisterTLSConfig("register-tidb-tls", &tls.Config { + MinVersion: tls.VersionTLS12, + ServerName: "xxx.tidbcloud.com", +}) + +dsn := "2aEp24QWEDLqRFs.root:123456@tcp(xxx.tidbcloud.com:4000)/test?charset=utf8mb4&tls=register-tidb-tls" +``` + +### 第 3 步第 2 部分:运行 + +你可以分别运行 `make build` 和 `make run` 以运行此代码: + +```shell +make build # this command executes `go build -o bin/gorm-example` +make run # this command executes `./bin/gorm-example` +``` + +或者你也可以直接使用原生的命令: + +```shell +go build -o bin/gorm-example +./bin/gorm-example +``` + +再或者直接运行 `make all` 命令,这是 `make build` 和 `make run` 的组合。 + +## 第 4 步:预期输出 + +[GORM 预期输出](https://github.com/pingcap-inc/tidb-example-golang/blob/main/Expected-Output.md#gorm) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-golang-sql-driver.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-golang-sql-driver.md new file mode 100644 index 00000000..2adddfc7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-golang-sql-driver.md @@ -0,0 +1,537 @@ +--- +title: TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序示例。 +aliases: ['/zh/tidb/dev/dev-guide-sample-application-golang'] +--- + + + 
+ +# TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序 + +本文档将展示如何使用 TiDB 和 [Go-MySQL-Driver](https://github.com/go-sql-driver/mysql) 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Golang 1.16 以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-golang.git +``` + +进入目录 `sqldriver`: + +```shell +cd sqldriver +``` + +目录结构如下所示: + +``` +. +├── Makefile +├── dao.go +├── go.mod +├── go.sum +├── sql +│   └── dbinit.sql +├── sql.go +└── sqldriver.go +``` + +其中,`dbinit.sql` 为数据表初始化语句: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +`sqldriver.go` 是 `sqldriver` 这个示例程序的主体。与 GORM 对比,go-sql-driver/mysql 的实现方式并非最优体验。你需要自行编写错误处理逻辑,手动关闭 `*sql.Rows`,并且代码无法简单复用。这会使你的代码有些冗余。因为 TiDB 与 MySQL 协议兼容,因此,需要初始化一个 MySQL 协议的数据源 `db, err := sql.Open("mysql", dsn)`,以此连接到 TiDB。并在其后,调用 `dao.go` 中的一系列方法,用来管理数据对象,进行增删改查等操作。 + +```go +package main + +import ( + "database/sql" + "fmt" + + _ "github.com/go-sql-driver/mysql" +) + +func main() { + // 1. Configure the example database connection. + dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" + openDB("mysql", dsn, func(db *sql.DB) { + // 2. Run some simple examples. + simpleExample(db) + + // 3. Explore more. + tradeExample(db) + }) +} + +func simpleExample(db *sql.DB) { + // Create a player, who has a coin and a goods. + err := createPlayer(db, Player{ID: "test", Coins: 1, Goods: 1}) + if err != nil { + panic(err) + } + + // Get a player. 
+ testPlayer, err := getPlayer(db, "test") + if err != nil { + panic(err) + } + fmt.Printf("getPlayer: %+v\n", testPlayer) + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + + err = bulkInsertPlayers(db, randomPlayers(1919), 114) + if err != nil { + panic(err) + } + + // Count players amount. + playersCount, err := getCount(db) + if err != nil { + panic(err) + } + fmt.Printf("countPlayers: %d\n", playersCount) + + // Print 3 players. + threePlayers, err := getPlayerByLimit(db, 3) + if err != nil { + panic(err) + } + for index, player := range threePlayers { + fmt.Printf("print %d player: %+v\n", index+1, player) + } +} + +func tradeExample(db *sql.DB) { + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + player1 := Player{ID: "1", Coins: 100} + player2 := Player{ID: "2", Coins: 114514, Goods: 20} + + // Create two players "by hand", using the INSERT statement on the backend. + if err := createPlayer(db, player1); err != nil { + panic(err) + } + if err := createPlayer(db, player2); err != nil { + panic(err) + } + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + fmt.Println("\nbuyGoods:\n => this trade will fail") + if err := buyGoods(db, player2.ID, player1.ID, 10, 500); err == nil { + panic("there shouldn't be success") + } + + // So player 1 has to reduce the incoming quantity to two. 
+ fmt.Println("\nbuyGoods:\n => this trade will success") + if err := buyGoods(db, player2.ID, player1.ID, 2, 100); err != nil { + panic(err) + } +} + +func openDB(driverName, dataSourceName string, runnable func(db *sql.DB)) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + defer db.Close() + + runnable(db) +} +``` + +随后,封装一个用于适配 TiDB 事务的工具包 [util](https://github.com/pingcap-inc/tidb-example-golang/tree/main/util),编写以下代码备用: + +```go +package util + +import ( + "context" + "database/sql" +) + +type TiDBSqlTx struct { + *sql.Tx + conn *sql.Conn + pessimistic bool +} + +func TiDBSqlBegin(db *sql.DB, pessimistic bool) (*TiDBSqlTx, error) { + ctx := context.Background() + conn, err := db.Conn(ctx) + if err != nil { + return nil, err + } + if pessimistic { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "pessimistic") + } else { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "optimistic") + } + if err != nil { + return nil, err + } + tx, err := conn.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + return &TiDBSqlTx{ + conn: conn, + Tx: tx, + pessimistic: pessimistic, + }, nil +} + +func (tx *TiDBSqlTx) Commit() error { + defer tx.conn.Close() + return tx.Tx.Commit() +} + +func (tx *TiDBSqlTx) Rollback() error { + defer tx.conn.Close() + return tx.Tx.Rollback() +} +``` + +在 `dao.go` 中定义一系列数据的操作方法,用来对提供数据的写入能力。这也是本例子中核心部分。 + +```go +package main + +import ( + "database/sql" + "fmt" + "math/rand" + "strings" + + "github.com/google/uuid" + "github.com/pingcap-inc/tidb-example-golang/util" +) + +type Player struct { + ID string + Coins int + Goods int +} + +// createPlayer create a player +func createPlayer(db *sql.DB, player Player) error { + _, err := db.Exec(CreatePlayerSQL, player.ID, player.Coins, player.Goods) + return err +} + +// getPlayer get a player +func getPlayer(db *sql.DB, id string) (Player, error) { + var player Player + + rows, err := db.Query(GetPlayerSQL, id) + if err != nil { + return 
player, err + } + defer rows.Close() + + if rows.Next() { + err = rows.Scan(&player.ID, &player.Coins, &player.Goods) + if err == nil { + return player, nil + } else { + return player, err + } + } + + return player, fmt.Errorf("can not found player") +} + +// getPlayerByLimit get players by limit +func getPlayerByLimit(db *sql.DB, limit int) ([]Player, error) { + var players []Player + + rows, err := db.Query(GetPlayerByLimitSQL, limit) + if err != nil { + return players, err + } + defer rows.Close() + + for rows.Next() { + player := Player{} + err = rows.Scan(&player.ID, &player.Coins, &player.Goods) + if err == nil { + players = append(players, player) + } else { + return players, err + } + } + + return players, nil +} + +// bulk-insert players +func bulkInsertPlayers(db *sql.DB, players []Player, batchSize int) error { + tx, err := util.TiDBSqlBegin(db, true) + if err != nil { + return err + } + + stmt, err := tx.Prepare(buildBulkInsertSQL(batchSize)) + if err != nil { + return err + } + + defer stmt.Close() + + for len(players) > batchSize { + if _, err := stmt.Exec(playerToArgs(players[:batchSize])...); err != nil { + tx.Rollback() + return err + } + + players = players[batchSize:] + } + + if len(players) != 0 { + if _, err := tx.Exec(buildBulkInsertSQL(len(players)), playerToArgs(players)...); err != nil { + tx.Rollback() + return err + } + } + + if err := tx.Commit(); err != nil { + tx.Rollback() + return err + } + + return nil +} + +func getCount(db *sql.DB) (int, error) { + count := 0 + + rows, err := db.Query(GetCountSQL) + if err != nil { + return count, err + } + + defer rows.Close() + + if rows.Next() { + if err := rows.Scan(&count); err != nil { + return count, err + } + } + + return count, nil +} + +func buyGoods(db *sql.DB, sellID, buyID string, amount, price int) error { + var sellPlayer, buyPlayer Player + + tx, err := util.TiDBSqlBegin(db, true) + if err != nil { + return err + } + + buyExec := func() error { + stmt, err := 
tx.Prepare(GetPlayerWithLockSQL) + if err != nil { + return err + } + defer stmt.Close() + + sellRows, err := stmt.Query(sellID) + if err != nil { + return err + } + defer sellRows.Close() + + if sellRows.Next() { + if err := sellRows.Scan(&sellPlayer.ID, &sellPlayer.Coins, &sellPlayer.Goods); err != nil { + return err + } + } + sellRows.Close() + + if sellPlayer.ID != sellID || sellPlayer.Goods < amount { + return fmt.Errorf("sell player %s goods not enough", sellID) + } + + buyRows, err := stmt.Query(buyID) + if err != nil { + return err + } + defer buyRows.Close() + + if buyRows.Next() { + if err := buyRows.Scan(&buyPlayer.ID, &buyPlayer.Coins, &buyPlayer.Goods); err != nil { + return err + } + } + buyRows.Close() + + if buyPlayer.ID != buyID || buyPlayer.Coins < price { + return fmt.Errorf("buy player %s coins not enough", buyID) + } + + updateStmt, err := tx.Prepare(UpdatePlayerSQL) + if err != nil { + return err + } + defer updateStmt.Close() + + if _, err := updateStmt.Exec(-amount, price, sellID); err != nil { + return err + } + + if _, err := updateStmt.Exec(amount, -price, buyID); err != nil { + return err + } + + return nil + } + + err = buyExec() + if err == nil { + fmt.Println("\n[buyGoods]:\n 'trade success'") + tx.Commit() + } else { + tx.Rollback() + } + + return err +} + +func playerToArgs(players []Player) []interface{} { + var args []interface{} + for _, player := range players { + args = append(args, player.ID, player.Coins, player.Goods) + } + return args +} + +func buildBulkInsertSQL(amount int) string { + return CreatePlayerSQL + strings.Repeat(",(?,?,?)", amount-1) +} + +func randomPlayers(amount int) []Player { + players := make([]Player, amount, amount) + for i := 0; i < amount; i++ { + players[i] = Player{ + ID: uuid.New().String(), + Coins: rand.Intn(10000), + Goods: rand.Intn(10000), + } + } + + return players +} +``` + +`sql.go` 中存放了 SQL 语句的常量。 + +```go +package main + +const ( + CreatePlayerSQL = "INSERT INTO player (id, coins, goods) 
VALUES (?, ?, ?)" + GetPlayerSQL = "SELECT id, coins, goods FROM player WHERE id = ?" + GetCountSQL = "SELECT count(*) FROM player" + GetPlayerWithLockSQL = GetPlayerSQL + " FOR UPDATE" + UpdatePlayerSQL = "UPDATE player set goods = goods + ?, coins = coins + ? WHERE id = ?" + GetPlayerByLimitSQL = "SELECT id, coins, goods FROM player LIMIT ?" +) +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +使用 go-sql-driver/mysql 时,需手动初始化数据库表,若你本地已经安装了 `mysql-client`,且使用本地集群,可直接在 `sqldriver` 目录下运行: + +```shell +make mysql +``` + +或直接执行: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root + + +# TiDB 和 Hibernate 的简单 CRUD 应用程序 + +[Hibernate](https://hibernate.org/) 是当前比较流行的开源 Java 应用持久层框架,且 Hibernate 在版本 `6.0.0.Beta2` 及以后支持了 TiDB 方言,完美适配了 TiDB 的特性。 + +本文档将展示如何使用 TiDB 和 Java 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Java 8 及以上版本进行 TiDB 的应用程序的编写。 + +## 拓展学习视频 + +- [使用 Connector/J - TiDB v6](https://learn.pingcap.com/learner/course/840002/?utm_source=docs-cn-dev-guide) +- [在 TiDB 上开发应用的最佳实践 - TiDB v6](https://learn.pingcap.com/learner/course/780002/?utm_source=docs-cn-dev-guide) + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +与 [Hibernate](https://hibernate.org/orm/) 对比,JDBC 的实现方式并非最优体验。你需要自行编写错误处理逻辑,并且代码无法简单复用。这会使你的代码有些冗余。 + +此处将以 `6.0.0.Beta2` 版本进行说明。 + +进入目录 `plain-java-hibernate`: + +```shell +cd plain-java-hibernate +``` + +目录结构如下所示: + +``` +. 
+├── Makefile +├── plain-java-hibernate.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── HibernateExample.java + └── resources + └── hibernate.cfg.xml +``` + +其中,`hibernate.cfg.xml` 为 Hibernate 配置文件,定义了: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +`HibernateExample.java` 是 `plain-java-hibernate` 这个示例程序的主体。使用 Hibernate 时,相较于 JDBC,这里仅需写入配置文件地址,Hibernate 屏蔽了创建数据库连接时,不同数据库差异的细节。 + +`PlayerDAO` 是程序用来管理数据对象的类。其中 `DAO` 是 [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object) 的缩写。其中定义了一系列数据的操作方法,用来提供数据的写入能力。相较于 JDBC,Hibernate 封装了大量的操作,如对象映射、基本对象的 CRUD 等,极大地简化了代码量。 + +`PlayerBean` 是数据实体类,为数据库表在程序内的映射。`PlayerBean` 的每个属性都对应着 `player` 表的一个字段。相较于 JDBC,Hibernate 的 `PlayerBean` 实体类为了给 Hibernate 提供更多的信息,加入了注解,用来指示映射关系。 + +```java +package com.pingcap; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.persistence.Table; +import org.hibernate.JDBCException; +import org.hibernate.Session; +import org.hibernate.SessionFactory; +import org.hibernate.Transaction; +import org.hibernate.cfg.Configuration; +import org.hibernate.query.NativeQuery; +import org.hibernate.query.Query; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; + +@Entity +@Table(name = "player_hibernate") +class PlayerBean { + @Id + private String id; + @Column(name = "coins") + private Integer coins; + @Column(name = "goods") + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer 
coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } +} + +/** + * Main class for the basic Hibernate example. + **/ +public class HibernateExample +{ + public static class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic so we don't have to duplicate it in + // various places. + public Object runTransaction(Session session, Function fn) { + Object resultObject = null; + + Transaction txn = session.beginTransaction(); + try { + resultObject = fn.apply(session); + txn.commit(); + System.out.println("APP: COMMIT;"); + } catch (JDBCException e) { + System.out.println("APP: ROLLBACK BY JDBC ERROR;"); + txn.rollback(); + } catch (NotEnoughException e) { + System.out.printf("APP: ROLLBACK BY LOGIC; %s", e.getMessage()); + txn.rollback(); + } + return resultObject; + } + + public Function createPlayers(List players) throws JDBCException { + return session -> { + Integer addedPlayerAmount = 0; + for (PlayerBean player: players) { + session.persist(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, Integer amount, Integer price) throws JDBCException { + return session -> { + PlayerBean sellPlayer = session.get(PlayerBean.class, sellId); + PlayerBean buyPlayer = session.get(PlayerBean.class, buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || 
sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + session.persist(buyPlayer); + + sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + session.persist(sellPlayer); + + System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return 0; + }; + } + + public Function getPlayerByID(String id) throws JDBCException { + return session -> session.get(PlayerBean.class, id); + } + + public Function printPlayers(Integer limit) throws JDBCException { + return session -> { + NativeQuery limitQuery = session.createNativeQuery("SELECT * FROM player_hibernate LIMIT :limit", PlayerBean.class); + limitQuery.setParameter("limit", limit); + List players = limitQuery.getResultList(); + + for (PlayerBean player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() throws JDBCException { + return session -> { + Query countQuery = session.createQuery("SELECT count(player_hibernate) FROM PlayerBean player_hibernate", Long.class); + return countQuery.getSingleResult(); + }; + } + } + + public static void main(String[] args) { + // 1. Create a SessionFactory based on our hibernate.cfg.xml configuration + // file, which defines how to connect to the database. + SessionFactory sessionFactory + = new Configuration() + .configure("hibernate.cfg.xml") + .addAnnotatedClass(PlayerBean.class) + .buildSessionFactory(); + + try (Session session = sessionFactory.openSession()) { + // 2. And then, create DAO to manager your data. + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple examples. + + // Create a player who has 1 coin and 1 goods. 
+ playerDAO.runTransaction(session, playerDAO.createPlayers(Collections.singletonList( + new PlayerBean("test", 1, 1)))); + + // Get a player. + PlayerBean testPlayer = (PlayerBean)playerDAO.runTransaction(session, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. + Long count = (Long)playerDAO.runTransaction(session, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(session, playerDAO.printPlayers(3)); + + // 4. Explore more. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. 
+ System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } finally { + sessionFactory.close(); + } + } +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `hibernate.cfg.xml` 内关于 `hibernate.connection.url`、`hibernate.connection.username`、`hibernate.connection.password` 的参数: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将配置文件更改为: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://xxx.tidbcloud.com:4000/test?sslMode=VERIFY_IDENTITY&enabledTLSProtocols=TLSv1.2,TLSv1.3 + 2aEp24QWEDLqRFs.root + 123456 + false + + + create-drop + + + true + true + + +``` + +### 第 3 步第 2 部分:运行 + +你可以分别运行 `make build` 和 `make run` 以运行此代码: + +```shell +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-hibernate-0.0.1-jar-with-dependencies.jar` +``` + +或者你也可以直接使用原生的命令: + +```shell +mvn clean package +java -jar target/plain-java-hibernate-0.0.1-jar-with-dependencies.jar +``` + +再或者直接运行 `make` 命令,这是 `make build` 和 `make run` 的组合。 + +## 第 4 步:预期输出 + +[Hibernate 预期输出](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-hibernate) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-java-jdbc.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-java-jdbc.md new file mode 100644 
index 00000000..250943e0 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-java-jdbc.md @@ -0,0 +1,576 @@ +--- +title: TiDB 和 JDBC 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 JDBC 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 JDBC 的简单 CRUD 应用程序 + +本文档将展示如何使用 TiDB 和 JDBC 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Java 8 及以上版本进行 TiDB 的应用程序的编写。 + +## 拓展学习视频 + +- [使用 Connector/J - TiDB v6](https://learn.pingcap.com/learner/course/840002/?utm_source=docs-cn-dev-guide) +- [在 TiDB 上开发应用的最佳实践 - TiDB v6](https://learn.pingcap.com/learner/course/780002/?utm_source=docs-cn-dev-guide) + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +进入目录 `plain-java-jdbc`: + +```shell +cd plain-java-jdbc +``` + +目录结构如下所示: + +``` +. 
+├── Makefile +├── plain-java-jdbc.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── JDBCExample.java + └── resources + └── dbinit.sql +``` + +其中,`dbinit.sql` 为数据表初始化语句: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +`JDBCExample.java` 是 `plain-java-jdbc` 这个示例程序的主体。因为 TiDB 与 MySQL 协议兼容,因此,需要初始化一个 MySQL 协议的数据源 `MysqlDataSource`,以此连接到 TiDB。并在其后,初始化 `PlayerDAO`,用来管理数据对象,进行增删改查等操作。 + +`PlayerDAO` 是程序用来管理数据对象的类。其中 `DAO` 是 [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object) 的缩写。在其中定义了一系列数据的操作方法,用来对提供数据的写入能力。 + +`PlayerBean` 是数据实体类,为数据库表在程序内的映射。`PlayerBean` 的每个属性都对应着 `player` 表的一个字段。 + +```java +package com.pingcap; + +import com.mysql.cj.jdbc.MysqlDataSource; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.*; + +/** + * Main class for the basic JDBC example. + **/ +public class JDBCExample +{ + public static class PlayerBean { + private String id; + private Integer coins; + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } + } + + /** + * Data access object used by 'ExampleDataSource'. + * Example for CURD and bulk insert. 
+ */ + public static class PlayerDAO { + private final MysqlDataSource ds; + private final Random rand = new Random(); + + PlayerDAO(MysqlDataSource ds) { + this.ds = ds; + } + + /** + * Create players by passing in a List of PlayerBean. + * + * @param players Will create players list + * @return The number of create accounts + */ + public int createPlayers(List players){ + int rows = 0; + + Connection connection = null; + PreparedStatement preparedStatement = null; + try { + connection = ds.getConnection(); + preparedStatement = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)"); + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + + return -1; + } + + try { + for (PlayerBean player : players) { + preparedStatement.setString(1, player.getId()); + preparedStatement.setInt(2, player.getCoins()); + preparedStatement.setInt(3, player.getGoods()); + + preparedStatement.execute(); + rows += preparedStatement.getUpdateCount(); + } + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + } finally { + try { + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + System.out.printf("\n[createPlayers]:\n '%s'\n", preparedStatement); + return rows; + } + + /** + * Buy goods and transfer funds between one player and another in one transaction. + * @param sellId Sell player id. + * @param buyId Buy player id. + * @param amount Goods amount, if sell player has not enough goods, the trade will break. + * @param price Price should pay, if buy player has not enough coins, the trade will break. + * + * @return The number of effected players. 
+ */ + public int buyGoods(String sellId, String buyId, Integer amount, Integer price) { + int effectPlayers = 0; + + Connection connection = null; + try { + connection = ds.getConnection(); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + return effectPlayers; + } + + try { + connection.setAutoCommit(false); + + PreparedStatement playerQuery = connection.prepareStatement("SELECT * FROM player WHERE id=? OR id=? FOR UPDATE"); + playerQuery.setString(1, sellId); + playerQuery.setString(2, buyId); + playerQuery.execute(); + + PlayerBean sellPlayer = null; + PlayerBean buyPlayer = null; + + ResultSet playerQueryResultSet = playerQuery.getResultSet(); + while (playerQueryResultSet.next()) { + PlayerBean player = new PlayerBean( + playerQueryResultSet.getString("id"), + playerQueryResultSet.getInt("coins"), + playerQueryResultSet.getInt("goods") + ); + + System.out.println("\n[buyGoods]:\n 'check goods and coins enough'"); + System.out.println(player); + + if (sellId.equals(player.getId())) { + sellPlayer = player; + } else { + buyPlayer = player; + } + } + + if (sellPlayer == null || buyPlayer == null) { + throw new SQLException("player not exist."); + } + + if (sellPlayer.getGoods().compareTo(amount) < 0) { + throw new SQLException(String.format("sell player %s goods not enough.", sellId)); + } + + if (buyPlayer.getCoins().compareTo(price) < 0) { + throw new SQLException(String.format("buy player %s coins not enough.", buyId)); + } + + PreparedStatement transfer = connection.prepareStatement("UPDATE player set goods = goods + ?, coins = coins + ? 
WHERE id=?"); + transfer.setInt(1, -amount); + transfer.setInt(2, price); + transfer.setString(3, sellId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + transfer.setInt(1, amount); + transfer.setInt(2, -price); + transfer.setString(3, buyId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + connection.commit(); + + System.out.println("\n[buyGoods]:\n 'trade success'"); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + + try { + System.out.println("[buyGoods] Rollback"); + + connection.rollback(); + } catch (SQLException ex) { + // do nothing + } + } finally { + try { + connection.close(); + } catch (SQLException e) { + // do nothing + } + } + + return effectPlayers; + } + + /** + * Get the player info by id. + * + * @param id Player id. + * @return The player of this id. + */ + public PlayerBean getPlayer(String id) { + PlayerBean player = null; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player WHERE id = ?"); + preparedStatement.setString(1, id); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(!res.next()) { + System.out.printf("No players in the table with id %s", id); + } else { + player = new PlayerBean(res.getString("id"), res.getInt("coins"), res.getInt("goods")); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.getPlayer ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return player; + } + + /** + * Insert randomized account data (id, coins, goods) using the JDBC fast path for + * bulk inserts. The fastest way to get data into TiDB is using the + * TiDB Lightning(https://docs.pingcap.com/tidb/stable/tidb-lightning-overview). 
+ * However, if you must bulk insert from the application using INSERT SQL, the best + * option is the method shown here. It will require the following: + * + * Add `rewriteBatchedStatements=true` to your JDBC connection settings. + * Setting rewriteBatchedStatements to true now causes CallableStatements + * with batched arguments to be re-written in the form "CALL (...); CALL (...); ..." + * to send the batch in as few client/server round trips as possible. + * https://dev.mysql.com/doc/relnotes/connector-j/5.1/en/news-5-1-3.html + * + * You can see the `rewriteBatchedStatements` param effect logic at + * implement function: `com.mysql.cj.jdbc.StatementImpl.executeBatchUsingMultiQueries` + * + * @param total Add players amount. + * @param batchSize Bulk insert size for per batch. + * + * @return The number of new accounts inserted. + */ + public int bulkInsertRandomPlayers(Integer total, Integer batchSize) { + int totalNewPlayers = 0; + + try (Connection connection = ds.getConnection()) { + // We're managing the commit lifecycle ourselves, so we can + // control the size of our batch inserts. + connection.setAutoCommit(false); + + // In this example we are adding 500 rows to the database, + // but it could be any number. What's important is that + // the batch size is 128. 
+ try (PreparedStatement pstmt = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)")) { + for (int i=0; i<=(total/batchSize);i++) { + for (int j=0; j %s row(s) updated in this batch\n", count.length); + } + connection.commit(); + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + return totalNewPlayers; + } + + + /** + * Print a subset of players from the data store by limit. + * + * @param limit Print max size. + */ + public void printPlayers(Integer limit) { + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player LIMIT ?"); + preparedStatement.setInt(1, limit); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + while (!res.next()) { + PlayerBean player = new PlayerBean(res.getString("id"), + res.getInt("coins"), res.getInt("goods")); + System.out.println("\n[printPlayers]:\n" + player); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.printPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } + + + /** + * Count players from the data store. 
+ * + * @return All players count + */ + public int countPlayers() { + int count = 0; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT count(*) FROM player"); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(res.next()) { + count = res.getInt(1); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.countPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return count; + } + } + + public static void main(String[] args) { + // 1. Configure the example database connection. + + // 1.1 Create a mysql data source instance. + MysqlDataSource mysqlDataSource = new MysqlDataSource(); + + // 1.2 Set server name, port, database name, username and password. + mysqlDataSource.setServerName("localhost"); + mysqlDataSource.setPortNumber(4000); + mysqlDataSource.setDatabaseName("test"); + mysqlDataSource.setUser("root"); + mysqlDataSource.setPassword(""); + + // Or you can use jdbc string instead. + // mysqlDataSource.setURL("jdbc:mysql://{host}:{port}/test?user={user}&password={password}"); + + // 2. And then, create DAO to manager your data. + PlayerDAO dao = new PlayerDAO(mysqlDataSource); + + // 3. Run some simple examples. + + // Create a player, who has a coin and a goods.. + dao.createPlayers(Collections.singletonList(new PlayerBean("test", 1, 1))); + + // Get a player. + PlayerBean testPlayer = dao.getPlayer("test"); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + int addedCount = dao.bulkInsertRandomPlayers(1919, 114); + System.out.printf("PlayerDAO.bulkInsertRandomPlayers:\n => %d total inserted players\n", addedCount); + + // Count players amount. 
+ int count = dao.countPlayers(); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + dao.printPlayers(3); + + // 4. Explore more. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + addedCount = dao.createPlayers(Arrays.asList(player1, player2)); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + int updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 10, 500); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. 
+ System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 2, 100); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:JDBC 表初始化 + +使用 JDBC 时,需手动初始化数据库表,若你本地已经安装了 `mysql-client`,且使用本地集群,可直接在 `plain-java-jdbc` 目录下运行: + +```shell +make mysql +``` + +或直接执行: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root + + +# TiDB 和 MyBatis 的简单 CRUD 应用程序 + +[Mybatis](https://mybatis.org/mybatis-3/index.html) 是当前比较流行的开源 Java 应用持久层框架。 + +本文档将展示如何使用 TiDB 和 MyBatis 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Java 8 及以上版本进行 TiDB 的应用程序的编写。 + +## 拓展学习视频 + +- [使用 Connector/J - TiDB v6](https://learn.pingcap.com/learner/course/840002/?utm_source=docs-cn-dev-guide) +- [在 TiDB 上开发应用的最佳实践 - TiDB v6](https://learn.pingcap.com/learner/course/780002/?utm_source=docs-cn-dev-guide) + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +与 [MyBatis](https://mybatis.org/mybatis-3/index.html) 对比,JDBC 的实现方式并非最优体验。你需要自行编写错误处理逻辑,并且代码无法简单复用。这会使你的代码有些冗余。 + +本文将以 Maven 插件的方式使用 [MyBatis Generator](https://mybatis.org/generator/quickstart.html) 生成部分持久层代码。 + +进入目录 `plain-java-mybatis`: + +```shell +cd plain-java-mybatis +``` + +目录结构如下所示: + +``` +. 
+├── Makefile +├── pom.xml +└── src + └── main + ├── java + │   └── com + │   └── pingcap + │   ├── MybatisExample.java + │   ├── dao + │   │   └── PlayerDAO.java + │   └── model + │   ├── Player.java + │   ├── PlayerMapper.java + │   └── PlayerMapperEx.java + └── resources + ├── dbinit.sql + ├── log4j.properties + ├── mapper + │   ├── PlayerMapper.xml + │   └── PlayerMapperEx.xml + ├── mybatis-config.xml + └── mybatis-generator.xml +``` + +其中,自动生成的文件有: + +- `src/main/java/com/pingcap/model/Player.java`:Player 实体类文件 +- `src/main/java/com/pingcap/model/PlayerMapper.java`:Player Mapper 的接口文件 +- `src/main/resources/mapper/PlayerMapper.xml`:Player Mapper 的 XML 映射,它是 MyBatis 用于生成 Player Mapper 接口的实现类的配置 + +这些文件的生成策略被写在了 `mybatis-generator.xml` 配置文件内,它是 [MyBatis Generator](https://mybatis.org/generator/quickstart.html) 的配置文件,下面配置文件中添加了使用方法的说明: + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +``` + +`mybatis-generator.xml` 在 `pom.xml` 中,以 `mybatis-generator-maven-plugin` 插件配置的方式被引入: + +```xml + + org.mybatis.generator + mybatis-generator-maven-plugin + 1.4.1 + + src/main/resources/mybatis-generator.xml + true + true + + + + + + mysql + mysql-connector-java + 5.1.49 + + + +``` + +在 Maven 插件内引入后,可删除旧的生成文件后,通过命令 `mvn mybatis-generate` 生成新的文件。或者你也可以使用已经编写好的 `make` 命令,通过 `make gen` 来同时删除旧文件,并生成新文件。 + +> **注意:** +> +> `mybatis-generator.xml` 中的属性 `configuration.overwrite` 仅可控制新生成的 Java 代码文件使用覆盖方式被写入,但 XML 映射文件仍会以追加方式写入。因此,推荐在 MyBatis Generator 生成新的文件前,先删除掉旧的文件。 + +`Player.java` 是使用 MyBatis Generator 生成出的数据实体类文件,为数据库表在程序内的映射。`Player` 类的每个属性都对应着 `player` 表的一个字段。 + +```java +package com.pingcap.model; + +public class Player { + private String id; + + private Integer coins; + + private Integer goods; + + public Player(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public Player() { + super(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } +} +``` + +`PlayerMapper.java` 是使用 MyBatis Generator 生成出的映射接口文件,它仅规定了接口,接口的实现类是由 MyBatis 来通过 XML 或注解自动生成的: + +```java +package com.pingcap.model; + +import com.pingcap.model.Player; + +public interface PlayerMapper { + int deleteByPrimaryKey(String id); + + int insert(Player row); + + int insertSelective(Player row); + + Player selectByPrimaryKey(String id); + + int updateByPrimaryKeySelective(Player row); + + int updateByPrimaryKey(Player row); +} +``` + +`PlayerMapper.xml` 是使用 MyBatis Generator 生成出的映射 XML 文件,MyBatis 将使用这个文件自动生成 `PlayerMapper` 接口的实现类: + +```xml + + + + + + + + + + + + id, coins, goods + + + + delete from player + where id = #{id,jdbcType=VARCHAR} + + + 
insert into player (id, coins, goods + ) + values (#{id,jdbcType=VARCHAR}, #{coins,jdbcType=INTEGER}, #{goods,jdbcType=INTEGER} + ) + + + insert into player + + + id, + + + coins, + + + goods, + + + + + #{id,jdbcType=VARCHAR}, + + + #{coins,jdbcType=INTEGER}, + + + #{goods,jdbcType=INTEGER}, + + + + + update player + + + coins = #{coins,jdbcType=INTEGER}, + + + goods = #{goods,jdbcType=INTEGER}, + + + where id = #{id,jdbcType=VARCHAR} + + + update player + set coins = #{coins,jdbcType=INTEGER}, + goods = #{goods,jdbcType=INTEGER} + where id = #{id,jdbcType=VARCHAR} + + +``` + +由于 MyBatis Generator 需要逆向生成源码,因此,数据库中需先行有此表结构,可使用 `dbinit.sql` 生成表结构: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +额外拆分接口 `PlayerMapperEx` 继承 `PlayerMapper`,并且编写与之匹配的 `PlayerMapperEx.xml`。避免直接更改 `PlayerMapper.java` 和 `PlayerMapper.xml`。这是为了规避 MyBatis Generator 的反复生成,影响到自行编写的代码。 + +在 `PlayerMapperEx.java` 中定义自行增加的接口: + +```java +package com.pingcap.model; + +import java.util.List; + +public interface PlayerMapperEx extends PlayerMapper { + Player selectByPrimaryKeyWithLock(String id); + + List selectByLimit(Integer limit); + + Integer count(); +} +``` + +在 `PlayerMapperEx.xml` 中定义映射规则: + +```xml + + + + + + + + + + + + id, coins, goods + + + + + + + + + +``` + +`PlayerDAO.java` 是程序用来管理数据对象的类。其中 `DAO` 是 [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object) 的缩写。在其中定义了一系列数据的操作方法,用于数据的写入。 + +```java +package com.pingcap.dao; + +import com.pingcap.model.Player; +import com.pingcap.model.PlayerMapperEx; +import org.apache.ibatis.session.SqlSession; +import org.apache.ibatis.session.SqlSessionFactory; + +import java.util.List; +import java.util.function.Function; + +public class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that 
automatically handles the + // transaction retry logic, so we don't have to duplicate it in + // various places. + public Object runTransaction(SqlSessionFactory sessionFactory, Function fn) { + Object resultObject = null; + SqlSession session = null; + + try { + // open a session with autoCommit is false + session = sessionFactory.openSession(false); + + // get player mapper + PlayerMapperEx playerMapperEx = session.getMapper(PlayerMapperEx.class); + + resultObject = fn.apply(playerMapperEx); + session.commit(); + System.out.println("APP: COMMIT;"); + } catch (Exception e) { + if (e instanceof NotEnoughException) { + System.out.printf("APP: ROLLBACK BY LOGIC; \n%s\n", e.getMessage()); + } else { + System.out.printf("APP: ROLLBACK BY ERROR; \n%s\n", e.getMessage()); + } + + if (session != null) { + session.rollback(); + } + } finally { + if (session != null) { + session.close(); + } + } + + return resultObject; + } + + public Function createPlayers(List players) { + return playerMapperEx -> { + Integer addedPlayerAmount = 0; + for (Player player: players) { + playerMapperEx.insert(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, Integer amount, Integer price) { + return playerMapperEx -> { + Player sellPlayer = playerMapperEx.selectByPrimaryKeyWithLock(sellId); + Player buyPlayer = playerMapperEx.selectByPrimaryKeyWithLock(buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + int affectRows = 0; + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + affectRows += playerMapperEx.updateByPrimaryKey(buyPlayer); + + 
sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + affectRows += playerMapperEx.updateByPrimaryKey(sellPlayer); + + System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return affectRows; + }; + } + + public Function getPlayerByID(String id) { + return playerMapperEx -> playerMapperEx.selectByPrimaryKey(id); + } + + public Function printPlayers(Integer limit) { + return playerMapperEx -> { + List players = playerMapperEx.selectByLimit(limit); + + for (Player player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() { + return PlayerMapperEx::count; + } +} +``` + +`MybatisExample` 是 `plain-java-mybatis` 这个示例程序的主类。其中定义了入口函数: + +```java +package com.pingcap; + +import com.pingcap.dao.PlayerDAO; +import com.pingcap.model.Player; +import org.apache.ibatis.io.Resources; +import org.apache.ibatis.session.SqlSessionFactory; +import org.apache.ibatis.session.SqlSessionFactoryBuilder; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; + +public class MybatisExample { + public static void main( String[] args ) throws IOException { + // 1. Create a SqlSessionFactory based on our mybatis-config.xml configuration + // file, which defines how to connect to the database. + InputStream inputStream = Resources.getResourceAsStream("mybatis-config.xml"); + SqlSessionFactory sessionFactory = new SqlSessionFactoryBuilder().build(inputStream); + + // 2. And then, create DAO to manager your data + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple examples. + + // Create a player who has 1 coin and 1 goods. + playerDAO.runTransaction(sessionFactory, playerDAO.createPlayers( + Collections.singletonList(new Player("test", 1, 1)))); + + // Get a player. 
+ Player testPlayer = (Player)playerDAO.runTransaction(sessionFactory, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. + Integer count = (Integer)playerDAO.runTransaction(sessionFactory, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(sessionFactory, playerDAO.printPlayers(3)); + + // 4. Getting further. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + Player player1 = new Player("1", 100, 0); + Player player2 = new Player("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. 
+ System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +使用 MyBatis 时,需手动初始化数据库表。若你本地已经安装了 `mysql-client`,且使用本地集群,可直接在 `plain-java-mybatis` 目录下通过 `make prepare` 运行: + +```shell +make prepare +``` + +或直接执行: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +``` + +若你不使用本地集群,或未安装 `mysql-client`,请直接登录你的集群,并运行 `src/main/resources/dbinit.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `mybatis-config.xml` 内关于 `dataSource.url`、`dataSource.username`、`dataSource.password` 的参数: + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将配置文件中 `dataSource` 节点内更改为: + +```xml + + + + + ... + + + + + + + + ... 
+ + +``` + +### 第 3 步第 3 部分:运行 + +你可以分别运行 `make prepare`, `make gen`, `make build` 和 `make run` 以运行此代码: + +```shell +make prepare +# this command executes : +# - `mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql` +# - `mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player"` + +make gen +# this command executes : +# - `rm -f src/main/java/com/pingcap/model/Player.java` +# - `rm -f src/main/java/com/pingcap/model/PlayerMapper.java` +# - `rm -f src/main/resources/mapper/PlayerMapper.xml` +# - `mvn mybatis-generator:generate` + +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar` +``` + +或者你也可以直接使用原生的命令: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player" +rm -f src/main/java/com/pingcap/model/Player.java +rm -f src/main/java/com/pingcap/model/PlayerMapper.java +rm -f src/main/resources/mapper/PlayerMapper.xml +mvn mybatis-generator:generate +mvn clean package +java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar +``` + +再或者直接运行 `make` 命令,这是 `make prepare`, `make gen`, `make build` 和 `make run` 的组合。 + +## 第 4 步:预期输出 + +[MyBatis 预期输出](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-mybatis) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-java-spring-boot.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-java-spring-boot.md new file mode 100644 index 00000000..f823f4e8 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-java-spring-boot.md @@ -0,0 +1,1019 @@ +--- +title: 使用 Spring Boot 构建 TiDB 应用程序 +summary: 给出一个 Spring Boot 构建 TiDB 应用程序示例。 +aliases: 
['/zh/tidb/dev/dev-guide-sample-application-spring-boot', '/zh/tidb/dev/sample-application-spring-boot'] +--- + + + +# 使用 Spring Boot 构建 TiDB 应用程序 + +本教程向你展示如何使用 TiDB 构建 [Spring Boot](https://spring.io/projects/spring-boot) Web 应用程序。使用 [Spring Data JPA](https://spring.io/projects/spring-data-jpa) 模块作为数据访问能力的框架。此示例应用程序的代码仓库可在 [Github](https://github.com/pingcap-inc/tidb-example-java) 下载。 + +这是一个较为完整的构建 Restful API 的示例应用程序,展示了一个使用 **TiDB** 作为数据库的通用 **Spring Boot** 后端服务。设计了以下过程,用于还原一个现实场景: + +这是一个关于游戏的例子,每个玩家有两个属性:金币数 `coins` 和货物数 `goods`。且每个玩家都拥有一个字段 `id`,作为玩家的唯一标识。玩家在金币数和货物数充足的情况下,可以自由的交易。 + +你可以以此示例为基础,构建自己的应用程序。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:安装 JDK + +请在你的计算机上下载并安装 **Java Development Kit** (JDK),这是 Java 开发的必备工具。**Spring Boot** 支持 Java 版本 8 以上的 JDK,由于 **Hibernate** 版本的缘故,推荐使用 Java 版本 11 以上的 JDK。 + +示例应用程序同时支持 **Oracle JDK** 和 **OpenJDK**,请自行选择,本教程将使用版本 17 的 **OpenJDK**。 + +## 第 3 步:安装 Maven + +此示例应用程序使用 **Maven** 来管理应用程序的依赖项。Spring 支持的 **Maven** 版本为 3.2 以上,作为依赖管理软件,推荐使用当前最新稳定版本的 **Maven**。 + +这里给出命令行安装 **Maven** 的办法: + +- macOS 安装: + + {{< copyable "shell-regular" >}} + + ``` + brew install maven + ``` + +- 基于 Debian 的 Linux 发行版上安装(如 Ubuntu 等): + + {{< copyable "shell-regular" >}} + + ``` + apt-get install maven + ``` + +- 基于 Red Hat 的 Linux 发行版上安装(如 Fedora、CentOS 等): + +- dnf 包管理器 + + {{< copyable "shell-regular" >}} + + ``` + dnf install maven + ``` + +- yum 包管理器 + + {{< copyable "shell-regular" >}} + + ``` + yum install maven + ``` + +其他安装方法,请参考 [Maven 官方文档](https://maven.apache.org/install.html)。 + +## 第 4 步:获取应用程序代码 + +> **建议:** +> +> 如果你希望得到一个与本示例相同依赖的空白程序,而无需示例代码,可参考[创建相同依赖空白程序(可选)](#创建相同依赖空白程序可选)一节。 + +请下载或克隆示例代码库 
[pingcap-inc/tidb-example-java](https://github.com/pingcap-inc/tidb-example-java),并进入到目录 `spring-jpa-hibernate` 中。 + +## 第 5 步:运行应用程序 + +接下来运行应用程序代码,将会生成一个 Web 应用程序。Hibernate 将在数据库 `test` 中创建一个表 `player_jpa`。如果你向应用程序的 Restful API 发送请求,这些请求将会在 TiDB 集群上运行[数据库事务](/develop/dev-guide-transaction-overview.md)。 + +如果你想了解有关此应用程序的代码的详细信息,可参阅[实现细节](#实现细节)部分。 + +### 第 5 步第 1 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `application.yml`(位于 `src/main/resources` 内)关于 `spring.datasource.url`、`spring.datasource.username`、`spring.datasource.password` 的参数: + +```yaml +spring: + datasource: + url: jdbc:mysql://localhost:4000/test + username: root + # password: xxx + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将参数更改为: + +```yaml +spring: + datasource: + url: jdbc:mysql://xxx.tidbcloud.com:4000/test?sslMode=VERIFY_IDENTITY&enabledTLSProtocols=TLSv1.2,TLSv1.3 + username: 2aEp24QWEDLqRFs.root + password: 123456 + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +### 第 5 步第 2 部分:运行 + +打开终端,进入 `tidb-example-java/spring-jpa-hibernate` 代码示例目录: + +```shell +cd /tidb-example-java/spring-jpa-hibernate +``` + +#### 使用 Make 构建并运行(推荐) + +```shell +make +``` + +#### 手动构建并运行 + +推荐你使用 Make 方式进行构建并运行,当然,若你希望手动进行构建,请依照以下步骤逐步运行,可以得到相同的结果: + +清除缓存并打包: + +```shell +mvn clean package +``` + +运行应用程序的 JAR 文件: + +```shell +java -jar target/spring-jpa-hibernate-0.0.1.jar +``` + +### 第 5 步第 3 部分:输出 + +输出的最后部分应如下所示: + +``` + . 
____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v3.0.1) + +2023-01-05T14:06:54.427+08:00 INFO 22005 --- [ main] com.pingcap.App : Starting App using Java 17.0.2 with PID 22005 (/Users/cheese/IdeaProjects/tidb-example-java/spring-jpa-hibernate/target/classes started by cheese in /Users/cheese/IdeaProjects/tidb-example-java) +2023-01-05T14:06:54.428+08:00 INFO 22005 --- [ main] com.pingcap.App : No active profile set, falling back to 1 default profile: "default" +2023-01-05T14:06:54.642+08:00 INFO 22005 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Bootstrapping Spring Data JPA repositories in DEFAULT mode. +2023-01-05T14:06:54.662+08:00 INFO 22005 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Finished Spring Data repository scanning in 17 ms. Found 1 JPA repository interfaces. +2023-01-05T14:06:54.830+08:00 INFO 22005 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8080 (http) +2023-01-05T14:06:54.833+08:00 INFO 22005 --- [ main] o.apache.catalina.core.StandardService : Starting service [Tomcat] +2023-01-05T14:06:54.833+08:00 INFO 22005 --- [ main] o.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/10.1.4] +2023-01-05T14:06:54.865+08:00 INFO 22005 --- [ main] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext +2023-01-05T14:06:54.865+08:00 INFO 22005 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 421 ms +2023-01-05T14:06:54.916+08:00 INFO 22005 --- [ main] o.hibernate.jpa.internal.util.LogHelper : HHH000204: Processing PersistenceUnitInfo [name: default] +2023-01-05T14:06:54.929+08:00 INFO 22005 --- [ main] org.hibernate.Version : HHH000412: Hibernate ORM core version 6.1.6.Final 
+2023-01-05T14:06:54.969+08:00 WARN 22005 --- [ main] org.hibernate.orm.deprecation : HHH90000021: Encountered deprecated setting [javax.persistence.sharedCache.mode], use [jakarta.persistence.sharedCache.mode] instead +2023-01-05T14:06:55.005+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Starting... +2023-01-05T14:06:55.074+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.pool.HikariPool : HikariPool-1 - Added connection com.mysql.cj.jdbc.ConnectionImpl@5e905f2c +2023-01-05T14:06:55.075+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Start completed. +2023-01-05T14:06:55.089+08:00 INFO 22005 --- [ main] SQL dialect : HHH000400: Using dialect: org.hibernate.dialect.TiDBDialect +Hibernate: drop table if exists player_jpa +Hibernate: drop sequence player_jpa_id_seq +Hibernate: create sequence player_jpa_id_seq start with 1 increment by 1 +Hibernate: create table player_jpa (id bigint not null, coins integer, goods integer, primary key (id)) engine=InnoDB +2023-01-05T14:06:55.332+08:00 INFO 22005 --- [ main] o.h.e.t.j.p.i.JtaPlatformInitiator : HHH000490: Using JtaPlatform implementation: [org.hibernate.engine.transaction.jta.platform.internal.NoJtaPlatform] +2023-01-05T14:06:55.335+08:00 INFO 22005 --- [ main] j.LocalContainerEntityManagerFactoryBean : Initialized JPA EntityManagerFactory for persistence unit 'default' +2023-01-05T14:06:55.579+08:00 WARN 22005 --- [ main] JpaBaseConfiguration$JpaWebConfiguration : spring.jpa.open-in-view is enabled by default. Therefore, database queries may be performed during view rendering. 
Explicitly configure spring.jpa.open-in-view to disable this warning +2023-01-05T14:06:55.710+08:00 INFO 22005 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http) with context path '' +2023-01-05T14:06:55.714+08:00 INFO 22005 --- [ main] com.pingcap.App : Started App in 1.432 seconds (process running for 1.654) +``` + +输出日志中,提示应用程序在启动过程中做了什么,这里显示应用程序使用 [Tomcat](https://tomcat.apache.org/) 启动了一个 **Servlet**,使用 Hibernate 作为 ORM,[HikariCP](https://github.com/brettwooldridge/HikariCP) 作为数据库连接池的实现,使用了 `org.hibernate.dialect.TiDBDialect` 作为数据库方言。启动后,Hibernate 删除并重新创建了表 `player_jpa`,及序列 `player_jpa_id_seq`。在启动的最后,监听了 8080 端口,对外提供 HTTP 服务。 + +如果你想了解有关此应用程序的代码的详细信息,可参阅本教程下方的[实现细节](#实现细节)。 + +## 第 6 步:HTTP 请求 + +在运行应用程序后,你可以通过访问根地址 `http://localhost:8000` 向后端程序发送 HTTP 请求。下面将给出一些示例请求来演示如何使用该服务。 + + + +
+ +1. 将配置文件 [`Player.postman_collection.json`](https://raw.githubusercontent.com/pingcap-inc/tidb-example-python/main/django_example/Player.postman_collection.json) 导入 [Postman](https://www.postman.com/)。 + +2. 导入后 **Collections** > **Player** 如图所示: + + ![postman import](/media/develop/postman_player_import.png) + +3. 发送请求: + + - 增加玩家 + + 点击 **Create** 标签,点击 **Send** 按钮,发送 `POST` 形式的 `http://localhost:8000/player/` 请求。返回值为增加的玩家个数,预期为 1。 + + ![Postman-Create](/media/develop/postman_player_create.png) + + - 使用 ID 获取玩家信息 + + 点击 **GetByID** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/1` 请求。返回值为 ID 为 1 的玩家信息。 + + ![Postman-GetByID](/media/develop/postman_player_getbyid.png) + + - 使用 Limit 批量获取玩家信息 + + 点击 **GetByLimit** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/limit/3` 请求。返回值为最多 3 个玩家的信息列表。 + + ![Postman-GetByLimit](/media/develop/postman_player_getbylimit.png) + + - 分页获取玩家信息 + + 点击 **GetByPage** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8080/player/page?index=0&size=2` 请求。返回值为 index 为 0 的页,每页有 2 个玩家信息列表。此外,还包含了分页信息,如偏移量、总页数、是否排序等。 + + ![Postman-GetByPage](/media/develop//postman_player_getbypage.png) + + - 获取玩家个数 + + 点击 **Count** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/count` 请求。返回值为玩家个数。 + + ![Postman-Count](/media/develop/postman_player_count.png) + + - 玩家交易 + + 点击 **Trade** 标签,点击 **Send** 按钮,发送 `PUT` 形式的 `http://localhost:8000/player/trade` 请求。请求参数为售卖玩家 ID `sellID`、购买玩家 ID `buyID`、购买货物数量 `amount` 以及购买消耗金币数 `price`。返回值为交易是否成功。当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + + ![Postman-Trade](/media/develop/postman_player_trade.png) + +
+ +
+ +下面使用 curl 请求服务端。 + +- 增加玩家 + + 使用 `POST` 方法向 `/player` 端点发送请求来增加玩家,例如: + + ```shell + curl --location --request POST 'http://localhost:8080/player/' --header 'Content-Type: application/json' --data-raw '[{"coins":100,"goods":20}]' + ``` + + 这里使用 JSON 作为信息的载荷。表示需要创建一个金币数 `coins` 为 100,货物数 `goods` 为 20 的玩家。返回值为创建的玩家信息: + + ```json + 1 + ``` + +- 使用 ID 获取玩家信息 + + 使用 `GET` 方法向 `/player` 端点发送请求来获取玩家信息。此外,还需要在路径上给出玩家的 ID 参数,即 `/player/{id}`。例如,在请求 ID 为 1 的玩家时: + + ```shell + curl --location --request GET 'http://localhost:8080/player/1' + ``` + + 返回值为 ID 为 1 的玩家的信息: + + ```json + { + "coins": 200, + "goods": 10, + "id": 1 + } + ``` + +- 使用 Limit 批量获取玩家信息 + + 使用 `GET` 方法向 `/player/limit` 端点发送请求来获取玩家信息。此外,还需要在路径上给出限制查询的玩家信息的总数,即 `/player/limit/{limit}`。例如,在请求最多 3 个玩家的信息时: + + ```shell + curl --location --request GET 'http://localhost:8080/player/limit/3' + ``` + + 返回值为玩家信息的列表: + + ```json + [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + }, + { + "coins": 100, + "goods": 20, + "id": 3 + } + ] + ``` + +- 分页获取玩家信息 + + 使用 `GET` 方法向 `/player/page` 端点发送请求来分页获取玩家信息。额外地需要使用 URL 参数,例如在请求页面序号 `index` 为 0,每页最大请求量 `size` 为 2 时: + + ```shell + curl --location --request GET 'http://localhost:8080/player/page?index=0&size=2' + ``` + + 返回值为 `index` 为 0 的页,每页有 2 个玩家信息列表。此外,还包含了分页信息,如偏移量、总页数、是否排序等。 + + ```json + { + "content": [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + } + ], + "empty": false, + "first": true, + "last": false, + "number": 0, + "numberOfElements": 2, + "pageable": { + "offset": 0, + "pageNumber": 0, + "pageSize": 2, + "paged": true, + "sort": { + "empty": true, + "sorted": false, + "unsorted": true + }, + "unpaged": false + }, + "size": 2, + "sort": { + "empty": true, + "sorted": false, + "unsorted": true + }, + "totalElements": 4, + "totalPages": 2 + } + ``` + +- 获取玩家个数 + + 使用 `GET` 方法向 `/player/count` 端点发送请求来获取玩家个数: + + ```shell + curl --location --request GET 
'http://localhost:8080/player/count' + ``` + + 返回值为玩家个数: + + ```json + 4 + ``` + +- 玩家交易 + + 使用 `PUT` 方法向 `/player/trade` 端点发送请求来发起玩家间的交易,例如: + + ```shell + curl --location --request PUT 'http://localhost:8080/player/trade' \ + --header 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'sellID=1' \ + --data-urlencode 'buyID=2' \ + --data-urlencode 'amount=10' \ + --data-urlencode 'price=100' + ``` + + 这里使用 Form Data 作为信息的载荷。表示售卖玩家 ID `sellID` 为 1、购买玩家 ID `buyID` 为 2、购买货物数量 `amount` 为 10、购买消耗金币数 `price` 为 100。 + + 返回值为交易是否成功: + + ``` + true + ``` + + 当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + +
+ +
+ +为方便测试,你可以使用 [`request.sh`](https://github.com/pingcap-inc/tidb-example-java/blob/main/spring-jpa-hibernate/request.sh) 脚本依次发送以下请求: + +1. 循环创建 10 名玩家 +2. 获取 ID 为 1 的玩家信息 +3. 获取至多 3 名玩家信息列表 +4. 获取 `index` 为 0,`size` 为 2 的一页玩家信息 +5. 获取玩家总数 +6. ID 为 1 的玩家作为售出方,ID 为 2 的玩家作为购买方,购买 10 个货物,耗费 100 金币 + +使用 `make request` 或 `./request.sh` 命令运行此脚本,运行结果如下所示: + +```shell +> make request +./request.sh +loop to create 10 players: +1111111111 + +get player 1: +{"id":1,"coins":200,"goods":10} + +get players by limit 3: +[{"id":1,"coins":200,"goods":10},{"id":2,"coins":0,"goods":30},{"id":3,"coins":100,"goods":20}] + +get first players: +{"content":[{"id":1,"coins":200,"goods":10},{"id":2,"coins":0,"goods":30}],"pageable":{"sort":{"empty":true,"unsorted":true,"sorted":false},"offset":0,"pageNumber":0,"pageSize":2,"paged":true,"unpaged":false},"last":false,"totalPages":7,"totalElements":14,"first":true,"size":2,"number":0,"sort":{"empty":true,"unsorted":true,"sorted":false},"numberOfElements":2,"empty":false} + +get players count: +14 + +trade by two players: +false +``` + +
+ +
+ +## 实现细节 + +本小节介绍示例应用程序项目中的组件。 + +### 总览 + +本示例项目的大致目录树如下所示(删除了有碍理解的部分): + +``` +. +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ ├── App.java + │ ├── controller + │ │ └── PlayerController.java + │ ├── dao + │ │ ├── PlayerBean.java + │ │ └── PlayerRepository.java + │ └── service + │ ├── PlayerService.java + │ └── impl + │ └── PlayerServiceImpl.java + └── resources + └── application.yml +``` + +其中: + +- `pom.xml` 内声明了项目的 Maven 配置,如依赖,打包等 +- `application.yml` 内声明了项目的用户配置,如数据库地址、密码、使用的数据库方言等 +- `App.java` 是项目的入口 +- `controller` 是项目对外暴露 HTTP 接口的包 +- `service` 是项目实现接口与逻辑的包 +- `dao` 是项目实现与数据库连接并完成数据持久化的包 + +### 配置 + +本节将简要介绍 `pom.xml` 文件中的 Maven 配置,及 `application.yml` 文件中的用户配置。 + +#### Maven 配置 + +`pom.xml` 文件为 Maven 配置,在文件内声明了项目的 Maven 依赖,打包方法,打包信息等,你可以通过[创建相同依赖空白程序](#创建相同依赖空白程序可选) 这一节来复刻此配置文件的生成流程,当然,也可直接复制至你的项目来使用。 + +```xml + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 3.0.1 + + + + com.pingcap + spring-jpa-hibernate + 0.0.1 + spring-jpa-hibernate + an example for spring boot, jpa, hibernate and TiDB + + + 17 + 17 + 17 + + + + + org.springframework.boot + spring-boot-starter-data-jpa + + + + org.springframework.boot + spring-boot-starter-web + + + + mysql + mysql-connector-java + runtime + + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + +``` + +#### 用户配置 + +`application.yml` 此配置文件声明了用户配置,如数据库地址、密码、使用的数据库方言等。 + +```yaml +spring: + datasource: + url: jdbc:mysql://localhost:4000/test + username: root + # password: xxx + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +此配置格式为 [YAML](https://yaml.org/) 格式。其中: + +- `spring.datasource.url`:数据库连接的 URL。 +- `spring.datasource.url`:数据库用户名。 +- `spring.datasource.password`:数据库密码,此项为空,需注释或删除。 +- `spring.datasource.driver-class-name`:数据库驱动,因为 TiDB 与 MySQL 兼容,则此处使用与 
mysql-connector-java 适配的驱动类 `com.mysql.cj.jdbc.Driver`。 +- `jpa.show-sql`:为 true 时将打印 JPA 运行的 SQL。 +- `jpa.database-platform`:选用的数据库方言,此处连接了 TiDB,自然选择 TiDB 方言,注意,此方言在 6.0.0.Beta2 版本后的 Hibernate 中才可选择,请注意依赖版本。 +- `jpa.hibernate.ddl-auto`:此处选择的 create-drop 将会在程序开始时创建表,退出时删除表。请勿在正式环境使用,但此处为示例程序,希望尽量不影响数据库数据,因此选择了此选项。 + +### 入口文件 + +入口文件 `App.java`: + +```java +package com.pingcap; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.context.ApplicationPidFileWriter; + +@SpringBootApplication +public class App { + public static void main(String[] args) { + SpringApplication springApplication = new SpringApplication(App.class); + springApplication.addListeners(new ApplicationPidFileWriter("spring-jpa-hibernate.pid")); + springApplication.run(args); + } +} +``` + +入口类比较简单,首先,有一个 Spring Boot 应用程序的标准配置注解 [@SpringBootApplication](https://docs.spring.io/spring-boot/docs/current/api/org/springframework/boot/autoconfigure/SpringBootApplication.html)。有关详细信息,请参阅 Spring Boot 官方文档中的 [Using the @SpringBootApplication Annotation](https://docs.spring.io/spring-boot/docs/current/reference/html/using-spring-boot.html#using-boot-using-springbootapplication-annotation)。随后,使用 `ApplicationPidFileWriter` 在程序启动过程中,写下一个名为 `spring-jpa-hibernate.pid` 的 PID (process identification number) 文件,可从外部使用此 PID 文件关闭此应用程序。 + +### 数据库持久层 + +数据库持久层,即 `dao` 包内,实现了数据对象的持久化。 + +#### 实体对象 + +`PlayerBean.java` 文件为实体对象,这个对象对应了数据库的一张表。 + +```java +package com.pingcap.dao; + +import jakarta.persistence.*; + +/** + * it's core entity in hibernate + * @Table appoint to table name + */ +@Entity +@Table(name = "player_jpa") +public class PlayerBean { + /** + * @ID primary key + * @GeneratedValue generated way. 
this field will use generator named "player_id" + * @SequenceGenerator using `sequence` feature to create a generator, + * and it named "player_jpa_id_seq" in database, initial form 1 (by `initialValue` + * parameter default), and every operator will increase 1 (by `allocationSize`) + */ + @Id + @GeneratedValue(generator="player_id") + @SequenceGenerator(name="player_id", sequenceName="player_jpa_id_seq", allocationSize=1) + private Long id; + + /** + * @Column field + */ + @Column(name = "coins") + private Integer coins; + @Column(name = "goods") + private Integer goods; + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } +} +``` + +这里可以看到,实体类中有很多注解,这些注解给了 Hibernate 额外的信息,用以绑定实体类和表: + +- `@Entity` 声明 `PlayerBean` 是一个实体类。 +- `@Table` 使用注解属性 `name` 将此实体类和表 `player_jpa` 关联。 +- `@Id` 声明此属性关联表的主键列。 +- `@GeneratedValue` 表示自动生成该列的值,而不应手动设置,使用属性 `generator` 指定生成器的名称为 `player_id`。 +- `@SequenceGenerator` 声明一个使用[序列](/sql-statements/sql-statement-create-sequence.md)的生成器,使用注解属性 `name` 声明生成器的名称为 `player_id` (与 `@GeneratedValue` 中指定的名称需保持一致)。随后使用注解属性 `sequenceName` 指定数据库中序列的名称。最后,使用注解属性 `allocationSize` 声明序列的步长为 1。 +- `@Column` 将每个私有属性声明为表 `player_jpa` 的一列,使用注解属性 `name` 确定属性对应的列名。 + +#### 存储库 + +为了抽象数据库层,Spring 应用程序使用 [Repository](https://docs.spring.io/spring-data/jpa/docs/current/reference/html/#repositories) 接口,或者 Repository 的子接口。 这个接口映射到一个数据库对象,常见的,比如会映射到一个表上。JPA 会实现一些预制的方法,比如 [INSERT](/sql-statements/sql-statement-insert.md),或使用主键的 [SELECT](/sql-statements/sql-statement-select.md) 等。 + +```java +package com.pingcap.dao; + +import jakarta.persistence.LockModeType; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.Pageable; +import 
org.springframework.data.jpa.repository.JpaRepository;
+import org.springframework.data.jpa.repository.Lock;
+import org.springframework.data.jpa.repository.Query;
+import org.springframework.data.repository.query.Param;
+import org.springframework.stereotype.Repository;
+
+import java.util.List;
+
+@Repository
+public interface PlayerRepository extends JpaRepository<PlayerBean, Long> {
+    /**
+     * use HQL to query by page
+     * @param pageable a pageable parameter required by hibernate
+     * @return player list package by page message
+     */
+    @Query(value = "SELECT player_jpa FROM PlayerBean player_jpa")
+    Page<PlayerBean> getPlayersByPage(Pageable pageable);
+
+    /**
+     * use SQL to query by limit, using named parameter
+     * @param limit sql parameter
+     * @return player list (max size by limit)
+     */
+    @Query(value = "SELECT * FROM player_jpa LIMIT :limit", nativeQuery = true)
+    List<PlayerBean> getPlayersByLimit(@Param("limit") Integer limit);
+
+    /**
+     * query player and add a lock for update
+     * @param id player id
+     * @return player
+     */
+    @Lock(value = LockModeType.PESSIMISTIC_WRITE)
+    @Query(value = "SELECT player FROM PlayerBean player WHERE player.id = :id")
+    // @Query(value = "SELECT * FROM player_jpa WHERE id = :id FOR UPDATE", nativeQuery = true)
+    PlayerBean getPlayerAndLock(@Param("id") Long id);
+}
+```
+
+`PlayerRepository` 拓展了 Spring 用于 JPA 数据访问所使用的接口 `JpaRepository`。使用 `@Query` 注解,告诉 Hibernate 此接口如何实现查询。在此处使用了两种查询语句的语法,其中,在接口 `getPlayersByPage` 中的查询语句使用的是一种被 Hibernate 称为 [HQL](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#hql) (Hibernate Query Language) 的语法。而接口 `getPlayersByLimit` 中使用的是普通的 SQL,在使用 SQL 语法时,需要将 `@Query` 的注解参数 `nativeQuery` 设置为 true。
+
+在 `getPlayersByLimit` 注解的 SQL 中,`:limit` 在 Hibernate 中被称为[命名参数](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#jpql-query-parameters),Hibernate 将按名称自动寻找并拼接注解所在接口内的参数。你也可以使用 `@Param` 来指定与参数不同的名称用于注入。
+
+在 `getPlayerAndLock` 中,使用了一个注解 
[@Lock](https://docs.spring.io/spring-data/jpa/docs/current/api/org/springframework/data/jpa/repository/Lock.html),此注解声明此处使用悲观锁进行锁定,如需了解更多其他锁定方式,可查看[实体锁定](https://openjpa.apache.org/builds/2.2.2/apache-openjpa/docs/jpa_overview_em_locking.html)文档。此处的 `@Lock` 仅可与 HQL 搭配使用,否则将会产生错误。当然,如果你希望直接使用 SQL 进行锁定,可直接使用注释部分的注解: + +```java +@Query(value = "SELECT * FROM player_jpa WHERE id = :id FOR UPDATE", nativeQuery = true) +``` + +直接使用 SQL 的 `FOR UPDATE` 来增加锁。你也可通过 TiDB [SELECT 文档](/sql-statements/sql-statement-select.md) 进行更深层次的原理学习。 + +### 逻辑实现 + +逻辑实现层,即 `service` 包,内含了项目实现的接口与逻辑 + +#### 接口 + +`PlayerService.java` 文件内定义了逻辑接口,实现接口,而不是直接编写一个类的原因,是尽量使例子贴近实际使用,体现设计的开闭原则。你也可以省略掉此接口,在依赖类中直接注入实现类,但并不推荐这样做。 + +```java +package com.pingcap.service; + +import com.pingcap.dao.PlayerBean; +import org.springframework.data.domain.Page; + +import java.util.List; + +public interface PlayerService { + /** + * create players by passing in a List of PlayerBean + * + * @param players will create players list + * @return The number of create accounts + */ + Integer createPlayers(List players); + + /** + * buy goods and transfer funds between one player and another in one transaction + * @param sellId sell player id + * @param buyId buy player id + * @param amount goods amount, if sell player has not enough goods, the trade will break + * @param price price should pay, if buy player has not enough coins, the trade will break + */ + void buyGoods(Long sellId, Long buyId, Integer amount, Integer price) throws RuntimeException; + + /** + * get the player info by id. + * + * @param id player id + * @return the player of this id + */ + PlayerBean getPlayerByID(Long id); + + /** + * get a subset of players from the data store by limit. + * + * @param limit return max size + * @return player list + */ + List getPlayers(Integer limit); + + /** + * get a page of players from the data store. 
+ * + * @param index page index + * @param size page size + * @return player list + */ + Page getPlayersByPage(Integer index, Integer size); + + /** + * count players from the data store. + * + * @return all players count + */ + Long countPlayers(); +} +``` + +#### 实现(重要) + +`PlayerService.java` 文件内实现了 `PlayerService` 接口,所有数据操作逻辑都编写在这里。 + +```java +package com.pingcap.service.impl; + +import com.pingcap.dao.PlayerBean; +import com.pingcap.dao.PlayerRepository; +import com.pingcap.service.PlayerService; +import jakarta.transaction.Transactional; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.PageRequest; +import org.springframework.stereotype.Service; + +import java.util.List; + +/** + * PlayerServiceImpl implements PlayerService interface + * @Transactional it means every method in this class, will package by a pair of + * transaction.begin() and transaction.commit(). and it will be call + * transaction.rollback() when method throw an exception + */ +@Service +@Transactional +public class PlayerServiceImpl implements PlayerService { + @Autowired + private PlayerRepository playerRepository; + + @Override + public Integer createPlayers(List players) { + return playerRepository.saveAll(players).size(); + } + + @Override + public void buyGoods(Long sellId, Long buyId, Integer amount, Integer price) throws RuntimeException { + PlayerBean buyPlayer = playerRepository.getPlayerAndLock(buyId); + PlayerBean sellPlayer = playerRepository.getPlayerAndLock(sellId); + if (buyPlayer == null || sellPlayer == null) { + throw new RuntimeException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new RuntimeException("coins or goods not enough, rollback"); + } + + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + playerRepository.save(buyPlayer); + + 
sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + playerRepository.save(sellPlayer); + } + + @Override + public PlayerBean getPlayerByID(Long id) { + return playerRepository.findById(id).orElse(null); + } + + @Override + public List getPlayers(Integer limit) { + return playerRepository.getPlayersByLimit(limit); + } + + @Override + public Page getPlayersByPage(Integer index, Integer size) { + return playerRepository.getPlayersByPage(PageRequest.of(index, size)); + } + + @Override + public Long countPlayers() { + return playerRepository.count(); + } +} +``` + +这里使用了 `@Service` 这个注解,声明此对象的生命周期交由 Spring 管理。 + +注意,除了有 `@Service` 注解之外,PlayerServiceImpl 实现类还有一个 [@Transactional](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#transaction-declarative-annotations) 注解。当在应用程序中启用事务管理时 (可使用 [@EnableTransactionManagement](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/transaction/annotation/EnableTransactionManagement.html) 打开,但 Spring Boot 默认开启,无需再次手动配置),Spring 会自动将所有带有 `@Transactional` 注释的对象包装在一个代理中,使用该代理对对象的调用进行处理。 + +你可以简单的认为,代理在带有 `@Transactional` 注释的对象内的函数调用时:在函数顶部将使用 `transaction.begin()` 开启事务,函数返回后,调用 `transaction.commit()` 进行事务提交,而出现任何运行时错误时,代理将会调用 `transaction.rollback()` 来回滚。 + +你可参阅[数据库事务](/develop/dev-guide-transaction-overview.md)来获取更多有关事务的信息,或者阅读 Spring 官网中的文章[理解 Spring 框架的声明式事务实现](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#tx-decl-explained)。 + +整个实现类中,`buyGoods` 函数需重点关注,其在不符合逻辑时将抛出异常,引导 Hibernate 进行事务回滚,防止出现错误数据。 + +### 外部接口 + +`controller` 包对外暴露 HTTP 接口,可以通过 [REST API](https://www.redhat.com/en/topics/api/what-is-a-rest-api#) 来访问服务。 + +```java +package com.pingcap.controller; + +import com.pingcap.dao.PlayerBean; +import com.pingcap.service.PlayerService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.domain.Page; +import 
org.springframework.lang.NonNull;
+import org.springframework.web.bind.annotation.*;
+
+import java.util.List;
+
+@RestController
+@RequestMapping("/player")
+public class PlayerController {
+    @Autowired
+    private PlayerService playerService;
+
+    @PostMapping
+    public Integer createPlayer(@RequestBody @NonNull List<PlayerBean> playerList) {
+        return playerService.createPlayers(playerList);
+    }
+
+    @GetMapping("/{id}")
+    public PlayerBean getPlayerByID(@PathVariable Long id) {
+        return playerService.getPlayerByID(id);
+    }
+
+    @GetMapping("/limit/{limit_size}")
+    public List<PlayerBean> getPlayerByLimit(@PathVariable("limit_size") Integer limit) {
+        return playerService.getPlayers(limit);
+    }
+
+    @GetMapping("/page")
+    public Page<PlayerBean> getPlayerByPage(@RequestParam Integer index, @RequestParam("size") Integer size) {
+        return playerService.getPlayersByPage(index, size);
+    }
+
+    @GetMapping("/count")
+    public Long getPlayersCount() {
+        return playerService.countPlayers();
+    }
+
+    @PutMapping("/trade")
+    public Boolean trade(@RequestParam Long sellID, @RequestParam Long buyID, @RequestParam Integer amount, @RequestParam Integer price) {
+        try {
+            playerService.buyGoods(sellID, buyID, amount, price);
+        } catch (RuntimeException e) {
+            return false;
+        }
+
+        return true;
+    }
+}
+```
+
+`PlayerController` 中使用了尽可能多的注解方式来作为示例展示功能,在实际项目中,请尽量保持风格的统一,同时遵循你公司或团体的规则。`PlayerController` 有许多注解,下方将进行逐一解释:
+
+- [@RestController](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RestController.html) 将 `PlayerController` 声明为一个 [Web Controller](https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93controller),且将返回值序列化为 JSON 输出。
+- [@RequestMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RequestMapping.html) 映射 URL 端点为 `/player`,即此 `Web Controller` 仅监听 `/player` URL 下的请求。
+- `@Autowired` 用于 Spring 的自动装配,可以看到,此处声明需要一个 `PlayerService` 对象,此对象为接口,并未指定使用哪一个实现类,这是由 Spring 自动装配的,有关此装配规则,可查看 Spring 官网中的 
[The IoC container](https://docs.spring.io/spring-framework/docs/3.2.x/spring-framework-reference/html/beans.html) 一文。 +- [@PostMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PostMapping.html) 声明此函数将响应 HTTP 中的 [POST](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/POST) 类型请求。 + - `@RequestBody` 声明此处将 HTTP 的整个载荷解析到参数 `playerList` 中。 + - `@NonNull` 声明参数不可为空,否则将校验并返回错误。 +- [@GetMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/GetMapping.html) 声明此函数将响应 HTTP 中的 [GET](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/GET) 类型请求。 + - [@PathVariable](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PathVariable.html) 可以看到注解中有形如 `{id}` 、`{limit_size}` 这样的占位符,这种占位符将被绑定到 `@PathVariable` 注释的变量中,绑定的依据是注解中的注解属性 `name`(变量名可省略,即 `@PathVariable(name="limit_size")` 可写成 `@PathVariable("limit_size")` ),不特殊指定时,与变量名名称相同。 +- [@PutMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PutMapping.html) 声明此函数将响应 HTTP 中的 [PUT](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/PUT) 类型请求。 +- [@RequestParam](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RequestParam.html) 此声明将解析请求中的 URL 参数、表单参数等参数,绑定至注解的变量中。 + +## 创建相同依赖空白程序(可选) + +本程序使用 [Spring Initializr](https://start.spring.io/) 构建。你可以在这个网页上通过点选以下选项并更改少量配置,来快速得到一个与本示例程序相同依赖的空白应用程序,配置项如下: + +**Project** + +- Maven Project + +**Language** + +- Java + +**Spring Boot** + +- 最新稳定版本 + +**Project Metadata** + +- Group: com.pingcap +- Artifact: spring-jpa-hibernate +- Name: spring-jpa-hibernate +- Package name: com.pingcap +- Packaging: Jar +- Java: 17 + +**Dependencies** + +- Spring Web +- Spring Data JPA +- MySQL Driver + +> **注意:** +> +> 尽管 SQL 相对标准化,但每个数据库供应商都使用 ANSI SQL 定义语法的子集和超集。这被称为数据库的方言。 Hibernate 通过其 
org.hibernate.dialect.Dialect 类和每个数据库供应商的各种子类来处理这些方言的变化。 +> +> 在大多数情况下,Hibernate 将能够通过在启动期间通过 JDBC 连接的一些返回值来确定要使用的正确方言。有关 Hibernate 确定要使用的正确方言的能力(以及你影响该解析的能力)的信息,请参阅[方言解析](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#portability-dialectresolver)。 +> +> 如果由于某种原因无法确定正确的方言,或者你想使用自定义方言,则需要设置 hibernate.dialect 配置项。 +> +> _—— 节选自 Hibernate 官方文档: [Database Dialect](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#database-dialect)_ + +随后,即可获取一个拥有与示例程序相同依赖的空白 **Spring Boot** 应用程序。 \ No newline at end of file diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-django.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-django.md new file mode 100644 index 00000000..80e143b7 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-django.md @@ -0,0 +1,783 @@ +--- +title: 使用 Django 构建 TiDB 应用程序 +summary: 给出一个 Django 构建 TiDB 应用程序示例。 +aliases: ['/zh/tidb/dev/dev-guide-sample-application-django'] +--- + + + +# 使用 Django 构建 TiDB 应用程序 + +本文档将展示如何使用 [Django](https://www.djangoproject.com/) 构建一个 TiDB Web 应用程序。使用 [django-tidb](https://github.com/pingcap/django-tidb) 模块作为数据访问能力的框架。示例应用程序的代码可从 [Github](https://github.com/pingcap-inc/tidb-example-python) 下载。 + +这是一个较为完整的构建 Restful API 的示例应用程序,展示了一个使用 TiDB 作为数据库的通用 Django 后端服务。该示例设计了以下过程,用于还原一个现实场景: + +这是一个关于游戏的例子,每个玩家有两个属性:金币数 `coins` 和货物数 `goods`。且每个玩家都拥有一个字段 `id`,作为玩家的唯一标识。玩家在金币数和货物数充足的情况下,可以自由地交易。 + +你可以以此示例为基础,构建自己的应用程序。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:安装 Python + 
+请在你的计算机上下载并安装 **Python**。本文的示例使用 [Django 3.2.16](https://docs.djangoproject.com/zh-hans/3.2/) 版本。根据 [Django 文档](https://docs.djangoproject.com/zh-hans/3.2/faq/install/#what-python-version-can-i-use-with-django),Django 3.2.16 版本支持 Python 3.6、3.7、3.8、3.9 和 3.10 版本,推荐使用 Python 3.10 版本。 + +## 第 3 步:获取应用程序代码 + +> **建议:** +> +> 如果你希望得到一个与本示例相同依赖的空白程序,而无需示例代码,可参考[创建相同依赖空白程序(可选)](#创建相同依赖空白程序可选)一节。 + +请下载或克隆示例代码库 [pingcap-inc/tidb-example-python](https://github.com/pingcap-inc/tidb-example-python),并进入到目录 `django_example` 中。 + +## 第 4 步:运行应用程序 + +接下来运行应用程序代码,将会生成一个 Web 应用程序。你可以使用 `python manage.py migrate` 命令,要求 Django 在数据库 `django` 中创建一个表 `player`。如果你向应用程序的 Restful API 发送请求,这些请求将会在 TiDB 集群上运行[数据库事务](/develop/dev-guide-transaction-overview.md)。 + +如果你想了解有关此应用程序的代码的详细信息,可参阅[实现细节](#实现细节)部分。 + +### 第 4 步第 1 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +更改 `example_project/settings.py` 中的 `DATABASES` 参数: + +```python +DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': 'root', + 'PASSWORD': '', + 'HOST': '127.0.0.1', + 'PORT': 4000, + }, +} +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +下面以 macOS 为例,应将参数更改为: + +```python +DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': '2aEp24QWEDLqRFs.root', + 'PASSWORD': '123456', + 'HOST': 'xxx.tidbcloud.com', + 'PORT': 4000, + 'OPTIONS': { + 'ssl': { + "ca": "" + }, + }, + }, +} +``` + +### 第 4 步第 2 部分:运行 + +1. 打开终端,进入 `tidb-example-python` 代码示例目录: + + ```bash + cd /tidb-example-python + ``` + +2. 安装项目依赖并进入 `django_example` 目录: + + ```bash + pip install -r requirement.txt + cd django_example + ``` + +3. 
运行数据模型迁移: + + > **注意:** + > + > - 此步骤假定已经存在 `django` 数据库。 + > - 若未创建 `django` 数据库,可通过 `CREATE DATABASE django` 语句进行创建。关于创建数据库语句的详细信息,参考 [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md#create-database)。 + > - 数据库名称 `NAME` 可在 `example_project/settings.py` 的 `DATABASES` 属性中更改。 + + 这将在你连接的数据库内生成 Django 所需的相应数据表。 + + ```bash + python manage.py migrate + ``` + +4. 运行应用程序: + + ```bash + python manage.py runserver + ``` + +### 第 4 步第 3 部分:输出 + +输出的最后部分应如下所示: + +``` +Watching for file changes with StatReloader +Performing system checks... + +System check identified no issues (0 silenced). +December 12, 2022 - 08:21:50 +Django version 3.2.16, using settings 'example_project.settings' +Starting development server at http://127.0.0.1:8000/ +Quit the server with CONTROL-C. +``` + +如果你想了解有关此应用程序的代码的详细信息,可参阅[实现细节](#实现细节)部分。 + +## 第 5 步:HTTP 请求 + +在运行应用程序后,你可以通过访问根地址 `http://localhost:8000` 向后端程序发送 HTTP 请求。下面将给出一些示例请求来演示如何使用该服务。 + + + +
+ +1. 将配置文件 [`Player.postman_collection.json`](https://raw.githubusercontent.com/pingcap-inc/tidb-example-python/main/django_example/Player.postman_collection.json) 导入 [Postman](https://www.postman.com/)。 + +2. 导入后 **Collections** > **Player** 如图所示: + + ![postman import](/media/develop/postman_player_import.png) + +3. 发送请求: + + - 增加玩家 + + 点击 **Create** 标签,点击 **Send** 按钮,发送 `POST` 形式的 `http://localhost:8000/player/` 请求。返回值为增加的玩家个数,预期为 1。 + + - 使用 ID 获取玩家信息 + + 点击 **GetByID** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/1` 请求。返回值为 ID 为 1 的玩家信息。 + + - 使用 Limit 批量获取玩家信息 + + 点击 **GetByLimit** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/limit/3` 请求。返回值为最多 3 个玩家的信息列表。 + + - 获取玩家个数 + + 点击 **Count** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/count` 请求。返回值为玩家个数。 + + - 玩家交易 + + 点击 **Trade** 标签,点击 **Send** 按钮,发送 `POST` 形式的 `http://localhost:8000/player/trade` 请求。请求参数为售卖玩家 ID `sellID`、购买玩家 ID `buyID`、购买货物数量 `amount` 以及购买消耗金币数 `price`。返回值为交易是否成功。当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + +
+ +
+ +下面使用 curl 请求服务端。 + +- 增加玩家 + + 使用 `POST` 方法向 `/player` 端点发送请求来增加玩家,例如: + + ```shell + curl --location --request POST 'http://localhost:8000/player/' --header 'Content-Type: application/json' --data-raw '[{"coins":100,"goods":20}]' + ``` + + 这里使用 JSON 作为信息的载荷。表示需要创建一个金币数 `coins` 为 100,货物数 `goods` 为 20 的玩家。返回值为创建的玩家信息: + + ``` + create 1 players. + ``` + +- 使用 ID 获取玩家信息 + + 使用 `GET` 方法向 `/player` 端点发送请求来获取玩家信息。此外,还需要在路径上给出玩家的 ID 参数,即 `/player/{id}`。例如,在请求 ID 为 1 的玩家时: + + ```shell + curl --location --request GET 'http://localhost:8000/player/1' + ``` + + 返回值为 ID 为 1 的玩家的信息: + + ```json + { + "coins": 200, + "goods": 10, + "id": 1 + } + ``` + +- 使用 Limit 批量获取玩家信息 + + 使用 `GET` 方法向 `/player/limit` 端点发送请求来获取玩家信息。此外,还需要在路径上给出限制查询的玩家信息的总数,即 `/player/limit/{limit}`。例如,在请求最多 3 个玩家的信息时: + + ```shell + curl --location --request GET 'http://localhost:8000/player/limit/3' + ``` + + 返回值为玩家信息的列表: + + ```json + [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + }, + { + "coins": 100, + "goods": 20, + "id": 3 + } + ] + ``` + +- 获取玩家个数 + + 使用 `GET` 方法向 `/player/count` 端点发送请求来获取玩家个数: + + ```shell + curl --location --request GET 'http://localhost:8000/player/count' + ``` + + 返回值为玩家个数: + + ``` + 4 + ``` + +- 玩家交易 + + 使用 `POST` 方法向 `/player/trade` 端点发送请求来发起玩家间的交易,例如: + + ```shell + curl --location --request POST 'http://localhost:8000/player/trade' \ + --header 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'sellID=1' \ + --data-urlencode 'buyID=2' \ + --data-urlencode 'amount=10' \ + --data-urlencode 'price=100' + ``` + + 这里使用 Form Data 作为信息的载荷。表示售卖玩家 ID `sellID` 为 1、购买玩家 ID `buyID` 为 2、购买货物数量 `amount` 为 10、购买消耗金币数 `price` 为 100。 + + 返回值为交易是否成功: + + ``` + true + ``` + + 当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + +
+ +
+ +为方便测试,你可以使用 [`request.sh`](https://github.com/pingcap-inc/tidb-example-python/blob/main/django_example/request.sh) 脚本依次发送以下请求: + +1. 循环创建 10 名玩家 +2. 获取 ID 为 1 的玩家信息 +3. 获取至多 3 名玩家信息列表 +4. 获取玩家总数 +5. ID 为 1 的玩家作为售出方,ID 为 2 的玩家作为购买方,购买 10 个货物,耗费 100 金币 + +使用 `./request.sh` 命令运行此脚本,运行结果如下所示: + +```shell +> ./request.sh +loop to create 10 players: +create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players. + +get player 1: +{"id": 1, "coins": 100, "goods": 20} + +get players by limit 3: +[{"id": 1, "coins": 100, "goods": 20}, {"id": 2, "coins": 100, "goods": 20}, {"id": 3, "coins": 100, "goods": 20}] + +get players count: +10 + +trade by two players: +trade successful +``` + +
+ +
+ +## 实现细节 + +本小节介绍示例应用程序项目中的组件。 + +### 总览 + +本示例项目的目录树大致如下所示: + +``` +. +├── example_project +│ ├── __init__.py +│ ├── asgi.py +│ ├── settings.py +│ ├── urls.py +│ └── wsgi.py +├── player +│ ├── __init__.py +│ ├── admin.py +│ ├── apps.py +│ ├── migrations +│ │ ├── 0001_initial.py +│ │ └── __init__.py +│ ├── models.py +│ ├── tests.py +│ ├── urls.py +│ └── views.py +└── manage.py +``` + +其中: + +- 每一个文件夹中的 `__init__.py` 文件声明了该文件夹是一个 Python 包。 +- `manage.py` 为 Django 自动生成的用于管理项目的脚本。 +- `example_project` 包含项目级别的代码: + + - `settings.py` 声明了项目的配置,如数据库地址、密码、使用的数据库方言等。 + - `urls.py` 配置了项目的根路由。 + +- `player` 是项目中提供对 `Player` 数据模型管理、数据查询的包,这在 Django 中被称为应用。你可以使用 `python manage.py startapp player` 来创建一个空白的 `player` 应用。 + + - `models.py` 定义了 `Player` 数据模型。 + - `migrations` 是一组数据模型迁移脚本。你可以使用 `python manage.py makemigrations player` 命令自动分析 `models.py` 文件中定义的数据对象,并生成迁移脚本。 + - `urls.py` 定义了应用的路由。 + - `views.py` 提供了应用的逻辑代码。 + +> **注意:** +> +> 由于 Django 的设计采用了可插拔模式,因此,你需要在创建应用后,在项目中进行注册。在本示例中,注册过程就是在 `example_project/settings.py` 文件中,在 `INSTALLED_APPS` 对象内添加 `'player.apps.PlayerConfig'` 条目。你可以参考示例代码 [`settings.py`](https://github.com/pingcap-inc/tidb-example-python/blob/main/django_example/example_project/settings.py#L33-L41) 以获得更多信息。 + +### 项目配置 + +本节将简要介绍 `example_project` 包内 `settings.py` 的重要配置。这个文件包含了 Django 项目的配置,声明了项目包含的应用、中间件、连接的数据库等信息。你可以通过[创建相同依赖空白程序](#创建相同依赖空白程序可选)这一节来了解此配置文件的生成流程,也可直接在项目中使用 `settings.py` 文件。关于 Django 配置的更多信息,参考 [Django 配置](https://docs.djangoproject.com/zh-hans/3.2/topics/settings/)文档。 + +```python +... 
+ +# Application definition + +INSTALLED_APPS = [ + 'player.apps.PlayerConfig', + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + # 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +... + +# Database +# https://docs.djangoproject.com/en/3.2/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': 'root', + 'PASSWORD': '', + 'HOST': '127.0.0.1', + 'PORT': 4000, + }, +} +DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' + +... +``` + +其中: + +- `INSTALLED_APPS`:启用的应用全限定名称列表。 +- `MIDDLEWARE`:启用的中间件列表。由于本示例无需 `CsrfViewMiddleware` 中间件,因此其被注释。 +- `DATABASES`:数据库配置。其中,`ENGINE` 一项被配置为 `django_tidb`,这遵循了 [django-tidb](https://github.com/pingcap/django-tidb) 的配置要求。 + +### 根路由 + +在 `example_project` 包中的 `urls.py` 文件中编写了根路由: + +```python +from django.contrib import admin +from django.urls import include, path + +urlpatterns = [ + path('player/', include('player.urls')), + path('admin/', admin.site.urls), +] +``` + +在上面的示例中,根路由将 `player/` 路径指向 `player.urls`。即,`player` 包下的 `urls.py` 将负责处理所有以 `player/` 开头的 URL 请求。关于更多 Django URL 调度器的信息,请参考 [Django URL 调度器](https://docs.djangoproject.com/zh-hans/3.2/topics/http/urls/)文档。 + +### player 应用 + +`player` 应用实现了对 `Player` 对象的数据模型迁移、对象持久化、接口实现等功能。 + +#### 数据模型 + +`models.py` 文件内包含 `Player` 数据模型,这个模型对应了数据库的一张表。 + +```python +from django.db import models + +# Create your models here. 
+ + +class Player(models.Model): + id = models.AutoField(primary_key=True) + coins = models.IntegerField() + goods = models.IntegerField() + + objects = models.Manager() + + class Meta: + db_table = "player" + + def as_dict(self): + return { + "id": self.id, + "coins": self.coins, + "goods": self.goods, + } +``` + +在上面的示例中,数据模型中有一个子类 `Meta`,这些子类给了 Django 额外的信息,用以指定数据模型的元信息。其中,`db_table` 声明此数据模型对应的表名为 `player`。关于模型元信息的全部选项可查看 [Django 模型 Meta 选项](https://docs.djangoproject.com/zh-hans/3.2/ref/models/options/)文档。 + +此外,数据模型中定义了 `id`、`coins` 及 `goods` 三个属性: + +- `id`:`models.AutoField(primary_key=True)` 表示其为一个自动递增的主键。 +- `coins`:`models.IntegerField()` 表示其为一个 Integer 类型的字段。 +- `goods`:`models.IntegerField()` 表示其为一个 Integer 类型的字段。 + +关于数据模型的详细信息,可查看 [Django 模型](https://docs.djangoproject.com/zh-hans/3.2/topics/db/models/)文档。 + +#### 数据模型迁移 + +Django 以 Python 数据模型定义代码为依赖,对数据库模型进行迁移。因此,它会生成一系列数据库模型迁移脚本,用于解决代码与数据库之间的差异。在 `models.py` 中定义完 `Player` 数据模型后,你可以使用 `python manage.py makemigrations player` 生成迁移脚本。在本文示例中,`migrations` 包内的 `0001_initial.py` 就是自动生成的迁移脚本。 + +```python +# Generated by Django 3.2.16 on 2022-11-16 11:09 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Player', + fields=[ + ('id', models.AutoField(primary_key=True, serialize=False)), + ('coins', models.IntegerField()), + ('goods', models.IntegerField()), + ], + options={ + 'db_table': 'player', + }, + ), + ] +``` + +你可以使用 `python manage.py sqlmigrate ...` 来预览迁移脚本最终将运行的 SQL 语句。这将极大地减少迁移脚本运行你意料之外的 SQL 语句的可能性。在生成迁移脚本后,推荐至少使用一次此命令预览并仔细检查生成的 SQL 语句。在本示例中,你可以运行 `python manage.py sqlmigrate player 0001`,其输出为可读的 SQL 语句,有助于开发者对语句进行审核: + +```sql +-- +-- Create model Player +-- +CREATE TABLE `player` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `coins` integer NOT NULL, `goods` integer NOT NULL); +``` + +生成迁移脚本后,你可以使用 `python manage.py migrate` 
实施数据迁移。此命令拥有幂等性,其运行后将在数据库内保存一条运行记录以完成幂等保证。因此,你可以多次运行此命令,而无需担心重复运行 SQL 语句。
+
+#### 应用路由
+
+在[根路由](#根路由)一节中,示例程序将 `player/` 路径指向了 `player.urls`。本节将展开叙述 `player` 包下的 `urls.py` 应用路由:
+
+```python
+from django.urls import path
+
+from . import views
+
+urlpatterns = [
+    path('', views.create, name='create'),
+    path('count', views.count, name='count'),
+    path('limit/<int:limit>', views.limit_list, name='limit_list'),
+    path('<int:player_id>', views.get_by_id, name='get_by_id'),
+    path('trade', views.trade, name='trade'),
+]
+```
+
+应用路由注册了 5 个路径:
+
+- `''`:被指向了 `views.create` 函数。
+- `'count'`:被指向了 `views.count` 函数。
+- `'limit/<int:limit>'`:被指向了 `views.limit_list` 函数。此处路径包含一个 `<int:limit>` 路径变量,其中:
+
+    - `int` 是指这个参数将被验证是否为 `int` 类型。
+    - `limit` 是指此参数的值将被映射至名为 `limit` 的函数入参中。
+
+- `'<int:player_id>'`:被指向了 `views.get_by_id` 函数,此处路径包含一个 `<int:player_id>` 路径变量。
+- `'trade'`:被指向了 `views.trade` 函数。
+
+此外,应用路由是根路由转发而来的,因此将在 URL 匹配时包含根路由配置的路径。如上面示例所示,根路由配置为 `player/` 转发至此应用路由,那么,应用路由中的:
+
+- `''` 在实际的请求中为 `http(s)://<host>(:<port>)/player`。
+- `'count'` 在实际的请求中为 `http(s)://<host>(:<port>)/player/count`。
+- `'limit/<int:limit>'` 以 `limit` 为 `3` 为例,在实际的请求中为 `http(s)://<host>(:<port>)/player/limit/3`。
+
+#### 逻辑实现
+
+逻辑实现代码,在 `player` 包下的 `views.py` 内,这在 Django 中被称为视图。关于 Django 视图的更多信息,参考 [Django 视图](https://docs.djangoproject.com/zh-hans/3.2/topics/http/views/)文档。
+
+```python
+from django.db import transaction
+from django.db.models import F
+from django.shortcuts import get_object_or_404
+
+from django.http import HttpResponse, JsonResponse
+from django.views.decorators.http import *
+from .models import Player
+import json
+
+
+@require_POST
+def create(request):
+    dict_players = json.loads(request.body.decode('utf-8'))
+    players = list(map(
+        lambda p: Player(
+            coins=p['coins'],
+            goods=p['goods']
+        ), dict_players))
+    result = Player.objects.bulk_create(objs=players)
+    return HttpResponse(f'create {len(result)} players.')
+
+
+@require_GET
+def count(request):
+    return HttpResponse(Player.objects.count())
+
+
+@require_GET
+def limit_list(request, limit: int = 0):
+    if limit == 0:
+        return 
HttpResponse("")
+    players = set(Player.objects.all()[:limit])
+    dict_players = list(map(lambda p: p.as_dict(), players))
+    return JsonResponse(dict_players, safe=False)
+
+
+@require_GET
+def get_by_id(request, player_id: int):
+    result = get_object_or_404(Player, pk=player_id).as_dict()
+    return JsonResponse(result)
+
+
+@require_POST
+@transaction.atomic
+def trade(request):
+    sell_id, buy_id, amount, price = int(request.POST['sellID']), int(request.POST['buyID']), \
+        int(request.POST['amount']), int(request.POST['price'])
+    sell_player = Player.objects.select_for_update().get(id=sell_id)
+    if sell_player.goods < amount:
+        raise Exception(f'sell player {sell_player.id} goods not enough')
+
+    buy_player = Player.objects.select_for_update().get(id=buy_id)
+    if buy_player.coins < price:
+        raise Exception(f'buy player {buy_player.id} coins not enough')
+
+    Player.objects.filter(id=sell_id).update(goods=F('goods') - amount, coins=F('coins') + price)
+    Player.objects.filter(id=buy_id).update(goods=F('goods') + amount, coins=F('coins') - price)
+
+    return HttpResponse("trade successful")
+```
+
+下面将逐一解释代码中的重点部分:
+
+- 装饰器:
+
+    - `@require_GET`:代表此函数仅接受 `GET` 类型的 HTTP 请求。
+    - `@require_POST`:代表此函数仅接受 `POST` 类型的 HTTP 请求。
+    - `@transaction.atomic`:代表此函数内的所有数据库操作将被包含于同一个事务中运行。关于在 Django 中使用事务的更多信息,可参考 [Django 数据库事务](https://docs.djangoproject.com/zh-hans/3.2/topics/db/transactions/)文档。关于 TiDB 中事务的详细信息,可参考 [TiDB 事务概览](/develop/dev-guide-transaction-overview.md)。
+
+- `create` 函数:
+
+    - 获取 `request` 对象中 `body` 的 Payload,并用 `utf-8` 解码:
+
+        ```python
+        dict_players = json.loads(request.body.decode('utf-8'))
+        ```
+
+    - 使用 lambda 中的 `map` 函数,将 dict 类型的 `dict_players` 对象转换为 `Player` 数据模型的列表:
+
+        ```python
+        players = list(map(
+            lambda p: Player(
+                coins=p['coins'],
+                goods=p['goods']
+            ), dict_players))
+        ```
+
+    - 调用 `Player` 数据模型的 `bulk_create` 函数,批量添加 `players` 列表,并返回添加的数据条目:
+
+        ```python
+        result = Player.objects.bulk_create(objs=players)
+        return HttpResponse(f'create 
{len(result)} players.') + ``` + +- `count` 函数:调用 `Player` 数据模型的 `count` 函数,并返回所有的数据条目。 +- `limit_list` 函数: + + - 短路逻辑,`limit` 为 `0` 时不发送数据库请求: + + ```python + if limit == 0: + return HttpResponse("") + ``` + + - 调用 `Player` 数据模型的 `all` 函数,并使用切片操作符获取前 `limit` 个数据。需要注意的是,Django 不是获取所有数据并在内存中切分前 `limit` 个数据,而是在使用时请求数据库的前 `limit` 个数据。这是由于 Django 重写了切片操作符,并且 QuerySet 对象是**惰性**的。这意味着对一个未执行的 QuerySet 进行切片,将继续返回一个未执行的 QuerySet,直到你第一次真正的请求 QuerySet 内的数据。例如此处使用 `set` 函数对其进行迭代并返回整个集合。关于 Django QuerySet 的更多信息,你可以参考 [Django QuerySet API](https://docs.djangoproject.com/zh-hans/3.2/ref/models/querysets/) 文档。 + + ```python + players = set(Player.objects.all()[:limit]) + ``` + + - 将返回的 `Player` 数据模型的列表,转为对象为 dict 的列表,并使用 `JsonResponse` 输出。 + + ```python + dict_players = list(map(lambda p: p.as_dict(), players)) + return JsonResponse(dict_players, safe=False) + ``` + +- `get_by_id` 函数: + + - 使用 `get_object_or_404` 语法糖传入 `player_id`,并将 `Player` 对象转为 dict。如数据不存在,将由此函数返回 `404` 状态码: + + ```python + result = get_object_or_404(Player, pk=player_id).as_dict() + ``` + + - 使用 `JsonResponse` 返回数据: + + ```python + return JsonResponse(result) + ``` + +- `trade` 函数: + + - 从 `POST` Payload 中接收 Form 形式的数据: + + ```python + sell_id, buy_id, amount, price = int(request.POST['sellID']), int(request.POST['buyID']), \ + int(request.POST['amount']), int(request.POST['price']) + ``` + + - 调用 `Player` 数据模型的 `select_for_update` 函数对卖家和买家的数据进行加锁,并检查卖家的货物数量和买家的货币数量是否足够。该函数使用了 `@transaction.atomic` 装饰器,任意异常都会导致事务回滚。可以利用这个机制,在任意检查失败的时候,抛出异常,由 Django 进行事务回滚。 + + ```python + sell_player = Player.objects.select_for_update().get(id=sell_id) + if sell_player.goods < amount: + raise Exception(f'sell player {sell_player.id} goods not enough') + + buy_player = Player.objects.select_for_update().get(id=buy_id) + if buy_player.coins < price: + raise Exception(f'buy player {buy_player.id} coins not enough') + ``` + + - 更新卖家与买家的数据。由于这里使用了 `@transaction.atomic` 装饰器,任何异常都将由 Django 回滚事务。因此,请不要在此处使用 `try-except` 
语句进行异常处理。如果一定需要处理,请在 except 块中将异常继续抛向上层,以防止因 Django 误认为函数运行正常而提交事务,导致数据错误。
+
+        ```python
+        Player.objects.filter(id=sell_id).update(goods=F('goods') - amount, coins=F('coins') + price)
+        Player.objects.filter(id=buy_id).update(goods=F('goods') + amount, coins=F('coins') - price)
+        ```
+
+    - 返回交易成功字符串,因为其他情况将导致异常抛出返回:
+
+        ```python
+        return HttpResponse("trade successful")
+        ```
+
+## 创建相同依赖空白程序(可选)
+
+本程序使用 Django Admin CLI [django-admin](https://docs.djangoproject.com/zh-hans/3.2/ref/django-admin/) 构建。你可以安装并使用 `django-admin` 来快速完成 Django 项目的初始化。如果需要快速获得与示例程序 `django_example` 相同的可运行空白应用程序,可以按照以下步骤操作:
+
+1. 初始化 Django 项目 `copy_django_example`:
+
+    ```bash
+    pip install -r requirement.txt
+    django-admin startproject copy_django_example
+    cd copy_django_example
+    ```
+
+2. 更改 `DATABASES` 配置:
+
+    1. 打开 `copy_django_example/settings.py` 配置文件
+    2. 将 `DATABASES` 部分从指向本地 SQLite 的配置更改为 TiDB 集群的信息:
+
+        ```python
+        DATABASES = {
+            'default': {
+                'ENGINE': 'django_tidb',
+                'NAME': 'django',
+                'USER': 'root',
+                'PASSWORD': '',
+                'HOST': '127.0.0.1',
+                'PORT': 4000,
+            },
+        }
+        DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
+        ```
+
+    3. 
由于本示例不需要跨域校验,因此你需要注释或删除 `MIDDLEWARE` 中的 `CsrfViewMiddleware`。修改后的 `MIDDLEWARE` 为: + + ```python + MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + # 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + ] + ``` + +至此,你已经完成了一个空白的应用程序,此应用程序与示例应用程序的依赖完全相同。如果需要进一步了解 Django 的使用方法,参考: + +- [Django 文档](https://docs.djangoproject.com/zh-hans/3.2/) +- [Django 入门教程](https://docs.djangoproject.com/zh-hans/3.2/intro/tutorial01/) \ No newline at end of file diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-mysql-connector.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-mysql-connector.md new file mode 100644 index 00000000..2e278371 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-mysql-connector.md @@ -0,0 +1,283 @@ +--- +title: TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序示例。 +aliases: ['/zh/tidb/dev/dev-guide-sample-application-python'] +--- + + + + +# TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序 + +[MySQL Connector/Python](https://dev.mysql.com/doc/connector-python/en/) 为当前比较流行的开源 Python Driver 之一。 + +本文档将展示如何使用 TiDB 和 MySQL Connector/Python 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 
+ +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 MySQL Connector/Python **8.0.31** 版本进行说明。虽然 Python 的 Driver 相较其他语言,使用也极其方便。但因其不可屏蔽底层实现,需手动管控事务的特性,如果没有大量必须使用 SQL 的场景,仍然推荐使用 ORM 进行程序编写。这可以降低程序的耦合性。 + +```python +import uuid +from typing import List + +from mysql.connector import connect, MySQLConnection +from mysql.connector.cursor import MySQLCursor + + +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test') + connection.autocommit = autocommit + return connection + + +def create_player(cursor: MySQLCursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: MySQLCursor, player_id: str) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: MySQLCursor, limit: int) -> List[tuple]: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((str(uuid.uuid4()), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: MySQLCursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: MySQLCursor) -> int: + cursor.execute("SELECT count(*) FROM player") + return cursor.fetchone()[0] + + +def trade_check(cursor: MySQLCursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + _, sell_goods = cursor.fetchone() + if sell_goods < amount: + print(f'sell player {sell_id} goods not 
enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buy_coins, _ = cursor.fetchone() + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: MySQLCursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: MySQLConnection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as connection: + with connection.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(f'id:{test_player[0]}, coins:{test_player[1]}, goods:{test_player[2]}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # all players have random uuid + print(f'start to insert one by one, it will take a long time') + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + print(f'inserted {idx} players') + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. 
+ three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(f'id:{player[0]}, coins:{player[1]}, goods:{player[2]}') + + +def trade_example() -> None: + with get_connection(autocommit=False) as conn: + with conn.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + conn.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(conn, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(conn, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with conn.cursor() as cur: + _, player1_coin, player1_goods = get_player(cur, "1") + print(f'id:1, coins:{player1_coin}, goods:{player1_goods}') + _, player2_coin, player2_goods = get_player(cur, "2") + print(f'id:2, coins:{player2_coin}, goods:{player2_goods}') + + +simple_example() +trade_example() +``` + +Driver 有着更低的封装程度,因此我们可以在程序内见到大量的 SQL。程序内查询到的 `Player`,与 ORM 不同,因为没有数据对象的存在,`Player` 将以 tuple 进行表示。 + +关于 MySQL Connector/Python 的更多使用方法,你可以参考 [MySQL Connector/Python 官方文档](https://dev.mysql.com/doc/connector-python/en/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +更改 `mysql_connector_python_example.py` 内 `get_connection` 函数: + +```python +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test') + connection.autocommit = autocommit + return connection +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `get_connection` 更改为: + +```python +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect( + host="xxx.tidbcloud.com", + port=4000, + user="2aEp24QWEDLqRFs.root", + password="123456", + database="test", + autocommit=autocommit, + ssl_ca='', + ssl_verify_identity=True + ) + connection.autocommit = autocommit + return connection +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 mysql_connector_python_example.py +``` + +## 第 4 步:预期输出 + +[MySQL Connector/Python 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#mysql-connector-python) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-mysqlclient.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-mysqlclient.md new file mode 100644 index 00000000..6665889c --- /dev/null +++ 
b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-mysqlclient.md @@ -0,0 +1,282 @@ +--- +title: TiDB 和 mysqlclient 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 mysqlclient 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 mysqlclient 的简单 CRUD 应用程序 + +[mysqlclient](https://pypi.org/project/mysqlclient/) 为当前比较流行的开源 Python Driver 之一。 + +本文档将展示如何使用 TiDB 和 mysqlclient 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 mysqlclient **2.1.1** 版本进行说明。虽然 Python 的 Driver 相较其他语言,使用也极其方便。但因其不可屏蔽底层实现,需手动管控事务的特性,如果没有大量必须使用 SQL 的场景,仍然推荐使用 ORM 进行程序编写。这可以降低程序的耦合性。 + +```python +import uuid +from typing import List + +import MySQLdb +from MySQLdb import Connection +from MySQLdb.cursors import Cursor + +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="test", + autocommit=autocommit + ) + + +def create_player(cursor: Cursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: Cursor, player_id: str) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: Cursor, limit: int) -> List[tuple]: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + 
players.append((uuid.uuid4(), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: Cursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: Cursor) -> None: + cursor.execute("SELECT count(*) FROM player") + return cursor.fetchone()[0] + + +def trade_check(cursor: Cursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + _, sell_goods = cursor.fetchone() + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buy_coins, _ = cursor.fetchone() + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: Cursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: Connection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as conn: + with conn.cursor() as cur: + # create a player, who has a coin and a 
goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(f'id:{test_player[0]}, coins:{test_player[1]}, goods:{test_player[2]}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. + three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(f'id:{player[0]}, coins:{player[1]}, goods:{player[2]}') + + +def trade_example() -> None: + with get_connection(autocommit=False) as conn: + with conn.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + conn.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(conn, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. 
+ # this trade will be successful + trade(conn, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with conn.cursor() as cur: + _, player1_coin, player1_goods = get_player(cur, "1") + print(f'id:1, coins:{player1_coin}, goods:{player1_goods}') + _, player2_coin, player2_goods = get_player(cur, "2") + print(f'id:2, coins:{player2_coin}, goods:{player2_goods}') + + +simple_example() +trade_example() +``` + +Driver 有着更低的封装程度,因此我们可以在程序内见到大量的 SQL。程序内查询到的 `Player`,与 ORM 不同,因为没有数据对象的存在,`Player` 将以元组 (tuple) 进行表示。 + +关于 mysqlclient 的更多使用方法,你可以参考 [mysqlclient 官方文档](https://mysqlclient.readthedocs.io/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `mysqlclient_example.py` 内 `get_connection` 函数: + +```python +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="test", + autocommit=autocommit + ) +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `get_connection` 更改为: + +```python +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="xxx.tidbcloud.com", + port=4000, + user="2aEp24QWEDLqRFs.root", + password="123456", + database="test", + autocommit=autocommit, + ssl_mode="VERIFY_IDENTITY", + ssl={ + "ca": "" + } + ) +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 mysqlclient_example.py +``` + +## 第 4 步:预期输出 + +[mysqlclient 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#mysqlclient) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-peewee.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-peewee.md new file mode 100644 index 00000000..b3bd4519 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-peewee.md @@ -0,0 +1,245 @@ +--- +title: TiDB 和 peewee 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 
peewee 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 peewee 的简单 CRUD 应用程序 + +[peewee](http://docs.peewee-orm.com/en/latest/) 为当前比较流行的开源 Python ORM 之一。 + +本文档将展示如何使用 TiDB 和 peewee 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 peewee **3.15.4** 版本进行说明。 + +```python +import os +import uuid +from typing import List + +from peewee import * + +from playhouse.db_url import connect + +db = connect('mysql://root:@127.0.0.1:4000/test') + + +class Player(Model): + id = CharField(max_length=36, primary_key=True) + coins = IntegerField() + goods = IntegerField() + + class Meta: + database = db + table_name = "player" + + +def random_player(amount: int) -> List[Player]: + players = [] + for _ in range(amount): + players.append(Player(id=uuid.uuid4(), coins=10000, goods=10000)) + + return players + + +def simple_example() -> None: + # create a player, who has a coin and a goods. + Player.create(id="test", coins=1, goods=1) + + # get this player, and print it. + test_player = Player.select().where(Player.id == "test").get() + print(f'id:{test_player.id}, coins:{test_player.coins}, goods:{test_player.goods}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # each player has a random UUID + player_list = random_player(1919) + Player.bulk_create(player_list, 114) + + # print the number of players + count = Player.select().count() + print(f'number of players: {count}') + + # print 3 players. 
+ three_players = Player.select().limit(3) + for player in three_players: + print(f'id:{player.id}, coins:{player.coins}, goods:{player.goods}') + + +def trade_check(sell_id: str, buy_id: str, amount: int, price: int) -> bool: + sell_goods = Player.select(Player.goods).where(Player.id == sell_id).get().goods + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + buy_coins = Player.select(Player.coins).where(Player.id == buy_id).get().coins + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + return True + + +def trade(sell_id: str, buy_id: str, amount: int, price: int) -> None: + with db.atomic() as txn: + try: + if trade_check(sell_id, buy_id, amount, price) is False: + txn.rollback() + return + + # deduct the goods of seller, and raise his/her the coins + Player.update(goods=Player.goods - amount, coins=Player.coins + price).where(Player.id == sell_id).execute() + # deduct the coins of buyer, and raise his/her the goods + Player.update(goods=Player.goods + amount, coins=Player.coins - price).where(Player.id == buy_id).execute() + + except Exception as err: + txn.rollback() + print(f'something went wrong: {err}') + else: + txn.commit() + print("trade success") + + +def trade_example() -> None: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + Player.create(id="1", coins=100, goods=0) + Player.create(id="2", coins=114514, goods=20) + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. 
+ # this trade will be successful + trade(sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + after_trade_players = Player.select().where(Player.id.in_(["1", "2"])) + for player in after_trade_players: + print(f'id:{player.id}, coins:{player.coins}, goods:{player.goods}') + + +db.connect() + +# recreate the player table +db.drop_tables([Player]) +db.create_tables([Player]) + +simple_example() +trade_example() +``` + +相较于直接使用 Driver,peewee 屏蔽了创建数据库连接时,不同数据库差异的细节。peewee 还封装了大量的操作,如会话管理、基本对象的 CRUD 等,极大地简化了代码量。 + +`Player` 类为数据库表在程序内的映射。`Player` 的每个属性都对应着 `player` 表的一个字段。peewee 使用 `Player` 类为了给 peewee 提供更多的信息,使用了形如以上示例中的 `id = CharField(max_length=36, primary_key=True)` 的类型定义,用来指示字段类型和其附加属性。`id = CharField(max_length=36, primary_key=True)` 表示 `id` 字段为 `CharField` 类型,对应数据库类型为 `VARCHAR`,长度为 `36`,且为主键。 + +关于 peewee 的更多使用方法,你可以参考 [peewee 官网](http://docs.peewee-orm.com/en/latest/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `peewee_example.py` 内 `connect` 函数的入参: + +```python +db = connect('mysql://root:@127.0.0.1:4000/test') +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `connect` 更改为: + +- peewee 将 PyMySQL 作为 Driver 时: + + ```python + db = connect('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', + ssl_verify_cert=True, ssl_ca="") + ``` + +- peewee 将 mysqlclient 作为 Driver 时: + + ```python + db = connect('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', + ssl_mode="VERIFY_IDENTITY", ssl={"ca": ""}) + ``` + +由于 peewee 会将参数透传至 Driver 中,使用 peewee 时请注意 Driver 的使用类型。 + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 peewee_example.py +``` + +## 第 4 步:预期输出 + +[peewee 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#peewee) diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-pymysql.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-pymysql.md new file mode 100644 index 00000000..5657c625 --- /dev/null +++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-pymysql.md @@ -0,0 +1,277 @@ +--- +title: TiDB 和 PyMySQL 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 PyMySQL 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 PyMySQL 的简单 CRUD 应用程序 
+ +[PyMySQL](https://pypi.org/project/PyMySQL/) 为当前比较流行的开源 Python Driver 之一。 + +本文档将展示如何使用 TiDB 和 PyMySQL 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 PyMySQL **1.0.2** 版本进行说明。虽然 Python 的 Driver 相较其他语言,使用也极其方便。但因其不可屏蔽底层实现,需手动管控事务的特性,如果没有大量必须使用 SQL 的场景,仍然推荐使用 ORM 进行程序编写。这可以降低程序的耦合性。 + +```python +import uuid +from typing import List + +import pymysql.cursors +from pymysql import Connection +from pymysql.cursors import DictCursor + + +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test', + cursorclass=DictCursor, + autocommit=autocommit) + + +def create_player(cursor: DictCursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: DictCursor, player_id: str) -> dict: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: DictCursor, limit: int) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((uuid.uuid4(), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: DictCursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: DictCursor) -> int: + 
cursor.execute("SELECT count(*) as count FROM player") + return cursor.fetchone()['count'] + + +def trade_check(cursor: DictCursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + seller = cursor.fetchone() + if seller['goods'] < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buyer = cursor.fetchone() + if buyer['coins'] < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: DictCursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: Connection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as connection: + with connection.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(test_player) + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. 
+ # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. + three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(player) + + +def trade_example() -> None: + with get_connection(autocommit=False) as connection: + with connection.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + connection.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(connection, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(connection, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with connection.cursor() as cur: + print(get_player(cur, "1")) + print(get_player(cur, "2")) + + +simple_example() +trade_example() +``` + +Driver 有着更低的封装程度,因此我们可以在程序内见到大量的 SQL。程序内查询到的 `Player`,与 ORM 不同,因为没有数据对象的存在,`Player` 将以 dict 进行表示。 + +关于 PyMySQL 的更多使用方法,你可以参考 [PyMySQL 官方文档](https://pymysql.readthedocs.io/en/latest/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+
+若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。
+
+### 第 3 步第 2 部分:TiDB Cloud 更改参数
+
+若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。
+
+若你使用 TiDB Serverless 集群,更改 `pymysql_example.py` 内 `get_connection` 函数:
+
+```python
+def get_connection(autocommit: bool = False) -> Connection:
+    return pymysql.connect(host='127.0.0.1',
+                           port=4000,
+                           user='root',
+                           password='',
+                           database='test',
+                           cursorclass=DictCursor,
+                           autocommit=autocommit)
+```
+
+若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为:
+
+- Endpoint: `xxx.tidbcloud.com`
+- Port: `4000`
+- User: `2aEp24QWEDLqRFs.root`
+
+那么此处应将 `get_connection` 更改为:
+
+```python
+def get_connection(autocommit: bool = False) -> Connection:
+    return pymysql.connect(host='xxx.tidbcloud.com',
+                           port=4000,
+                           user='2aEp24QWEDLqRFs.root',
+                           password='123456',
+                           database='test',
+                           cursorclass=DictCursor,
+                           autocommit=autocommit,
+                           ssl_ca='',
+                           ssl_verify_cert=True,
+                           ssl_verify_identity=True)
+```
+
+### 第 3 步第 3 部分:运行
+
+运行前请先安装依赖:
+
+```bash
+pip3 install -r requirement.txt
+```
+
+当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。
+
+```bash
+python3 pymysql_example.py
+```
+
+## 第 4 步:预期输出
+
+[PyMySQL 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#PyMySQL)
diff --git a/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-sqlalchemy.md b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-sqlalchemy.md
new file mode 100644
index 00000000..0f32bc59
--- /dev/null
+++ b/test/sync_mult_prs/data/markdown-pages/zh/tidb/release-6.7/develop/dev-guide-sample-application-python-sqlalchemy.md
@@ -0,0 +1,238 @@
+---
+title: TiDB 和 SQLAlchemy 
的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 SQLAlchemy 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 SQLAlchemy 的简单 CRUD 应用程序 + +[SQLAlchemy](https://www.sqlalchemy.org/) 为当前比较流行的开源 Python ORM 之一。 + +本文档将展示如何使用 TiDB 和 SQLAlchemy 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 SQLAlchemy **1.4.44** 版本进行说明。 + +```python +import uuid +from typing import List + +from sqlalchemy import create_engine, String, Column, Integer, select, func +from sqlalchemy.orm import declarative_base, sessionmaker + +engine = create_engine('mysql://root:@127.0.0.1:4000/test') +Base = declarative_base() +Base.metadata.create_all(engine) +Session = sessionmaker(bind=engine) + + +class Player(Base): + __tablename__ = "player" + + id = Column(String(36), primary_key=True) + coins = Column(Integer) + goods = Column(Integer) + + def __repr__(self): + return f'Player(id={self.id!r}, coins={self.coins!r}, goods={self.goods!r})' + + +def random_player(amount: int) -> List[Player]: + players = [] + for _ in range(amount): + players.append(Player(id=uuid.uuid4(), coins=10000, goods=10000)) + + return players + + +def simple_example() -> None: + with Session() as session: + # create a player, who has a coin and a goods. + session.add(Player(id="test", coins=1, goods=1)) + + # get this player, and print it. + get_test_stmt = select(Player).where(Player.id == "test") + for player in session.scalars(get_test_stmt): + print(player) + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. 
+ # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + session.bulk_save_objects(player_list[idx:idx + 114]) + + # print the number of players + count = session.query(func.count(Player.id)).scalar() + print(f'number of players: {count}') + + # print 3 players. + three_players = session.query(Player).limit(3).all() + for player in three_players: + print(player) + + session.commit() + + +def trade_check(session: Session, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + # sell player goods check + sell_player = session.query(Player.goods).filter(Player.id == sell_id).with_for_update().one() + if sell_player.goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + buy_player = session.query(Player.coins).filter(Player.id == buy_id).with_for_update().one() + if buy_player.coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade(sell_id: str, buy_id: str, amount: int, price: int) -> None: + with Session() as session: + if trade_check(session, sell_id, buy_id, amount, price) is False: + return + + # deduct the goods of seller, and raise his/her the coins + session.query(Player).filter(Player.id == sell_id). \ + update({'goods': Player.goods - amount, 'coins': Player.coins + price}) + # deduct the coins of buyer, and raise his/her the goods + session.query(Player).filter(Player.id == buy_id). \ + update({'goods': Player.goods + amount, 'coins': Player.coins - price}) + + session.commit() + print("trade success") + + +def trade_example() -> None: + with Session() as session: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + session.add(Player(id="1", coins=100, goods=0)) + session.add(Player(id="2", coins=114514, goods=20)) + session.commit() + + # player 1 wants to buy 10 goods from player 2. 
+ # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(sell_id="2", buy_id="1", amount=2, price=100) + + with Session() as session: + traders = session.query(Player).filter(Player.id.in_(("1", "2"))).all() + for player in traders: + print(player) + session.commit() + + +simple_example() +trade_example() +``` + +相较于直接使用 Driver,SQLAlchemy 屏蔽了创建数据库连接时,不同数据库差异的细节。SQLAlchemy 还封装了大量的操作,如会话管理、基本对象的 CRUD 等,极大地简化了代码量。 + +`Player` 类为数据库表在程序内的映射。`Player` 的每个属性都对应着 `player` 表的一个字段。SQLAlchemy 使用 `Player` 类为了给 SQLAlchemy 提供更多的信息,使用了形如以上示例中的 `id = Column(String(36), primary_key=True)` 的类型定义,用来指示字段类型和其附加属性。`id = Column(String(36), primary_key=True)` 表示 `id` 字段为 `String` 类型,对应数据库类型为 `VARCHAR`,长度为 `36`,且为主键。 + +关于 SQLAlchemy 的更多使用方法,你可以参考 [SQLAlchemy 官网](https://www.sqlalchemy.org/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `sqlalchemy_example.py` 内 `create_engine` 函数的入参: + +```python +engine = create_engine('mysql://root:@127.0.0.1:4000/test') +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `create_engine` 更改为: + +```python +engine = create_engine('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', connect_args={ + "ssl_mode": "VERIFY_IDENTITY", + "ssl": { + "ca": "" + } +}) +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 sqlalchemy_example.py +``` + +## 第 4 步:预期输出 + +[SQLAlchemy 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#SQLAlchemy) diff --git a/test/test.py b/test/test.py index a64bd5f8..43ac5ff0 100644 --- a/test/test.py +++ b/test/test.py @@ -33,6 +33,7 @@ class TestConfig: diff_command: str test_target: str test_cases: List[TestCase] + test_dependencies: List[str] = None class TestRunner: @@ -56,6 +57,7 @@ def _load_config() -> List[TestConfig]: config.append(TestConfig( diff_command=test["diff_command"], test_target=test["test_target"], + test_dependencies=test.get("test_dependencies"), test_cases=test_cases)) return config @@ -87,7 +89,7 @@ def run(self) -> None: test_dir = os.path.abspath(case.directory) script_args = case.args - test = DocSitePreviewTest(test_dir, feature_dir, script_name) + test = DocSitePreviewTest(test_dir, feature_dir, script_name, config.test_dependencies) 
if test.execute(args=script_args, env=self._env) and test.verify(diff_command): self.report.success_tests.append(case_name) diff --git a/test/test_util.py b/test/test_util.py index 671fd0a1..24b2caf0 100644 --- a/test/test_util.py +++ b/test/test_util.py @@ -9,7 +9,7 @@ class DocSitePreviewTest: - def __init__(self, test_dir: str, feature_dir: str, script_name: str): + def __init__(self, test_dir: str, feature_dir: str, script_name: str, test_dependencies: List[str] = None): self.test_dir = test_dir self.feature_dir = feature_dir self.script_name = script_name @@ -18,6 +18,8 @@ def __init__(self, test_dir: str, feature_dir: str, script_name: str): self.test_script = os.path.join(self.test_output, self.script_name) self._setup_test_env() + if test_dependencies: + self._load_dependencies(test_dependencies) def _setup_test_env(self) -> None: """ @@ -43,6 +45,16 @@ def _copy_setup_script(self) -> None: shutil.copy(os.path.join(self.feature_dir, self.script_name), self.test_script) self._make_executable(self.test_script) + def _load_dependencies(self, dependencies: List[str]) -> None: + """ + Copy the dependencies to the test environment. 
+ """ + for dependency in dependencies: + dependency_script = os.path.join(self.feature_dir, dependency) + test_dependency_script = os.path.join(self.test_output, dependency) + shutil.copy(dependency_script, test_dependency_script) + self._make_executable(test_dependency_script) + @staticmethod def _make_executable(script: str) -> None: """ diff --git a/test_config.toml b/test_config.toml index 1ec0247c..fa567d41 100644 --- a/test_config.toml +++ b/test_config.toml @@ -37,3 +37,15 @@ directory = "test/sync_pr_docs_cn/" name = "Sync markdown-pages from a TiDB Operator PR" args = "preview-operator/pingcap/docs-tidb-operator/2397" directory = "test/sync_pr_operator/" + +[sync_mult_prs] + +diff_command = "diff -qrs data actual --exclude temp --exclude '*.log' --exclude sync_mult_prs.sh --exclude sync_pr.sh" +test_target = "sync_mult_prs.sh" +test_dependencies = ["sync_pr.sh"] + +[[sync_mult_prs.test_cases]] + +name = "Sync markdown-pages from multiple PRs" +args = "" +directory = "test/sync_mult_prs/"