diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 84db54b81e70b..5d6ef82fc76ca 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -7,7 +7,15 @@ concurrency: cancel-in-progress: true jobs: - tidb-check: + duplicated-file-names: + runs-on: ubuntu-latest + steps: + - name: Check out + uses: actions/checkout@v4 + - name: Verify duplicated file names + run: ./scripts/verify-duplicated-file-name.sh + + internal-links-files: runs-on: ubuntu-latest steps: - name: Check out @@ -15,20 +23,14 @@ jobs: - uses: actions/setup-node@v4 with: node-version: "18" - - name: Verify duplicated file names - run: ./scripts/verify-duplicated-file-name.sh - - name: Verify internal links and anchors - tidb only - run: | - npm i - node ./scripts/filterNonCloudDoc.js - cp -r ./scripts ./tmp - cp -r ./media ./tmp - cp .gitignore ./tmp/ - cd ./tmp - ./scripts/verify-links.sh - ./scripts/verify-link-anchors.sh + cache: npm + cache-dependency-path: package-lock.json + - name: Install Node dependencies + run: npm ci + - name: Verify internal links (full repo) - files + run: ./scripts/verify-links.sh - tidb-cloud-check: + internal-links-anchors: runs-on: ubuntu-latest steps: - name: Check out @@ -36,22 +38,27 @@ jobs: - uses: actions/setup-node@v4 with: node-version: "18" - - name: Check TOC-tidb-cloud.md existence - id: check_cloud_toc - uses: andstor/file-existence-action@v2 + cache: npm + cache-dependency-path: package-lock.json + - name: Install Node dependencies + run: npm ci + - name: Verify internal links (full repo) - anchors + run: ./scripts/verify-link-anchors.sh + + internal-links-toc: + runs-on: ubuntu-latest + steps: + - name: Check out + uses: actions/checkout@v4 + - uses: actions/setup-node@v4 with: - files: "TOC-tidb-cloud.md" - - name: Verify internal links - cloud only - if: steps.check_cloud_toc.outputs.files_exists == 'true' - run: | - npm i - node ./scripts/filterCloudDoc.js - cp -r ./scripts ./tmp - cp -r ./media ./tmp - cp 
.gitignore ./tmp/ - cd ./tmp - ./scripts/verify-links.sh - ./scripts/verify-link-anchors.sh + node-version: "18" + cache: npm + cache-dependency-path: package-lock.json + - name: Install Node dependencies + run: npm ci + - name: Verify internal links (full repo) - TOC membership + run: node ./scripts/verify-internal-links-in-toc.js vale: runs-on: ubuntu-latest diff --git a/TOC-ai.md b/TOC-ai.md new file mode 100644 index 0000000000000..e79d9a34cc2be --- /dev/null +++ b/TOC-ai.md @@ -0,0 +1,86 @@ + + + +# Table of Contents + +## QUICK START + +- [Get Started via Python](/ai/quickstart-via-python.md) +- [Get Started via SQL](/ai/quickstart-via-sql.md) + +## CONCEPTS + +- [Vector Search](/ai/concepts/vector-search-overview.md) + +## GUIDES + +- [Connect to TiDB](/ai/guides/connect.md) +- [Working with Tables](/ai/guides/tables.md) +- Search Features + - [Vector Search](/ai/guides/vector-search.md) + - Full-Text Search + - [Full-Text Search via Python](/ai/guides/vector-search-full-text-search-python.md) + - [Full-Text Search via SQL](/ai/guides/vector-search-full-text-search-sql.md) + - [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) + - [Image Search](/ai/guides/image-search.md) +- Advanced Features + - [Auto Embedding](/ai/guides/auto-embedding.md) + - [Filtering](/ai/guides/filtering.md) + - [Reranking](/ai/guides/reranking.md) + - [Join Queries](/ai/guides/join-queries.md) + - [Raw SQL Queries](/ai/guides/raw-queries.md) + - [Transactions](/ai/guides/transactions.md) + +## EXAMPLES + +- [Basic CRUD Operations](/ai/examples/basic-with-pytidb.md) +- [Auto Embedding](/ai/examples/auto-embedding-with-pytidb.md) +- Search & Retrieval + - [Vector Search](/ai/examples/vector-search-with-pytidb.md) + - [Full-Text Search](/ai/examples/fulltext-search-with-pytidb.md) + - [Hybrid Search](/ai/examples/hybrid-search-with-pytidb.md) + - [Image Search](/ai/examples/image-search-with-pytidb.md) +- AI Applications + - [RAG Application](/ai/examples/rag-with-pytidb.md) + 
- [Conversational Memory](/ai/examples/memory-with-pytidb.md) + - [Text-to-SQL](/ai/examples/text2sql-with-pytidb.md) + +## INTEGRATIONS + +- [Integration Overview](/ai/integrations/vector-search-integration-overview.md) +- Auto Embedding + - [Overview](/ai/integrations/vector-search-auto-embedding-overview.md) + - [OpenAI](/ai/integrations/vector-search-auto-embedding-openai.md) + - [OpenAI Compatible](/ai/integrations/embedding-openai-compatible.md) + - [Jina AI](/ai/integrations/vector-search-auto-embedding-jina-ai.md) + - [Cohere](/ai/integrations/vector-search-auto-embedding-cohere.md) + - [Google Gemini](/ai/integrations/vector-search-auto-embedding-gemini.md) + - [Hugging Face](/ai/integrations/vector-search-auto-embedding-huggingface.md) + - [NVIDIA NIM](/ai/integrations/vector-search-auto-embedding-nvidia-nim.md) + - [Amazon Titan](/ai/integrations/vector-search-auto-embedding-amazon-titan.md) +- AI Frameworks + - [LangChain](/ai/integrations/vector-search-integrate-with-langchain.md) + - [LlamaIndex](/ai/integrations/vector-search-integrate-with-llamaindex.md) +- ORM Libraries + - [SQLAlchemy](/ai/integrations/vector-search-integrate-with-sqlalchemy.md) + - [Django ORM](/ai/integrations/vector-search-integrate-with-django-orm.md) + - [Peewee](/ai/integrations/vector-search-integrate-with-peewee.md) +- Cloud Services + - [Jina AI Embedding](/ai/integrations/vector-search-integrate-with-jinaai-embedding.md) + - [Amazon Bedrock](/ai/integrations/vector-search-integrate-with-amazon-bedrock.md) +- MCP Server + - [Overview](/ai/integrations/tidb-mcp-server.md) + - [Claude Code](/ai/integrations/tidb-mcp-claude-code.md) + - [Claude Desktop](/ai/integrations/tidb-mcp-claude-desktop.md) + - [Cursor](/ai/integrations/tidb-mcp-cursor.md) + - [VS Code](/ai/integrations/tidb-mcp-vscode.md) + - [Windsurf](/ai/integrations/tidb-mcp-windsurf.md) + +## REFERENCE + +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Functions and 
Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) +- [Performance Tuning](/ai/reference/vector-search-improve-performance.md) +- [Limitations](/ai/reference/vector-search-limitations.md) +- [Changelogs](/ai/reference/vector-search-changelogs.md) diff --git a/TOC-api.md b/TOC-api.md new file mode 100644 index 0000000000000..b31833afb27c3 --- /dev/null +++ b/TOC-api.md @@ -0,0 +1,18 @@ + + + +# Table of Contents + +## TIDB CLOUD + +- [API Overview](/api/tidb-cloud-api-overview.md) +- [API v1beta1](/api/tidb-cloud-api-v1beta1.md) +- [API v1beta](/api/tidb-cloud-api-v1beta.md) + +## TIDB SELF-MANAGED + +- [TiProxy API](/api/tiproxy-api-overview.md) +- [Data Migration API](/api/dm-api-overview.md) +- [Monitoring API](/api/monitoring-api-overview.md) +- [TiCDC API](/api/ticdc-api-overview.md) +- [TiDB Operator API](/api/tidb-operator-api-overview.md) diff --git a/TOC-best-practices.md b/TOC-best-practices.md new file mode 100644 index 0000000000000..aa9299436e768 --- /dev/null +++ b/TOC-best-practices.md @@ -0,0 +1,35 @@ + + + +# Table of Contents + +## Overview + +- [Use TiDB](/best-practices/tidb-best-practices.md) + +## Schema Design + +- [Manage DDL](/best-practices/ddl-introduction.md) +- [Use UUIDs as Primary Keys](/best-practices/uuid.md) +- [Use TiDB Partitioned Tables](/best-practices/tidb-partitioned-tables-best-practices.md) +- [Optimize Multi-Column Indexes](/best-practices/multi-column-index-best-practices.md) +- [Manage Indexes and Identify Unused Indexes](/best-practices/index-management-best-practices.md) + +## Deployment + +- [Deploy TiDB on Public Cloud](/best-practices/best-practices-on-public-cloud.md) +- [Three-Node Hybrid Deployment](/best-practices/three-nodes-hybrid-deployment.md) +- [Local Reads in Three-Data-Center Deployments](/best-practices/three-dc-local-read.md) + +## Operations + +- [Use HAProxy for Load Balancing](/best-practices/haproxy-best-practices.md) +- 
[Use Read-Only Storage Nodes](/best-practices/readonly-nodes.md) +- [Monitor TiDB Using Grafana](/best-practices/grafana-monitor-best-practices.md) + +## Performance Tuning + +- [Handle Millions of Tables in SaaS Multi-Tenant Scenarios](/best-practices/saas-best-practices.md) +- [Handle High-Concurrency Writes](/best-practices/high-concurrency-best-practices.md) +- [Tune TiKV Performance with Massive Regions](/best-practices/massive-regions-best-practices.md) +- [Tune PD Scheduling](/best-practices/pd-scheduling-best-practices.md) diff --git a/TOC-develop.md b/TOC-develop.md new file mode 100644 index 0000000000000..9300837095adb --- /dev/null +++ b/TOC-develop.md @@ -0,0 +1,119 @@ + + + +# Table of Contents + +## QUICK START + +- [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md) +- [TiDB Basics](/develop/dev-guide-tidb-basics.md) +- [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) + +## GUIDES + +- Connect to TiDB + - [Overview](/develop/dev-guide-connect-to-tidb.md) + - CLI & GUI Tools + - [MySQL CLI Tools](/develop/dev-guide-mysql-tools.md) + - [JetBrains DataGrip](/develop/dev-guide-gui-datagrip.md) + - [DBeaver](/develop/dev-guide-gui-dbeaver.md) + - [VS Code](/develop/dev-guide-gui-vscode-sqltools.md) + - [MySQL Workbench](/develop/dev-guide-gui-mysql-workbench.md) + - [Navicat](/develop/dev-guide-gui-navicat.md) + - Drivers & ORMs + - [Choose a Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) + - Java + - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) + - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) + - [Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) + - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + - [Configure Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) + - [Best Practices for Developing Java Applications](/develop/java-app-best-practices.md) + - Go + - 
[Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) + - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) + - Python + - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) + - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) + - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) + - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) + - [peewee](/develop/dev-guide-sample-application-python-peewee.md) + - [Django](/develop/dev-guide-sample-application-python-django.md) + - Node.js + - [node-mysql2](/develop/dev-guide-sample-application-nodejs-mysql2.md) + - [mysql.js](/develop/dev-guide-sample-application-nodejs-mysqljs.md) + - [Prisma](/develop/dev-guide-sample-application-nodejs-prisma.md) + - [Sequelize](/develop/dev-guide-sample-application-nodejs-sequelize.md) + - [TypeORM](/develop/dev-guide-sample-application-nodejs-typeorm.md) + - [Next.js](/develop/dev-guide-sample-application-nextjs.md) + - [AWS Lambda](/develop/dev-guide-sample-application-aws-lambda.md) + - Ruby + - [mysql2](/develop/dev-guide-sample-application-ruby-mysql2.md) + - [Rails](/develop/dev-guide-sample-application-ruby-rails.md) + - C# + - [C#](/develop/dev-guide-sample-application-cs.md) + - TiDB Cloud Serverless Driver ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [Overview](/develop/serverless-driver.md) + - [Node.js Example](/develop/serverless-driver-node-example.md) + - [Prisma Example](/develop/serverless-driver-prisma-example.md) + - [Kysely Example](/develop/serverless-driver-kysely-example.md) + - [Drizzle Example](/develop/serverless-driver-drizzle-example.md) +- Design Database Schema + - [Overview](/develop/dev-guide-schema-design-overview.md) + - [Create a Database](/develop/dev-guide-create-database.md) + - [Create a Table](/develop/dev-guide-create-table.md) + - [Create a Secondary 
Index](/develop/dev-guide-create-secondary-indexes.md) +- Write Data + - [Insert Data](/develop/dev-guide-insert-data.md) + - [Update Data](/develop/dev-guide-update-data.md) + - [Delete Data](/develop/dev-guide-delete-data.md) + - [Periodically Delete Expired Data Using TTL (Time to Live)](/time-to-live.md) + - [Prepared Statements](/develop/dev-guide-prepared-statement.md) +- Read Data + - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) + - [Multi-table Join Queries](/develop/dev-guide-join-tables.md) + - [Subquery](/develop/dev-guide-use-subqueries.md) + - [Paginate Results](/develop/dev-guide-paginate-results.md) + - [Views](/develop/dev-guide-use-views.md) + - [Temporary Tables](/develop/dev-guide-use-temporary-tables.md) + - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) + - Read Replica Data + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) +- [Vector Search](/develop/dev-guide-vector-search.md) ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) +- Manage Transactions + - [Overview](/develop/dev-guide-transaction-overview.md) + - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) + - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) +- Optimize + - [Overview](/develop/dev-guide-optimize-sql-overview.md) + - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) + - [Best Practices for Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) + - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) + - Additional Optimization Methods + - [Avoid Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) + - [Unique Serial Number 
Generation](/develop/dev-guide-unique-serial-number-generation.md) +- Troubleshoot + - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) + - [Unstable Result Set](/develop/dev-guide-unstable-result-set.md) + - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) + +## INTEGRATIONS + +- Third-Party Support + - [Third-Party Tools Supported by TiDB](/develop/dev-guide-third-party-support.md) + - [Known Incompatibility Issues with Third-Party Tools](/develop/dev-guide-third-party-tools-compatibility.md) +- [ProxySQL](/develop/dev-guide-proxysql-integration.md) +- [Amazon AppFlow](/develop/dev-guide-aws-appflow-integration.md) +- [WordPress](/develop/dev-guide-wordpress.md) + +## REFERENCE + +- Development Guidelines + - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) + - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) +- [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) +- Cloud Native Development Environment + - [Gitpod](/develop/dev-guide-playground-gitpod.md) \ No newline at end of file diff --git a/TOC-tidb-cloud-essential.md b/TOC-tidb-cloud-essential.md new file mode 100644 index 0000000000000..cdeb3c7ca755d --- /dev/null +++ b/TOC-tidb-cloud-essential.md @@ -0,0 +1,572 @@ + + + +# Table of Contents + +## GET STARTED + +- Why TiDB Cloud + - [Introduction](/tidb-cloud/tidb-cloud-intro.md) + - [Features](/tidb-cloud/features.md) + - [MySQL Compatibility](/mysql-compatibility.md) +- Get Started + - [Try Out TiDB Cloud](/tidb-cloud/tidb-cloud-quickstart.md) + - [Try Out HTAP](/tidb-cloud/tidb-cloud-htap-quickstart.md) + - [Try Out TiDB Cloud CLI](/tidb-cloud/get-started-with-cli.md) +- Key Concepts + - [Overview](/tidb-cloud/key-concepts.md) + - [Architecture](/tidb-cloud/architecture-concepts.md) + - [Database Schema](/tidb-cloud/database-schema-concepts.md) + - [Transactions](/tidb-cloud/transaction-concepts.md) + - [SQL](/tidb-cloud/sql-concepts.md) + - 
[AI Features](/tidb-cloud/ai-feature-concepts.md) + - [Scalability](/tidb-cloud/scalability-concepts.md) + - [High Availability](/tidb-cloud/serverless-high-availability.md) + - [Monitoring](/tidb-cloud/monitoring-concepts.md) + - [Backup & Restore](/tidb-cloud/backup-and-restore-concepts.md) + - [Security](/tidb-cloud/security-concepts.md) + +## GUIDES + +- Manage Cluster + - [Select Your Cluster Plan](/tidb-cloud/select-cluster-tier.md) + - Manage TiDB Cloud Clusters + - [Create a TiDB Cloud Cluster](/tidb-cloud/create-tidb-cluster-serverless.md) + - Connect to Your TiDB Cloud Cluster + - [Network Connection Overview](/tidb-cloud/connect-to-tidb-cluster-serverless.md) + - [Connect via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md) + - [Connect via Private Endpoint with AWS](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) + - [Connect via Private Endpoint with Alibaba Cloud](/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md) + - Branch ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [Overview](/tidb-cloud/branch-overview.md) + - [Manage Branches](/tidb-cloud/branch-manage.md) + - [GitHub Integration](/tidb-cloud/branch-github-integration.md) + - [Back Up and Restore TiDB Cloud Data](/tidb-cloud/backup-and-restore-serverless.md) + - [Export Data from TiDB Cloud](/tidb-cloud/serverless-export.md) + - Use an HTAP Cluster with TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) + - [Read Data from TiFlash](/tiflash/use-tidb-to-read-tiflash.md) + - [Use MPP Mode](/tiflash/use-tiflash-mpp-mode.md) + - [Use FastScan](/tiflash/use-fastscan.md) + - [Supported Push-down Calculations](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash Query Result Materialization](/tiflash/tiflash-results-materialization.md) + - [TiFlash Late Materialization](/tiflash/tiflash-late-materialization.md) + - 
[Compatibility](/tiflash/tiflash-compatibility.md) + - [Pipeline Execution Model](/tiflash/tiflash-pipeline-model.md) + - Monitor and Alert + - [Overview](/tidb-cloud/monitor-tidb-cluster.md) + - [Built-in Metrics](/tidb-cloud/built-in-monitoring.md) + - [Built-in Alerting](/tidb-cloud/monitor-built-in-alerting.md) + - Subscribe to Alert Notifications + - [Subscribe via Email](/tidb-cloud/monitor-alert-email.md) + - [Subscribe via Slack](/tidb-cloud/monitor-alert-slack.md) + - [Subscribe via Zoom](/tidb-cloud/monitor-alert-zoom.md) + - [Cluster Events](/tidb-cloud/tidb-cloud-events.md) + - Tune Performance + - [Overview](/tidb-cloud/tidb-cloud-tune-performance-overview.md) + - [Analyze Performance](/tidb-cloud/tune-performance.md) + - SQL Tuning + - [Overview](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) + - Understanding the Query Execution Plan + - [Overview](/explain-overview.md) + - [`EXPLAIN` Walkthrough](/explain-walkthrough.md) + - [Indexes](/explain-indexes.md) + - [Joins](/explain-joins.md) + - [MPP Queries](/explain-mpp.md) + - [Subqueries](/explain-subqueries.md) + - [Aggregation](/explain-aggregation.md) + - [Views](/explain-views.md) + - [Partitions](/explain-partitions.md) + - [Index Merge](/explain-index-merge.md) + - SQL Optimization Process + - [Overview](/sql-optimization-concepts.md) + - Logic Optimization + - [Overview](/sql-logical-optimization.md) + - [Subquery Related Optimizations](/subquery-optimization.md) + - [Column Pruning](/column-pruning.md) + - [Decorrelation of Correlated Subquery](/correlated-subquery-optimization.md) + - [Eliminate Max/Min](/max-min-eliminate.md) + - [Predicates Push Down](/predicate-push-down.md) + - [Partition Pruning](/partition-pruning.md) + - [TopN and Limit Push Down](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - [Derive TopN or Limit from Window Functions](/derive-topn-from-window.md) + - Physical Optimization + - [Overview](/sql-physical-optimization.md) + - [Index 
Selection](/choose-index.md) + - [Statistics](/statistics.md) + - [Extended Statistics](/extended-statistics.md) + - [Wrong Index Solution](/wrong-index-solution.md) + - [Distinct Optimization](/agg-distinct-optimization.md) + - [Cost Model](/cost-model.md) + - [Runtime Filter](/runtime-filter.md) + - [Prepared Execution Plan Cache](/sql-prepared-plan-cache.md) + - [Non-Prepared Execution Plan Cache](/sql-non-prepared-plan-cache.md) + - Control Execution Plans + - [Overview](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [SQL Plan Management](/sql-plan-management.md) + - [The Blocklist of Optimization Rules and Expression Pushdown](/blocklist-control-plan.md) + - [Optimizer Fix Controls](/optimizer-fix-controls.md) + - [TiKV Follower Read](/follower-read.md) + - [Coprocessor Cache](/coprocessor-cache.md) + - Garbage Collection (GC) + - [Overview](/garbage-collection-overview.md) + - [Configuration](/garbage-collection-configuration.md) + - [Tune TiFlash Performance](/tiflash/tune-tiflash-performance.md) + - [Upgrade a TiDB Cluster](/tidb-cloud/upgrade-tidb-cluster.md) + - [Delete a TiDB Cluster](/tidb-cloud/delete-tidb-cluster.md) +- Migrate or Import Data + - [Overview](/tidb-cloud/tidb-cloud-migration-overview.md) + - Migrate Data into TiDB Cloud + - [Migrate Existing and Incremental Data Using Data Migration](/tidb-cloud/migrate-from-mysql-using-data-migration.md) + - [Migrate Incremental Data Using Data Migration](/tidb-cloud/migrate-incremental-data-from-mysql-using-data-migration.md) + - [Migrate from TiDB Self-Managed to TiDB Cloud](/tidb-cloud/migrate-from-op-tidb.md) + - [Migrate and Merge MySQL Shards of Large Datasets](/tidb-cloud/migrate-sql-shards.md) + - [Migrate from Amazon RDS for Oracle Using AWS DMS](/tidb-cloud/migrate-from-oracle-using-aws-dms.md) + - Import Data into TiDB Cloud + - [Import Local Files](/tidb-cloud/tidb-cloud-import-local-files.md) + - [Import Sample Data (SQL Files) from Cloud 
Storage](/tidb-cloud/import-sample-data-serverless.md) + - [Import CSV Files from Cloud Storage](/tidb-cloud/import-csv-files-serverless.md) + - [Import Parquet Files from Cloud Storage](/tidb-cloud/import-parquet-files-serverless.md) + - [Import Snapshot Files from Cloud Storage](/tidb-cloud/import-snapshot-files-serverless.md) + - [Import with MySQL CLI](/tidb-cloud/import-with-mysql-cli-serverless.md) + - Reference + - [Configure External Storage Access for TiDB Cloud](/tidb-cloud/configure-external-storage-access.md) + - [Naming Conventions for Data Import](/tidb-cloud/naming-conventions-for-data-import.md) + - [CSV Configurations for Importing Data](/tidb-cloud/csv-config-for-import-data.md) + - [Troubleshoot Access Denied Errors during Data Import from Amazon S3](/tidb-cloud/troubleshoot-import-access-denied-error.md) + - [Connect AWS DMS to TiDB Cloud clusters](/tidb-cloud/tidb-cloud-connect-aws-dms.md) +- Stream Data ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [Changefeed Overview](/tidb-cloud/essential-changefeed-overview.md) + - [Sink to MySQL](/tidb-cloud/essential-changefeed-sink-to-mysql.md) + - [Sink to Apache Kafka](/tidb-cloud/essential-changefeed-sink-to-kafka.md) +- Security + - [Security Overview](/tidb-cloud/security-overview.md) + - Identity Access Control + - [Password Authentication](/tidb-cloud/tidb-cloud-password-authentication.md) + - [Standard SSO Authentication](/tidb-cloud/tidb-cloud-sso-authentication.md) + - [Organization SSO Authentication](/tidb-cloud/tidb-cloud-org-sso-authentication.md) + - [Identity Access Management](/tidb-cloud/manage-user-access.md) + - [OAuth 2.0](/tidb-cloud/oauth2.md) + - Network Access Control + - [Connect via Private Endpoint with AWS](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) + - [Connect via Private Endpoint with Alibaba Cloud](/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md) + - [Configure Firewall Rules for Public 
Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md) + - [TLS Connections to TiDB Cloud](/tidb-cloud/secure-connections-to-serverless-clusters.md) + - Private Link Connection + - [Private Link Connection Overview](/tidb-cloud/serverless-private-link-connection.md) + - [Connect to Amazon RDS](/tidb-cloud/serverless-private-link-connection-to-aws-rds.md) + - [Connect to Alibaba Cloud RDS](/tidb-cloud/serverless-private-link-connection-to-alicloud-rds.md) + - [Connect to Confluent Cloud on AWS](/tidb-cloud/serverless-private-link-connection-to-aws-confluent.md) + - [Connect to Self-Hosted Kafka on Alibaba Cloud](/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-alicloud.md) + - [Connect to Self-Hosted Kafka on AWS](/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-aws.md) + - Audit Management + - [Console Audit Logging](/tidb-cloud/tidb-cloud-console-auditing.md) + - [Database Audit Logging](/tidb-cloud/essential-database-audit-logging.md) +- Billing + - [Invoices](/tidb-cloud/tidb-cloud-billing.md#invoices) + - [Billing Details](/tidb-cloud/tidb-cloud-billing.md#billing-details) + - [Cost Explorer](/tidb-cloud/tidb-cloud-billing.md#cost-explorer) + - [Billing Profile](/tidb-cloud/tidb-cloud-billing.md#billing-profile) + - [Credits](/tidb-cloud/tidb-cloud-billing.md#credits) + - [Payment Method Setting](/tidb-cloud/tidb-cloud-billing.md#payment-method) + - [Billing from Cloud Provider Marketplace](/tidb-cloud/tidb-cloud-billing.md#billing-from-cloud-provider-marketplace) + - [Manage Budgets](/tidb-cloud/tidb-cloud-budget.md) +- Integrations + - [Airbyte](/tidb-cloud/integrate-tidbcloud-with-airbyte.md) + - [Cloudflare](/tidb-cloud/integrate-tidbcloud-with-cloudflare.md) + - [dbt](/tidb-cloud/integrate-tidbcloud-with-dbt.md) + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - [n8n](/tidb-cloud/integrate-tidbcloud-with-n8n.md) + - [Netlify](/tidb-cloud/integrate-tidbcloud-with-netlify.md) + - 
[ProxySQL](/develop/dev-guide-proxysql-integration.md) + - Terraform + - [Terraform Integration Overview](/tidb-cloud/terraform-tidbcloud-provider-overview.md) + - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) + - [Use the `tidbcloud_serverless_cluster` Resource](/tidb-cloud/terraform-use-serverless-cluster-resource-manage-essential.md) + - [Use the `tidbcloud_serverless_branch` Resource](/tidb-cloud/terraform-use-serverless-branch-resource.md) + - [Use the `tidbcloud_serverless_export` Resource](/tidb-cloud/terraform-use-serverless-export-resource.md) + - [Use the `tidbcloud_sql_user` Resource](/tidb-cloud/terraform-use-sql-user-resource.md) + - [Use the `tidbcloud_import` Resource](/tidb-cloud/terraform-use-import-resource.md) + - [Migrate Cluster Resource](/tidb-cloud/terraform-migrate-cluster-resource.md) + - [Vercel](/tidb-cloud/integrate-tidbcloud-with-vercel.md) + - [Zapier](/tidb-cloud/integrate-tidbcloud-with-zapier.md) + +## REFERENCE + +- SQL Reference + - [Explore SQL with TiDB](/basic-sql-operations.md) + - SQL Language Structure and Syntax + - Attributes + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [Literal Values](/literal-values.md) + - [Schema Object Names](/schema-object-names.md) + - [Keywords and Reserved Words](/keywords.md) + - [User-Defined Variables](/user-defined-variables.md) + - [Expression Syntax](/expression-syntax.md) + - [Comment Syntax](/comment-syntax.md) + - SQL Statements + - [Overview](/sql-statements/sql-statement-overview.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN CLEANUP INDEX`](/sql-statements/sql-statement-admin-cleanup.md) + - 
[`ADMIN PAUSE DDL`](/sql-statements/sql-statement-admin-pause-ddl.md) + - [`ADMIN RECOVER INDEX`](/sql-statements/sql-statement-admin-recover.md) + - [`ADMIN RESUME DDL`](/sql-statements/sql-statement-admin-resume-ddl.md) + - [`ADMIN SHOW DDL [JOBS|JOB QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INSTANCE`](/sql-statements/sql-statement-alter-instance.md) + - [`ALTER SEQUENCE`](/sql-statements/sql-statement-alter-sequence.md) + - `ALTER TABLE` + - [Overview](/sql-statements/sql-statement-alter-table.md) + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CANCEL IMPORT JOB`](/sql-statements/sql-statement-cancel-import-job.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE 
LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER`](/sql-statements/sql-statement-flashback-cluster.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT <privileges>`](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT <role>`](/sql-statements/sql-statement-grant-role.md) + - [`IMPORT INTO`](/sql-statements/sql-statement-import-into.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL 
[TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOAD DATA`](/sql-statements/sql-statement-load-data.md) + - [`LOCK STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`LOCK TABLES` and `UNLOCK TABLES`](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`RENAME USER`](/sql-statements/sql-statement-rename-user.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`REVOKE <privileges>`](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE <role>`](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] <variable>`](/sql-statements/sql-statement-set-variable.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW COLUMN_STATS_USAGE`](/sql-statements/sql-statement-show-column-stats-usage.md) + - [`SHOW COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) + - [`SHOW CREATE 
SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW IMPORT JOB`](/sql-statements/sql-statement-show-import-job.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW PROCESSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_BUCKETS`](/sql-statements/sql-statement-show-stats-buckets.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-stats-histograms.md) + - [`SHOW STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATS_TOPN`](/sql-statements/sql-statement-show-stats-topn.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW 
WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - Data Types + - [Overview](/data-type-overview.md) + - [Default Values](/data-type-default-values.md) + - [Numeric Types](/data-type-numeric.md) + - [Date and Time Types](/data-type-date-and-time.md) + - [String Types](/data-type-string.md) + - [JSON Type](/data-type-json.md) + - Functions and Operators + - [Overview](/functions-and-operators/functions-and-operators-overview.md) + - [Type Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [Operators](/functions-and-operators/operators.md) + - [Control Flow Functions](/functions-and-operators/control-flow-functions.md) + - [String Functions](/functions-and-operators/string-functions.md) + - [Numeric Functions and Operators](/functions-and-operators/numeric-functions-and-operators.md) + - [Date and Time Functions](/functions-and-operators/date-and-time-functions.md) + - [Bit Functions and Operators](/functions-and-operators/bit-functions-and-operators.md) + - [Cast Functions and Operators](/functions-and-operators/cast-functions-and-operators.md) + - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) + - [Locking Functions](/functions-and-operators/locking-functions.md) + - [Information Functions](/functions-and-operators/information-functions.md) + - JSON Functions + - [Overview](/functions-and-operators/json-functions.md) + - [Functions That Create 
JSON](/functions-and-operators/json-functions/json-functions-create.md) + - [Functions That Search JSON](/functions-and-operators/json-functions/json-functions-search.md) + - [Functions That Modify JSON](/functions-and-operators/json-functions/json-functions-modify.md) + - [Functions That Return JSON](/functions-and-operators/json-functions/json-functions-return.md) + - [JSON Utility Functions](/functions-and-operators/json-functions/json-functions-utility.md) + - [Functions That Aggregate JSON](/functions-and-operators/json-functions/json-functions-aggregate.md) + - [Aggregate (GROUP BY) Functions](/functions-and-operators/aggregate-group-by-functions.md) + - [GROUP BY Modifiers](/functions-and-operators/group-by-modifier.md) + - [Window Functions](/functions-and-operators/window-functions.md) + - [Miscellaneous Functions](/functions-and-operators/miscellaneous-functions.md) + - [Precision Math](/functions-and-operators/precision-math.md) + - [Set Operations](/functions-and-operators/set-operators.md) + - [Sequence Functions](/functions-and-operators/sequence-functions.md) + - [List of Expressions for Pushdown](/functions-and-operators/expressions-pushed-down.md) + - [TiDB Specific Functions](/functions-and-operators/tidb-functions.md) + - [Clustered Indexes](/clustered-indexes.md) + - [Global Indexes](/global-indexes.md) + - [Constraints](/constraints.md) + - [Generated Columns](/generated-columns.md) + - [SQL Mode](/sql-mode.md) + - [Table Attributes](/table-attributes.md) + - Transactions + - [Overview](/transaction-overview.md) + - [Isolation Levels](/transaction-isolation-levels.md) + - [Optimistic Transactions](/optimistic-transaction.md) + - [Pessimistic Transactions](/pessimistic-transaction.md) + - [Non-Transactional DML Statements](/non-transactional-dml.md) + - [Views](/views.md) + - [Partitioning](/partitioned-table.md) + - [Temporary Tables](/temporary-tables.md) + - [Cached Tables](/cached-tables.md) + - [FOREIGN KEY Constraints](/foreign-key.md) + - 
Character Set and Collation + - [Overview](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - Read Historical Data + - Use Stale Read (Recommended) + - [Usage Scenarios of Stale Read](/stale-read.md) + - [Perform Stale Read Using `AS OF TIMESTAMP`](/as-of-timestamp.md) + - [Perform Stale Read Using `tidb_read_staleness`](/tidb-read-staleness.md) + - [Perform Stale Read Using `tidb_external_ts`](/tidb-external-ts.md) + - [Use the `tidb_snapshot` System Variable](/read-historical-data.md) + - System Tables + - `mysql` Schema + - [Overview](/mysql-schema/mysql-schema.md) + - [`user`](/mysql-schema/mysql-schema-user.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CHECK_CONSTRAINTS`](/information-schema/information-schema-check-constraints.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`KEYWORDS`](/information-schema/information-schema-keywords.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - 
[`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_CHECK_CONSTRAINTS`](/information-schema/information-schema-tidb-check-constraints.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_INDEX_USAGE`](/information-schema/information-schema-tidb-index-usage.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIFLASH_SEGMENTS`](/information-schema/information-schema-tiflash-segments.md) + - [`TIFLASH_TABLES`](/information-schema/information-schema-tiflash-tables.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - PERFORMANCE_SCHEMA + - [Overview](/performance-schema/performance-schema.md) + - 
[`SESSION_CONNECT_ATTRS`](/performance-schema/performance-schema-session-connect-attrs.md) + - SYS + - [Overview](/sys-schema/sys-schema.md) + - [`schema_unused_indexes`](/sys-schema/sys-schema-unused-indexes.md) + - [Metadata Lock](/metadata-lock.md) + - [TiDB Accelerated Table Creation](/accelerated-table-creation.md) +- CLI Reference ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [Overview](/tidb-cloud/cli-reference.md) + - auth + - [login](/tidb-cloud/ticloud-auth-login.md) + - [logout](/tidb-cloud/ticloud-auth-logout.md) + - [whoami](/tidb-cloud/ticloud-auth-whoami.md) + - serverless + - [capacity](/tidb-cloud/ticloud-serverless-capacity.md) + - [create](/tidb-cloud/ticloud-cluster-create.md) + - [delete](/tidb-cloud/ticloud-cluster-delete.md) + - [describe](/tidb-cloud/ticloud-cluster-describe.md) + - [list](/tidb-cloud/ticloud-cluster-list.md) + - [update](/tidb-cloud/ticloud-serverless-update.md) + - [spending-limit](/tidb-cloud/ticloud-serverless-spending-limit.md) + - [region](/tidb-cloud/ticloud-serverless-region.md) + - [shell](/tidb-cloud/ticloud-serverless-shell.md) + - branch + - [create](/tidb-cloud/ticloud-branch-create.md) + - [delete](/tidb-cloud/ticloud-branch-delete.md) + - [describe](/tidb-cloud/ticloud-branch-describe.md) + - [list](/tidb-cloud/ticloud-branch-list.md) + - [shell](/tidb-cloud/ticloud-branch-shell.md) + - import + - [cancel](/tidb-cloud/ticloud-import-cancel.md) + - [describe](/tidb-cloud/ticloud-import-describe.md) + - [list](/tidb-cloud/ticloud-import-list.md) + - [start](/tidb-cloud/ticloud-import-start.md) + - export + - [create](/tidb-cloud/ticloud-serverless-export-create.md) + - [describe](/tidb-cloud/ticloud-serverless-export-describe.md) + - [list](/tidb-cloud/ticloud-serverless-export-list.md) + - [cancel](/tidb-cloud/ticloud-serverless-export-cancel.md) + - [download](/tidb-cloud/ticloud-serverless-export-download.md) + - sql-user + - [create](/tidb-cloud/ticloud-serverless-sql-user-create.md) + - 
[delete](/tidb-cloud/ticloud-serverless-sql-user-delete.md) + - [list](/tidb-cloud/ticloud-serverless-sql-user-list.md) + - [update](/tidb-cloud/ticloud-serverless-sql-user-update.md) + - authorized-network + - [create](/tidb-cloud/ticloud-serverless-authorized-network-create.md) + - [delete](/tidb-cloud/ticloud-serverless-authorized-network-delete.md) + - [list](/tidb-cloud/ticloud-serverless-authorized-network-list.md) + - [update](/tidb-cloud/ticloud-serverless-authorized-network-update.md) + - audit-log + - config + - [update](/tidb-cloud/ticloud-serverless-audit-log-config-update.md) + - [describe](/tidb-cloud/ticloud-serverless-audit-log-config-describe.md) + - filter-rule + - [create](/tidb-cloud/ticloud-serverless-audit-log-filter-rule-create.md) + - [delete](/tidb-cloud/ticloud-serverless-audit-log-filter-rule-delete.md) + - [describe](/tidb-cloud/ticloud-serverless-audit-log-filter-rule-describe.md) + - [list](/tidb-cloud/ticloud-serverless-audit-log-filter-rule-list.md) + - [update](/tidb-cloud/ticloud-serverless-audit-log-filter-rule-update.md) + - [template](/tidb-cloud/ticloud-serverless-audit-log-filter-rule-template.md) + - [download](/tidb-cloud/ticloud-serverless-audit-log-download.md) + - [completion](/tidb-cloud/ticloud-completion.md) + - config + - [create](/tidb-cloud/ticloud-config-create.md) + - [delete](/tidb-cloud/ticloud-config-delete.md) + - [describe](/tidb-cloud/ticloud-config-describe.md) + - [edit](/tidb-cloud/ticloud-config-edit.md) + - [list](/tidb-cloud/ticloud-config-list.md) + - [set](/tidb-cloud/ticloud-config-set.md) + - [use](/tidb-cloud/ticloud-config-use.md) + - project + - [list](/tidb-cloud/ticloud-project-list.md) + - [upgrade](/tidb-cloud/ticloud-upgrade.md) + - [help](/tidb-cloud/ticloud-help.md) +- General Reference + - TiDB Classic Architecture + - [Overview](/tidb-architecture.md) + - [Storage](/tidb-storage.md) + - [Computing](/tidb-computing.md) + - [Scheduling](/tidb-scheduling.md) + - [TSO](/tso.md) + - [TiDB X 
Architecture](/tidb-cloud/tidb-x-architecture.md) + - Storage Engines + - TiKV + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Spill to Disk](/tiflash/tiflash-spill-disk.md) + - TiDB Cloud Partner Web Console + - [TiDB Cloud Partners](/tidb-cloud/tidb-cloud-partners.md) + - [MSP Customer](/tidb-cloud/managed-service-provider-customer.md) + - [Reseller's Customer](/tidb-cloud/cppo-customer.md) + - [{{{ .starter }}} and Essential Limitations](/tidb-cloud/serverless-limitations.md) + - [Limited SQL Features on TiDB Cloud](/tidb-cloud/limited-sql-features.md) + - [TiDB Limitations](/tidb-limitations.md) + - [System Variables](/system-variables.md) + - [Server Status Variables](/status-variables.md) + - [Table Filter](/table-filter.md) + - [URI Formats of External Storage Services](/external-storage-uri.md) + - [Troubleshoot Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) + - [Notifications](/tidb-cloud/notifications.md) +- Support Plan + - [Connected Care Overview](/tidb-cloud/connected-care-overview.md) + - [Connected Care Details](/tidb-cloud/connected-care-detail.md) + - Connected Care Support Service Features + - [Connected: AI Chat in IM](/tidb-cloud/connected-ai-chat-in-im.md) + - Connected: IM Subscription for TiDB Cloud Alerts + - [Subscribe via Slack](/tidb-cloud/monitor-alert-slack.md) + - [Subscribe via Zoom](/tidb-cloud/monitor-alert-zoom.md) + - [Subscribe via Flashduty](/tidb-cloud/monitor-alert-flashduty.md) + - [Subscribe via PagerDuty](/tidb-cloud/monitor-alert-pagerduty.md) + - Connected: IM Ticket Creation and Update Subscription + - [Create Tickets and Subscribe to Ticket Updates via Slack](/tidb-cloud/connected-slack-ticket-creation.md) + - [Create Tickets and Subscribe to Ticket Updates via Lark](/tidb-cloud/connected-lark-ticket-creation.md) + - Connected: IM Interaction for Support Tickets + 
- [Interact with Support Tickets via Slack](/tidb-cloud/connected-slack-ticket-interaction.md) + - [Interact with Support Tickets via Lark](/tidb-cloud/connected-lark-ticket-interaction.md) + - [Get Support](/tidb-cloud/tidb-cloud-support.md) +- FAQs + - [TiDB Cloud FAQs](/tidb-cloud/tidb-cloud-faq.md) +- [Glossary](/tidb-cloud/tidb-cloud-glossary.md) diff --git a/TOC-tidb-cloud-premium.md b/TOC-tidb-cloud-premium.md new file mode 100644 index 0000000000000..299577ac3e16e --- /dev/null +++ b/TOC-tidb-cloud-premium.md @@ -0,0 +1,633 @@ + + + +# Table of Contents + +## GET STARTED + +- Why TiDB Cloud + - [TiDB Cloud Introduction](/tidb-cloud/tidb-cloud-intro.md) + - [MySQL Compatibility](/mysql-compatibility.md) +- Get Started with TiDB Cloud + - [Try Out TiDB Cloud](/tidb-cloud/tidb-cloud-quickstart.md) + - [Try Out HTAP](/tidb-cloud/tidb-cloud-htap-quickstart.md) + - [Try Out TiDB Cloud CLI](/tidb-cloud/get-started-with-cli.md) +- Key Concepts + - [Overview](/tidb-cloud/key-concepts.md) + - [Architecture](/tidb-cloud/architecture-concepts.md) + - [Database Schema](/tidb-cloud/database-schema-concepts.md) + - [Transactions](/tidb-cloud/transaction-concepts.md) + - [SQL](/tidb-cloud/sql-concepts.md) + - [AI Features](/tidb-cloud/ai-feature-concepts.md) + - [Scalability](/tidb-cloud/scalability-concepts.md) + - [High Availability](/tidb-cloud/serverless-high-availability.md) + - [Monitoring](/tidb-cloud/monitoring-concepts.md) + - [Backup & Restore](/tidb-cloud/backup-and-restore-concepts.md) + - [Security](/tidb-cloud/security-concepts.md) + +## DEVELOP + +- Development Quick Start + - [Build a TiDB Cloud Cluster](/develop/dev-guide-build-cluster-in-cloud.md) + - [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) +- Connect to TiDB Cloud + - GUI Database Tools + - [JetBrains DataGrip](/develop/dev-guide-gui-datagrip.md) + - [DBeaver](/develop/dev-guide-gui-dbeaver.md) + - [VS Code](/develop/dev-guide-gui-vscode-sqltools.md) + - [MySQL 
Workbench](/develop/dev-guide-gui-mysql-workbench.md) + - [Navicat](/develop/dev-guide-gui-navicat.md) + - [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) + - Java + - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) + - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) + - [Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) + - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) + - Go + - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) + - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) + - Python + - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) + - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) + - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) + - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) + - [peewee](/develop/dev-guide-sample-application-python-peewee.md) + - [Django](/develop/dev-guide-sample-application-python-django.md) + - Node.js + - [node-mysql2](/develop/dev-guide-sample-application-nodejs-mysql2.md) + - [mysql.js](/develop/dev-guide-sample-application-nodejs-mysqljs.md) + - [Prisma](/develop/dev-guide-sample-application-nodejs-prisma.md) + - [Sequelize](/develop/dev-guide-sample-application-nodejs-sequelize.md) + - [TypeORM](/develop/dev-guide-sample-application-nodejs-typeorm.md) + - [Next.js](/develop/dev-guide-sample-application-nextjs.md) + - Ruby + - [mysql2](/develop/dev-guide-sample-application-ruby-mysql2.md) + - [Rails](/develop/dev-guide-sample-application-ruby-rails.md) + - C# + - [C#](/develop/dev-guide-sample-application-cs.md) + - [WordPress](/develop/dev-guide-wordpress.md) + - Serverless Driver ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [TiDB Cloud Serverless 
Driver](/develop/serverless-driver.md) + - [Node.js Example](/develop/serverless-driver-node-example.md) + - [Prisma Example](/develop/serverless-driver-prisma-example.md) + - [Kysely Example](/develop/serverless-driver-kysely-example.md) + - [Drizzle Example](/develop/serverless-driver-drizzle-example.md) +- Development Reference + - Design Database Schema + - [Overview](/develop/dev-guide-schema-design-overview.md) + - [Create a Database](/develop/dev-guide-create-database.md) + - [Create a Table](/develop/dev-guide-create-table.md) + - [Create a Secondary Index](/develop/dev-guide-create-secondary-indexes.md) + - Write Data + - [Insert Data](/develop/dev-guide-insert-data.md) + - [Update Data](/develop/dev-guide-update-data.md) + - [Delete Data](/develop/dev-guide-delete-data.md) + - [Periodically Delete Expired Data Using TTL (Time to Live)](/time-to-live.md) + - [Prepared Statements](/develop/dev-guide-prepared-statement.md) + - Read Data + - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) + - [Multi-table Join Queries](/develop/dev-guide-join-tables.md) + - [Subquery](/develop/dev-guide-use-subqueries.md) + - [Paginate Results](/develop/dev-guide-paginate-results.md) + - [Views](/develop/dev-guide-use-views.md) + - [Temporary Tables](/develop/dev-guide-use-temporary-tables.md) + - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) + - Read Replica Data + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - Transaction + - [Overview](/develop/dev-guide-transaction-overview.md) + - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) + - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) + - Optimize + - 
[Overview](/develop/dev-guide-optimize-sql-overview.md) + - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) + - [Best Practices for Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) + - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) + - Other Optimization Methods + - [Avoid Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) + - [Unique Serial Number Generation](/develop/dev-guide-unique-serial-number-generation.md) + - Troubleshoot + - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) + - [Unstable Result Set](/develop/dev-guide-unstable-result-set.md) + - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) + - Development Guidelines + - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) + - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) + - [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) + - Third-Party Support + - [Third-Party Tools Supported by TiDB](/develop/dev-guide-third-party-support.md) + - [Known Incompatibility Issues with Third-Party Tools](/develop/dev-guide-third-party-tools-compatibility.md) + +## GUIDES + +- Manage Instances + - [Select Your Cluster Plan](/tidb-cloud/select-cluster-tier.md) + - Manage TiDB Cloud Instances + - [Create a {{{ .premium }}} Instance](/tidb-cloud/premium/create-tidb-instance-premium.md) + - Connect to Your TiDB Cloud Instance + - [Connection Overview](/tidb-cloud/premium/connect-to-tidb-instance.md) + - [Connect via Public Endpoint](/tidb-cloud/premium/connect-to-premium-via-public-connection.md) + - [Connect via Private Endpoint with AWS](/tidb-cloud/premium/connect-to-premium-via-aws-private-endpoint.md) + - [Connect via Private Endpoint with Alibaba Cloud](/tidb-cloud/premium/connect-to-premium-via-alibaba-cloud-private-endpoint.md) + - [Back Up and Restore TiDB Cloud Data](/tidb-cloud/premium/backup-and-restore-premium.md) 
+ - Use an HTAP Cluster with TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) + - [Read Data from TiFlash](/tiflash/use-tidb-to-read-tiflash.md) + - [Use MPP Mode](/tiflash/use-tiflash-mpp-mode.md) + - [Use FastScan](/tiflash/use-fastscan.md) + - [Supported Push-down Calculations](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash Query Result Materialization](/tiflash/tiflash-results-materialization.md) + - [TiFlash Late Materialization](/tiflash/tiflash-late-materialization.md) + - [Compatibility](/tiflash/tiflash-compatibility.md) + - [Pipeline Execution Model](/tiflash/tiflash-pipeline-model.md) + - Monitor and Alert + - [Overview](/tidb-cloud/monitor-tidb-cluster.md) + - [Built-in Metrics](/tidb-cloud/premium/built-in-monitoring-premium.md) + - Tune Performance + - [Overview](/tidb-cloud/tidb-cloud-tune-performance-overview.md) + - [Analyze Performance](/tidb-cloud/tune-performance.md) + - SQL Tuning + - [Overview](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) + - Understanding the Query Execution Plan + - [Overview](/explain-overview.md) + - [`EXPLAIN` Walkthrough](/explain-walkthrough.md) + - [Indexes](/explain-indexes.md) + - [Joins](/explain-joins.md) + - [MPP Queries](/explain-mpp.md) + - [Subqueries](/explain-subqueries.md) + - [Aggregation](/explain-aggregation.md) + - [Views](/explain-views.md) + - [Partitions](/explain-partitions.md) + - [Index Merge](/explain-index-merge.md) + - SQL Optimization Process + - [Overview](/sql-optimization-concepts.md) + - Logic Optimization + - [Overview](/sql-logical-optimization.md) + - [Subquery Related Optimizations](/subquery-optimization.md) + - [Column Pruning](/column-pruning.md) + - [Decorrelation of Correlated Subquery](/correlated-subquery-optimization.md) + - [Eliminate Max/Min](/max-min-eliminate.md) + - [Predicates Push Down](/predicate-push-down.md) + - [Partition Pruning](/partition-pruning.md) + - [TopN and 
Limit Push Down](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - [Derive TopN or Limit from Window Functions](/derive-topn-from-window.md) + - Physical Optimization + - [Overview](/sql-physical-optimization.md) + - [Index Selection](/choose-index.md) + - [Statistics](/statistics.md) + - [Extended Statistics](/extended-statistics.md) + - [Wrong Index Solution](/wrong-index-solution.md) + - [Distinct Optimization](/agg-distinct-optimization.md) + - [Cost Model](/cost-model.md) + - [Runtime Filter](/runtime-filter.md) + - [Prepared Execution Plan Cache](/sql-prepared-plan-cache.md) + - [Non-Prepared Execution Plan Cache](/sql-non-prepared-plan-cache.md) + - Control Execution Plans + - [Overview](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [SQL Plan Management](/sql-plan-management.md) + - [The Blocklist of Optimization Rules and Expression Pushdown](/blocklist-control-plan.md) + - [Optimizer Fix Controls](/optimizer-fix-controls.md) + - [TiKV Follower Read](/follower-read.md) + - [Coprocessor Cache](/coprocessor-cache.md) + - [Tune TiFlash Performance](/tiflash/tune-tiflash-performance.md) + - [Delete a TiDB Instance](/tidb-cloud/premium/delete-tidb-instance.md) +- Migrate or Import Data + - [Overview](/tidb-cloud/tidb-cloud-migration-overview.md) + - Migrate Data into TiDB Cloud + - [Migrate from TiDB Self-Managed to TiDB Cloud Premium](/tidb-cloud/premium/migrate-from-op-tidb-premium.md) + - [Migrate and Merge MySQL Shards of Large Datasets](/tidb-cloud/migrate-sql-shards.md) + - [Migrate from Amazon RDS for Oracle Using AWS DMS](/tidb-cloud/migrate-from-oracle-using-aws-dms.md) + - Import Data into TiDB Cloud + - [Import Sample Data (SQL Files) from Cloud Storage](/tidb-cloud/import-sample-data-serverless.md) + - [Import CSV Files from Cloud Storage](/tidb-cloud/premium/import-csv-files-premium.md) + - [Import CSV Files from Amazon S3](/tidb-cloud/premium/import-from-s3-premium.md) + - [Import Parquet Files from 
Cloud Storage](/tidb-cloud/import-parquet-files-serverless.md) + - [Import Snapshot Files from Cloud Storage](/tidb-cloud/import-snapshot-files-serverless.md) + - [Import Data Using MySQL CLI](/tidb-cloud/premium/import-with-mysql-cli-premium.md) + - Reference + - [Configure External Storage Access for TiDB Cloud](/tidb-cloud/configure-external-storage-access.md) + - [Naming Conventions for Data Import](/tidb-cloud/naming-conventions-for-data-import.md) + - [CSV Configurations for Importing Data](/tidb-cloud/csv-config-for-import-data.md) + - [Troubleshoot Access Denied Errors during Data Import from Amazon S3](/tidb-cloud/troubleshoot-import-access-denied-error.md) + - [Connect AWS DMS to TiDB Cloud clusters](/tidb-cloud/tidb-cloud-connect-aws-dms.md) +- Stream Data + - [Changefeed Overview](/tidb-cloud/changefeed-overview.md) + - [To MySQL Sink](/tidb-cloud/changefeed-sink-to-mysql.md) + - [To Kafka Sink](/tidb-cloud/changefeed-sink-to-apache-kafka.md) + - Reference + - [Set Up Self-Hosted Kafka Private Link Service in AWS](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md) + - [Set Up Private Endpoint for Changefeeds](/tidb-cloud/premium/set-up-sink-private-endpoint-premium.md) +- Security + - [Security Overview](/tidb-cloud/security-overview.md) + - Identity Access Control + - [Password Authentication](/tidb-cloud/tidb-cloud-password-authentication.md) + - [Standard SSO Authentication](/tidb-cloud/tidb-cloud-sso-authentication.md) + - [Organization SSO Authentication](/tidb-cloud/tidb-cloud-org-sso-authentication.md) + - [Identity Access Management](/tidb-cloud/premium/manage-user-access-premium.md) + - [OAuth 2.0](/tidb-cloud/oauth2.md) + - Network Access Control + - [Configure an IP Access List](/tidb-cloud/premium/configure-ip-access-list-premium.md) + - [Connect via Private Endpoint with AWS](/tidb-cloud/premium/connect-to-premium-via-aws-private-endpoint.md) + - [Connect via Private Endpoint with Alibaba 
Cloud](/tidb-cloud/premium/connect-to-premium-via-alibaba-cloud-private-endpoint.md) + - [Configure Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md) + - [TLS Connections to TiDB Cloud](/tidb-cloud/premium/tidb-cloud-tls-connect-to-premium.md) + - Data Access Control + - [User-Controlled Log Redaction](/tidb-cloud/tidb-cloud-log-redaction.md) + - Audit Management + - [Database Audit Logging](/tidb-cloud/premium/tidb-cloud-auditing-premium.md) + - [Console Audit Logging](/tidb-cloud/tidb-cloud-console-auditing.md) +- Billing + - [Invoices](/tidb-cloud/tidb-cloud-billing.md#invoices) + - [Billing Details](/tidb-cloud/tidb-cloud-billing.md#billing-details) + - [Cost Explorer](/tidb-cloud/tidb-cloud-billing.md#cost-explorer) + - [Billing Profile](/tidb-cloud/tidb-cloud-billing.md#billing-profile) + - [Credits](/tidb-cloud/tidb-cloud-billing.md#credits) + - [Payment Method Setting](/tidb-cloud/tidb-cloud-billing.md#payment-method) + - [Billing from Cloud Provider Marketplace](/tidb-cloud/tidb-cloud-billing.md#billing-from-cloud-provider-marketplace) + - [Billing for Changefeed](/tidb-cloud/premium/tidb-cloud-billing-ticdc-ccu.md) + - [Manage Budgets](/tidb-cloud/tidb-cloud-budget.md) +- Integrations + - [Airbyte](/tidb-cloud/integrate-tidbcloud-with-airbyte.md) + - [Cloudflare](/tidb-cloud/integrate-tidbcloud-with-cloudflare.md) + - [dbt](/tidb-cloud/integrate-tidbcloud-with-dbt.md) + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - [n8n](/tidb-cloud/integrate-tidbcloud-with-n8n.md) + - [Netlify](/tidb-cloud/integrate-tidbcloud-with-netlify.md) + - [ProxySQL](/develop/dev-guide-proxysql-integration.md) + - Terraform + - [Terraform Integration Overview](/tidb-cloud/terraform-tidbcloud-provider-overview.md) + - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) + - [Use the `tidbcloud_serverless_cluster` 
Resource](/tidb-cloud/terraform-use-serverless-cluster-resource-manage-essential.md) + - [Use the `tidbcloud_serverless_branch` Resource](/tidb-cloud/terraform-use-serverless-branch-resource.md) + - [Use the `tidbcloud_serverless_export` Resource](/tidb-cloud/terraform-use-serverless-export-resource.md) + - [Use the `tidbcloud_sql_user` Resource](/tidb-cloud/terraform-use-sql-user-resource.md) + - [Use the `tidbcloud_import` Resource](/tidb-cloud/terraform-use-import-resource.md) + - [Migrate Cluster Resource](/tidb-cloud/terraform-migrate-cluster-resource.md) + - [Vercel](/tidb-cloud/integrate-tidbcloud-with-vercel.md) + - [Zapier](/tidb-cloud/integrate-tidbcloud-with-zapier.md) + +## REFERENCE + +- SQL Reference + - [Explore SQL with TiDB](/basic-sql-operations.md) + - SQL Language Structure and Syntax + - Attributes + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [Literal Values](/literal-values.md) + - [Schema Object Names](/schema-object-names.md) + - [Keywords and Reserved Words](/keywords.md) + - [User-Defined Variables](/user-defined-variables.md) + - [Expression Syntax](/expression-syntax.md) + - [Comment Syntax](/comment-syntax.md) + - SQL Statements + - [Overview](/sql-statements/sql-statement-overview.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN CLEANUP INDEX`](/sql-statements/sql-statement-admin-cleanup.md) + - [`ADMIN PAUSE DDL`](/sql-statements/sql-statement-admin-pause-ddl.md) + - [`ADMIN RECOVER INDEX`](/sql-statements/sql-statement-admin-recover.md) + - [`ADMIN RESUME DDL`](/sql-statements/sql-statement-admin-resume-ddl.md) + - [`ADMIN SHOW DDL [JOBS|JOB 
QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INSTANCE`](/sql-statements/sql-statement-alter-instance.md) + - [`ALTER SEQUENCE`](/sql-statements/sql-statement-alter-sequence.md) + - `ALTER TABLE` + - [Overview](/sql-statements/sql-statement-alter-table.md) + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CANCEL IMPORT JOB`](/sql-statements/sql-statement-cancel-import-job.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - 
[`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER`](/sql-statements/sql-statement-flashback-cluster.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT <privileges>`](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT <role>`](/sql-statements/sql-statement-grant-role.md) + - [`IMPORT INTO`](/sql-statements/sql-statement-import-into.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOAD DATA`](/sql-statements/sql-statement-load-data.md) + - [`LOCK STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`LOCK TABLES` and `UNLOCK TABLES`](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) + - 
[`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`RENAME USER`](/sql-statements/sql-statement-rename-user.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`REVOKE <privileges>`](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE <role>`](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] <variable>`](/sql-statements/sql-statement-set-variable.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW COLUMN_STATS_USAGE`](/sql-statements/sql-statement-show-column-stats-usage.md) + - [`SHOW COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) + - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW 
DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW IMPORT JOB`](/sql-statements/sql-statement-show-import-job.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW PROCESSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_BUCKETS`](/sql-statements/sql-statement-show-stats-buckets.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-stats-histograms.md) + - [`SHOW STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATS_TOPN`](/sql-statements/sql-statement-show-stats-topn.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - 
[`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - Data Types + - [Overview](/data-type-overview.md) + - [Default Values](/data-type-default-values.md) + - [Numeric Types](/data-type-numeric.md) + - [Date and Time Types](/data-type-date-and-time.md) + - [String Types](/data-type-string.md) + - [JSON Type](/data-type-json.md) + - Functions and Operators + - [Overview](/functions-and-operators/functions-and-operators-overview.md) + - [Type Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [Operators](/functions-and-operators/operators.md) + - [Control Flow Functions](/functions-and-operators/control-flow-functions.md) + - [String Functions](/functions-and-operators/string-functions.md) + - [Numeric Functions and Operators](/functions-and-operators/numeric-functions-and-operators.md) + - [Date and Time Functions](/functions-and-operators/date-and-time-functions.md) + - [Bit Functions and Operators](/functions-and-operators/bit-functions-and-operators.md) + - [Cast Functions and Operators](/functions-and-operators/cast-functions-and-operators.md) + - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) + - [Locking Functions](/functions-and-operators/locking-functions.md) + - [Information Functions](/functions-and-operators/information-functions.md) + - JSON Functions + - [Overview](/functions-and-operators/json-functions.md) + - [Functions That Create JSON](/functions-and-operators/json-functions/json-functions-create.md) + - [Functions That Search JSON](/functions-and-operators/json-functions/json-functions-search.md) + - [Functions That Modify JSON](/functions-and-operators/json-functions/json-functions-modify.md) + - 
[Functions That Return JSON](/functions-and-operators/json-functions/json-functions-return.md) + - [JSON Utility Functions](/functions-and-operators/json-functions/json-functions-utility.md) + - [Functions That Aggregate JSON](/functions-and-operators/json-functions/json-functions-aggregate.md) + - [Aggregate (GROUP BY) Functions](/functions-and-operators/aggregate-group-by-functions.md) + - [GROUP BY Modifiers](/functions-and-operators/group-by-modifier.md) + - [Window Functions](/functions-and-operators/window-functions.md) + - [Miscellaneous Functions](/functions-and-operators/miscellaneous-functions.md) + - [Precision Math](/functions-and-operators/precision-math.md) + - [Set Operations](/functions-and-operators/set-operators.md) + - [Sequence Functions](/functions-and-operators/sequence-functions.md) + - [List of Expressions for Pushdown](/functions-and-operators/expressions-pushed-down.md) + - [TiDB Specific Functions](/functions-and-operators/tidb-functions.md) + - [Clustered Indexes](/clustered-indexes.md) + - [Global Indexes](/global-indexes.md) + - [Constraints](/constraints.md) + - [Generated Columns](/generated-columns.md) + - [SQL Mode](/sql-mode.md) + - [Table Attributes](/table-attributes.md) + - Transactions + - [Overview](/transaction-overview.md) + - [Isolation Levels](/transaction-isolation-levels.md) + - [Optimistic Transactions](/optimistic-transaction.md) + - [Pessimistic Transactions](/pessimistic-transaction.md) + - [Non-Transactional DML Statements](/non-transactional-dml.md) + - [Views](/views.md) + - [Partitioning](/partitioned-table.md) + - [Temporary Tables](/temporary-tables.md) + - [Cached Tables](/cached-tables.md) + - [FOREIGN KEY Constraints](/foreign-key.md) + - Character Set and Collation + - [Overview](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - Read Historical Data + - Use Stale Read (Recommended) + - [Usage Scenarios of Stale Read](/stale-read.md) + - [Perform Stale Read Using `AS OF 
TIMESTAMP`](/as-of-timestamp.md) + - [Perform Stale Read Using `tidb_read_staleness`](/tidb-read-staleness.md) + - [Perform Stale Read Using `tidb_external_ts`](/tidb-external-ts.md) + - [Use the `tidb_snapshot` System Variable](/read-historical-data.md) + - System Tables + - `mysql` Schema + - [Overview](/mysql-schema/mysql-schema.md) + - [`user`](/mysql-schema/mysql-schema-user.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CHECK_CONSTRAINTS`](/information-schema/information-schema-check-constraints.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`KEYWORDS`](/information-schema/information-schema-keywords.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - 
[`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_CHECK_CONSTRAINTS`](/information-schema/information-schema-tidb-check-constraints.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_INDEX_USAGE`](/information-schema/information-schema-tidb-index-usage.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIFLASH_SEGMENTS`](/information-schema/information-schema-tiflash-segments.md) + - [`TIFLASH_TABLES`](/information-schema/information-schema-tiflash-tables.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - PERFORMANCE_SCHEMA + - [Overview](/performance-schema/performance-schema.md) + - [`SESSION_CONNECT_ATTRS`](/performance-schema/performance-schema-session-connect-attrs.md) + - SYS + - [Overview](/sys-schema/sys-schema.md) + - [`schema_unused_indexes`](/sys-schema/sys-schema-unused-indexes.md) + - [Metadata Lock](/metadata-lock.md) + - [TiDB Accelerated Table 
Creation](/accelerated-table-creation.md) +- CLI Reference ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [Overview](/tidb-cloud/cli-reference.md) + - auth + - [login](/tidb-cloud/ticloud-auth-login.md) + - [logout](/tidb-cloud/ticloud-auth-logout.md) + - [whoami](/tidb-cloud/ticloud-auth-whoami.md) + - serverless + - [capacity](/tidb-cloud/ticloud-serverless-capacity.md) + - [create](/tidb-cloud/ticloud-cluster-create.md) + - [delete](/tidb-cloud/ticloud-cluster-delete.md) + - [describe](/tidb-cloud/ticloud-cluster-describe.md) + - [list](/tidb-cloud/ticloud-cluster-list.md) + - [update](/tidb-cloud/ticloud-serverless-update.md) + - [spending-limit](/tidb-cloud/ticloud-serverless-spending-limit.md) + - [region](/tidb-cloud/ticloud-serverless-region.md) + - [shell](/tidb-cloud/ticloud-serverless-shell.md) + - branch + - [create](/tidb-cloud/ticloud-branch-create.md) + - [delete](/tidb-cloud/ticloud-branch-delete.md) + - [describe](/tidb-cloud/ticloud-branch-describe.md) + - [list](/tidb-cloud/ticloud-branch-list.md) + - [shell](/tidb-cloud/ticloud-branch-shell.md) + - import + - [cancel](/tidb-cloud/ticloud-import-cancel.md) + - [describe](/tidb-cloud/ticloud-import-describe.md) + - [list](/tidb-cloud/ticloud-import-list.md) + - [start](/tidb-cloud/ticloud-import-start.md) + - export + - [create](/tidb-cloud/ticloud-serverless-export-create.md) + - [describe](/tidb-cloud/ticloud-serverless-export-describe.md) + - [list](/tidb-cloud/ticloud-serverless-export-list.md) + - [cancel](/tidb-cloud/ticloud-serverless-export-cancel.md) + - [download](/tidb-cloud/ticloud-serverless-export-download.md) + - sql-user + - [create](/tidb-cloud/ticloud-serverless-sql-user-create.md) + - [delete](/tidb-cloud/ticloud-serverless-sql-user-delete.md) + - [list](/tidb-cloud/ticloud-serverless-sql-user-list.md) + - [update](/tidb-cloud/ticloud-serverless-sql-user-update.md) + - authorized-network + - [create](/tidb-cloud/ticloud-serverless-authorized-network-create.md) 
+ - [delete](/tidb-cloud/ticloud-serverless-authorized-network-delete.md) + - [list](/tidb-cloud/ticloud-serverless-authorized-network-list.md) + - [update](/tidb-cloud/ticloud-serverless-authorized-network-update.md) + - [completion](/tidb-cloud/ticloud-completion.md) + - config + - [create](/tidb-cloud/ticloud-config-create.md) + - [delete](/tidb-cloud/ticloud-config-delete.md) + - [describe](/tidb-cloud/ticloud-config-describe.md) + - [edit](/tidb-cloud/ticloud-config-edit.md) + - [list](/tidb-cloud/ticloud-config-list.md) + - [set](/tidb-cloud/ticloud-config-set.md) + - [use](/tidb-cloud/ticloud-config-use.md) + - project + - [list](/tidb-cloud/ticloud-project-list.md) + - [upgrade](/tidb-cloud/ticloud-upgrade.md) + - [help](/tidb-cloud/ticloud-help.md) +- General Reference + - TiDB Classic Architecture + - [Overview](/tidb-architecture.md) + - [Storage](/tidb-storage.md) + - [Computing](/tidb-computing.md) + - [Scheduling](/tidb-scheduling.md) + - [TSO](/tso.md) + - [TiDB X Architecture](/tidb-cloud/tidb-x-architecture.md) + - Storage Engines + - TiKV + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Spill to Disk](/tiflash/tiflash-spill-disk.md) + - TiDB Cloud Partner Web Console + - [TiDB Cloud Partners](/tidb-cloud/tidb-cloud-partners.md) + - [MSP Customer](/tidb-cloud/managed-service-provider-customer.md) + - [Reseller's Customer](/tidb-cloud/cppo-customer.md) + - [Limited SQL Features on TiDB Cloud](/tidb-cloud/limited-sql-features.md) + - [TiDB Limitations](/tidb-limitations.md) + - [System Variables](/system-variables.md) + - [Server Status Variables](/status-variables.md) + - [Table Filter](/table-filter.md) + - [URI Formats of External Storage Services](/external-storage-uri.md) + - [Troubleshoot Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) + - [Notifications](/tidb-cloud/notifications.md) +- 
Support Plan + - [Connected Care Overview](/tidb-cloud/connected-care-overview.md) + - [Connected Care Details](/tidb-cloud/connected-care-detail.md) + - Connected Care Support Service Features + - [Connected: AI Chat in IM](/tidb-cloud/connected-ai-chat-in-im.md) + - Connected: IM Ticket Creation and Update Subscription + - [Create Tickets and Subscribe to Ticket Updates via Slack](/tidb-cloud/connected-slack-ticket-creation.md) + - [Create Tickets and Subscribe to Ticket Updates via Lark](/tidb-cloud/connected-lark-ticket-creation.md) + - Connected: IM Interaction for Support Tickets + - [Interact with Support Tickets via Slack](/tidb-cloud/connected-slack-ticket-interaction.md) + - [Interact with Support Tickets via Lark](/tidb-cloud/connected-lark-ticket-interaction.md) + - [Get Support](/tidb-cloud/tidb-cloud-support.md) +- FAQs + - [TiDB Cloud FAQs](/tidb-cloud/tidb-cloud-faq.md) +- [Glossary](/tidb-cloud/tidb-cloud-glossary.md) diff --git a/TOC-tidb-cloud-releases.md b/TOC-tidb-cloud-releases.md new file mode 100644 index 0000000000000..0c1e2ce823e75 --- /dev/null +++ b/TOC-tidb-cloud-releases.md @@ -0,0 +1,27 @@ + + + +# Table of Contents + +## RELEASE NOTES + +- [2026](/tidb-cloud/releases/tidb-cloud-release-notes.md) +- [2025](/tidb-cloud/releases/release-notes-2025.md) +- [2024](/tidb-cloud/releases/release-notes-2024.md) +- Earlier Releases + - [2023](/tidb-cloud/releases/release-notes-2023.md) + - [2022](/tidb-cloud/releases/release-notes-2022.md) + - [2021](/tidb-cloud/releases/release-notes-2021.md) + - [2020](/tidb-cloud/releases/release-notes-2020.md) + +## MAINTENANCE NOTIFICATIONS + +- [[2024-09-15] TiDB Cloud Console Maintenance Notification](/tidb-cloud/releases/notification-2024-09-15-console-maintenance.md) +- [[2024-04-18] TiDB Cloud Data Migration (DM) Feature Maintenance Notification](/tidb-cloud/releases/notification-2024-04-18-dm-feature-maintenance.md) +- [[2024-04-16] TiDB Cloud Monitoring Features Maintenance 
Notification](/tidb-cloud/releases/notification-2024-04-16-monitoring-features-maintenance.md) +- [[2024-04-11] TiDB Cloud Data Migration (DM) Feature Maintenance Notification](/tidb-cloud/releases/notification-2024-04-11-dm-feature-maintenance.md) +- [[2024-04-09] TiDB Cloud Monitoring Features Maintenance Notification](/tidb-cloud/releases/notification-2024-04-09-monitoring-features-maintenance.md) +- Earlier Notifications + - [[2023-11-14] TiDB Cloud Dedicated Scale Feature Maintenance Notification](/tidb-cloud/releases/notification-2023-11-14-scale-feature-maintenance.md) + - [[2023-09-26] TiDB Cloud Console Maintenance Notification](/tidb-cloud/releases/notification-2023-09-26-console-maintenance.md) + - [[2023-08-31] TiDB Cloud Console Maintenance Notification](/tidb-cloud/releases/notification-2023-08-31-console-maintenance.md) diff --git a/TOC-tidb-cloud-starter.md b/TOC-tidb-cloud-starter.md new file mode 100644 index 0000000000000..d8fa6020884bd --- /dev/null +++ b/TOC-tidb-cloud-starter.md @@ -0,0 +1,562 @@ + + + +# Table of Contents + +## GET STARTED + +- Why TiDB Cloud + - [Introduction](/tidb-cloud/tidb-cloud-intro.md) + - [Features](/tidb-cloud/features.md) + - [MySQL Compatibility](/mysql-compatibility.md) +- Get Started + - [Try Out TiDB Cloud](/tidb-cloud/tidb-cloud-quickstart.md) + - [Try Out TiDB + AI Tools](/tidb-cloud/use-tidb-cloud-with-ai-tools.md) + - [Try Out HTAP](/tidb-cloud/tidb-cloud-htap-quickstart.md) + - [Try Out TiDB Cloud CLI](/tidb-cloud/get-started-with-cli.md) +- Key Concepts + - [Overview](/tidb-cloud/key-concepts.md) + - [Architecture](/tidb-cloud/architecture-concepts.md) + - [Database Schema](/tidb-cloud/database-schema-concepts.md) + - [Transactions](/tidb-cloud/transaction-concepts.md) + - [SQL](/tidb-cloud/sql-concepts.md) + - [AI Features](/tidb-cloud/ai-feature-concepts.md) + - [Data Service](/tidb-cloud/data-service-concepts.md) ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - 
[Scalability](/tidb-cloud/scalability-concepts.md) + - [High Availability](/tidb-cloud/serverless-high-availability.md) + - [Monitoring](/tidb-cloud/monitoring-concepts.md) + - [Backup & Restore](/tidb-cloud/backup-and-restore-concepts.md) + - [Security](/tidb-cloud/security-concepts.md) + +## GUIDES + +- Manage Cluster + - [Select Your Cluster Plan](/tidb-cloud/select-cluster-tier.md) + - Manage TiDB Cloud Clusters + - [Create a TiDB Cloud Cluster](/tidb-cloud/create-tidb-cluster-serverless.md) + - Connect to Your TiDB Cloud Cluster + - [Network Connection Overview](/tidb-cloud/connect-to-tidb-cluster-serverless.md) + - [Connect via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md) + - [Connect via Private Endpoint with AWS](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) + - [Connect via Private Endpoint with Alibaba Cloud](/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md) + - Branch ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [Overview](/tidb-cloud/branch-overview.md) + - [Manage Branches](/tidb-cloud/branch-manage.md) + - [GitHub Integration](/tidb-cloud/branch-github-integration.md) + - [Manage Spending Limit](/tidb-cloud/manage-serverless-spend-limit.md) + - [Back Up and Restore TiDB Cloud Data](/tidb-cloud/backup-and-restore-serverless.md) + - [Export Data from TiDB Cloud](/tidb-cloud/serverless-export.md) + - Use an HTAP Cluster with TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) + - [Read Data from TiFlash](/tiflash/use-tidb-to-read-tiflash.md) + - [Use MPP Mode](/tiflash/use-tiflash-mpp-mode.md) + - [Use FastScan](/tiflash/use-fastscan.md) + - [Supported Push-down Calculations](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash Query Result Materialization](/tiflash/tiflash-results-materialization.md) + - [TiFlash Late Materialization](/tiflash/tiflash-late-materialization.md) + - 
[Compatibility](/tiflash/tiflash-compatibility.md) + - [Pipeline Execution Model](/tiflash/tiflash-pipeline-model.md) + - Monitor and Alert + - [Overview](/tidb-cloud/monitor-tidb-cluster.md) + - [Built-in Metrics](/tidb-cloud/built-in-monitoring.md) + - [Cluster Events](/tidb-cloud/tidb-cloud-events.md) + - Tune Performance + - [Overview](/tidb-cloud/tidb-cloud-tune-performance-overview.md) + - [Analyze Performance](/tidb-cloud/tune-performance.md) + - SQL Tuning + - [Overview](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) + - Understanding the Query Execution Plan + - [Overview](/explain-overview.md) + - [`EXPLAIN` Walkthrough](/explain-walkthrough.md) + - [Indexes](/explain-indexes.md) + - [Joins](/explain-joins.md) + - [MPP Queries](/explain-mpp.md) + - [Subqueries](/explain-subqueries.md) + - [Aggregation](/explain-aggregation.md) + - [Views](/explain-views.md) + - [Partitions](/explain-partitions.md) + - [Index Merge](/explain-index-merge.md) + - SQL Optimization Process + - [Overview](/sql-optimization-concepts.md) + - Logic Optimization + - [Overview](/sql-logical-optimization.md) + - [Subquery Related Optimizations](/subquery-optimization.md) + - [Column Pruning](/column-pruning.md) + - [Decorrelation of Correlated Subquery](/correlated-subquery-optimization.md) + - [Eliminate Max/Min](/max-min-eliminate.md) + - [Predicates Push Down](/predicate-push-down.md) + - [Partition Pruning](/partition-pruning.md) + - [TopN and Limit Push Down](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - [Derive TopN or Limit from Window Functions](/derive-topn-from-window.md) + - Physical Optimization + - [Overview](/sql-physical-optimization.md) + - [Index Selection](/choose-index.md) + - [Statistics](/statistics.md) + - [Extended Statistics](/extended-statistics.md) + - [Wrong Index Solution](/wrong-index-solution.md) + - [Distinct Optimization](/agg-distinct-optimization.md) + - [Cost Model](/cost-model.md) + - [Runtime Filter](/runtime-filter.md) + - 
[Prepared Execution Plan Cache](/sql-prepared-plan-cache.md) + - [Non-Prepared Execution Plan Cache](/sql-non-prepared-plan-cache.md) + - Control Execution Plans + - [Overview](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [SQL Plan Management](/sql-plan-management.md) + - [The Blocklist of Optimization Rules and Expression Pushdown](/blocklist-control-plan.md) + - [Optimizer Fix Controls](/optimizer-fix-controls.md) + - [TiKV Follower Read](/follower-read.md) + - [Coprocessor Cache](/coprocessor-cache.md) + - Garbage Collection (GC) + - [Overview](/garbage-collection-overview.md) + - [Configuration](/garbage-collection-configuration.md) + - [Tune TiFlash Performance](/tiflash/tune-tiflash-performance.md) + - [Upgrade a TiDB Cluster](/tidb-cloud/upgrade-tidb-cluster.md) + - [Delete a TiDB Cluster](/tidb-cloud/delete-tidb-cluster.md) +- Migrate or Import Data + - [Overview](/tidb-cloud/tidb-cloud-migration-overview.md) + - Migrate Data into TiDB Cloud + - [Migrate from TiDB Self-Managed to TiDB Cloud](/tidb-cloud/migrate-from-op-tidb.md) + - [Migrate and Merge MySQL Shards of Large Datasets](/tidb-cloud/migrate-sql-shards.md) + - [Migrate from Amazon RDS for Oracle Using AWS DMS](/tidb-cloud/migrate-from-oracle-using-aws-dms.md) + - Import Data into TiDB Cloud + - [Import Local Files](/tidb-cloud/tidb-cloud-import-local-files.md) + - [Import Sample Data (SQL Files) from Cloud Storage](/tidb-cloud/import-sample-data-serverless.md) + - [Import CSV Files from Cloud Storage](/tidb-cloud/import-csv-files-serverless.md) + - [Import Parquet Files from Cloud Storage](/tidb-cloud/import-parquet-files-serverless.md) + - [Import Snapshot Files from Cloud Storage](/tidb-cloud/import-snapshot-files-serverless.md) + - [Import with MySQL CLI](/tidb-cloud/import-with-mysql-cli-serverless.md) + - Reference + - [Configure External Storage Access for TiDB Cloud](/tidb-cloud/configure-external-storage-access.md) + - [Naming Conventions for Data 
Import](/tidb-cloud/naming-conventions-for-data-import.md) + - [CSV Configurations for Importing Data](/tidb-cloud/csv-config-for-import-data.md) + - [Troubleshoot Access Denied Errors during Data Import from Amazon S3](/tidb-cloud/troubleshoot-import-access-denied-error.md) + - [Connect AWS DMS to TiDB Cloud clusters](/tidb-cloud/tidb-cloud-connect-aws-dms.md) +- Explore Data + - [Chat2Query in SQL Editor](/tidb-cloud/explore-data-with-chat2query.md) ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [SQL Proxy Account](/tidb-cloud/sql-proxy-account.md) +- Data Service ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [Overview](/tidb-cloud/data-service-overview.md) + - [Get Started](/tidb-cloud/data-service-get-started.md) + - Chat2Query API + - [Get Started](/tidb-cloud/use-chat2query-api.md) + - [Start Multi-round Chat2Query](/tidb-cloud/use-chat2query-sessions.md) + - [Use Knowledge Bases](/tidb-cloud/use-chat2query-knowledge.md) + - [Manage Data App](/tidb-cloud/data-service-manage-data-app.md) + - [Manage Endpoint](/tidb-cloud/data-service-manage-endpoint.md) + - [API Key](/tidb-cloud/data-service-api-key.md) + - [Custom Domain](/tidb-cloud/data-service-custom-domain.md) + - [Integrations](/tidb-cloud/data-service-integrations.md) + - [Run in Postman](/tidb-cloud/data-service-postman-integration.md) + - [Deploy Automatically with GitHub](/tidb-cloud/data-service-manage-github-connection.md) + - [Use OpenAPI Specification with Next.js](/tidb-cloud/data-service-oas-with-nextjs.md) + - [Data App Configuration Files](/tidb-cloud/data-service-app-config-files.md) + - [Response and Status Code](/tidb-cloud/data-service-response-and-status-code.md) +- Security + - [Security Overview](/tidb-cloud/security-overview.md) + - Identity Access Control + - [Password Authentication](/tidb-cloud/tidb-cloud-password-authentication.md) + - [Standard SSO Authentication](/tidb-cloud/tidb-cloud-sso-authentication.md) + - [Organization SSO 
Authentication](/tidb-cloud/tidb-cloud-org-sso-authentication.md) + - [Identity Access Management](/tidb-cloud/manage-user-access.md) + - [OAuth 2.0](/tidb-cloud/oauth2.md) + - Network Access Control + - [Connect via Private Endpoint with AWS](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) + - [Connect via Private Endpoint with Alibaba Cloud](/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md) + - [Configure Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md) + - [TLS Connections to TiDB Cloud](/tidb-cloud/secure-connections-to-serverless-clusters.md) + - Audit Management + - [Console Audit Logging](/tidb-cloud/tidb-cloud-console-auditing.md) +- Billing + - [Invoices](/tidb-cloud/tidb-cloud-billing.md#invoices) + - [Billing Details](/tidb-cloud/tidb-cloud-billing.md#billing-details) + - [Cost Explorer](/tidb-cloud/tidb-cloud-billing.md#cost-explorer) + - [Billing Profile](/tidb-cloud/tidb-cloud-billing.md#billing-profile) + - [Credits](/tidb-cloud/tidb-cloud-billing.md#credits) + - [Payment Method Setting](/tidb-cloud/tidb-cloud-billing.md#payment-method) + - [Billing from Cloud Provider Marketplace](/tidb-cloud/tidb-cloud-billing.md#billing-from-cloud-provider-marketplace) + - [Manage Budgets](/tidb-cloud/tidb-cloud-budget.md) +- Integrations + - [Airbyte](/tidb-cloud/integrate-tidbcloud-with-airbyte.md) + - [Amazon AppFlow](/develop/dev-guide-aws-appflow-integration.md) + - [AWS Lambda](/tidb-cloud/integrate-tidbcloud-with-aws-lambda.md) + - [Cloudflare](/tidb-cloud/integrate-tidbcloud-with-cloudflare.md) + - [dbt](/tidb-cloud/integrate-tidbcloud-with-dbt.md) + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - [n8n](/tidb-cloud/integrate-tidbcloud-with-n8n.md) + - [Netlify](/tidb-cloud/integrate-tidbcloud-with-netlify.md) + - [ProxySQL](/develop/dev-guide-proxysql-integration.md) + - Terraform + - [Terraform Integration 
Overview](/tidb-cloud/terraform-tidbcloud-provider-overview.md) + - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) + - [Use the `tidbcloud_serverless_cluster` Resource](/tidb-cloud/terraform-use-serverless-cluster-resource.md) + - [Use the `tidbcloud_serverless_branch` Resource](/tidb-cloud/terraform-use-serverless-branch-resource.md) + - [Use the `tidbcloud_serverless_export` Resource](/tidb-cloud/terraform-use-serverless-export-resource.md) + - [Use the `tidbcloud_sql_user` Resource](/tidb-cloud/terraform-use-sql-user-resource.md) + - [Use the `tidbcloud_import` Resource](/tidb-cloud/terraform-use-import-resource.md) + - [Migrate Cluster Resource](/tidb-cloud/terraform-migrate-cluster-resource.md) + - [Vercel](/tidb-cloud/integrate-tidbcloud-with-vercel.md) + - [Zapier](/tidb-cloud/integrate-tidbcloud-with-zapier.md) + +## REFERENCE + +- SQL Reference + - [Explore SQL with TiDB](/basic-sql-operations.md) + - SQL Language Structure and Syntax + - Attributes + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [Literal Values](/literal-values.md) + - [Schema Object Names](/schema-object-names.md) + - [Keywords and Reserved Words](/keywords.md) + - [User-Defined Variables](/user-defined-variables.md) + - [Expression Syntax](/expression-syntax.md) + - [Comment Syntax](/comment-syntax.md) + - SQL Statements + - [Overview](/sql-statements/sql-statement-overview.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN CLEANUP INDEX`](/sql-statements/sql-statement-admin-cleanup.md) + - [`ADMIN PAUSE DDL`](/sql-statements/sql-statement-admin-pause-ddl.md) + - [`ADMIN RECOVER 
INDEX`](/sql-statements/sql-statement-admin-recover.md) + - [`ADMIN RESUME DDL`](/sql-statements/sql-statement-admin-resume-ddl.md) + - [`ADMIN SHOW DDL [JOBS|JOB QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INSTANCE`](/sql-statements/sql-statement-alter-instance.md) + - [`ALTER SEQUENCE`](/sql-statements/sql-statement-alter-sequence.md) + - `ALTER TABLE` + - [Overview](/sql-statements/sql-statement-alter-table.md) + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CANCEL IMPORT JOB`](/sql-statements/sql-statement-cancel-import-job.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE 
TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER`](/sql-statements/sql-statement-flashback-cluster.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT <privileges>`](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT <role>`](/sql-statements/sql-statement-grant-role.md) + - [`IMPORT INTO`](/sql-statements/sql-statement-import-into.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOAD 
DATA`](/sql-statements/sql-statement-load-data.md) + - [`LOCK STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`LOCK TABLES` and `UNLOCK TABLES`](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`RENAME USER`](/sql-statements/sql-statement-rename-user.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`REVOKE <privileges>`](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE <role>`](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] <variable>`](/sql-statements/sql-statement-set-variable.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW COLUMN_STATS_USAGE`](/sql-statements/sql-statement-show-column-stats-usage.md) + - [`SHOW COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) + - [`SHOW CREATE 
SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW IMPORT JOB`](/sql-statements/sql-statement-show-import-job.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW PROCESSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_BUCKETS`](/sql-statements/sql-statement-show-stats-buckets.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-stats-histograms.md) + - [`SHOW STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATS_TOPN`](/sql-statements/sql-statement-show-stats-topn.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW 
WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - Data Types + - [Overview](/data-type-overview.md) + - [Default Values](/data-type-default-values.md) + - [Numeric Types](/data-type-numeric.md) + - [Date and Time Types](/data-type-date-and-time.md) + - [String Types](/data-type-string.md) + - [JSON Type](/data-type-json.md) + - Functions and Operators + - [Overview](/functions-and-operators/functions-and-operators-overview.md) + - [Type Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [Operators](/functions-and-operators/operators.md) + - [Control Flow Functions](/functions-and-operators/control-flow-functions.md) + - [String Functions](/functions-and-operators/string-functions.md) + - [Numeric Functions and Operators](/functions-and-operators/numeric-functions-and-operators.md) + - [Date and Time Functions](/functions-and-operators/date-and-time-functions.md) + - [Bit Functions and Operators](/functions-and-operators/bit-functions-and-operators.md) + - [Cast Functions and Operators](/functions-and-operators/cast-functions-and-operators.md) + - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) + - [Locking Functions](/functions-and-operators/locking-functions.md) + - [Information Functions](/functions-and-operators/information-functions.md) + - JSON Functions + - [Overview](/functions-and-operators/json-functions.md) + - [Functions That Create 
JSON](/functions-and-operators/json-functions/json-functions-create.md) + - [Functions That Search JSON](/functions-and-operators/json-functions/json-functions-search.md) + - [Functions That Modify JSON](/functions-and-operators/json-functions/json-functions-modify.md) + - [Functions That Return JSON](/functions-and-operators/json-functions/json-functions-return.md) + - [JSON Utility Functions](/functions-and-operators/json-functions/json-functions-utility.md) + - [Functions That Aggregate JSON](/functions-and-operators/json-functions/json-functions-aggregate.md) + - [Aggregate (GROUP BY) Functions](/functions-and-operators/aggregate-group-by-functions.md) + - [GROUP BY Modifiers](/functions-and-operators/group-by-modifier.md) + - [Window Functions](/functions-and-operators/window-functions.md) + - [Miscellaneous Functions](/functions-and-operators/miscellaneous-functions.md) + - [Precision Math](/functions-and-operators/precision-math.md) + - [Set Operations](/functions-and-operators/set-operators.md) + - [Sequence Functions](/functions-and-operators/sequence-functions.md) + - [List of Expressions for Pushdown](/functions-and-operators/expressions-pushed-down.md) + - [TiDB Specific Functions](/functions-and-operators/tidb-functions.md) + - [Clustered Indexes](/clustered-indexes.md) + - [Global Indexes](/global-indexes.md) + - [Constraints](/constraints.md) + - [Generated Columns](/generated-columns.md) + - [SQL Mode](/sql-mode.md) + - [Table Attributes](/table-attributes.md) + - Transactions + - [Overview](/transaction-overview.md) + - [Isolation Levels](/transaction-isolation-levels.md) + - [Optimistic Transactions](/optimistic-transaction.md) + - [Pessimistic Transactions](/pessimistic-transaction.md) + - [Non-Transactional DML Statements](/non-transactional-dml.md) + - [Views](/views.md) + - [Partitioning](/partitioned-table.md) + - [Temporary Tables](/temporary-tables.md) + - [Cached Tables](/cached-tables.md) + - [FOREIGN KEY Constraints](/foreign-key.md) + - 
Character Set and Collation + - [Overview](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - Read Historical Data + - Use Stale Read (Recommended) + - [Usage Scenarios of Stale Read](/stale-read.md) + - [Perform Stale Read Using `AS OF TIMESTAMP`](/as-of-timestamp.md) + - [Perform Stale Read Using `tidb_read_staleness`](/tidb-read-staleness.md) + - [Perform Stale Read Using `tidb_external_ts`](/tidb-external-ts.md) + - [Use the `tidb_snapshot` System Variable](/read-historical-data.md) + - System Tables + - `mysql` Schema + - [Overview](/mysql-schema/mysql-schema.md) + - [`user`](/mysql-schema/mysql-schema-user.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CHECK_CONSTRAINTS`](/information-schema/information-schema-check-constraints.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`KEYWORDS`](/information-schema/information-schema-keywords.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - 
[`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_CHECK_CONSTRAINTS`](/information-schema/information-schema-tidb-check-constraints.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_INDEX_USAGE`](/information-schema/information-schema-tidb-index-usage.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIFLASH_SEGMENTS`](/information-schema/information-schema-tiflash-segments.md) + - [`TIFLASH_TABLES`](/information-schema/information-schema-tiflash-tables.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - PERFORMANCE_SCHEMA + - [Overview](/performance-schema/performance-schema.md) + - 
[`SESSION_CONNECT_ATTRS`](/performance-schema/performance-schema-session-connect-attrs.md) + - SYS + - [Overview](/sys-schema/sys-schema.md) + - [`schema_unused_indexes`](/sys-schema/sys-schema-unused-indexes.md) + - [Metadata Lock](/metadata-lock.md) + - [TiDB Accelerated Table Creation](/accelerated-table-creation.md) +- CLI Reference ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - [Overview](/tidb-cloud/cli-reference.md) + - auth + - [login](/tidb-cloud/ticloud-auth-login.md) + - [logout](/tidb-cloud/ticloud-auth-logout.md) + - [whoami](/tidb-cloud/ticloud-auth-whoami.md) + - serverless + - [capacity](/tidb-cloud/ticloud-serverless-capacity.md) + - [create](/tidb-cloud/ticloud-cluster-create.md) + - [delete](/tidb-cloud/ticloud-cluster-delete.md) + - [describe](/tidb-cloud/ticloud-cluster-describe.md) + - [list](/tidb-cloud/ticloud-cluster-list.md) + - [update](/tidb-cloud/ticloud-serverless-update.md) + - [spending-limit](/tidb-cloud/ticloud-serverless-spending-limit.md) + - [region](/tidb-cloud/ticloud-serverless-region.md) + - [shell](/tidb-cloud/ticloud-serverless-shell.md) + - branch + - [create](/tidb-cloud/ticloud-branch-create.md) + - [delete](/tidb-cloud/ticloud-branch-delete.md) + - [describe](/tidb-cloud/ticloud-branch-describe.md) + - [list](/tidb-cloud/ticloud-branch-list.md) + - [shell](/tidb-cloud/ticloud-branch-shell.md) + - import + - [cancel](/tidb-cloud/ticloud-import-cancel.md) + - [describe](/tidb-cloud/ticloud-import-describe.md) + - [list](/tidb-cloud/ticloud-import-list.md) + - [start](/tidb-cloud/ticloud-import-start.md) + - export + - [create](/tidb-cloud/ticloud-serverless-export-create.md) + - [describe](/tidb-cloud/ticloud-serverless-export-describe.md) + - [list](/tidb-cloud/ticloud-serverless-export-list.md) + - [cancel](/tidb-cloud/ticloud-serverless-export-cancel.md) + - [download](/tidb-cloud/ticloud-serverless-export-download.md) + - sql-user + - [create](/tidb-cloud/ticloud-serverless-sql-user-create.md) + - 
[delete](/tidb-cloud/ticloud-serverless-sql-user-delete.md) + - [list](/tidb-cloud/ticloud-serverless-sql-user-list.md) + - [update](/tidb-cloud/ticloud-serverless-sql-user-update.md) + - authorized-network + - [create](/tidb-cloud/ticloud-serverless-authorized-network-create.md) + - [delete](/tidb-cloud/ticloud-serverless-authorized-network-delete.md) + - [list](/tidb-cloud/ticloud-serverless-authorized-network-list.md) + - [update](/tidb-cloud/ticloud-serverless-authorized-network-update.md) + - [completion](/tidb-cloud/ticloud-completion.md) + - config + - [create](/tidb-cloud/ticloud-config-create.md) + - [delete](/tidb-cloud/ticloud-config-delete.md) + - [describe](/tidb-cloud/ticloud-config-describe.md) + - [edit](/tidb-cloud/ticloud-config-edit.md) + - [list](/tidb-cloud/ticloud-config-list.md) + - [set](/tidb-cloud/ticloud-config-set.md) + - [use](/tidb-cloud/ticloud-config-use.md) + - project + - [list](/tidb-cloud/ticloud-project-list.md) + - [upgrade](/tidb-cloud/ticloud-upgrade.md) + - [help](/tidb-cloud/ticloud-help.md) +- General Reference + - TiDB Classic Architecture + - [Overview](/tidb-architecture.md) + - [Storage](/tidb-storage.md) + - [Computing](/tidb-computing.md) + - [Scheduling](/tidb-scheduling.md) + - [TSO](/tso.md) + - [TiDB X Architecture](/tidb-cloud/tidb-x-architecture.md) + - Storage Engines + - TiKV + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Spill to Disk](/tiflash/tiflash-spill-disk.md) + - TiDB Cloud Partner Web Console + - [TiDB Cloud Partners](/tidb-cloud/tidb-cloud-partners.md) + - [MSP Customer](/tidb-cloud/managed-service-provider-customer.md) + - [Reseller's Customer](/tidb-cloud/cppo-customer.md) + - [{{{ .starter }}} and Essential Limitations](/tidb-cloud/serverless-limitations.md) + - [Limited SQL Features on TiDB Cloud](/tidb-cloud/limited-sql-features.md) + - [TiDB 
Limitations](/tidb-limitations.md) + - [System Variables](/system-variables.md) + - [Server Status Variables](/status-variables.md) + - [Table Filter](/table-filter.md) + - [URI Formats of External Storage Services](/external-storage-uri.md) + - [Troubleshoot Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) + - [Notifications](/tidb-cloud/notifications.md) +- Support Plan + - [Connected Care Overview](/tidb-cloud/connected-care-overview.md) + - [Connected Care Details](/tidb-cloud/connected-care-detail.md) + - Connected Care Support Service Features + - [Connected: AI Chat in IM](/tidb-cloud/connected-ai-chat-in-im.md) + - Connected: IM Ticket Creation and Update Subscription + - [Create Tickets and Subscribe to Ticket Updates via Slack](/tidb-cloud/connected-slack-ticket-creation.md) + - [Create Tickets and Subscribe to Ticket Updates via Lark](/tidb-cloud/connected-lark-ticket-creation.md) + - Connected: IM Interaction for Support Tickets + - [Interact with Support Tickets via Slack](/tidb-cloud/connected-slack-ticket-interaction.md) + - [Interact with Support Tickets via Lark](/tidb-cloud/connected-lark-ticket-interaction.md) + - [Get Support](/tidb-cloud/tidb-cloud-support.md) +- FAQs + - [TiDB Cloud FAQs](/tidb-cloud/tidb-cloud-faq.md) + - [{{{ .starter }}} FAQs](/tidb-cloud/serverless-faqs.md) +- [Glossary](/tidb-cloud/tidb-cloud-glossary.md) diff --git a/TOC-tidb-cloud.md b/TOC-tidb-cloud.md index 725cbc6773d21..2c0959c954a6b 100644 --- a/TOC-tidb-cloud.md +++ b/TOC-tidb-cloud.md @@ -6,13 +6,12 @@ ## GET STARTED - Why TiDB Cloud - - [TiDB Cloud Introduction](/tidb-cloud/tidb-cloud-intro.md) + - [Introduction](/tidb-cloud/tidb-cloud-intro.md) + - [Features](/tidb-cloud/features.md) - [MySQL Compatibility](/mysql-compatibility.md) -- Get Started with TiDB Cloud - - [Try Out TiDB Cloud Serverless](/tidb-cloud/tidb-cloud-quickstart.md) - - [Try Out TiDB + AI](/vector-search/vector-search-get-started-using-python.md) +- Get 
Started + - [Try Out TiDB Cloud](/tidb-cloud/tidb-cloud-quickstart.md) - [Try Out HTAP](/tidb-cloud/tidb-cloud-htap-quickstart.md) - - [Try Out TiDB Cloud CLI](/tidb-cloud/get-started-with-cli.md) - [Perform a PoC](/tidb-cloud/tidb-cloud-poc.md) - Key Concepts - [Overview](/tidb-cloud/key-concepts.md) @@ -23,149 +22,32 @@ - [AI Features](/tidb-cloud/ai-feature-concepts.md) - [Data Service](/tidb-cloud/data-service-concepts.md) ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) - [Scalability](/tidb-cloud/scalability-concepts.md) - - High Availability - - [High Availability in TiDB Cloud Serverless](/tidb-cloud/serverless-high-availability.md) - - [High Availability in TiDB Cloud Dedicated](/tidb-cloud/high-availability-with-multi-az.md) + - [High Availability](/tidb-cloud/high-availability-with-multi-az.md) - [Monitoring](/tidb-cloud/monitoring-concepts.md) - [Data Streaming](/tidb-cloud/data-streaming-concepts.md) - [Backup & Restore](/tidb-cloud/backup-and-restore-concepts.md) - [Security](/tidb-cloud/security-concepts.md) -## DEVELOP - -- Development Quick Start - - [Developer Guide Overview](/develop/dev-guide-overview.md) - - [Build a TiDB Cloud Serverless Cluster](/develop/dev-guide-build-cluster-in-cloud.md) - - [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) -- Connect to TiDB Cloud - - GUI Database Tools - - [JetBrains DataGrip](/develop/dev-guide-gui-datagrip.md) - - [DBeaver](/develop/dev-guide-gui-dbeaver.md) - - [VS Code](/develop/dev-guide-gui-vscode-sqltools.md) - - [MySQL Workbench](/develop/dev-guide-gui-mysql-workbench.md) - - [Navicat](/develop/dev-guide-gui-navicat.md) - - [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) - - BI - - [Looker Studio](/tidb-cloud/dev-guide-bi-looker-studio.md) - - Java - - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) - - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) - - [Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) - - 
[Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) - - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) - - Go - - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) - - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) - - Python - - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) - - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) - - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) - - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) - - [peewee](/develop/dev-guide-sample-application-python-peewee.md) - - [Django](/develop/dev-guide-sample-application-python-django.md) - - Node.js - - [node-mysql2](/develop/dev-guide-sample-application-nodejs-mysql2.md) - - [mysql.js](/develop/dev-guide-sample-application-nodejs-mysqljs.md) - - [Prisma](/develop/dev-guide-sample-application-nodejs-prisma.md) - - [Sequelize](/develop/dev-guide-sample-application-nodejs-sequelize.md) - - [TypeORM](/develop/dev-guide-sample-application-nodejs-typeorm.md) - - [Next.js](/develop/dev-guide-sample-application-nextjs.md) - - [AWS Lambda](/develop/dev-guide-sample-application-aws-lambda.md) - - Ruby - - [mysql2](/develop/dev-guide-sample-application-ruby-mysql2.md) - - [Rails](/develop/dev-guide-sample-application-ruby-rails.md) - - C# - - [C#](/develop/dev-guide-sample-application-cs.md) - - [WordPress](/tidb-cloud/dev-guide-wordpress.md) - - Serverless Driver ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) - - [TiDB Cloud Serverless Driver](/tidb-cloud/serverless-driver.md) - - [Node.js Example](/tidb-cloud/serverless-driver-node-example.md) - - [Prisma Example](/tidb-cloud/serverless-driver-prisma-example.md) - - [Kysely Example](/tidb-cloud/serverless-driver-kysely-example.md) - - [Drizzle Example](/tidb-cloud/serverless-driver-drizzle-example.md) -- Development 
Reference - - Design Database Schema - - [Overview](/develop/dev-guide-schema-design-overview.md) - - [Create a Database](/develop/dev-guide-create-database.md) - - [Create a Table](/develop/dev-guide-create-table.md) - - [Create a Secondary Index](/develop/dev-guide-create-secondary-indexes.md) - - Write Data - - [Insert Data](/develop/dev-guide-insert-data.md) - - [Update Data](/develop/dev-guide-update-data.md) - - [Delete Data](/develop/dev-guide-delete-data.md) - - [Periodically Delete Expired Data Using TTL (Time to Live)](/time-to-live.md) - - [Prepared Statements](/develop/dev-guide-prepared-statement.md) - - Read Data - - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) - - [Multi-table Join Queries](/develop/dev-guide-join-tables.md) - - [Subquery](/develop/dev-guide-use-subqueries.md) - - [Paginate Results](/develop/dev-guide-paginate-results.md) - - [Views](/develop/dev-guide-use-views.md) - - [Temporary Tables](/develop/dev-guide-use-temporary-tables.md) - - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) - - Read Replica Data - - [Follower Read](/develop/dev-guide-use-follower-read.md) - - [Stale Read](/develop/dev-guide-use-stale-read.md) - - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) - - Transaction - - [Overview](/develop/dev-guide-transaction-overview.md) - - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) - - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) - - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) - - Optimize - - [Overview](/develop/dev-guide-optimize-sql-overview.md) - - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) - - [Best Practices for Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) - - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) - - Other Optimization Methods - - [Avoid 
Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) - - [Unique Serial Number Generation](/develop/dev-guide-unique-serial-number-generation.md) - - Troubleshoot - - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) - - [Unstable Result Set](/develop/dev-guide-unstable-result-set.md) - - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) - - Development Guidelines - - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) - - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) - - [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) - - Third-Party Support - - [Third-Party Tools Supported by TiDB](/develop/dev-guide-third-party-support.md) - - [Known Incompatibility Issues with Third-Party Tools](/develop/dev-guide-third-party-tools-compatibility.md) - ## GUIDES - Manage Cluster - Plan Your Cluster - - [Select Your Cluster Tier](/tidb-cloud/select-cluster-tier.md) + - [Select Your Cluster Plan](/tidb-cloud/select-cluster-tier.md) - [Determine Your TiDB Size](/tidb-cloud/size-your-cluster.md) - [TiDB Cloud Performance Reference](/tidb-cloud/tidb-cloud-performance-reference.md) - - Manage TiDB Cloud Serverless Clusters - - [Create a TiDB Cloud Serverless Cluster](/tidb-cloud/create-tidb-cluster-serverless.md) - - Connect to Your TiDB Cloud Serverless Cluster - - [Connection Overview](/tidb-cloud/connect-to-tidb-cluster-serverless.md) - - [Connect via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md) - - [Connect via Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) - - Branch ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) - - [Overview](/tidb-cloud/branch-overview.md) - - [Manage Branches](/tidb-cloud/branch-manage.md) - - [GitHub Integration](/tidb-cloud/branch-github-integration.md) - - [Manage Spending Limit](/tidb-cloud/manage-serverless-spend-limit.md) - - [Back Up and 
Restore TiDB Cloud Serverless Data](/tidb-cloud/backup-and-restore-serverless.md) - - [Export Data from TiDB Cloud Serverless](/tidb-cloud/serverless-export.md) - - Manage TiDB Cloud Dedicated Clusters - - [Create a TiDB Cloud Dedicated Cluster](/tidb-cloud/create-tidb-cluster.md) - - Connect to Your TiDB Cloud Dedicated Cluster - - [Connection Method Overview](/tidb-cloud/connect-to-tidb-cluster.md) - - [Connect via Public Connection](/tidb-cloud/connect-via-standard-connection.md) - - [Connect via Private Endpoint with AWS](/tidb-cloud/set-up-private-endpoint-connections.md) - - [Connect via Private Endpoint with Azure](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md) - - [Connect via Private Endpoint with Google Cloud](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md) - - [Connect via VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md) - - [Connect via SQL Shell](/tidb-cloud/connect-via-sql-shell.md) - - [Scale a TiDB Cloud Dedicated Cluster](/tidb-cloud/scale-tidb-cluster.md) - - [Back Up and Restore TiDB Cloud Dedicated Data](/tidb-cloud/backup-and-restore.md) - - [Pause or Resume a TiDB Cloud Dedicated Cluster](/tidb-cloud/pause-or-resume-tidb-cluster.md) - - [Configure Maintenance Window](/tidb-cloud/configure-maintenance-window.md) + - [Create a TiDB Cloud Dedicated Cluster](/tidb-cloud/create-tidb-cluster.md) + - Connect to Your TiDB Cloud Dedicated Cluster + - [Network Connection Overview](/tidb-cloud/connect-to-tidb-cluster.md) + - [Connect via Public Connection](/tidb-cloud/connect-via-standard-connection.md) + - [Connect via Private Endpoint with AWS](/tidb-cloud/set-up-private-endpoint-connections.md) + - [Connect via Private Endpoint with Azure](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md) + - [Connect via Private Endpoint with Google Cloud](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md) + - [Connect via VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md) + - [Connect 
via SQL Shell](/tidb-cloud/connect-via-sql-shell.md) + - [Scale a TiDB Cloud Dedicated Cluster](/tidb-cloud/scale-tidb-cluster.md) + - [Back Up and Restore TiDB Cloud Dedicated Data](/tidb-cloud/backup-and-restore.md) + - [Pause or Resume a TiDB Cloud Dedicated Cluster](/tidb-cloud/pause-or-resume-tidb-cluster.md) + - [Configure Maintenance Window](/tidb-cloud/configure-maintenance-window.md) - Use an HTAP Cluster with TiFlash - [TiFlash Overview](/tiflash/tiflash-overview.md) - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) @@ -186,12 +68,15 @@ - [Subscribe via Slack](/tidb-cloud/monitor-alert-slack.md) - [Subscribe via Zoom](/tidb-cloud/monitor-alert-zoom.md) - [Cluster Events](/tidb-cloud/tidb-cloud-events.md) - - [Third-Party Metrics Integrations](/tidb-cloud/third-party-monitoring-integrations.md) ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) + - Third-Party Metrics Integrations + - [Overview](/tidb-cloud/third-party-monitoring-integrations.md) + - [Migrate Datadog and New Relic Integrations](/tidb-cloud/migrate-metrics-integrations.md) + - [Migrate Prometheus Integrations](/tidb-cloud/migrate-prometheus-metrics-integrations.md) - [TiDB Cloud Clinic](/tidb-cloud/tidb-cloud-clinic.md) - Tune Performance - [Overview](/tidb-cloud/tidb-cloud-tune-performance-overview.md) - Analyze Performance - - [Use the Diagnosis Tab](/tidb-cloud/tune-performance.md) + - [Use the Diagnosis Page](/tidb-cloud/tune-performance.md) - [Use Statement Summary Tables](/statement-summary-tables.md) - SQL Tuning - [Overview](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) @@ -240,10 +125,11 @@ - [TiKV Follower Read](/follower-read.md) - [Coprocessor Cache](/coprocessor-cache.md) - Garbage Collection (GC) - - [Overview](/garbage-collection-overview.md) - - [Configuration](/garbage-collection-configuration.md) + - [Overview](/garbage-collection-overview.md) + - [Configuration](/garbage-collection-configuration.md) - [Tune TiFlash 
Performance](/tiflash/tune-tiflash-performance.md) - Optimize Resource Allocation + - [Overview of Resource Allocation](/tidb-cloud/optimize-resource-allocation.md) - Resource Manager - [Use Resource Control to Achieve Resource Group Limitation and Flow Control](/tidb-resource-control-ru-groups.md) - [Manage Runaway Queries](/tidb-resource-control-runaway-queries.md) @@ -251,6 +137,9 @@ - TiDB Node Group - [Overview of TiDB Node Group](/tidb-cloud/tidb-node-group-overview.md) - [Manage TiDB Node Groups](/tidb-cloud/tidb-node-group-management.md) + - Manage Connections by TiProxy + - [Overview of TiProxy](/tidb-cloud/tiproxy-overview-for-cloud.md) + - [Manage TiProxy](/tidb-cloud/tiproxy-management.md) - [Upgrade a TiDB Cluster](/tidb-cloud/upgrade-tidb-cluster.md) - [Delete a TiDB Cluster](/tidb-cloud/delete-tidb-cluster.md) - Migrate or Import Data @@ -263,19 +152,13 @@ - [Migrate from MySQL-Compatible Databases Using AWS DMS](/tidb-cloud/migrate-from-mysql-using-aws-dms.md) - [Migrate from Amazon RDS for Oracle Using AWS DMS](/tidb-cloud/migrate-from-oracle-using-aws-dms.md) - Import Data into TiDB Cloud Dedicated - - [Import Sample Data](/tidb-cloud/import-sample-data.md) + - [Import Sample Data (SQL Files) from Cloud Storage](/tidb-cloud/import-sample-data.md) - [Import CSV Files from Cloud Storage](/tidb-cloud/import-csv-files.md) - [Import Parquet Files from Cloud Storage](/tidb-cloud/import-parquet-files.md) + - [Import Snapshot Files from Cloud Storage](/tidb-cloud/import-snapshot-files.md) - [Import with MySQL CLI](/tidb-cloud/import-with-mysql-cli.md) - - Import Data into TiDB Cloud Serverless - - [Import Sample Data](/tidb-cloud/import-sample-data-serverless.md) - - [Import Local Files](/tidb-cloud/tidb-cloud-import-local-files.md) - - [Import CSV Files from Cloud Storage](/tidb-cloud/import-csv-files-serverless.md) - - [Import Parquet Files from Cloud Storage](/tidb-cloud/import-parquet-files-serverless.md) - - [Import with MySQL 
CLI](/tidb-cloud/import-with-mysql-cli-serverless.md) - Reference - [Configure External Storage Access for TiDB Cloud Dedicated](/tidb-cloud/dedicated-external-storage.md) - - [Configure External Storage Access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md) - [Naming Conventions for Data Import](/tidb-cloud/naming-conventions-for-data-import.md) - [CSV Configurations for Importing Data](/tidb-cloud/csv-config-for-import-data.md) - [Troubleshoot Access Denied Errors during Data Import from Amazon S3](/tidb-cloud/troubleshoot-import-access-denied-error.md) @@ -284,35 +167,6 @@ - Explore Data - [Chat2Query in SQL Editor](/tidb-cloud/explore-data-with-chat2query.md) ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) - [SQL Proxy Account](/tidb-cloud/sql-proxy-account.md) -- Vector Search ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) - - [Overview](/vector-search/vector-search-overview.md) - - Get Started - - [Get Started with SQL](/vector-search/vector-search-get-started-using-sql.md) - - [Get Started with Python](/vector-search/vector-search-get-started-using-python.md) - - Integrations - - [Overview](/vector-search/vector-search-integration-overview.md) - - AI Frameworks - - [LlamaIndex](/vector-search/vector-search-integrate-with-llamaindex.md) - - [Langchain](/vector-search/vector-search-integrate-with-langchain.md) - - AI Services - - [Amazon Bedrock](/tidb-cloud/vector-search-integrate-with-amazon-bedrock.md) - - Embedding Models/Services - - [Jina AI](/vector-search/vector-search-integrate-with-jinaai-embedding.md) - - ORM Libraries - - [SQLAlchemy](/vector-search/vector-search-integrate-with-sqlalchemy.md) - - [peewee](/vector-search/vector-search-integrate-with-peewee.md) - - [Django ORM](/vector-search/vector-search-integrate-with-django-orm.md) - - Text Search - - [Full-Text Search with SQL](/tidb-cloud/vector-search-full-text-search-sql.md) - - [Full-Text Search with 
Python](/tidb-cloud/vector-search-full-text-search-python.md) - - [Hybrid Search](/tidb-cloud/vector-search-hybrid-search.md) - - Reference - - [Vector Data Types](/vector-search/vector-search-data-types.md) - - [Vector Functions and Operators](/vector-search/vector-search-functions-and-operators.md) - - [Vector Index](/vector-search/vector-search-index.md) - - [Improve Performance](/vector-search/vector-search-improve-performance.md) - - [Limitations](/vector-search/vector-search-limitations.md) - - [Changelogs](/tidb-cloud/vector-search-changelogs.md) - Data Service ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) - [Overview](/tidb-cloud/data-service-overview.md) - [Get Started](/tidb-cloud/data-service-get-started.md) @@ -341,12 +195,9 @@ - [Set Up Self-Hosted Kafka Private Link Service in AWS](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md) - [Set Up Self-Hosted Kafka Private Link Service in Azure](/tidb-cloud/setup-azure-self-hosted-kafka-private-link-service.md) - [Set Up Self-Hosted Kafka Private Service Connect in Google Cloud](/tidb-cloud/setup-self-hosted-kafka-private-service-connect.md) -- Disaster Recovery - - [Recovery Group Overview](/tidb-cloud/recovery-group-overview.md) - - [Get Started](/tidb-cloud/recovery-group-get-started.md) - - [Failover and Reprotect Databases](/tidb-cloud/recovery-group-failover.md) - - [Delete a Recovery Group](/tidb-cloud/recovery-group-delete.md) + - [Set Up Private Endpoint for Changefeeds](/tidb-cloud/set-up-sink-private-endpoint.md) - Security + - [Security Overview](/tidb-cloud/security-overview.md) - Identity Access Control - [Password Authentication](/tidb-cloud/tidb-cloud-password-authentication.md) - [Standard SSO Authentication](/tidb-cloud/tidb-cloud-sso-authentication.md) @@ -354,25 +205,20 @@ - [Identity Access Management](/tidb-cloud/manage-user-access.md) - [OAuth 2.0](/tidb-cloud/oauth2.md) - Network Access Control - - TiDB Cloud Serverless - - [Connect via Private 
Endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) - - [Configure Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md) - - [TLS Connections to TiDB Cloud Serverless](/tidb-cloud/secure-connections-to-serverless-clusters.md) - - TiDB Cloud Dedicated - - [Configure an IP Access List](/tidb-cloud/configure-ip-access-list.md) - - [Connect via Private Endpoint with AWS](/tidb-cloud/set-up-private-endpoint-connections.md) - - [Connect via Private Endpoint with Azure](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md) - - [Connect via Private Endpoint with Google Cloud](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md) - - [Connect via VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md) - - [TLS Connections to TiDB Cloud Dedicated](/tidb-cloud/tidb-cloud-tls-connect-to-dedicated.md) + - [Configure an IP Access List](/tidb-cloud/configure-ip-access-list.md) + - [Connect via Private Endpoint with AWS](/tidb-cloud/set-up-private-endpoint-connections.md) + - [Connect via Private Endpoint with Azure](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md) + - [Connect via Private Endpoint with Google Cloud](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md) + - [Connect via VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md) + - [TLS Connections to TiDB Cloud Dedicated](/tidb-cloud/tidb-cloud-tls-connect-to-dedicated.md) - Data Access Control - - [Encryption at Rest Using Customer-Managed Encryption Keys](/tidb-cloud/tidb-cloud-encrypt-cmek.md) - - [User-Controlled Log Redaction](/tidb-cloud/tidb-cloud-log-redaction.md) + - [Encryption at Rest Using Customer-Managed Encryption Keys on AWS](/tidb-cloud/tidb-cloud-encrypt-cmek-aws.md) + - [Encryption at Rest Using Customer-Managed Encryption Keys on Azure](/tidb-cloud/tidb-cloud-encrypt-cmek-azure.md) + - [User-Controlled Log Redaction](/tidb-cloud/tidb-cloud-log-redaction.md) - 
Database Access Control - [Configure Cluster Password Settings](/tidb-cloud/configure-security-settings.md) - Audit Management - [TiDB Cloud Dedicated Database Audit Logging](/tidb-cloud/tidb-cloud-auditing.md) - - [TiDB Cloud Serverless Database Audit Logging](/tidb-cloud/serverless-audit-logging.md) ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) - [Console Audit Logging](/tidb-cloud/tidb-cloud-console-auditing.md) - Billing - [Invoices](/tidb-cloud/tidb-cloud-billing.md#invoices) @@ -381,10 +227,9 @@ - [Billing Profile](/tidb-cloud/tidb-cloud-billing.md#billing-profile) - [Credits](/tidb-cloud/tidb-cloud-billing.md#credits) - [Payment Method Setting](/tidb-cloud/tidb-cloud-billing.md#payment-method) - - [Billing from AWS, Azure, or Google Cloud Marketplace](/tidb-cloud/tidb-cloud-billing.md#billing-from-aws-marketplace-azure-marketplace-or-google-cloud-marketplace) + - [Billing from Cloud Provider Marketplace](/tidb-cloud/tidb-cloud-billing.md#billing-from-cloud-provider-marketplace) - [Billing for Changefeed](/tidb-cloud/tidb-cloud-billing-ticdc-rcu.md) - [Billing for Data Migration](/tidb-cloud/tidb-cloud-billing-dm.md) - - [Billing for Recovery Groups](/tidb-cloud/tidb-cloud-billing-recovery-group.md) - [Manage Budgets](/tidb-cloud/tidb-cloud-budget.md) - Integrations - [Airbyte](/tidb-cloud/integrate-tidbcloud-with-airbyte.md) @@ -402,18 +247,15 @@ - Terraform - [Terraform Integration Overview](/tidb-cloud/terraform-tidbcloud-provider-overview.md) - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) - - [Use TiDB Cloud Dedicated Cluster Resource](/tidb-cloud/terraform-use-dedicated-cluster-resource.md) - - [Use TiDB Cloud Dedicated Private Endpoint Connection Resource](/tidb-cloud/terraform-use-dedicated-private-endpoint-connection-resource.md) - - [Use TiDB Cloud Dedicated VPC Peering Resource](/tidb-cloud/terraform-use-dedicated-vpc-peering-resource.md) - - [Use TiDB Cloud Dedicated Network Container 
Resource](/tidb-cloud/terraform-use-dedicated-network-container-resource.md) - - [Use TiDB Cloud Serverless Cluster Resource](/tidb-cloud/terraform-use-serverless-cluster-resource.md) - - [Use TiDB Cloud Serverless Branch Resource](/tidb-cloud/terraform-use-serverless-branch-resource.md) - - [Use TiDB Cloud Serverless Export Resource](/tidb-cloud/terraform-use-serverless-export-resource.md) - - [Use SQL User Resource](/tidb-cloud/terraform-use-sql-user-resource.md) - - [Use Cluster Resource (Deprecated)](/tidb-cloud/terraform-use-cluster-resource.md) - - [Use Backup Resource](/tidb-cloud/terraform-use-backup-resource.md) - - [Use Restore Resource](/tidb-cloud/terraform-use-restore-resource.md) - - [Use Import Resource](/tidb-cloud/terraform-use-import-resource.md) + - [Use the `tidbcloud_dedicated_cluster` Resource](/tidb-cloud/terraform-use-dedicated-cluster-resource.md) + - [Use the `tidbcloud_dedicated_private_endpoint_connection` Resource](/tidb-cloud/terraform-use-dedicated-private-endpoint-connection-resource.md) + - [Use the `tidbcloud_dedicated_vpc_peering` Resource](/tidb-cloud/terraform-use-dedicated-vpc-peering-resource.md) + - [Use the `tidbcloud_dedicated_network_container` Resource](/tidb-cloud/terraform-use-dedicated-network-container-resource.md) + - [Use the `tidbcloud_sql_user` Resource](/tidb-cloud/terraform-use-sql-user-resource.md) + - [Use the `tidbcloud_cluster` Resource (Deprecated)](/tidb-cloud/terraform-use-cluster-resource.md) + - [Use the `tidbcloud_backup` Resource](/tidb-cloud/terraform-use-backup-resource.md) + - [Use the `tidbcloud_restore` Resource](/tidb-cloud/terraform-use-restore-resource.md) + - [Use the `tidbcloud_import` Resource](/tidb-cloud/terraform-use-import-resource.md) - [Migrate Cluster Resource](/tidb-cloud/terraform-migrate-cluster-resource.md) - [Vercel](/tidb-cloud/integrate-tidbcloud-with-vercel.md) - [Zapier](/tidb-cloud/integrate-tidbcloud-with-zapier.md) @@ -467,6 +309,7 @@ - 
[`BACKUP`](/sql-statements/sql-statement-backup.md) - [`BATCH`](/sql-statements/sql-statement-batch.md) - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CANCEL DISTRIBUTION JOB`](/sql-statements/sql-statement-cancel-distribution-job.md) - [`CANCEL IMPORT JOB`](/sql-statements/sql-statement-cancel-import-job.md) - [`COMMIT`](/sql-statements/sql-statement-commit.md) - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) @@ -484,6 +327,7 @@ - [`DELETE`](/sql-statements/sql-statement-delete.md) - [`DESC`](/sql-statements/sql-statement-desc.md) - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DISTRIBUTE TABLE`](/sql-statements/sql-statement-distribute-table.md) - [`DO`](/sql-statements/sql-statement-do.md) - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) @@ -547,6 +391,7 @@ - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW DISTRIBUTION JOBS`](/sql-statements/sql-statement-show-distribution-jobs.md) - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) - [`SHOW FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) @@ -569,6 +414,7 @@ - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) - [`SHOW STATS_TOPN`](/sql-statements/sql-statement-show-stats-topn.md) - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE DISTRIBUTION`](/sql-statements/sql-statement-show-table-distribution.md) - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) - [`SHOW TABLE 
STATUS`](/sql-statements/sql-statement-show-table-status.md) @@ -645,7 +491,6 @@ - Character Set and Collation - [Overview](/character-set-and-collation.md) - [GBK](/character-set-gbk.md) - - [GB18030](/character-set-gb18030.md) - Read Historical Data - Use Stale Read (Recommended) - [Usage Scenarios of Stale Read](/stale-read.md) @@ -717,84 +562,8 @@ - [Overview](/sys-schema/sys-schema.md) - [`schema_unused_indexes`](/sys-schema/sys-schema-unused-indexes.md) - [Metadata Lock](/metadata-lock.md) - - [Use UUIDs](/best-practices/uuid.md) - [TiDB Accelerated Table Creation](/accelerated-table-creation.md) - [Schema Cache](/schema-cache.md) -- API Reference ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) - - [Overview](/tidb-cloud/api-overview.md) - - v1beta1 - - [Billing](https://docs.pingcap.com/tidbcloud/api/v1beta1/billing) - - [Data Service](https://docs.pingcap.com/tidbcloud/api/v1beta1/dataservice) - - [IAM](https://docs.pingcap.com/tidbcloud/api/v1beta1/iam) - - [MSP (Deprecated)](https://docs.pingcap.com/tidbcloud/api/v1beta1/msp) - - [v1beta](https://docs.pingcap.com/tidbcloud/api/v1beta) -- CLI Reference ![BETA](/media/tidb-cloud/blank_transparent_placeholder.png) - - [Overview](/tidb-cloud/cli-reference.md) - - auth - - [login](/tidb-cloud/ticloud-auth-login.md) - - [logout](/tidb-cloud/ticloud-auth-logout.md) - - [whoami](/tidb-cloud/ticloud-auth-whoami.md) - - serverless - - [create](/tidb-cloud/ticloud-cluster-create.md) - - [delete](/tidb-cloud/ticloud-cluster-delete.md) - - [describe](/tidb-cloud/ticloud-cluster-describe.md) - - [list](/tidb-cloud/ticloud-cluster-list.md) - - [update](/tidb-cloud/ticloud-serverless-update.md) - - [spending-limit](/tidb-cloud/ticloud-serverless-spending-limit.md) - - [region](/tidb-cloud/ticloud-serverless-region.md) - - [shell](/tidb-cloud/ticloud-serverless-shell.md) - - branch - - [create](/tidb-cloud/ticloud-branch-create.md) - - [delete](/tidb-cloud/ticloud-branch-delete.md) - - 
[describe](/tidb-cloud/ticloud-branch-describe.md) - - [list](/tidb-cloud/ticloud-branch-list.md) - - [shell](/tidb-cloud/ticloud-branch-shell.md) - - import - - [cancel](/tidb-cloud/ticloud-import-cancel.md) - - [describe](/tidb-cloud/ticloud-import-describe.md) - - [list](/tidb-cloud/ticloud-import-list.md) - - [start](/tidb-cloud/ticloud-import-start.md) - - export - - [create](/tidb-cloud/ticloud-serverless-export-create.md) - - [describe](/tidb-cloud/ticloud-serverless-export-describe.md) - - [list](/tidb-cloud/ticloud-serverless-export-list.md) - - [cancel](/tidb-cloud/ticloud-serverless-export-cancel.md) - - [download](/tidb-cloud/ticloud-serverless-export-download.md) - - sql-user - - [create](/tidb-cloud/ticloud-serverless-sql-user-create.md) - - [delete](/tidb-cloud/ticloud-serverless-sql-user-delete.md) - - [list](/tidb-cloud/ticloud-serverless-sql-user-list.md) - - [update](/tidb-cloud/ticloud-serverless-sql-user-update.md) - - audit-log - - [config](/tidb-cloud/ticloud-auditlog-config.md) - - [describe](/tidb-cloud/ticloud-auditlog-describe.md) - - [download](/tidb-cloud/ticloud-auditlog-download.md) - - filter-rule - - [create](/tidb-cloud/ticloud-auditlog-filter-create.md) - - [delete](/tidb-cloud/ticloud-auditlog-filter-delete.md) - - [describe](/tidb-cloud/ticloud-auditlog-filter-describe.md) - - [list](/tidb-cloud/ticloud-auditlog-filter-list.md) - - [template](/tidb-cloud/ticloud-auditlog-filter-template.md) - - [update](/tidb-cloud/ticloud-auditlog-filter-update.md) - - authorized-network - - [create](/tidb-cloud/ticloud-serverless-authorized-network-create.md) - - [delete](/tidb-cloud/ticloud-serverless-authorized-network-delete.md) - - [list](/tidb-cloud/ticloud-serverless-authorized-network-list.md) - - [update](/tidb-cloud/ticloud-serverless-authorized-network-update.md) - - [ai](/tidb-cloud/ticloud-ai.md) - - [completion](/tidb-cloud/ticloud-completion.md) - - config - - [create](/tidb-cloud/ticloud-config-create.md) - - 
[delete](/tidb-cloud/ticloud-config-delete.md) - - [describe](/tidb-cloud/ticloud-config-describe.md) - - [edit](/tidb-cloud/ticloud-config-edit.md) - - [list](/tidb-cloud/ticloud-config-list.md) - - [set](/tidb-cloud/ticloud-config-set.md) - - [use](/tidb-cloud/ticloud-config-use.md) - - project - - [list](/tidb-cloud/ticloud-project-list.md) - - [upgrade](/tidb-cloud/ticloud-upgrade.md) - - [help](/tidb-cloud/ticloud-help.md) - General Reference - TiDB Classic Architecture - [Overview](/tidb-architecture.md) @@ -805,8 +574,8 @@ - [TiDB X Architecture](/tidb-cloud/tidb-x-architecture.md) - Storage Engines - TiKV - - [TiKV Overview](/tikv-overview.md) - - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) - TiFlash - [TiFlash Overview](/tiflash/tiflash-overview.md) - [Spill to Disk](/tiflash/tiflash-spill-disk.md) @@ -818,7 +587,6 @@ - [Introduction](/tidb-distributed-execution-framework.md) - [TiDB Global Sort](/tidb-global-sort.md) - [TiDB Cloud Dedicated Limitations and Quotas](/tidb-cloud/limitations-and-quotas.md) - - [TiDB Cloud Serverless Limitations](/tidb-cloud/serverless-limitations.md) - [Limited SQL Features on TiDB Cloud](/tidb-cloud/limited-sql-features.md) - [TiDB Limitations](/tidb-limitations.md) - Benchmarks @@ -842,12 +610,10 @@ - [Server Status Variables](/status-variables.md) - [Table Filter](/table-filter.md) - [URI Formats of External Storage Services](/external-storage-uri.md) - - [DDL Execution Principles and Best Practices](/ddl-introduction.md) - [`ANALYZE` Embedded in DDL Statements](/ddl_embedded_analyze.md) - [Batch Processing](/batch-processing.md) - [Troubleshoot Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) - [Notifications](/tidb-cloud/notifications.md) - - [Glossary](/tidb-cloud/tidb-cloud-glossary.md) - Support Plan - [Connected Care Overview](/tidb-cloud/connected-care-overview.md) - 
[Connected Care Details](/tidb-cloud/connected-care-detail.md) @@ -857,6 +623,8 @@ - Connected: IM Subscription for TiDB Cloud Alerts - [Subscribe via Slack](/tidb-cloud/monitor-alert-slack.md) - [Subscribe via Zoom](/tidb-cloud/monitor-alert-zoom.md) + - [Subscribe via Flashduty](/tidb-cloud/monitor-alert-flashduty.md) + - [Subscribe via PagerDuty](/tidb-cloud/monitor-alert-pagerduty.md) - Connected: IM Ticket Creation and Update Subscription - [Create Tickets and Subscribe to Ticket Updates via Slack](/tidb-cloud/connected-slack-ticket-creation.md) - [Create Tickets and Subscribe to Ticket Updates via Lark](/tidb-cloud/connected-lark-ticket-creation.md) @@ -866,23 +634,8 @@ - [Get Support](/tidb-cloud/tidb-cloud-support.md) - FAQs - [TiDB Cloud FAQs](/tidb-cloud/tidb-cloud-faq.md) - - [TiDB Cloud Serverless FAQs](/tidb-cloud/serverless-faqs.md) +- [Glossary](/tidb-cloud/tidb-cloud-glossary.md) -## RELEASES +## _BUILD_ALLOWLIST -- Release Notes - - [2025](/tidb-cloud/tidb-cloud-release-notes.md) - - [2024](/tidb-cloud/release-notes-2024.md) - - [2023](/tidb-cloud/release-notes-2023.md) - - [2022](/tidb-cloud/release-notes-2022.md) - - [2021](/tidb-cloud/release-notes-2021.md) - - [2020](/tidb-cloud/release-notes-2020.md) -- Maintenance Notification - - [[2024-09-15] TiDB Cloud Console Maintenance Notification](/tidb-cloud/notification-2024-09-15-console-maintenance.md) - - [[2024-04-18] TiDB Cloud Data Migration (DM) Feature Maintenance Notification](/tidb-cloud/notification-2024-04-18-dm-feature-maintenance.md) - - [[2024-04-16] TiDB Cloud Monitoring Features Maintenance Notification](/tidb-cloud/notification-2024-04-16-monitoring-features-maintenance.md) - - [[2024-04-11] TiDB Cloud Data Migration (DM) Feature Maintenance Notification](/tidb-cloud/notification-2024-04-11-dm-feature-maintenance.md) - - [[2024-04-09] TiDB Cloud Monitoring Features Maintenance Notification](/tidb-cloud/notification-2024-04-09-monitoring-features-maintenance.md) - - [[2023-11-14] 
TiDB Cloud Dedicated Scale Feature Maintenance Notification](/tidb-cloud/notification-2023-11-14-scale-feature-maintenance.md) - - [[2023-09-26] TiDB Cloud Console Maintenance Notification](/tidb-cloud/notification-2023-09-26-console-maintenance.md) - - [[2023-08-31] TiDB Cloud Console Maintenance Notification](/tidb-cloud/notification-2023-08-31-console-maintenance.md) +- [Manage Database Users and Roles](/tidb-cloud/configure-sql-users.md) diff --git a/TOC-tidb-releases.md b/TOC-tidb-releases.md new file mode 100644 index 0000000000000..6a6f2f5708e4d --- /dev/null +++ b/TOC-tidb-releases.md @@ -0,0 +1,246 @@ + + + +# Table of Contents + +## OVERVIEW + +- [Release Timeline](/releases/release-timeline.md) +- [TiDB Versioning](/releases/versioning.md) +- [Release Support Policy](https://www.pingcap.com/tidb-release-support-policy/) + +## RELEASE NOTES + +- v8.5 + - [8.5.5](/releases/release-8.5.5.md) + - [8.5.4](/releases/release-8.5.4.md) + - [8.5.3](/releases/release-8.5.3.md) + - [8.5.2](/releases/release-8.5.2.md) + - [8.5.1](/releases/release-8.5.1.md) + - [8.5.0](/releases/release-8.5.0.md) +- v8.4 + - [8.4.0-DMR](/releases/release-8.4.0.md) +- v8.3 + - [8.3.0-DMR](/releases/release-8.3.0.md) +- v8.2 + - [8.2.0-DMR](/releases/release-8.2.0.md) +- v8.1 + - [8.1.2](/releases/release-8.1.2.md) + - [8.1.1](/releases/release-8.1.1.md) + - [8.1.0](/releases/release-8.1.0.md) +- v8.0 + - [8.0.0-DMR](/releases/release-8.0.0.md) +- v7.6 + - [7.6.0-DMR](/releases/release-7.6.0.md) +- v7.5 + - [7.5.7](/releases/release-7.5.7.md) + - [7.5.6](/releases/release-7.5.6.md) + - [7.5.5](/releases/release-7.5.5.md) + - [7.5.4](/releases/release-7.5.4.md) + - [7.5.3](/releases/release-7.5.3.md) + - [7.5.2](/releases/release-7.5.2.md) + - [7.5.1](/releases/release-7.5.1.md) + - [7.5.0](/releases/release-7.5.0.md) +- v7.4 + - [7.4.0-DMR](/releases/release-7.4.0.md) +- v7.3 + - [7.3.0-DMR](/releases/release-7.3.0.md) +- v7.2 + - [7.2.0-DMR](/releases/release-7.2.0.md) +- v7.1 + - 
[7.1.6](/releases/release-7.1.6.md) + - [7.1.5](/releases/release-7.1.5.md) + - [7.1.4](/releases/release-7.1.4.md) + - [7.1.3](/releases/release-7.1.3.md) + - [7.1.2](/releases/release-7.1.2.md) + - [7.1.1](/releases/release-7.1.1.md) + - [7.1.0](/releases/release-7.1.0.md) +- v7.0 + - [7.0.0-DMR](/releases/release-7.0.0.md) +- v6.6 + - [6.6.0-DMR](/releases/release-6.6.0.md) +- v6.5 + - [6.5.12](/releases/release-6.5.12.md) + - [6.5.11](/releases/release-6.5.11.md) + - [6.5.10](/releases/release-6.5.10.md) + - [6.5.9](/releases/release-6.5.9.md) + - [6.5.8](/releases/release-6.5.8.md) + - [6.5.7](/releases/release-6.5.7.md) + - [6.5.6](/releases/release-6.5.6.md) + - [6.5.5](/releases/release-6.5.5.md) + - [6.5.4](/releases/release-6.5.4.md) + - [6.5.3](/releases/release-6.5.3.md) + - [6.5.2](/releases/release-6.5.2.md) + - [6.5.1](/releases/release-6.5.1.md) + - [6.5.0](/releases/release-6.5.0.md) +- v6.4 + - [6.4.0-DMR](/releases/release-6.4.0.md) +- v6.3 + - [6.3.0-DMR](/releases/release-6.3.0.md) +- v6.2 + - [6.2.0-DMR](/releases/release-6.2.0.md) +- v6.1 + - [6.1.7](/releases/release-6.1.7.md) + - [6.1.6](/releases/release-6.1.6.md) + - [6.1.5](/releases/release-6.1.5.md) + - [6.1.4](/releases/release-6.1.4.md) + - [6.1.3](/releases/release-6.1.3.md) + - [6.1.2](/releases/release-6.1.2.md) + - [6.1.1](/releases/release-6.1.1.md) + - [6.1.0](/releases/release-6.1.0.md) +- v6.0 + - [6.0.0-DMR](/releases/release-6.0.0-dmr.md) +- v5.4 + - [5.4.3](/releases/release-5.4.3.md) + - [5.4.2](/releases/release-5.4.2.md) + - [5.4.1](/releases/release-5.4.1.md) + - [5.4.0](/releases/release-5.4.0.md) +- End of Life Releases + - v5.3 + - [5.3.4](/releases/release-5.3.4.md) + - [5.3.3](/releases/release-5.3.3.md) + - [5.3.2](/releases/release-5.3.2.md) + - [5.3.1](/releases/release-5.3.1.md) + - [5.3.0](/releases/release-5.3.0.md) + - v5.2 + - [5.2.4](/releases/release-5.2.4.md) + - [5.2.3](/releases/release-5.2.3.md) + - [5.2.2](/releases/release-5.2.2.md) + - 
[5.2.1](/releases/release-5.2.1.md) + - [5.2.0](/releases/release-5.2.0.md) + - v5.1 + - [5.1.5](/releases/release-5.1.5.md) + - [5.1.4](/releases/release-5.1.4.md) + - [5.1.3](/releases/release-5.1.3.md) + - [5.1.2](/releases/release-5.1.2.md) + - [5.1.1](/releases/release-5.1.1.md) + - [5.1.0](/releases/release-5.1.0.md) + - v5.0 + - [5.0.6](/releases/release-5.0.6.md) + - [5.0.5](/releases/release-5.0.5.md) + - [5.0.4](/releases/release-5.0.4.md) + - [5.0.3](/releases/release-5.0.3.md) + - [5.0.2](/releases/release-5.0.2.md) + - [5.0.1](/releases/release-5.0.1.md) + - [5.0 GA](/releases/release-5.0.0.md) + - [5.0.0-rc](/releases/release-5.0.0-rc.md) + - v4.0 + - [4.0.16](/releases/release-4.0.16.md) + - [4.0.15](/releases/release-4.0.15.md) + - [4.0.14](/releases/release-4.0.14.md) + - [4.0.13](/releases/release-4.0.13.md) + - [4.0.12](/releases/release-4.0.12.md) + - [4.0.11](/releases/release-4.0.11.md) + - [4.0.10](/releases/release-4.0.10.md) + - [4.0.9](/releases/release-4.0.9.md) + - [4.0.8](/releases/release-4.0.8.md) + - [4.0.7](/releases/release-4.0.7.md) + - [4.0.6](/releases/release-4.0.6.md) + - [4.0.5](/releases/release-4.0.5.md) + - [4.0.4](/releases/release-4.0.4.md) + - [4.0.3](/releases/release-4.0.3.md) + - [4.0.2](/releases/release-4.0.2.md) + - [4.0.1](/releases/release-4.0.1.md) + - [4.0 GA](/releases/release-4.0-ga.md) + - [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) + - [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) + - [4.0.0-rc](/releases/release-4.0.0-rc.md) + - [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) + - [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) + - [4.0.0-beta](/releases/release-4.0.0-beta.md) + - v3.1 + - [3.1.2](/releases/release-3.1.2.md) + - [3.1.1](/releases/release-3.1.1.md) + - [3.1.0 GA](/releases/release-3.1.0-ga.md) + - [3.1.0-rc](/releases/release-3.1.0-rc.md) + - [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) + - [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) + - 
[3.1.0-beta](/releases/release-3.1.0-beta.md) + - v3.0 + - [3.0.20](/releases/release-3.0.20.md) + - [3.0.19](/releases/release-3.0.19.md) + - [3.0.18](/releases/release-3.0.18.md) + - [3.0.17](/releases/release-3.0.17.md) + - [3.0.16](/releases/release-3.0.16.md) + - [3.0.15](/releases/release-3.0.15.md) + - [3.0.14](/releases/release-3.0.14.md) + - [3.0.13](/releases/release-3.0.13.md) + - [3.0.12](/releases/release-3.0.12.md) + - [3.0.11](/releases/release-3.0.11.md) + - [3.0.10](/releases/release-3.0.10.md) + - [3.0.9](/releases/release-3.0.9.md) + - [3.0.8](/releases/release-3.0.8.md) + - [3.0.7](/releases/release-3.0.7.md) + - [3.0.6](/releases/release-3.0.6.md) + - [3.0.5](/releases/release-3.0.5.md) + - [3.0.4](/releases/release-3.0.4.md) + - [3.0.3](/releases/release-3.0.3.md) + - [3.0.2](/releases/release-3.0.2.md) + - [3.0.1](/releases/release-3.0.1.md) + - [3.0 GA](/releases/release-3.0-ga.md) + - [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) + - [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) + - [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) + - [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) + - [3.0.0-beta](/releases/release-3.0-beta.md) + - v2.1 + - [2.1.19](/releases/release-2.1.19.md) + - [2.1.18](/releases/release-2.1.18.md) + - [2.1.17](/releases/release-2.1.17.md) + - [2.1.16](/releases/release-2.1.16.md) + - [2.1.15](/releases/release-2.1.15.md) + - [2.1.14](/releases/release-2.1.14.md) + - [2.1.13](/releases/release-2.1.13.md) + - [2.1.12](/releases/release-2.1.12.md) + - [2.1.11](/releases/release-2.1.11.md) + - [2.1.10](/releases/release-2.1.10.md) + - [2.1.9](/releases/release-2.1.9.md) + - [2.1.8](/releases/release-2.1.8.md) + - [2.1.7](/releases/release-2.1.7.md) + - [2.1.6](/releases/release-2.1.6.md) + - [2.1.5](/releases/release-2.1.5.md) + - [2.1.4](/releases/release-2.1.4.md) + - [2.1.3](/releases/release-2.1.3.md) + - [2.1.2](/releases/release-2.1.2.md) + - [2.1.1](/releases/release-2.1.1.md) + - [2.1 
GA](/releases/release-2.1-ga.md) + - [2.1 RC5](/releases/release-2.1-rc.5.md) + - [2.1 RC4](/releases/release-2.1-rc.4.md) + - [2.1 RC3](/releases/release-2.1-rc.3.md) + - [2.1 RC2](/releases/release-2.1-rc.2.md) + - [2.1 RC1](/releases/release-2.1-rc.1.md) + - [2.1 Beta](/releases/release-2.1-beta.md) + - v2.0 + - [2.0.11](/releases/release-2.0.11.md) + - [2.0.10](/releases/release-2.0.10.md) + - [2.0.9](/releases/release-2.0.9.md) + - [2.0.8](/releases/release-2.0.8.md) + - [2.0.7](/releases/release-2.0.7.md) + - [2.0.6](/releases/release-2.0.6.md) + - [2.0.5](/releases/release-2.0.5.md) + - [2.0.4](/releases/release-2.0.4.md) + - [2.0.3](/releases/release-2.0.3.md) + - [2.0.2](/releases/release-2.0.2.md) + - [2.0.1](/releases/release-2.0.1.md) + - [2.0](/releases/release-2.0-ga.md) + - [2.0 RC5](/releases/release-2.0-rc.5.md) + - [2.0 RC4](/releases/release-2.0-rc.4.md) + - [2.0 RC3](/releases/release-2.0-rc.3.md) + - [2.0 RC1](/releases/release-2.0-rc.1.md) + - [1.1 Beta](/releases/release-1.1-beta.md) + - [1.1 Alpha](/releases/release-1.1-alpha.md) + - v1.0 + - [1.0.8](/releases/release-1.0.8.md) + - [1.0.7](/releases/release-1.0.7.md) + - [1.0.6](/releases/release-1.0.6.md) + - [1.0.5](/releases/release-1.0.5.md) + - [1.0.4](/releases/release-1.0.4.md) + - [1.0.3](/releases/release-1.0.3.md) + - [1.0.2](/releases/release-1.0.2.md) + - [1.0.1](/releases/release-1.0.1.md) + - [1.0](/releases/release-1.0-ga.md) + - [Pre-GA](/releases/release-pre-ga.md) + - [RC4](/releases/release-rc.4.md) + - [RC3](/releases/release-rc.3.md) + - [RC2](/releases/release-rc.2.md) + - [RC1](/releases/release-rc.1.md) diff --git a/TOC.md b/TOC.md index 6e925c33c28a1..7c130d5c5d982 100644 --- a/TOC.md +++ b/TOC.md @@ -14,120 +14,6 @@ - [Explore SQL with TiDB](/basic-sql-operations.md) - [Explore HTAP](/explore-htap.md) - [Import Example Database](/import-example-data.md) -- Develop - - [Overview](/develop/dev-guide-overview.md) - - Quick Start - - [Build a {{{ .starter }}} 
Cluster](/develop/dev-guide-build-cluster-in-cloud.md) - - [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) - - Example Applications - - Java - - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) - - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) - - [Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) - - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) - - Go - - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) - - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) - - Python - - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) - - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) - - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) - - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) - - [peewee](/develop/dev-guide-sample-application-python-peewee.md) - - [Django](/develop/dev-guide-sample-application-python-django.md) - - Node.js - - [node-mysql2](/develop/dev-guide-sample-application-nodejs-mysql2.md) - - [mysql.js](/develop/dev-guide-sample-application-nodejs-mysqljs.md) - - [Prisma](/develop/dev-guide-sample-application-nodejs-prisma.md) - - [Sequelize](/develop/dev-guide-sample-application-nodejs-sequelize.md) - - [TypeORM](/develop/dev-guide-sample-application-nodejs-typeorm.md) - - [Next.js](/develop/dev-guide-sample-application-nextjs.md) - - [AWS Lambda](/develop/dev-guide-sample-application-aws-lambda.md) - - Ruby - - [mysql2](/develop/dev-guide-sample-application-ruby-mysql2.md) - - [Rails](/develop/dev-guide-sample-application-ruby-rails.md) - - C# - - [C#](/develop/dev-guide-sample-application-cs.md) - - Connect to TiDB - - GUI Database Tools - - [JetBrains DataGrip](/develop/dev-guide-gui-datagrip.md) - - [DBeaver](/develop/dev-guide-gui-dbeaver.md) - - [VS Code](/develop/dev-guide-gui-vscode-sqltools.md) - - [MySQL 
Workbench](/develop/dev-guide-gui-mysql-workbench.md) - - [Navicat](/develop/dev-guide-gui-navicat.md) - - [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) - - [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md) - - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) - - Design Database Schema - - [Overview](/develop/dev-guide-schema-design-overview.md) - - [Create a Database](/develop/dev-guide-create-database.md) - - [Create a Table](/develop/dev-guide-create-table.md) - - [Create a Secondary Index](/develop/dev-guide-create-secondary-indexes.md) - - Write Data - - [Insert Data](/develop/dev-guide-insert-data.md) - - [Update Data](/develop/dev-guide-update-data.md) - - [Delete Data](/develop/dev-guide-delete-data.md) - - [Periodically Delete Data Using Time to Live](/time-to-live.md) - - [Prepared Statements](/develop/dev-guide-prepared-statement.md) - - Read Data - - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) - - [Multi-table Join Queries](/develop/dev-guide-join-tables.md) - - [Subquery](/develop/dev-guide-use-subqueries.md) - - [Paginate Results](/develop/dev-guide-paginate-results.md) - - [Views](/develop/dev-guide-use-views.md) - - [Temporary Tables](/develop/dev-guide-use-temporary-tables.md) - - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) - - Read Replica Data - - [Follower Read](/develop/dev-guide-use-follower-read.md) - - [Stale Read](/develop/dev-guide-use-stale-read.md) - - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) - - Vector Search - - [Overview](/vector-search/vector-search-overview.md) - - Get Started - - [Get Started with SQL](/vector-search/vector-search-get-started-using-sql.md) - - [Get Started with Python](/vector-search/vector-search-get-started-using-python.md) - - Integrations - - [Overview](/vector-search/vector-search-integration-overview.md) - - AI Frameworks - - 
[LlamaIndex](/vector-search/vector-search-integrate-with-llamaindex.md) - - [Langchain](/vector-search/vector-search-integrate-with-langchain.md) - - Embedding Models/Services - - [Jina AI](/vector-search/vector-search-integrate-with-jinaai-embedding.md) - - ORM Libraries - - [SQLAlchemy](/vector-search/vector-search-integrate-with-sqlalchemy.md) - - [peewee](/vector-search/vector-search-integrate-with-peewee.md) - - [Django](/vector-search/vector-search-integrate-with-django-orm.md) - - [Improve Performance](/vector-search/vector-search-improve-performance.md) - - [Limitations](/vector-search/vector-search-limitations.md) - - Transaction - - [Overview](/develop/dev-guide-transaction-overview.md) - - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) - - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) - - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) - - Optimize - - [Overview](/develop/dev-guide-optimize-sql-overview.md) - - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) - - [Best Practices for Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) - - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) - - Other Optimization Methods - - [Avoid Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) - - [Unique Serial Number Generation](/develop/dev-guide-unique-serial-number-generation.md) - - Troubleshoot - - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) - - [Unstable Result Set](/develop/dev-guide-unstable-result-set.md) - - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) - - Reference - - [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) - - Guidelines - - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) - - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) - - Cloud 
Native Development Environment - - [Gitpod](/develop/dev-guide-playground-gitpod.md) - - Third-Party Support - - [Third-Party Tools Supported by TiDB](/develop/dev-guide-third-party-support.md) - - [Known Incompatibility Issues with Third-Party Tools](/develop/dev-guide-third-party-tools-compatibility.md) - - [ProxySQL Integration Guide](/develop/dev-guide-proxysql-integration.md) - - [Amazon AppFlow Integration Guide](/develop/dev-guide-aws-appflow-integration.md) - Deploy - [Software and Hardware Requirements](/hardware-and-software-requirements.md) - [Environment Configuration Checklist](/check-before-deployment.md) @@ -354,7 +240,6 @@ - [TiFlash Performance Analysis Methods](/tiflash-performance-tuning-methods.md) - [TiCDC Performance Analysis Methods](/ticdc-performance-tuning-methods.md) - [Latency Breakdown](/latency-breakdown.md) - - [TiDB Best Practices on Public Cloud](/best-practices-on-public-cloud.md) - Configuration Tuning - [Tune Operating System Performance](/tune-operating-system.md) - [Tune TiDB Memory](/configure-memory-usage.md) @@ -434,23 +319,6 @@ - [TSO Configuration Flags](/command-line-flags-for-tso-configuration.md) - [Scheduling Configuration File](/scheduling-configuration-file.md) - [Scheduling Configuration Flags](/command-line-flags-for-scheduling-configuration.md) -- Best Practices - - [Use TiDB](/best-practices/tidb-best-practices.md) - - [Manage DDL](/ddl-introduction.md) - - [Optimize Multi-Column Indexes](/best-practices/multi-column-index-best-practices.md) - - [Manage Indexes and Identify Unused Indexes](/best-practices/index-management-best-practices.md) - - [Handle Millions of Tables in SaaS Multi-Tenant Scenarios](/best-practices/saas-best-practices.md) - - [Use TiDB Partitioned Tables](/best-practices/tidb-partitioned-tables-best-practices.md) - - [Use UUIDs as Primary Keys](/best-practices/uuid.md) - - [Develop Java Applications](/best-practices/java-app-best-practices.md) - - [Handle High-Concurrency 
Writes](/best-practices/high-concurrency-best-practices.md) - - [Tune TiKV Performance with Massive Regions](/best-practices/massive-regions-best-practices.md) - - [Tune PD Scheduling](/best-practices/pd-scheduling-best-practices.md) - - [Use Read-Only Storage Nodes](/best-practices/readonly-nodes.md) - - [Use HAProxy for Load Balancing](/best-practices/haproxy-best-practices.md) - - [Monitor TiDB Using Grafana](/best-practices/grafana-monitor-best-practices.md) - - [Three-Node Hybrid Deployment](/best-practices/three-nodes-hybrid-deployment.md) - - [Local Reads in Three-Data-Center Deployments](/best-practices/three-dc-local-read.md) - TiDB Tools - [Overview](/ecosystem-tool-user-guide.md) - [Use Cases](/ecosystem-tool-user-case.md) @@ -950,7 +818,7 @@ - [Date and Time Types](/data-type-date-and-time.md) - [String Types](/data-type-string.md) - [JSON Type](/data-type-json.md) - - [Vector Types](/vector-search/vector-search-data-types.md) + - [Vector Types](/ai/reference/vector-search-data-types.md) - Functions and Operators - [Overview](/functions-and-operators/functions-and-operators-overview.md) - [Type Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) @@ -964,7 +832,7 @@ - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) - [Locking Functions](/functions-and-operators/locking-functions.md) - [Information Functions](/functions-and-operators/information-functions.md) - - [Vector Functions and Operators](/vector-search/vector-search-functions-and-operators.md) + - [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) - JSON Functions - [Overview](/functions-and-operators/json-functions.md) - [Functions That Create JSON](/functions-and-operators/json-functions/json-functions-create.md) @@ -987,7 +855,7 @@ - [Comparisons between Functions and Syntax of Oracle and TiDB](/oracle-functions-to-tidb.md) - [Clustered 
Indexes](/clustered-indexes.md) - [Global Indexes](/global-indexes.md) - - [Vector Index](/vector-search/vector-search-index.md) + - [Vector Index](/ai/reference/vector-search-index.md) - [Constraints](/constraints.md) - [Generated Columns](/generated-columns.md) - [SQL Mode](/sql-mode.md) @@ -1090,6 +958,7 @@ - [Telemetry](/telemetry.md) - [Error Codes](/error-codes.md) - [Table Filter](/table-filter.md) + - [TiDB Installation Packages](/binary-package.md) - [Schedule Replicas by Topology Labels](/schedule-replicas-by-topology-labels.md) - [URI Formats of External Storage Services](/external-storage-uri.md) - [TiDB Workload Repository](/workload-repository.md) @@ -1107,242 +976,4 @@ - [High Availability FAQs](/faq/high-availability-faq.md) - [High Reliability FAQs](/faq/high-reliability-faq.md) - [Backup and Restore FAQs](/faq/backup-and-restore-faq.md) -- Release Notes - - [All Releases](/releases/release-notes.md) - - [Release Timeline](/releases/release-timeline.md) - - [TiDB Versioning](/releases/versioning.md) - - [Release Support Policy](https://www.pingcap.com/tidb-release-support-policy/) - - [TiDB Installation Packages](/binary-package.md) - - v8.5 - - [8.5.5](/releases/release-8.5.5.md) - - [8.5.4](/releases/release-8.5.4.md) - - [8.5.3](/releases/release-8.5.3.md) - - [8.5.2](/releases/release-8.5.2.md) - - [8.5.1](/releases/release-8.5.1.md) - - [8.5.0](/releases/release-8.5.0.md) - - v8.4 - - [8.4.0-DMR](/releases/release-8.4.0.md) - - v8.3 - - [8.3.0-DMR](/releases/release-8.3.0.md) - - v8.2 - - [8.2.0-DMR](/releases/release-8.2.0.md) - - v8.1 - - [8.1.2](/releases/release-8.1.2.md) - - [8.1.1](/releases/release-8.1.1.md) - - [8.1.0](/releases/release-8.1.0.md) - - v8.0 - - [8.0.0-DMR](/releases/release-8.0.0.md) - - v7.6 - - [7.6.0-DMR](/releases/release-7.6.0.md) - - v7.5 - - [7.5.7](/releases/release-7.5.7.md) - - [7.5.6](/releases/release-7.5.6.md) - - [7.5.5](/releases/release-7.5.5.md) - - [7.5.4](/releases/release-7.5.4.md) - - 
[7.5.3](/releases/release-7.5.3.md) - - [7.5.2](/releases/release-7.5.2.md) - - [7.5.1](/releases/release-7.5.1.md) - - [7.5.0](/releases/release-7.5.0.md) - - v7.4 - - [7.4.0-DMR](/releases/release-7.4.0.md) - - v7.3 - - [7.3.0-DMR](/releases/release-7.3.0.md) - - v7.2 - - [7.2.0-DMR](/releases/release-7.2.0.md) - - v7.1 - - [7.1.6](/releases/release-7.1.6.md) - - [7.1.5](/releases/release-7.1.5.md) - - [7.1.4](/releases/release-7.1.4.md) - - [7.1.3](/releases/release-7.1.3.md) - - [7.1.2](/releases/release-7.1.2.md) - - [7.1.1](/releases/release-7.1.1.md) - - [7.1.0](/releases/release-7.1.0.md) - - v7.0 - - [7.0.0-DMR](/releases/release-7.0.0.md) - - v6.6 - - [6.6.0-DMR](/releases/release-6.6.0.md) - - v6.5 - - [6.5.12](/releases/release-6.5.12.md) - - [6.5.11](/releases/release-6.5.11.md) - - [6.5.10](/releases/release-6.5.10.md) - - [6.5.9](/releases/release-6.5.9.md) - - [6.5.8](/releases/release-6.5.8.md) - - [6.5.7](/releases/release-6.5.7.md) - - [6.5.6](/releases/release-6.5.6.md) - - [6.5.5](/releases/release-6.5.5.md) - - [6.5.4](/releases/release-6.5.4.md) - - [6.5.3](/releases/release-6.5.3.md) - - [6.5.2](/releases/release-6.5.2.md) - - [6.5.1](/releases/release-6.5.1.md) - - [6.5.0](/releases/release-6.5.0.md) - - v6.4 - - [6.4.0-DMR](/releases/release-6.4.0.md) - - v6.3 - - [6.3.0-DMR](/releases/release-6.3.0.md) - - v6.2 - - [6.2.0-DMR](/releases/release-6.2.0.md) - - v6.1 - - [6.1.7](/releases/release-6.1.7.md) - - [6.1.6](/releases/release-6.1.6.md) - - [6.1.5](/releases/release-6.1.5.md) - - [6.1.4](/releases/release-6.1.4.md) - - [6.1.3](/releases/release-6.1.3.md) - - [6.1.2](/releases/release-6.1.2.md) - - [6.1.1](/releases/release-6.1.1.md) - - [6.1.0](/releases/release-6.1.0.md) - - v6.0 - - [6.0.0-DMR](/releases/release-6.0.0-dmr.md) - - v5.4 - - [5.4.3](/releases/release-5.4.3.md) - - [5.4.2](/releases/release-5.4.2.md) - - [5.4.1](/releases/release-5.4.1.md) - - [5.4.0](/releases/release-5.4.0.md) - - v5.3 - - 
[5.3.4](/releases/release-5.3.4.md) - - [5.3.3](/releases/release-5.3.3.md) - - [5.3.2](/releases/release-5.3.2.md) - - [5.3.1](/releases/release-5.3.1.md) - - [5.3.0](/releases/release-5.3.0.md) - - v5.2 - - [5.2.4](/releases/release-5.2.4.md) - - [5.2.3](/releases/release-5.2.3.md) - - [5.2.2](/releases/release-5.2.2.md) - - [5.2.1](/releases/release-5.2.1.md) - - [5.2.0](/releases/release-5.2.0.md) - - v5.1 - - [5.1.5](/releases/release-5.1.5.md) - - [5.1.4](/releases/release-5.1.4.md) - - [5.1.3](/releases/release-5.1.3.md) - - [5.1.2](/releases/release-5.1.2.md) - - [5.1.1](/releases/release-5.1.1.md) - - [5.1.0](/releases/release-5.1.0.md) - - v5.0 - - [5.0.6](/releases/release-5.0.6.md) - - [5.0.5](/releases/release-5.0.5.md) - - [5.0.4](/releases/release-5.0.4.md) - - [5.0.3](/releases/release-5.0.3.md) - - [5.0.2](/releases/release-5.0.2.md) - - [5.0.1](/releases/release-5.0.1.md) - - [5.0 GA](/releases/release-5.0.0.md) - - [5.0.0-rc](/releases/release-5.0.0-rc.md) - - v4.0 - - [4.0.16](/releases/release-4.0.16.md) - - [4.0.15](/releases/release-4.0.15.md) - - [4.0.14](/releases/release-4.0.14.md) - - [4.0.13](/releases/release-4.0.13.md) - - [4.0.12](/releases/release-4.0.12.md) - - [4.0.11](/releases/release-4.0.11.md) - - [4.0.10](/releases/release-4.0.10.md) - - [4.0.9](/releases/release-4.0.9.md) - - [4.0.8](/releases/release-4.0.8.md) - - [4.0.7](/releases/release-4.0.7.md) - - [4.0.6](/releases/release-4.0.6.md) - - [4.0.5](/releases/release-4.0.5.md) - - [4.0.4](/releases/release-4.0.4.md) - - [4.0.3](/releases/release-4.0.3.md) - - [4.0.2](/releases/release-4.0.2.md) - - [4.0.1](/releases/release-4.0.1.md) - - [4.0 GA](/releases/release-4.0-ga.md) - - [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) - - [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) - - [4.0.0-rc](/releases/release-4.0.0-rc.md) - - [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) - - [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) - - [4.0.0-beta](/releases/release-4.0.0-beta.md) 
- - v3.1 - - [3.1.2](/releases/release-3.1.2.md) - - [3.1.1](/releases/release-3.1.1.md) - - [3.1.0 GA](/releases/release-3.1.0-ga.md) - - [3.1.0-rc](/releases/release-3.1.0-rc.md) - - [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) - - [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) - - [3.1.0-beta](/releases/release-3.1.0-beta.md) - - v3.0 - - [3.0.20](/releases/release-3.0.20.md) - - [3.0.19](/releases/release-3.0.19.md) - - [3.0.18](/releases/release-3.0.18.md) - - [3.0.17](/releases/release-3.0.17.md) - - [3.0.16](/releases/release-3.0.16.md) - - [3.0.15](/releases/release-3.0.15.md) - - [3.0.14](/releases/release-3.0.14.md) - - [3.0.13](/releases/release-3.0.13.md) - - [3.0.12](/releases/release-3.0.12.md) - - [3.0.11](/releases/release-3.0.11.md) - - [3.0.10](/releases/release-3.0.10.md) - - [3.0.9](/releases/release-3.0.9.md) - - [3.0.8](/releases/release-3.0.8.md) - - [3.0.7](/releases/release-3.0.7.md) - - [3.0.6](/releases/release-3.0.6.md) - - [3.0.5](/releases/release-3.0.5.md) - - [3.0.4](/releases/release-3.0.4.md) - - [3.0.3](/releases/release-3.0.3.md) - - [3.0.2](/releases/release-3.0.2.md) - - [3.0.1](/releases/release-3.0.1.md) - - [3.0 GA](/releases/release-3.0-ga.md) - - [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) - - [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) - - [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) - - [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) - - [3.0.0-beta](/releases/release-3.0-beta.md) - - v2.1 - - [2.1.19](/releases/release-2.1.19.md) - - [2.1.18](/releases/release-2.1.18.md) - - [2.1.17](/releases/release-2.1.17.md) - - [2.1.16](/releases/release-2.1.16.md) - - [2.1.15](/releases/release-2.1.15.md) - - [2.1.14](/releases/release-2.1.14.md) - - [2.1.13](/releases/release-2.1.13.md) - - [2.1.12](/releases/release-2.1.12.md) - - [2.1.11](/releases/release-2.1.11.md) - - [2.1.10](/releases/release-2.1.10.md) - - [2.1.9](/releases/release-2.1.9.md) - - [2.1.8](/releases/release-2.1.8.md) - - 
[2.1.7](/releases/release-2.1.7.md) - - [2.1.6](/releases/release-2.1.6.md) - - [2.1.5](/releases/release-2.1.5.md) - - [2.1.4](/releases/release-2.1.4.md) - - [2.1.3](/releases/release-2.1.3.md) - - [2.1.2](/releases/release-2.1.2.md) - - [2.1.1](/releases/release-2.1.1.md) - - [2.1 GA](/releases/release-2.1-ga.md) - - [2.1 RC5](/releases/release-2.1-rc.5.md) - - [2.1 RC4](/releases/release-2.1-rc.4.md) - - [2.1 RC3](/releases/release-2.1-rc.3.md) - - [2.1 RC2](/releases/release-2.1-rc.2.md) - - [2.1 RC1](/releases/release-2.1-rc.1.md) - - [2.1 Beta](/releases/release-2.1-beta.md) - - v2.0 - - [2.0.11](/releases/release-2.0.11.md) - - [2.0.10](/releases/release-2.0.10.md) - - [2.0.9](/releases/release-2.0.9.md) - - [2.0.8](/releases/release-2.0.8.md) - - [2.0.7](/releases/release-2.0.7.md) - - [2.0.6](/releases/release-2.0.6.md) - - [2.0.5](/releases/release-2.0.5.md) - - [2.0.4](/releases/release-2.0.4.md) - - [2.0.3](/releases/release-2.0.3.md) - - [2.0.2](/releases/release-2.0.2.md) - - [2.0.1](/releases/release-2.0.1.md) - - [2.0](/releases/release-2.0-ga.md) - - [2.0 RC5](/releases/release-2.0-rc.5.md) - - [2.0 RC4](/releases/release-2.0-rc.4.md) - - [2.0 RC3](/releases/release-2.0-rc.3.md) - - [2.0 RC1](/releases/release-2.0-rc.1.md) - - [1.1 Beta](/releases/release-1.1-beta.md) - - [1.1 Alpha](/releases/release-1.1-alpha.md) - - v1.0 - - [1.0.8](/releases/release-1.0.8.md) - - [1.0.7](/releases/release-1.0.7.md) - - [1.0.6](/releases/release-1.0.6.md) - - [1.0.5](/releases/release-1.0.5.md) - - [1.0.4](/releases/release-1.0.4.md) - - [1.0.3](/releases/release-1.0.3.md) - - [1.0.2](/releases/release-1.0.2.md) - - [1.0.1](/releases/release-1.0.1.md) - - [1.0](/releases/release-1.0-ga.md) - - [Pre-GA](/releases/release-pre-ga.md) - - [RC4](/releases/release-rc.4.md) - - [RC3](/releases/release-rc.3.md) - - [RC2](/releases/release-rc.2.md) - - [RC1](/releases/release-rc.1.md) - [Glossary](/glossary.md) diff --git a/ai/_index.md b/ai/_index.md new file mode 
100644 index 0000000000000..c2d36b2690fb7 --- /dev/null +++ b/ai/_index.md @@ -0,0 +1,77 @@ +--- +title: TiDB for AI +summary: Build modern AI applications with TiDB's integrated vector search, full-text search, and seamless Python SDK. +--- + +# TiDB for AI + +TiDB is a distributed SQL database designed for modern AI applications, offering integrated vector search, full-text search, and hybrid search capabilities. This document provides an overview of the AI features and tools available for building AI-powered applications with TiDB. + +## Quick Start + +Get up and running quickly with TiDB's AI capabilities. + +| Document | Description | +| --- | --- | +| [Get Started with Python](/ai/quickstart-via-python.md) | Build your first AI application with TiDB in minutes using Python. | +| [Get Started with SQL](/ai/quickstart-via-sql.md) | Quick start guide for vector search using SQL. | + +## Concepts + +Understand the foundational concepts behind AI-powered search in TiDB. + +| Document | Description | +| --- | --- | +| [Vector Search](/ai/concepts/vector-search-overview.md) | Comprehensive overview of vector search, including concepts, how it works, and use cases. | + +## Guides + +Step-by-step guides for building AI applications with TiDB using the [`pytidb`](https://github.com/pingcap/pytidb) SDK or SQL. + +| Document | Description | +| --- | --- | +| [Connect to TiDB](/ai/guides/connect.md) | Connect to TiDB Cloud or self-managed clusters using `pytidb`. | +| [Working with Tables](/ai/guides/tables.md) | Create, query, and manage tables with vector fields. | +| [Vector Search](/ai/guides/vector-search.md) | Perform semantic similarity searches using `pytidb`. | +| [Full-Text Search](/ai/guides/vector-search-full-text-search-python.md) | Keyword-based text search with BM25 ranking. | +| [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) | Combine vector and full-text search for better results. 
| +| [Image Search](/ai/guides/image-search.md) | Search images using multimodal embeddings. | +| [Auto Embedding](/ai/guides/auto-embedding.md) | Automatically generate embeddings on data insertion. | +| [Filtering](/ai/guides/filtering.md) | Filter search results with metadata conditions. | + +## Examples + +Complete code examples and demos showcasing TiDB's AI capabilities. + +| Document | Description | +| --- | --- | +| [Basic CRUD Operations](/ai/examples/basic-with-pytidb.md) | Fundamental table operations with `pytidb`. | +| [Vector Search](/ai/examples/vector-search-with-pytidb.md) | Semantic similarity search example. | +| [RAG Application](/ai/examples/rag-with-pytidb.md) | Build a Retrieval-Augmented Generation application. | +| [Image Search](/ai/examples/image-search-with-pytidb.md) | Multimodal image search with Jina AI embeddings. | +| [Conversational Memory](/ai/examples/memory-with-pytidb.md) | Persistent memory for AI agents and chatbots. | +| [Text-to-SQL](/ai/examples/text2sql-with-pytidb.md) | Convert natural language to SQL queries. | + +## Integrations + +Integrate TiDB with popular AI frameworks, embedding providers, and development tools. + +| Document | Description | +| --- | --- | +| [Integration Overview](/ai/integrations/vector-search-integration-overview.md) | Overview of all available integrations. | +| [Embedding Providers](/ai/integrations/vector-search-auto-embedding-overview.md#available-text-embedding-models) | Unified interface for OpenAI, Cohere, Jina AI, and more. | +| [LangChain](/ai/integrations/vector-search-integrate-with-langchain.md) | Use TiDB as a vector store with LangChain. | +| [LlamaIndex](/ai/integrations/vector-search-integrate-with-llamaindex.md) | Use TiDB as a vector store with LlamaIndex. | +| [MCP Server](/ai/integrations/tidb-mcp-server.md) | Connect TiDB to Claude Code, Cursor, and other AI-powered IDEs. | + +## Reference + +Technical reference documentation for TiDB's AI and vector search features. 
+ +| Document | Description | +| --- | --- | +| [Vector Data Types](/ai/reference/vector-search-data-types.md) | Vector column types and usage. | +| [Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) | Distance functions and vector operations. | +| [Vector Search Index](/ai/reference/vector-search-index.md) | Create and manage vector indexes for performance. | +| [Performance Tuning](/ai/reference/vector-search-improve-performance.md) | Optimize vector search performance. | +| [Limitations](/ai/reference/vector-search-limitations.md) | Current limitations and constraints. | diff --git a/vector-search/vector-search-overview.md b/ai/concepts/vector-search-overview.md similarity index 71% rename from vector-search/vector-search-overview.md rename to ai/concepts/vector-search-overview.md index f9e0bf3daf2fe..eaca016526a25 100644 --- a/vector-search/vector-search-overview.md +++ b/ai/concepts/vector-search-overview.md @@ -1,31 +1,17 @@ --- title: Vector Search Overview summary: Learn about Vector Search in TiDB. This feature provides an advanced search solution for performing semantic similarity searches across various data types, including documents, images, audio, and video. +aliases: ['/tidb/stable/vector-search-overview/','/tidb/dev/vector-search-overview/','/tidbcloud/vector-search-overview/'] --- # Vector Search Overview Vector search offers a powerful solution for semantic similarity searches across diverse data types, such as documents, images, audio, and video. It allows developers to leverage their MySQL expertise to build scalable applications enriched with generative AI capabilities, simplifying the integration of advanced search functionality. - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
- - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). ## Concepts @@ -43,7 +29,7 @@ A vector embedding, also known as an embedding, is a sequence of numbers that re Vector embeddings are essential in machine learning and serve as the foundation for semantic similarity searches. -TiDB introduces [Vector data types](/vector-search/vector-search-data-types.md) and [Vector search index](/vector-search/vector-search-index.md) designed to optimize the storage and retrieval of vector embeddings, enhancing their use in AI applications. You can store vector embeddings in TiDB and perform vector search queries to find the most relevant data using these data types. 
+TiDB introduces [Vector data types](/ai/reference/vector-search-data-types.md) and [Vector search index](/ai/reference/vector-search-index.md) designed to optimize the storage and retrieval of vector embeddings, enhancing their use in AI applications. You can store vector embeddings in TiDB and perform vector search queries to find the most relevant data using these data types. ### Embedding model @@ -57,7 +43,7 @@ To learn how to generate vector embeddings for your specific data types, refer t After converting raw data into vector embeddings and storing them in TiDB, your application can execute vector search queries to find the data most semantically or contextually relevant to a user's query. -TiDB vector search identifies the top-k nearest neighbor (KNN) vectors by using a [distance function](/vector-search/vector-search-functions-and-operators.md) to calculate the distance between the given vector and vectors stored in the database. The vectors closest to the given vector in the query represent the most similar data in meaning. +TiDB vector search identifies the top-k nearest neighbor (KNN) vectors by using a [distance function](/ai/reference/vector-search-functions-and-operators.md) to calculate the distance between the given vector and vectors stored in the database. The vectors closest to the given vector in the query represent the most similar data in meaning. 
![The Schematic TiDB Vector Search](/media/vector-search/embedding-search.png) @@ -84,5 +70,5 @@ A recommendation engine is a system that proactively suggests content, products, To get started with TiDB Vector Search, see the following documents: -- [Get started with vector search using Python](/vector-search/vector-search-get-started-using-python.md) -- [Get started with vector search using SQL](/vector-search/vector-search-get-started-using-sql.md) +- [Get started with vector search using Python](/ai/quickstart-via-python.md) +- [Get started with vector search using SQL](/ai/quickstart-via-sql.md) diff --git a/ai/examples/auto-embedding-with-pytidb.md b/ai/examples/auto-embedding-with-pytidb.md new file mode 100644 index 0000000000000..3f963ff0eac4c --- /dev/null +++ b/ai/examples/auto-embedding-with-pytidb.md @@ -0,0 +1,87 @@ +--- +title: Auto Embedding Example +summary: Automatically generate embeddings for your text data using built-in embedding models. +--- + +# Auto Embedding Example + +This example shows how to use the [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) feature with the [pytidb](https://github.com/pingcap/pytidb) client. + +1. Connect to TiDB using the `pytidb` client. +2. Define a table with a VectorField configured for automatic embedding. +3. Insert plain text data: embeddings are populated automatically in the background. +4. Run vector searches with natural-language queries: embeddings are generated transparently. + +## Prerequisites + +Before you begin, ensure you have the following: + +- **Python (>=3.10)**: Install [Python](https://www.python.org/downloads/) 3.10 or a later version. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). + +## How to run + +### Step 1. Clone the `pytidb` repository + +```bash +git clone https://github.com/pingcap/pytidb.git +cd pytidb/examples/auto_embedding/ +``` + +### Step 2. 
Install the required packages + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r reqs.txt +``` + +### Step 3. Set environment variables + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. +3. Set environment variables according to the connection parameters as follows: + +```bash +cat > .env <=3.10)**: Install [Python](https://www.python.org/downloads/) 3.10 or a later version. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). + +## How to run + +### Step 1. Clone the `pytidb` repository + +```bash +git clone https://github.com/pingcap/pytidb.git +cd pytidb/examples/basic/ +``` + +### Step 2. Install the required packages + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r reqs.txt +``` + +### Step 3. Set environment variables + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. +3. Set environment variables according to the connection parameters as follows: + +```bash +cat > .env < + E-commerce product search with full-text search +

E-commerce product search with full-text search

+

+ +## Prerequisites + +Before you begin, ensure you have the following: + +- **Python (>=3.10)**: Install [Python](https://www.python.org/downloads/) 3.10 or a later version. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). + +## How to run + +### Step 1. Clone the `pytidb` repository + +[`pytidb`](https://github.com/pingcap/pytidb) is the official Python SDK for TiDB, designed to help developers build AI applications efficiently. + +```bash +git clone https://github.com/pingcap/pytidb.git +cd pytidb/examples/fulltext_search/ +``` + +### Step 2. Install the required packages and set up the environment + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r reqs.txt +``` + +### Step 3. Set environment variables + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. +3. Set environment variables according to the connection parameters as follows: + +```bash +cat > .env < + TiDB Hybrid Search Demo +

TiDB Hybrid Search Demo

+

+ +## Prerequisites + +Before you begin, ensure you have the following: + +- **Python (>=3.10)**: Install [Python](https://www.python.org/downloads/) 3.10 or a later version. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). +- **OpenAI API key**: Get an OpenAI API key from [OpenAI](https://platform.openai.com/api-keys). + +> **Note** +> +> Currently, full-text search is available only in the following product option and regions: +> +> - TiDB Cloud Starter: Frankfurt (`eu-central-1`), Singapore (`ap-southeast-1`) + +## How to run + +### Step 1. Clone the `pytidb` repository + +[pytidb](https://github.com/pingcap/pytidb) is the official Python SDK for TiDB, designed to help developers build AI applications efficiently. + +```bash +git clone https://github.com/pingcap/pytidb.git +cd pytidb/examples/hybrid_search +``` + +### Step 2. Install the required packages and set up the environment + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r reqs.txt +``` + +### Step 3. Set environment variables + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. +3. Set environment variables according to the connection parameters as follows: + +```bash +cat > .env < +EOF +``` + +### Step 4. Run the demo + +### Option 1. Run the Streamlit app + +If you want to check the demo with a web UI, you can run the following command: + +```bash +streamlit run app.py +``` + +Open your browser and visit `http://localhost:8501`. + +### Option 2. 
Run the demo script + +If you want to check the demo with a script, you can run the following command: + +```bash +python example.py +``` + +Expected output: + +``` +=== CONNECT TO TIDB === +Connected to TiDB. + +=== CREATE TABLE === +Table created. + +=== INSERT SAMPLE DATA === +Inserted 3 rows. + +=== PERFORM HYBRID SEARCH === +Search results: +[ + { + "_distance": 0.4740166257687124, + "_match_score": 1.6804268, + "_score": 0.03278688524590164, + "id": 60013, + "text": "TiDB is a distributed database that supports OLTP, OLAP, HTAP and AI workloads." + }, + { + "_distance": 0.6428459116216618, + "_match_score": 0.78427225, + "_score": 0.03200204813108039, + "id": 60015, + "text": "LlamaIndex is a Python library for building AI-powered applications." + }, + { + "_distance": 0.641581407158715, + "_match_score": null, + "_score": 0.016129032258064516, + "id": 60014, + "text": "PyTiDB is a Python library for developers to connect to TiDB." + } +] +``` + +## Related resources + +- **Source Code**: [View on GitHub](https://github.com/pingcap/pytidb/tree/main/examples/hybrid_search) \ No newline at end of file diff --git a/ai/examples/image-search-with-pytidb.md b/ai/examples/image-search-with-pytidb.md new file mode 100644 index 0000000000000..042b09fc95177 --- /dev/null +++ b/ai/examples/image-search-with-pytidb.md @@ -0,0 +1,102 @@ +--- +title: Image Search Example +summary: Build an image search application using multimodal embeddings for both text-to-image and image-to-image search. +--- + +# Image Search Example + +This example shows how to build an image search app by combining TiDB vector search capabilities with multimodal embedding models. + +With just a few lines of code, you can create a search system that understands both text and images. 
+ +- **Text-to-image search**: Find pet photos by describing what you want in natural language, such as "fluffy orange cat" +- **Image-to-image search**: Upload a photo to find visually similar pets by breed, color, pose, and more + +

+ PyTiDB Image Search Demo +

Pet image search via multimodal embeddings

+

+ +## Prerequisites + +Before you begin, ensure you have the following: + +- **Python (>=3.10)**: Install [Python](https://www.python.org/downloads/) 3.10 or a later version. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). +- **Jina AI API key**: You can get a free API key from [Jina AI Embeddings](https://jina.ai/embeddings/). + +## How to run + +### Step 1. Clone the `pytidb` repository + +[`pytidb`](https://github.com/pingcap/pytidb) is the official Python SDK for TiDB, designed to help developers build AI applications efficiently. + +```bash +git clone https://github.com/pingcap/pytidb.git +cd pytidb/examples/image_search/ +``` + +### Step 2. Install the required packages + +```bash +python -m venv .venv +source .venv/bin/activate # Windows: .venv\Scripts\activate +pip install -r reqs.txt +``` + +### Step 3. Set environment variables + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. +3. Set environment variables according to the connection parameters as follows: + +```bash +cat > .env < + AI Agent with memory powered by TiDB +

AI Agent with memory powered by TiDB

+

+ +## Prerequisites + +Before you begin, ensure you have the following: + +- **Python (>=3.10)**: Install [Python](https://www.python.org/downloads/) 3.10 or a later version. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). +- **OpenAI API key**: Get an OpenAI API key from [OpenAI](https://platform.openai.com/api-keys). + +## How to run + +### Step 1. Clone the `pytidb` repository + +[`pytidb`](https://github.com/pingcap/pytidb) is the official Python SDK for TiDB, designed to help developers build AI applications efficiently. + +```bash +git clone https://github.com/pingcap/pytidb.git +cd pytidb/examples/memory/ +``` + +### Step 2. Install the required packages + +```bash +python -m venv .venv +source .venv/bin/activate # Windows: .venv\Scripts\activate +pip install -r reqs.txt +``` + +### Step 3. Set environment variables + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. +3. Set environment variables according to the connection parameters as follows: + +```bash +cat > .env < + RAG application built with PyTiDB +

RAG application built with PyTiDB

+

+ +## Prerequisites + +Before you begin, ensure you have the following: + +- **Python (>=3.10)**: Install [Python](https://www.python.org/downloads/) 3.10 or a later version. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). +- **Ollama**: Install from [Ollama](https://ollama.com/download). + +## How to run + +### Step 1. Prepare the inference API + +Pull the embedding and LLM models with the Ollama CLI: + +```bash +ollama pull mxbai-embed-large +ollama pull gemma3:4b +ollama run gemma3:4b +``` + +Verify that the `/embed` and `/generate` endpoints are running: + +```bash +curl http://localhost:11434/api/embed -d '{ + "model": "mxbai-embed-large", + "input": "Llamas are members of the camelid family" +}' +``` + +```bash +curl http://localhost:11434/api/generate -d '{ + "model": "gemma3:4b", + "prompt": "Hello, Who are you?" +}' +``` + +### Step 2. Clone the repository + +```bash +git clone https://github.com/pingcap/pytidb.git +cd pytidb/examples/rag/ +``` + +### Step 3. Install the required packages and set up the environment + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r reqs.txt +``` + +### Step 4. Set environment variables + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. +3. Set environment variables according to the connection parameters as follows: + +```bash +cat > .env <=3.10)**: Install [Python](https://www.python.org/downloads/) 3.10 or a later version. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). +- **OpenAI API key**: Get an OpenAI API key from [OpenAI](https://platform.openai.com/api-keys). 
+ +## How to run + +### Step 1. Clone the `pytidb` repository + +```bash +git clone https://github.com/pingcap/pytidb.git +cd pytidb/examples/text2sql/ +``` + +### Step 2. Install the required packages + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r reqs.txt +``` + +### Step 3. Run the Streamlit app + +```bash +streamlit run app.py +``` + +### Step 4. Use the app + +Open your browser and visit `http://localhost:8501`. + +1. Enter your OpenAI API key in the left sidebar +2. Enter the TiDB connection string in the left sidebar, for example: `mysql+pymysql://root@localhost:4000/test` + +## Related resources + +- **Source Code**: [View on GitHub](https://github.com/pingcap/pytidb/tree/main/examples/text2sql) \ No newline at end of file diff --git a/ai/examples/vector-search-with-pytidb.md b/ai/examples/vector-search-with-pytidb.md new file mode 100644 index 0000000000000..a9d76e2684fc6 --- /dev/null +++ b/ai/examples/vector-search-with-pytidb.md @@ -0,0 +1,85 @@ +--- +title: Vector Search Example +summary: Implement semantic search using vector embeddings to find similar content. +--- + +# Vector Search Example + +This example demonstrates how to build a semantic search application using TiDB and local embedding models. It uses vector search to find similar items by meaning (not just keywords). + +The application uses [Ollama](https://ollama.com/download) for local embedding generation, [Streamlit](https://streamlit.io/) for the web UI, and [`pytidb`](https://github.com/pingcap/pytidb) (the official Python SDK for TiDB) to build the RAG pipeline. + +

+ Semantic search with vector embeddings +

Semantic search with vector embeddings

+

+ +## Prerequisites + +Before you begin, ensure you have the following: + +- **Python (>=3.10)**: Install [Python](https://www.python.org/downloads/) 3.10 or a later version. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). +- **Ollama**: Install from [Ollama](https://ollama.com/download). + +## How to run + +### Step 1. Start the embedding service with Ollama + +Pull the embedding model: + +```bash +ollama pull mxbai-embed-large +``` + +Verify that the embedding service is running: + +```bash +curl http://localhost:11434/api/embed -d '{ + "model": "mxbai-embed-large", + "input": "Llamas are members of the camelid family" +}' +``` + +### Step 2. Clone the repository + +```bash +git clone https://github.com/pingcap/pytidb.git +cd pytidb/examples/vector_search/ +``` + +### Step 3. Install the required packages and set up the environment + +```bash +python -m venv .venv +source .venv/bin/activate +pip install -r reqs.txt +``` + +### Step 4. Set environment variables + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. +3. Set environment variables according to the connection parameters as follows: + + ```bash + cat > .env < **Note:** +> +> For a complete example of auto embedding, see [Auto Embedding Example](/ai/examples/auto-embedding-with-pytidb.md). + +## Basic usage + +This document uses a TiDB Cloud hosted embedding model for demonstration. For a full list of supported providers, see [Auto Embedding Overview](/ai/integrations/vector-search-auto-embedding-overview.md#available-text-embedding-models). + +### Step 1. Define an embedding function + +Define an embedding function to generate vector embeddings for your text data. 
+ +```python +from pytidb.embeddings import EmbeddingFunction + +embed_func = EmbeddingFunction( + model_name="tidbcloud_free/amazon/titan-embed-text-v2", +) +``` + +### Step 2. Create a table and a vector field + +Use `embed_func.VectorField()` to create a vector field in the table schema. + +To enable auto embedding, set `source_field` to the field you want to embed. + +```python hl_lines="7" +from pytidb.schema import TableModel, Field +from pytidb.datatype import TEXT + +class Chunk(TableModel): + id: int = Field(primary_key=True) + text: str = Field(sa_type=TEXT) + text_vec: list[float] = embed_func.VectorField(source_field="text") + +table = client.create_table(schema=Chunk, if_exists="overwrite") +``` + +You don't need to specify the `dimensions` parameter, because the embedding model automatically determines it. + +However, you can set the `dimensions` parameter to override the default dimension. + +### Step 3. Insert some sample data + +Insert some sample data into the table. + +```python +table.bulk_insert([ + Chunk(text="TiDB is a distributed database that supports OLTP, OLAP, HTAP and AI workloads."), + Chunk(text="PyTiDB is a Python library for developers to connect to TiDB."), + Chunk(text="LlamaIndex is a Python library for building AI-powered applications."), +]) +``` + +When inserting data, the `text_vec` field is automatically populated with embeddings generated from `text`. + +### Step 4. Perform a vector search + +You can pass query text directly to the `search()` method. The query text will be embedded automatically and then used for vector search. + +```python +table.search("HTAP database").limit(3).to_list() +``` diff --git a/ai/guides/connect.md b/ai/guides/connect.md new file mode 100644 index 0000000000000..9d2e264f4ca16 --- /dev/null +++ b/ai/guides/connect.md @@ -0,0 +1,145 @@ +--- +title: Connect to TiDB +summary: Learn how to connect to a TiDB database using the `pytidb` client. 
+--- + +# Connect to TiDB + +This guide shows how to connect to a TiDB database using the `pytidb` client. + +## Install the dependencies + +[`pytidb`](https://github.com/pingcap/pytidb) is a Python client built on [SQLAlchemy](https://sqlalchemy.org/). It provides a series of high-level APIs to help you store and search vector embeddings without writing raw SQL. + +To install the Python client, run the following command: + +```bash +pip install pytidb +``` + +## Connect with connection parameters + +Choose the steps based on your TiDB deployment type: + + +
+ +You can [create a TiDB Cloud Starter cluster](https://tidbcloud.com/free-trial/), and then get the connection parameters from the web console as follows: + +1. Navigate to the [Clusters page](https://tidbcloud.com/clusters), and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. +3. Copy the connection parameters to your code or environment variables. + +Example code: + +```python title="main.py" +from pytidb import TiDBClient + +db = TiDBClient.connect( + host="{gateway-region}.prod.aws.tidbcloud.com", + port=4000, + username="{prefix}.root", + password="{password}", + database="test", +) +``` + +> **Note:** +> +> For TiDB Cloud Starter, [TLS connection to the database](https://docs.pingcap.com/tidbcloud/secure-connections-to-starter-clusters/) is required when using a public endpoint. The `pytidb` client **automatically** enables TLS for TiDB Cloud Starter clusters. + +
+
+ +Follow [Quick Start with TiDB Self-Managed](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb/#deploy-a-local-test-cluster) to deploy a TiDB cluster for testing. + +Example code: + +```python title="main.py" +from pytidb import TiDBClient + +db = TiDBClient.connect( + host="{tidb_server_host}", + port=4000, + username="root", + password="{password}", + database="test", +) +``` + +> **Note:** +> +> If you are using `tiup playground` to deploy a TiDB cluster for testing, the default host is `127.0.0.1` and the default password is empty. + +
+
+ +Once connected, you can use the `db` object to operate tables, query data, and more. + +## Connect with connection string + +If you prefer to use a connection string (database URL), you can follow the format based on your deployment type: + + +
+ +You can [create a TiDB Cloud Starter cluster](https://tidbcloud.com/free-trial/), and then get the connection parameters from the web console as follows: + +1. Navigate to the [Clusters page](https://tidbcloud.com/clusters), and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with the connection parameters listed. +3. Copy the connection parameters and construct a connection string in the following format: + +```python title="main.py" +from pytidb import TiDBClient + +db = TiDBClient.connect( + database_url="mysql+pymysql://{USERNAME}:{PASSWORD}@{HOST}:{PORT}/{DATABASE}?ssl_verify_cert=true&ssl_verify_identity=true", +) +``` + +> **Note:** +> +> For TiDB Cloud Starter, [TLS connection to the database](https://docs.pingcap.com/tidbcloud/secure-connections-to-starter-clusters/) is required when using a public endpoint, so you need to set `ssl_verify_cert=true&ssl_verify_identity=true` in the connection string. + +
+
+ +You can follow the format below to construct the connection string: + +```python title="main.py" +from pytidb import TiDBClient + +db = TiDBClient.connect( + database_url="mysql+pymysql://{USERNAME}:{PASSWORD}@{HOST}:{PORT}/{DATABASE}", +) +``` + +> **Note:** +> +> If you are using `tiup playground` to deploy a TiDB cluster for testing, the connection string is: +> +> ``` +> mysql+pymysql://root:@127.0.0.1:4000/test +> ``` + +
+
+ +## Connect with SQLAlchemy DB engine + +If your application already has a SQLAlchemy database engine, you can reuse it via the `db_engine` parameter: + +```python title="main.py" +from pytidb import TiDBClient + +db = TiDBClient(db_engine=db_engine) +``` + +## Next steps + +After connecting to your TiDB database, you can explore the following guides to learn how to work with your data: + +- [Working with Tables](/ai/guides/tables.md): Learn how to define and manage tables in TiDB. +- [Vector Search](/ai/guides/vector-search.md): Perform semantic search using vector embeddings. +- [Full-Text Search](/ai/guides/vector-search-full-text-search-python.md): Retrieve documents using keyword-based search. +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md): Combine vector and full-text search for more relevant results. diff --git a/ai/guides/filtering.md b/ai/guides/filtering.md new file mode 100644 index 0000000000000..b33f2b47933f3 --- /dev/null +++ b/ai/guides/filtering.md @@ -0,0 +1,190 @@ +--- +title: Filtering +summary: Learn how to use filtering in your application. +--- + +# Filtering + +As a relational database, TiDB supports a rich set of [SQL operators](https://docs.pingcap.com/tidbcloud/operators/) and flexible combinations of filtering conditions for precise queries. + +## Overview + +You can filter on both scalar fields and JSON fields. Filtering on JSON fields is often used for [metadata filtering](/ai/guides/vector-search.md#metadata-filtering) in vector search. + +[`pytidb`](https://github.com/pingcap/pytidb) is the official Python SDK for TiDB, designed to help developers build AI applications efficiently. + +When using `pytidb`, you can apply filtering by passing the **filters** parameter to the `table.query()`, `table.delete()`, `table.update()`, and `table.search()` methods. + +The **filters** parameter supports two formats: [Dictionary filters](#dictionary-filters) and [SQL string filters](#sql-string-filters). 
+
+## Dictionary filters
+
+`pytidb` lets you define filter conditions using a Python dictionary with operators as the **filters** parameter.
+
+The dictionary structure of **filters** is as follows:
+
+```python
+{
+    "<key>": {
+        "<operator>": <value>
+    },
+    ...
+}
+```
+
+- `<key>`: The key can be a column name, a JSON path expression to access a JSON field (see [Metadata filtering](/ai/guides/vector-search.md#metadata-filtering)), or a [logical operator](#logical-operators).
+- `<operator>`: The operator can be a [compare operator](#compare-operators) or an [inclusion operator](#inclusion-operators).
+- `<value>`: The value can be a scalar value or an array, depending on the operator.
+
+**Example: Filter records where `created_at` is greater than 2024-01-01**
+
+```python
+table.query({
+    # The `created_at` is a scalar field with DATETIME type
+    "created_at": {
+        "$gt": "2024-01-01"
+    }
+})
+```
+
+**Example: Filter records where `meta.category` is in the array ["tech", "science"]**
+
+```python
+results = (
+    table.search("some query", search_type="vector")
+    .filter({
+        # The `meta` is a JSON field, and its value is a JSON object like {"category": "tech"}
+        "meta.category": {
+            "$in": ["tech", "science"]
+        }
+    })
+    .limit(10)
+    .to_list()
+)
+```
+
+### Compare operators
+
+You can use the following comparison operators to filter records:
+
+| Operator | Description |
+|----------|-----------------------------------|
+| `$eq` | Equal to value |
+| `$ne` | Not equal to value |
+| `$gt` | Greater than value |
+| `$gte` | Greater than or equal to value |
+| `$lt` | Less than value |
+| `$lte` | Less than or equal to value |
+
+**Example: Filter records where `user_id` equals 1**
+
+```python
+{
+    "user_id": {
+        "$eq": 1
+    }
+}
+```
+
+You can omit the `$eq` operator. The following filter is equivalent to the preceding one:
+
+```python
+{
+    "user_id": 1
+}
+```
+
+### Inclusion operators
+
+You can use the following inclusion operators to filter records:
+
+| Operator | Description |
+|----------|-----------------------------------|
+| `$in` | In array (string, int, or float) |
+| `$nin` | Not in array (string, int, or float) |
+
+**Example: Filter records where `category` is in the array ["tech", "science"]**
+
+```python
+{
+    "category": {
+        "$in": ["tech", "science"]
+    }
+}
+```
+
+### Logical operators
+
+You can use the logical operators `$and` and `$or` to combine multiple filters.
+
+| Operator | Description |
+|----------|-----------------------------------------------------|
+| `$and` | Returns results that match **all** filters in the list |
+| `$or` | Returns results that match **any** filter in the list |
+
+**Syntax for `$and` or `$or`:**
+
+```python
+{
+    "$and|$or": [
+        {
+            "field_name": {
+                <operator>: <value>
+            }
+        },
+        {
+            "field_name": {
+                <operator>: <value>
+            }
+        }
+        ...
+    ]
+}
+```
+
+**Example: Using `$and` to combine multiple filters**
+
+```python
+{
+    "$and": [
+        {
+            "created_at": {
+                "$gt": "2024-01-01"
+            }
+        },
+        {
+            "meta.category": {
+                "$in": ["tech", "science"]
+            }
+        }
+    ]
+}
+```
+
+## SQL string filters
+
+You can also use a SQL string as `filters`. The string must be a valid SQL `WHERE` clause (without the `WHERE` keyword) in the TiDB SQL syntax.
+
+**Example: Filter records where `created_at` is greater than 2024-01-01**
+
+```python
+results = table.query(
+    filters="created_at > '2024-01-01'",
+    limit=10
+).to_list()
+```
+
+**Example: Filter records where the JSON field `meta.category` equals 'tech'**
+
+```python
+results = table.query(
+    filters="meta->>'$.category' = 'tech'",
+    limit=10
+).to_list()
+```
+
+You can combine multiple conditions using `AND`, `OR`, and parentheses, and use any TiDB-supported [SQL operators](https://docs.pingcap.com/tidbcloud/operators/).
+
+> **Warning:**
+>
+> When using SQL string filters with dynamic user input, always validate the input to prevent [SQL injection](https://en.wikipedia.org/wiki/SQL_injection) vulnerabilities.
diff --git a/ai/guides/image-search.md b/ai/guides/image-search.md
new file mode 100644
index 0000000000000..abe6518341e0c
--- /dev/null
+++ b/ai/guides/image-search.md
@@ -0,0 +1,111 @@
+---
+title: Image Search
+summary: Learn how to use image search in your application.
+---
+
+# Image Search
+
+**Image search** helps you find similar images by comparing their visual content, not just text or metadata. This feature is useful for e-commerce, content moderation, digital asset management, and any scenario where you need to search for or deduplicate images based on appearance.
+
+TiDB enables image search through **vector search**. With automatic embedding, you can generate image embeddings from image URLs, PIL images, or keyword text using a multimodal embedding model. TiDB then searches for similar vectors at scale.
+
+> **Note:**
+>
+> For a complete example of image search, see [Image Search Example](/ai/examples/image-search-with-pytidb.md).
+
+## Basic usage
+
+### Step 1. Define an embedding function
+
+To generate image embeddings, you need an embedding model that accepts image input.
+
+For demonstration, you can use the multimodal embedding model of Jina AI.
+
+Go to [Jina AI](https://jina.ai/embeddings) to create an API key, and then initialize the embedding function as follows:
+
+```python hl_lines="7"
+from pytidb.embeddings import EmbeddingFunction
+
+image_embed = EmbeddingFunction(
+    # Or another provider/model that supports multimodal input
+    model_name="jina_ai/jina-embeddings-v4",
+    api_key="{your-jina-api-key}",
+    multimodal=True,
+)
+```
+
+### Step 2. Create a table and vector field
+
+Use `VectorField()` to define a vector field for storing image embeddings. Set the `source_field` parameter to specify the field that stores image URLs.
+ +```python +from pytidb.schema import TableModel, Field + +class ImageItem(TableModel): + __tablename__ = "image_items" + id: int = Field(primary_key=True) + image_uri: str = Field() + image_vec: list[float] = image_embed.VectorField( + source_field="image_uri" + ) + +table = client.create_table(schema=ImageItem, if_exists="overwrite") +``` + +### Step 3. Insert image data + +When you insert data, the `image_vec` field is automatically populated with an embedding generated from `image_uri`. + +```python +table.bulk_insert([ + ImageItem(image_uri="https://example.com/image1.jpg"), + ImageItem(image_uri="https://example.com/image2.jpg"), + ImageItem(image_uri="https://example.com/image3.jpg"), +]) +``` + +### Step 4. Perform image search + +Image search is a type of vector search. With automatic embedding, you can provide an image URL, a PIL image, or keyword text directly, and each input is converted into an embedding for similarity matching. + +#### Option 1: Search by image URL + +Search for similar images by providing an image URL: + +```python +results = table.search("https://example.com/query.jpg").limit(3).to_list() +``` + +The client converts the image URL into a vector. TiDB then returns the most similar images by comparing vectors. + +#### Option 2: Search by PIL image + +You can also search for similar images by providing an image file or bytes: + +```python +from PIL import Image + +image = Image.open("/path/to/query.jpg") + +results = table.search(image).limit(3).to_list() +``` + +The client converts the PIL image object to a Base64 string before sending it to the embedding model. + +#### Option 3: Search by keyword text + +You can also search for similar images by providing keyword text. + +For example, if you are working on a pet image dataset, you can search by keywords such as "orange tabby cat" or "golden retriever puppy" to find similar images. 
+ +```python +results = table.search("orange tabby cat").limit(3).to_list() +``` + +Then, the multimodal embedding model converts the keyword text into an embedding that captures its semantic meaning, and TiDB performs a vector search to find images with embeddings most similar to that keyword embedding. + +## See also + +- [Automatic embedding guide](/ai/guides/auto-embedding.md) +- [Vector search guide](/ai/concepts/vector-search-overview.md) +- [Image Search Example](/ai/examples/image-search-with-pytidb.md) diff --git a/ai/guides/join-queries.md b/ai/guides/join-queries.md new file mode 100644 index 0000000000000..b95974e9dfd68 --- /dev/null +++ b/ai/guides/join-queries.md @@ -0,0 +1,124 @@ +--- +title: Multiple Table Joins +summary: Learn how to use multiple table joins in your application. +--- + +# Multiple Table Joins + +As a relational database, TiDB lets you store diverse data in tables with different structures (for example, `chunks`, `documents`, `users`, `chats`) in a single database. You can also use joins to combine data from multiple tables and perform complex queries. + +## Basic Usage + +### Step 1. Create tables and insert sample data + + +
+ +Assuming you have already [connected to TiDB](/ai/guides/connect.md) using `TiDBClient`: + +Create a `documents` table and insert some sample data: + +```python +from pytidb import Session +from pytidb.schema import TableModel, Field +from pytidb.sql import select + +class Document(TableModel): + __tablename__ = "documents" + id: int = Field(primary_key=True) + title: str = Field(max_length=255) + +client.create_table(schema=Document, if_exists="overwrite") +client.table("documents").truncate() +client.table("documents").bulk_insert([ + Document(id=1, title="The Power of Positive Thinking"), + Document(id=2, title="The Happiness Advantage"), + Document(id=3, title="The Art of Happiness"), +]) +``` + +Create a `chunks` table and insert some sample data: + +```python +class Chunk(TableModel): + __tablename__ = "chunks" + id: int = Field(primary_key=True) + text: str = Field(max_length=255) + document_id: int = Field(foreign_key="documents.id") + +client.create_table(schema=Chunk, if_exists="overwrite") +client.table("chunks").truncate() +client.table("chunks").bulk_insert([ + Chunk(id=1, text="Positive thinking can change your life", document_id=1), + Chunk(id=2, text="Happiness leads to success", document_id=2), + Chunk(id=3, text="Finding joy in everyday moments", document_id=3), +]) +``` + +
+
+ +Create a `documents` table and insert some sample data: + +```sql +CREATE TABLE documents ( + id INT PRIMARY KEY, + title VARCHAR(255) NOT NULL +); + +INSERT INTO documents (id, title) VALUES + (1, 'The Power of Positive Thinking'), + (2, 'The Happiness Advantage'), + (3, 'The Art of Happiness'); +``` + +Create a `chunks` table and insert some sample data: + +```sql +CREATE TABLE chunks ( + id INT PRIMARY KEY, + text VARCHAR(255) NOT NULL, + document_id INT NOT NULL, + FOREIGN KEY (document_id) REFERENCES documents(id) +); + +INSERT INTO chunks (id, text, document_id) VALUES + (1, 'Positive thinking can change your life', 1), + (2, 'Happiness leads to success', 2), + (3, 'Finding joy in everyday moments', 3); +``` + +
+
+ +### Step 2. Perform a join query + + +
+ +```python +with Session(client.db_engine) as db_session: + query = ( + select(Chunk) + .join(Document, Chunk.document_id == Document.id) + .where(Document.title == "The Power of Positive Thinking") + ) + chunks = db_session.exec(query).all() + +[(c.id, c.text, c.document_id) for c in chunks] +``` + +
+
+ +Perform a join query to combine data from the `chunks` and `documents` tables: + +```sql +SELECT c.id, c.text, c.document_id +FROM chunks c +JOIN documents d ON c.document_id = d.id +WHERE d.title = 'The Power of Positive Thinking'; +``` + +
+
\ No newline at end of file diff --git a/ai/guides/raw-queries.md b/ai/guides/raw-queries.md new file mode 100644 index 0000000000000..c8105073b7874 --- /dev/null +++ b/ai/guides/raw-queries.md @@ -0,0 +1,89 @@ +--- +title: Raw Queries +summary: Learn how to use raw queries in your application. +--- + +# Raw Queries + +This guide describes how to run raw SQL queries in your application. + +## Operate data with raw SQL + +Use the `client.execute()` method to execute `INSERT`, `UPDATE`, `DELETE`, and other data-manipulation statements. + +```python +client.execute("INSERT INTO chunks(text, user_id) VALUES ('sample text', 5)") +``` + +### SQL injection prevention + +Both the `execute()` and `query()` methods support the **Parameterized SQL** feature, which helps you avoid [SQL injection](https://en.wikipedia.org/wiki/SQL_injection) while building dynamic SQL statements. + +```python +client.execute( + "INSERT INTO chunks(text, user_id) VALUES (:text, :user_id)", + { + "text": "sample text", + "user_id": 6, + }, +) +``` + +## Query data with raw SQL + +Use the `client.query()` method to execute `SELECT`, `SHOW`, and other query statements. + +### Output query result + +The `client.query()` method will return a `SQLQueryResult` instance with some helper methods: + +- `to_pydantic()` +- `to_list()` +- `to_pandas()` +- `to_rows()` +- `scalar()` + +#### As Pydantic model + +The `to_pydantic()` method returns a list of Pydantic models. + +```python +client.query("SELECT id, text, user_id FROM chunks").to_pydantic() +``` + +#### As SQLAlchemy result rows + +The `to_rows()` method returns a list of tuples, where each tuple represents one row. + +```python +client.query("SHOW TABLES;").to_rows() +``` + +#### As a list of dictionaries + +The `to_list()` method converts the query result to a list of dictionaries. 
+ +```python +client.query( + "SELECT id, text, user_id FROM chunks WHERE user_id = :user_id", + { + "user_id": 3 + } +).to_list() +``` + +#### As pandas DataFrame + +The `to_pandas()` method converts the query result to a `pandas.DataFrame`, which is displayed in a human-friendly format within the notebook: + +```python +client.query("SELECT id, text, user_id FROM chunks").to_pandas() +``` + +#### As scalar value + +The `scalar()` method will return the first column of the first row of the result set. + +```python +client.query("SELECT COUNT(*) FROM chunks;").scalar() +``` \ No newline at end of file diff --git a/ai/guides/reranking.md b/ai/guides/reranking.md new file mode 100644 index 0000000000000..f4fa244e7f48c --- /dev/null +++ b/ai/guides/reranking.md @@ -0,0 +1,53 @@ +--- +title: Reranking +summary: Learn how to use reranking in your application. +--- + +# Reranking + +Reranking is a technique used to improve the relevance and accuracy of search results by re-evaluating and reordering them using a dedicated reranking model. + +The search process works in two stages: + +1. **Initial Retrieval**: Vector search identifies the top `k` most similar documents from the collection. +2. **Reranking**: A reranking model evaluates these `k` documents based on the relevance between the query and the documents and reorders them to produce the final top `n` results (where `n` ≤ `k`). + +This two-stage retrieval approach significantly improves both document relevance and accuracy. + +## Basic usage + +[`pytidb`](https://github.com/pingcap/pytidb) is the official Python SDK for TiDB, designed to help developers build AI applications efficiently. + +`pytidb` provides the `Reranker` class that lets you use reranking models from multiple third-party providers. + +1. Create a reranker instance: + + ```python + from pytidb.rerankers import Reranker + + reranker = Reranker(model_name="{provider}/{model_name}") + ``` + +2. 
Apply the reranker by using the `.rerank()` method: + + ```python + table.search("{query}").rerank(reranker, "{field_to_rerank}").limit(3) + ``` + +## Supported providers + +The following examples show how to use reranking models from third-party providers. + +### Jina AI + +To use the reranker from Jina AI, go to their [website](https://jina.ai/reranker) to create an API key. + +For example: + +```python +jinaai = Reranker( + # Using the `jina-reranker-m0` model + model_name="jina_ai/jina-reranker-m0", + api_key="{your-jinaai-api-key}" +) +``` diff --git a/ai/guides/tables.md b/ai/guides/tables.md new file mode 100644 index 0000000000000..dceda111b6fd6 --- /dev/null +++ b/ai/guides/tables.md @@ -0,0 +1,448 @@ +--- +title: Working with Tables +summary: Learn how to work with tables in TiDB. +--- + +# Working with Tables + +TiDB uses tables to organize and store collections of related data. It provides flexible schema definition capabilities, so you can design tables to meet your specific requirements. + +A table can contain multiple columns of different data types. Supported data types include text, numbers, vectors, binary data (`BLOB`), JSON, and more. + +This document shows how to work with tables using [`pytidb`](https://github.com/pingcap/pytidb). + +`pytidb` is the official Python SDK for TiDB, designed to help developers build AI applications efficiently. + +> **Note:** +> +> For a complete working example, see the [basic example](https://github.com/pingcap/pytidb/tree/main/examples/basic) in our repository. + +## Create a table + +### Using TableModel + +`pytidb` provides a `TableModel` class that represents the schema of a table. The class is compatible with the [Pydantic model](https://docs.pydantic.dev/latest/concepts/models/) and enables you to define tables declaratively. 
+ +In the following example, you create a table named `items` with these columns: + +- `id`: a primary key column with an integer type +- `content`: a text type column +- `embedding`: a vector type column with 3 dimensions +- `meta`: a JSON type column + + +
+ +After you [connect to the database](/ai/guides/connect.md) using `pytidb` and obtain a `client` instance, you can create a table with the `create_table` method. + +```python hl_lines="12" +from pytidb.schema import TableModel, Field, VectorField +from pytidb.datatype import TEXT, JSON + +class Item(TableModel): + __tablename__ = "items" + + id: int = Field(primary_key=True) + content: str = Field(sa_type=TEXT) + embedding: list[float] = VectorField(dimensions=3) + meta: dict = Field(sa_type=JSON, default_factory=dict) + +table = client.create_table(schema=Item, if_exists="overwrite") +``` + +The `create_table` method accepts these parameters: + +- `schema`: The `TableModel` class that defines your table structure. +- `if_exists`: The table creation mode. + - `raise` (default): Creates the table if it does not exist; raises an error if it already exists. + - `skip`: Creates the table if it does not exist; does nothing if it already exists. + - `overwrite`: Drops the existing table and creates a new one. This is useful for **testing and development**, but not recommended for production environments. + +Once the table is created, you can use the `table` object to insert, update, delete, and query data. + +
+
+ +Use the `CREATE TABLE` statement to create a table. + +```sql +CREATE TABLE items ( + id INT PRIMARY KEY, + content TEXT, + embedding VECTOR(3), + meta JSON +); +``` + +
+
+ +## Add data to a table + +### With TableModel + +You can use a `TableModel` instance to represent a row and insert it into the table. + +To insert a single record: + + +
+ +Use the `table.insert()` method to insert a single record into the table. + +```python +table.insert( + Item( + id=1, + content="TiDB is a distributed SQL database", + embedding=[0.1, 0.2, 0.3], + meta={"category": "database"}, + ) +) +``` + +
+
+ +Use the `INSERT INTO` statement to insert a single record into the table. + +```sql +INSERT INTO items(id, content, embedding, meta) +VALUES (1, 'TiDB is a distributed SQL database', '[0.1, 0.2, 0.3]', '{"category": "database"}'); +``` + +
+
+ +To insert multiple records: + + +
+ +Use the `table.bulk_insert()` method to insert multiple records into the table. + +```python +table.bulk_insert([ + Item( + id=2, + content="GPT-4 is a large language model", + embedding=[0.4, 0.5, 0.6], + meta={"category": "llm"}, + ), + Item( + id=3, + content="LlamaIndex is a Python library for building AI-powered applications", + embedding=[0.7, 0.8, 0.9], + meta={"category": "rag"}, + ), +]) +``` + +
+
+ +Use the `INSERT INTO` statement to insert multiple records into the table. + +```sql +INSERT INTO items(id, content, embedding, meta) +VALUES + (2, 'GPT-4 is a large language model', '[0.4, 0.5, 0.6]', '{"category": "llm"}'), + (3, 'LlamaIndex is a Python library for building AI-powered applications', '[0.7, 0.8, 0.9]', '{"category": "rag"}'); +``` + +
+
+ +### With Dict + +You can also use `dict` to represent rows and insert them into the table. This approach is more flexible and does not require a `TableModel` to insert data. + +To insert a single record: + + +
+ +Use the `table.insert()` method with a dictionary to insert a single record into the table. + +```python +table.insert({ + "id": 1, + "content": "TiDB is a distributed SQL database", + "embedding": [0.1, 0.2, 0.3], + "meta": {"category": "database"}, +}) +``` + +
+
+ +Use the `INSERT INTO` statement to insert a single record into the table. + +```sql +INSERT INTO items(id, content, embedding, meta) +VALUES (1, 'TiDB is a distributed SQL database', '[0.1, 0.2, 0.3]', '{"category": "database"}'); +``` + +
+
+ +## Save data to a table + +The `save` method provides a convenient way to insert or update a single row. If the row's primary key does not exist in the table, the method inserts the row as a new record. If the primary key already exists, the method overwrites the entire row. + +> **Note:** +> +> If a record ID already exists in the table, `table.save()` overwrites the entire record. To change only part of a record, use `table.update()`. + +
+ +Use the `table.save()` method to save a single record to the table. + +**Example: Save a new record** + +```python +saved_record = table.save( + Item( + id=4, + content="Vector databases enable AI applications", + embedding=[1.0, 1.1, 1.2], + meta={"category": "vector-db"}, + ) +) +``` + +**Example: Save an existing record (overwrites the entire record)** + +```python +# This overwrites the entire record with id=1 +updated_record = table.save( + Item( + id=1, # Existing ID + content="Updated content for TiDB", + embedding=[0.2, 0.3, 0.4], + meta={"category": "updated"}, + ) +) +``` + +
+
+ +Use the `INSERT ... ON DUPLICATE KEY UPDATE` statement to save a record. + +**Example: Save a new record or update if it exists** + +```sql +INSERT INTO items(id, content, embedding, meta) +VALUES (4, 'Vector databases enable AI applications', '[1.0, 1.1, 1.2]', '{"category": "vector-db"}') +ON DUPLICATE KEY UPDATE + content = VALUES(content), + embedding = VALUES(embedding), + meta = VALUES(meta); +``` + +
+
+ +## Query data from a table + +To fetch records from a table: + + +
+ +Use the `table.query()` method to fetch records from the table. + +**Example: Fetch the first 10 records** + +```python +result = table.query(limit=10).to_list() +``` + +
+
+ +Use the `SELECT` statement to fetch the records from the table. + +**Example: Fetch the first 10 records** + +```sql +SELECT * FROM items LIMIT 10; +``` + +
+
+ +To fetch records based on query conditions: + + +
+ +Pass the `filters` parameter to the `table.query()` method. + +```python +result = table.query( + filters={"meta.category": "database"}, + limit=10 +).to_list() +``` + +
+
+ +Use the `WHERE` clause to filter records. + +**Example: Fetch up to 10 records with category "database"** + +```sql +SELECT * FROM items WHERE meta->>'$.category' = 'database' LIMIT 10; +``` + 
+
+ +For a complete list of supported filter operations and examples, refer to the [Filtering](/ai/guides/filtering.md) guide. + +## Update data in a table + + +
+ +Use the `table.update()` method to update records with [filters](/ai/guides/filtering.md). + +**Example: Update the record whose `id` equals 1** + +```python +table.update( + values={ + "content": "TiDB Cloud Starter is a fully managed, auto-scaling cloud database service", + "embedding": [0.1, 0.2, 0.4], + "meta": {"category": "dbaas"}, + }, + filters={ + "id": 1 + }, +) +``` + +
+
+ +Use the `UPDATE` statement to update records with [filters](/ai/guides/filtering.md). + +**Example: Update the record whose `id` equals 1** + +```sql +UPDATE items +SET + content = 'TiDB Cloud Starter is a fully managed, auto-scaling cloud database service', + embedding = '[0.1, 0.2, 0.4]', + meta = '{"category": "dbaas"}' +WHERE + id = 1; +``` + +
+
+ +## Delete data from a table + +
+ +Use the `table.delete()` method to delete records with [filters](/ai/guides/filtering.md). + +**Example: Delete the record where `id` equals 2** + +```python +table.delete( + filters={ + "id": 2 + } +) +``` + +
+
+ +Use the `DELETE` statement to delete records with [filters](/ai/guides/filtering.md). + +**Example: Delete the record where `id` equals 2** + +```sql +DELETE FROM items WHERE id = 2; +``` + +
+
+ +## Truncate a table + + +
+ +To remove all data from the table but keep the table structure, use the `table.truncate()` method. + +```python +table.truncate() +``` + +To check that the table is truncated, verify that it contains 0 rows. + +```python +table.rows() +``` + +
+
+ +To remove all data from the table but keep the table structure, use the `TRUNCATE TABLE` statement. + +```sql +TRUNCATE TABLE items; +``` + +To check that the table is truncated, verify that it contains 0 rows. + +```sql +SELECT COUNT(*) FROM items; +``` + +
+
+ +## Drop a table + + +
+ +To permanently remove a table from the database, use the `client.drop_table()` method. + +```python +client.drop_table("items") +``` + +To check that the table is removed from the database: + +```python +client.table_names() +``` + +
+
+ +To permanently remove a table from the database, use the `DROP TABLE` statement. + +```sql +DROP TABLE items; +``` + +To check that the table is removed from the database: + +```sql +SHOW TABLES; +``` + +
+
\ No newline at end of file diff --git a/ai/guides/transactions.md b/ai/guides/transactions.md new file mode 100644 index 0000000000000..033e32021568d --- /dev/null +++ b/ai/guides/transactions.md @@ -0,0 +1,30 @@ +--- +title: Transactions +summary: Learn how to use transactions in your application. +--- + +# Transactions + +TiDB supports ACID transactions to ensure data consistency and reliability. + +## Basic usage + +```python +with client.session() as session: + initial_total_balance = session.query("SELECT SUM(balance) FROM players").scalar() + + # Transfer 10 coins from player 1 to player 2 + session.execute("UPDATE players SET balance = balance - 10 WHERE id = 1") + session.execute("UPDATE players SET balance = balance + 10 WHERE id = 2") + + session.commit() + # or session.rollback() + + final_total_balance = session.query("SELECT SUM(balance) FROM players").scalar() + assert final_total_balance == initial_total_balance +``` + +## See also + +- [TiDB Developer Guide - Transactions](/develop/dev-guide-transaction-overview.md) +- [TiDB Documentation - SQL Reference - Transactions](/transaction-overview.md) \ No newline at end of file diff --git a/tidb-cloud/vector-search-full-text-search-python.md b/ai/guides/vector-search-full-text-search-python.md similarity index 81% rename from tidb-cloud/vector-search-full-text-search-python.md rename to ai/guides/vector-search-full-text-search-python.md index b0aa3e1ea5f68..2fd549326047e 100644 --- a/tidb-cloud/vector-search-full-text-search-python.md +++ b/ai/guides/vector-search-full-text-search-python.md @@ -1,12 +1,12 @@ --- title: Full-Text Search with Python summary: Full-text search lets you retrieve documents for exact keywords. In Retrieval-Augmented Generation (RAG) scenarios, you can use full-text search together with vector search to improve the retrieval quality. 
-aliases: ['/tidb/stable/vector-search-full-text-search-python'] +aliases: ['/tidb/stable/vector-search-full-text-search-python/','/tidbcloud/vector-search-full-text-search-python/'] --- # Full-Text Search with Python -Unlike [Vector Search](/vector-search/vector-search-overview.md), which focuses on semantic similarity, full-text search lets you retrieve documents for exact keywords. In Retrieval-Augmented Generation (RAG) scenarios, you can use full-text search together with vector search to improve the retrieval quality. +Unlike [Vector Search](/ai/concepts/vector-search-overview.md), which focuses on semantic similarity, full-text search lets you retrieve documents for exact keywords. In Retrieval-Augmented Generation (RAG) scenarios, you can use full-text search together with vector search to improve the retrieval quality. The full-text search feature in TiDB provides the following capabilities: @@ -20,13 +20,13 @@ The full-text search feature in TiDB provides the following capabilities: > **Tip:** > -> For SQL usage, see [Full-Text Search with SQL](/tidb-cloud/vector-search-full-text-search-sql.md). +> For SQL usage, see [Full-Text Search with SQL](/ai/guides/vector-search-full-text-search-sql.md). > -> To use full-text search and vector search together in your AI apps, see [Hybrid Search](/tidb-cloud/vector-search-hybrid-search.md). +> To use full-text search and vector search together in your AI apps, see [Hybrid Search](/ai/guides/vector-search-hybrid-search.md). ## Prerequisites -Full-text search is still in the early stages, and we are continuously rolling it out to more customers. Currently, Full-text search is only available for the following product option and regions: +Full-text search is still in the early stages, and we are continuously rolling it out to more customers. 
Currently, full-text search is only available on {{{ .starter }}} and {{{ .essential }}} in the following regions: - TiDB Cloud Serverless: `Frankfurt (eu-central-1)` and `Singapore (ap-southeast-1)` @@ -64,7 +64,7 @@ db = TiDBClient.connect( ) ``` -You can get these connection parameters from the [TiDB Cloud console](https://tidbcloud.com): +You can get these connection parameters from the [TiDB Cloud console](https://tidbcloud.com) as follows: 1. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page, and then click the name of your target cluster to go to its overview page. @@ -153,21 +153,11 @@ For a complete example, see [pytidb full-text search demo](https://github.com/pi - [pytidb Python SDK Documentation](https://github.com/pingcap/pytidb) -- [Hybrid Search](/tidb-cloud/vector-search-hybrid-search.md) +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) ## Feedback & Help Full-text search is still in the early stages with limited accessibility. If you would like to try full-text search in a region that is not yet available, or if you have feedback or need help, feel free to reach out to us: - - -- [Join our Discord](https://discord.gg/zcqexutz2R) - - - - - -- [Join our Discord](https://discord.gg/zcqexutz2R) -- [Visit our Support Portal](https://tidb.support.pingcap.com/) - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) diff --git a/tidb-cloud/vector-search-full-text-search-sql.md b/ai/guides/vector-search-full-text-search-sql.md similarity index 88% rename from tidb-cloud/vector-search-full-text-search-sql.md rename to ai/guides/vector-search-full-text-search-sql.md index 3bc0b0e9dc269..6622f1a7b11b2 100644 --- a/tidb-cloud/vector-search-full-text-search-sql.md +++ b/ai/guides/vector-search-full-text-search-sql.md @@ -1,12 +1,12 @@ --- title: Full-Text Search with SQL summary: Full-text search lets you retrieve documents for exact keywords. In Retrieval-Augmented Generation (RAG) scenarios, you can use full-text search together with vector search to improve the retrieval quality. -aliases: ['/tidb/stable/vector-search-full-text-search-sql'] +aliases: ['/tidb/stable/vector-search-full-text-search-sql/','/tidbcloud/vector-search-full-text-search-sql/'] --- # Full-Text Search with SQL -Unlike [Vector Search](/vector-search/vector-search-overview.md), which focuses on semantic similarity, full-text search lets you retrieve documents for exact keywords. In Retrieval-Augmented Generation (RAG) scenarios, you can use full-text search together with vector search to improve the retrieval quality. +Unlike [Vector Search](/ai/concepts/vector-search-overview.md), which focuses on semantic similarity, full-text search lets you retrieve documents for exact keywords. In Retrieval-Augmented Generation (RAG) scenarios, you can use full-text search together with vector search to improve the retrieval quality. The full-text search feature in TiDB provides the following capabilities: @@ -20,13 +20,13 @@ The full-text search feature in TiDB provides the following capabilities: > **Tip:** > -> For Python usage, see [Full-Text Search with Python](/tidb-cloud/vector-search-full-text-search-python.md). 
+> For Python usage, see [Full-Text Search with Python](/ai/guides/vector-search-full-text-search-python.md). > -> To use full-text search and vector search together in your AI apps, see [Hybrid Search](/tidb-cloud/vector-search-hybrid-search.md). +> To use full-text search and vector search together in your AI apps, see [Hybrid Search](/ai/guides/vector-search-hybrid-search.md). ## Get started -Full-text search is still in the early stages, and we are continuously rolling it out to more customers. Currently, Full-text search is only available for the following product option and regions: +Full-text search is still in the early stages, and we are continuously rolling it out to more customers. Currently, full-text search is only available on {{{ .starter }}} and {{{ .essential }}} in the following regions: - TiDB Cloud Serverless: `Frankfurt (eu-central-1)` and `Singapore (ap-southeast-1)` @@ -70,7 +70,7 @@ ALTER TABLE stock_items ADD FULLTEXT INDEX (title) WITH PARSER MULTILINGUAL ADD_ The following parsers are accepted in the `WITH PARSER ` clause: -- `STANDARD`: fast, works for English contents, splitting words by spaces and punctuation. +- `STANDARD`: fast, works for English content, splitting words by spaces and punctuation. - `MULTILINGUAL`: supports multiple languages, including English, Chinese, Japanese, and Korean. @@ -200,21 +200,11 @@ WHERE t.author_id IN ## See also -- [Hybrid Search](/tidb-cloud/vector-search-hybrid-search.md) +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) ## Feedback & help Full-text search is still in the early stages with limited accessibility. 
If you would like to try full-text search in a region that is not yet available, or if you have feedback or need help, feel free to reach out to us: - - -- [Join our Discord](https://discord.gg/zcqexutz2R) - - - - - -- [Join our Discord](https://discord.gg/zcqexutz2R) -- [Visit our Support Portal](https://tidb.support.pingcap.com/) - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) diff --git a/tidb-cloud/vector-search-hybrid-search.md b/ai/guides/vector-search-hybrid-search.md similarity index 61% rename from tidb-cloud/vector-search-hybrid-search.md rename to ai/guides/vector-search-hybrid-search.md index fda60d2f5aff3..af91b9bfbf6c7 100644 --- a/tidb-cloud/vector-search-hybrid-search.md +++ b/ai/guides/vector-search-hybrid-search.md @@ -1,7 +1,7 @@ --- title: Hybrid Search summary: Use full-text search and vector search together to improve the retrieval quality. -aliases: ['/tidb/stable/vector-search-hybrid-search'] +aliases: ['/tidb/stable/vector-search-hybrid-search/','/tidbcloud/vector-search-hybrid-search/'] --- # Hybrid Search @@ -19,7 +19,7 @@ This tutorial demonstrates how to use hybrid search in TiDB with the [pytidb](ht ## Prerequisites -Hybrid search relies on both [full-text search](/tidb-cloud/vector-search-full-text-search-python.md) and vector search. Full-text search is still in the early stages, and we are continuously rolling it out to more customers. Currently, Full-text search is only available for the following product option and regions: +Full-text search is still in the early stages, and we are continuously rolling it out to more customers. 
Currently, full-text search is only available on {{{ .starter }}} and {{{ .essential }}} in the following regions: - TiDB Cloud Serverless: `Frankfurt (eu-central-1)` and `Singapore (ap-southeast-1)` @@ -53,7 +53,7 @@ db = TiDBClient.connect( ) ``` -You can get these connection parameters from the [TiDB Cloud console](https://tidbcloud.com): +You can get these connection parameters from the [TiDB Cloud console](https://tidbcloud.com) as follows: 1. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page, and then click the name of your target cluster to go to its overview page. @@ -143,25 +143,105 @@ df = ( For a complete example, see [pytidb hybrid search demo](https://github.com/pingcap/pytidb/tree/main/examples/hybrid_search). -## See also +## Fusion methods -- [pytidb Python SDK Documentation](https://github.com/pingcap/pytidb) +Fusion methods combine results from vector (semantic) and full-text (keyword) searches into a single, unified ranking. This ensures that the final results leverage both semantic relevance and keyword matching. -- [Full-Text Search with Python](/tidb-cloud/vector-search-full-text-search-python.md) +`pytidb` supports two fusion methods: -## Feedback & help +- `rrf`: Reciprocal Rank Fusion (default) +- `weighted`: Weighted Score Fusion -Full-text search is still in the early stages with limited accessibility. If you would like to try full-text search in a region that is not yet available, or if you have feedback or need help, feel free to reach out to us: +You can select the fusion method that best fits your use case to optimize hybrid search results. + +### Reciprocal Rank Fusion (RRF) + +Reciprocal Rank Fusion (RRF) is an algorithm that evaluates search results by leveraging the rank of documents in multiple result sets. + +For more details, see the [RRF paper](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf). 
+ +Enable reciprocal rank fusion by specifying the `method` parameter as `"rrf"` in the `.fusion()` method. + +```python +results = ( + table.search( + "AI database", search_type="hybrid" + ) + .fusion(method="rrf") + .limit(3) + .to_list() +) +``` + +Parameters: + +- `k`: A constant (default: 60) to prevent division by zero and control the impact of high-ranked documents. + +### Weighted Score Fusion + +Weighted Score Fusion combines vector search and full-text search scores using a weighted sum: + +```python +final_score = vs_weight * vector_score + fts_weight * fulltext_score +``` + +Enable weighted score fusion by specifying the `method` parameter as `"weighted"` in the `.fusion()` method. + +For example, to give more weight to vector search, set the `vs_weight` parameter to 0.7 and the `fts_weight` parameter to 0.3: - +```python +results = ( + table.search( + "AI database", search_type="hybrid" + ) + .fusion(method="weighted", vs_weight=0.7, fts_weight=0.3) + .limit(3) + .to_list() +) +``` + +Parameters: + +- `vs_weight`: The weight of the vector search score. +- `fts_weight`: The weight of the full-text search score. + +## Rerank method -- [Join our Discord](https://discord.gg/zcqexutz2R) +Hybrid search also supports reranking using reranker-specific models. - +Use the `rerank()` method to specify a reranker that sorts search results by relevance between the query and the documents. - +**Example: Using Jina AI Reranker to rerank the hybrid search results** -- [Join our Discord](https://discord.gg/zcqexutz2R) -- [Visit our Support Portal](https://tidb.support.pingcap.com/) +```python +reranker = Reranker( + # Use the `jina-reranker-m0` model + model_name="jina_ai/jina-reranker-m0", + api_key="{your-jinaai-api-key}" +) + +results = ( + table.search( + "AI database", search_type="hybrid" + ) + .fusion(method="rrf", k=60) + .rerank(reranker, "text") + .limit(3) + .to_list() +) +``` + +To check other reranker models, see [Reranking](/ai/guides/reranking.md). 
+ +## See also + +- [pytidb Python SDK Documentation](https://github.com/pingcap/pytidb) + +- [Full-Text Search with Python](/ai/guides/vector-search-full-text-search-python.md) + +## Feedback & help + +Full-text search is still in the early stages with limited accessibility. If you would like to try full-text search in a region that is not yet available, or if you have feedback or need help, feel free to reach out to us: - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) diff --git a/ai/guides/vector-search.md b/ai/guides/vector-search.md new file mode 100644 index 0000000000000..2c14019a5e680 --- /dev/null +++ b/ai/guides/vector-search.md @@ -0,0 +1,506 @@ +--- +title: Vector Search +summary: Learn how to use vector search in your application. +--- + +# Vector Search + +Vector search uses semantic similarity to help you find the most relevant records, even if your query does not explicitly include all the keywords. + +> **Note:** +> +> For a complete example of vector search, see [Vector Search Example](/ai/examples/vector-search-with-pytidb.md). + +## Basic usage + +This section shows how to use vector search in your application in just a few steps. Before you start, you need to [connect to the database](/ai/guides/connect.md). + +### Step 1. Create a table with a vector field + + +
+ +You can use `client.create_table()` to create a table and `VectorField` to define a vector field. + +The following example creates a `documents` table with four columns: + +- `id`: The primary key of the table. +- `text`: The text content of the document. +- `text_vec`: The vector embedding of the text content. +- `meta`: The metadata of the document, which is a JSON object. + +```python hl_lines="9" +from pytidb.schema import TableModel, Field, VectorField +from pytidb.datatype import TEXT, JSON + +class Document(TableModel): + __tablename__ = "documents" + + id: int = Field(primary_key=True) + text: str = Field(sa_type=TEXT) + text_vec: list[float] = VectorField(dimensions=3) + meta: dict = Field(sa_type=JSON, default_factory=dict) + +table = client.create_table(schema=Document, if_exists="overwrite") +``` + +The `VectorField` class accepts the following parameters: + +- `dimensions`: The vector dimension. Once specified, only vectors with this exact dimension can be stored in this field. +- `index`: Whether to create a [vector index](https://docs.pingcap.com/tidbcloud/vector-search-index/) for the vector field. Defaults to `True`. +- `distance_metric`: The distance metric to use for the vector index. Supported values: + - `DistanceMetric.COSINE` (default): Cosine distance metric, suitable for measuring text similarity + - `DistanceMetric.L2`: L2 distance metric, suitable for capturing overall difference + +
+
+ +Use the `CREATE TABLE` statement to create a table and use the `VECTOR` type to define a vector column. + +```sql hl_lines="4 5" +CREATE TABLE documents ( + id INT PRIMARY KEY, + text TEXT, + text_vec VECTOR(3), + VECTOR INDEX `vec_idx_text_vec`((VEC_COSINE_DISTANCE(`text_vec`))) +); +``` + +In this example: + +- The `text_vec` column is defined as `VECTOR(3)`, so vectors stored in this column must have 3 dimensions. +- A vector index is created using the `VEC_COSINE_DISTANCE` function to optimize vector search performance. + +TiDB supports two distance functions for vector indexes: + +- `VEC_COSINE_DISTANCE`: Calculates the cosine distance between two vectors +- `VEC_L2_DISTANCE`: Calculates L2 distance (Euclidean distance) between two vectors + +
+
+ +### Step 2. Insert vector data into the table + +For demonstration, insert some text and their corresponding embeddings into the table. + +The following example inserts three documents, each with a simple 3-dimensional vector embedding: + +- `dog` with the vector embedding `[1, 2, 1]` +- `fish` with the vector embedding `[1, 2, 4]` +- `tree` with the vector embedding `[1, 0, 0]` + + +
+ +```python +table.bulk_insert([ + Document(text="dog", text_vec=[1,2,1], meta={"category": "animal"}), + Document(text="fish", text_vec=[1,2,4], meta={"category": "animal"}), + Document(text="tree", text_vec=[1,0,0], meta={"category": "plant"}), +]) +``` + +
+
+ +```sql +INSERT INTO documents (id, text, text_vec, meta) +VALUES + (1, 'dog', '[1,2,1]', '{"category": "animal"}'), + (2, 'fish', '[1,2,4]', '{"category": "animal"}'), + (3, 'tree', '[1,0,0]', '{"category": "plant"}'); +``` + +> **Note:** +> +> In real-world applications, embeddings are usually generated by an [embedding model](/ai/concepts/vector-search-overview.md#embedding-model). + +For convenience, pytidb provides an auto embedding feature that can automatically generate vector embeddings for your text fields when you insert, update, or search—no manual processing needed. + +For details, see the [Auto Embedding](/ai/guides/auto-embedding.md) guide. + +
+
+ +### Step 3. Perform vector search + +Vector search uses vector distance metrics to measure the similarity and relevance between vectors. The closer the distance, the more relevant the record. To find the most relevant documents in the table, you need to specify a query vector. + +The following example assumes the query is `A swimming animal` and its vector embedding is `[1, 2, 3]`. + + +
+ +Use the `table.search()` method to perform vector search. It uses `search_mode="vector"` by default. + +```python +table.search([1, 2, 3]).limit(3).to_list() +``` + +```python title="Execution result" +[ + {"id": 2, "text": "fish", "text_vec": [1,2,4], "_distance": 0.00853986601633272}, + {"id": 1, "text": "dog", "text_vec": [1,2,1], "_distance": 0.12712843905603044}, + {"id": 3, "text": "tree", "text_vec": [1,0,0], "_distance": 0.7327387580875756}, +] +``` + +The result shows that the most relevant document is `fish` with a distance of `0.00853986601633272`. + +
+
+
+Use the `ORDER BY <distance_function>(<vector_column>, <query_vector>) LIMIT <n>` clause in a `SELECT` statement to get the `n` nearest neighbors of a query vector.
+
+The following example uses the `vec_cosine_distance` function to calculate the cosine distance between the vectors stored in the `text_vec` column and the provided query vector `[1, 2, 3]`.
+
+```sql
+SELECT id, text, vec_cosine_distance(text_vec, '[1,2,3]') AS distance
+FROM documents
+ORDER BY distance
+LIMIT 3;
+```
+
+```plain title="Execution result"
++----+----------+---------------------+
+| id | text | distance |
++----+----------+---------------------+
+| 2 | fish | 0.00853986601633272 |
+| 1 | dog | 0.12712843905603044 |
+| 3 | tree | 0.7327387580875756 |
++----+----------+---------------------+
+3 rows in set (0.15 sec)
+```
+
+The result shows that the most relevant document is `fish` with a distance of `0.00853986601633272`.
+
+
+ +## Distance metrics + +Distance metrics are a measure of the similarity between a pair of vectors. Currently, TiDB supports the following distance metrics: + + +
+ +The `table.search()` API supports the following distance metrics: + +| Metric Name | Description | Best For | +|--------------------------|----------------------------------------------------------------|----------| +| `DistanceMetric.COSINE` | Calculates the cosine distance between two vectors (default). Measures the angle between vectors. | Text embeddings, semantic search | +| `DistanceMetric.L2` | Calculates the L2 distance (Euclidean distance) between two vectors. Measures the straight-line distance. | Image features | + +To change the distance metric used for vector search, use the `.distance_metric()` method. + +**Example: Use the L2 distance metric** + +```python +from pytidb.schema import DistanceMetric + +results = ( + table.search([1, 2, 3]) + .distance_metric(DistanceMetric.L2) + .limit(10) + .to_list() +) +``` + +
+
+ +In SQL, you can use the following built-in functions to calculate vector distances directly in your queries: + +| Function Name | Description | +|-------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------| +| [`VEC_L2_DISTANCE`](https://docs.pingcap.com/tidbcloud/vector-search-functions-and-operators/#vec_l2_distance) | Calculates L2 distance (Euclidean distance) between two vectors | +| [`VEC_COSINE_DISTANCE`](https://docs.pingcap.com/tidbcloud/vector-search-functions-and-operators/#vec_cosine_distance) | Calculates the cosine distance between two vectors | +| [`VEC_NEGATIVE_INNER_PRODUCT`](https://docs.pingcap.com/tidbcloud/vector-search-functions-and-operators/#vec_negative_inner_product) | Calculates the negative of the inner product between two vectors| +| [`VEC_L1_DISTANCE`](https://docs.pingcap.com/tidbcloud/vector-search-functions-and-operators/#vec_l1_distance) | Calculates L1 distance (Manhattan distance) between two vectors | + +
+
+ +## Distance threshold + +The `table.search()` API allows you to set a distance threshold to control the similarity of the returned results. By specifying this threshold, you can exclude less similar vectors and return only those that meet your relevance criteria. + + +
+ +Use the `.distance_threshold()` method to set a maximum distance for search results. Only records with a distance less than the threshold are returned. + +**Example: Only return documents with a distance less than 0.5** + +```python +results = table.search([1, 2, 3]).distance_threshold(0.5).limit(10).to_list() +``` + +
+
+ +In SQL, use the `HAVING` clause with a distance function to filter results by distance: + +**Example: Only return documents with a distance less than 0.1** + +```sql +SELECT id, text, vec_cosine_distance(text_vec, '[1,2,3]') AS distance +FROM documents +HAVING distance < 0.1 +ORDER BY distance +LIMIT 10; +``` + +
+
+ +## Distance range + +The `table.search()` API also supports specifying a distance range to further refine the results. + + +
+ +Use the `.distance_range()` method to set both minimum and maximum distance values. Only records with a distance within this range are returned. + +**Example: Only return documents with a distance between 0.01 and 0.05** + +```python +results = table.search([1, 2, 3]).distance_range(0.01, 0.05).limit(10).to_list() +``` + +
+
+ +To specify a distance range in SQL, use `BETWEEN` or other comparison operators in the `HAVING` clause: + +**Example: Only return documents with a distance between 0.01 and 0.05** + +```sql +SELECT id, text, vec_l2_distance(text_vec, '[1,2,3]') AS distance +FROM documents +HAVING distance BETWEEN 0.01 AND 0.05 +ORDER BY distance +LIMIT 10; +``` + +
+
+ +## Metadata filtering + +As a relational database, TiDB supports a rich set of [SQL operators](https://docs.pingcap.com/tidbcloud/operators/) and allows flexible combinations of filtering conditions. + +For vector search in TiDB, you can apply metadata filtering on scalar fields (for example, integers and strings) or JSON fields. + +Typically, there are two modes for vector search combined with metadata filtering: + +- **Post-filtering**: TiDB first performs vector search to retrieve the top-k candidates from the entire vector space, then applies filters to that candidate set. The vector search stage typically uses a vector index for efficiency. +- **Pre-filtering**: TiDB applies filters before vector search. If the filter is highly selective and the filtered field has a scalar index, this mode can reduce the search space and improve performance. + +### Post-filtering + + +
+ +Use the `.filter()` method with a filter dictionary to apply filtering to vector search. + +By default, the `table.search()` API uses post-filtering mode to maximize search performance with the vector index. + +**Example: Vector search with post-filtering** + +```python +results = ( + table.search([1, 2, 3]) + # The `meta` is a JSON field, and its value is a JSON object + # like {"category": "animal"} + .filter({"meta.category": "animal"}) + .num_candidate(50) + .limit(10) + .to_list() +) +``` + +> **Note:** +> +> When using a vector index, if the final `limit` is very small, the accuracy of the results might decrease. You can use the `.num_candidate()` method to control how many candidates to retrieve from the vector index during the vector search phase, without changing the `limit` parameter. + +> A higher `num_candidate` value generally improves recall but might reduce query performance. Adjust this value based on your dataset and accuracy requirements. + +
+
+
+Currently, vector indexes are only effective in strict ANN (Approximate Nearest Neighbor) queries, such as:
+
+```sql
+SELECT * FROM <table> ORDER BY <distance_function>(<vector_column>, <query_vector>) LIMIT <k>
+```
+
+In other words, you cannot use a `WHERE` clause together with a vector index in the same query.
+
+If you need to combine vector search with additional filtering conditions, you can use the post-filtering pattern. In this approach, the ANN query will be divided into two parts:
+
+- The inner query performs the vector search using the vector index.
+- The outer query applies the `WHERE` condition to filter the results.
+
+```sql hl_lines="8"
+SELECT *
+FROM (
+ SELECT id, text, meta, vec_cosine_distance(text_vec, '[1,2,3]') AS distance
+ FROM documents
+ ORDER BY distance
+ LIMIT 50
+) candidates
+WHERE meta->>'$.category' = 'animal'
+ORDER BY distance
+LIMIT 10;
+```
+
+> **Note:**
+>
+> The post-filtering pattern might lead to empty results. For example, the inner query might retrieve the top 50 most similar records, but none of them match the `WHERE` condition.
+>
+> To mitigate this, you can increase the `LIMIT` value (e.g., 50) in the **inner query** to fetch more candidates, improving the chances of returning enough valid results after filtering.
+
+For supported SQL operators, see [Operators](https://docs.pingcap.com/tidbcloud/operators/) in the TiDB Cloud documentation.
+
+
+
+### Pre-filtering
+
+ +To enable pre-filtering, set `prefilter=True` in the `.filter()` method. + +**Example: Vector search with pre-filtering** + +```python +results = ( + table.search([1, 2, 3]) + .filter({"meta.category": "animal"}, prefilter=True) + .limit(10) + .to_list() +) +``` + +For supported filter operators, see [Filtering](/ai/guides/filtering.md). + +
+
+ +In SQL, use the `->>` operator or `JSON_EXTRACT` to access JSON fields in the `WHERE` clause: + +```sql +SELECT id, text, meta, vec_cosine_distance(text_vec, '[1,2,3]') AS distance +FROM documents +WHERE meta->>'$.category' = 'animal' +ORDER BY distance +LIMIT 10; +``` + +For supported SQL operators, see [Operators](https://docs.pingcap.com/tidbcloud/operators/) in the TiDB Cloud documentation. + +
+
+ +## Multiple vector fields + +TiDB supports defining multiple vector columns in a single table, allowing you to store and search different types of vector embeddings. + +For example, you can store both text embeddings and image embeddings in the same table, which is convenient for managing multimodal data. + + +
+ +You can define multiple vector fields in the schema and perform vector search on the specified vector field by using the `.vector_column()` method. + +**Example: Specify the vector field to search on** + +```python hl_lines="6 8 17" +# Create a table with multiple vector fields +class RichTextDocument(TableModel): + __tablename__ = "rich_text_documents" + id: int = Field(primary_key=True) + text: str = Field(sa_type=TEXT) + text_vec: list[float] = VectorField(dimensions=3) + image_url: str + image_vec: list[float] = VectorField(dimensions=3) + +table = client.create_table(schema=RichTextDocument, if_exists="overwrite") + +# Insert sample data ... + +# Search using image vector field +results = ( + table.search([1, 2, 3]) + .vector_column("image_vec") + .distance_metric(DistanceMetric.COSINE) + .limit(10) + .to_list() +) +``` + +
+
+ +You can create multiple vector columns in a table and search them using suitable distance functions: + +```sql +-- Create a table with multiple vector fields +CREATE TABLE rich_text_documents ( + id BIGINT PRIMARY KEY, + text TEXT, + text_vec VECTOR(3), + image_url VARCHAR(255), + image_vec VECTOR(3) +); + +-- Insert sample data ... + +-- Search using text vector +SELECT id, image_url, vec_l2_distance(image_vec, '[4,5,6]') AS image_distance +FROM rich_text_documents +ORDER BY image_distance +LIMIT 10; +``` + +
+
+ +## Output search results + +The `table.search()` API lets you convert search results into several common data processing formats: + +### As SQLAlchemy result rows + +To work with raw SQLAlchemy result rows, use: + +```python +table.search([1, 2, 3]).limit(10).to_rows() +``` + +### As a list of Python dictionaries + +For easier manipulation in Python, convert the results to a list of dictionaries: + +```python +table.search([1, 2, 3]).limit(10).to_list() +``` + +### As a pandas DataFrame + +To display results in a user-friendly table—especially useful in Jupyter notebooks—convert them to a pandas DataFrame: + +```python +table.search([1, 2, 3]).limit(10).to_pandas() +``` + +### As a list of Pydantic model instances + +The `TableModel` class can also be used as a Pydantic model to represent data entities. To work with results as Pydantic model instances, use: + +```python +table.search([1, 2, 3]).limit(10).to_pydantic() +``` \ No newline at end of file diff --git a/ai/integrations/embedding-openai-compatible.md b/ai/integrations/embedding-openai-compatible.md new file mode 100644 index 0000000000000..d89e940586cf5 --- /dev/null +++ b/ai/integrations/embedding-openai-compatible.md @@ -0,0 +1,131 @@ +--- +title: OpenAI-Compatible Embeddings +summary: Learn how to integrate TiDB Vector Search with an OpenAI-compatible embedding model to store embeddings and perform semantic search. +--- + +# OpenAI-Compatible Embeddings + +This tutorial demonstrates how to use OpenAI-compatible embedding services to generate text embeddings, store them in TiDB, and perform semantic search. + +> **Note:** +> +> Currently, [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) is only available on {{{ .starter }}} clusters hosted on AWS. 
+ +## OpenAI-compatible embedding services + +Because the OpenAI Embedding API is widely used, many providers offer compatible APIs, such as: + +- [Ollama](https://ollama.com/) +- [vLLM](https://vllm.ai/) + +The TiDB Python SDK [pytidb](https://github.com/pingcap/pytidb) provides the `EmbeddingFunction` class to integrate with OpenAI-compatible embedding services. + +## Usage example + +This example shows how to create a vector table, insert documents, and perform similarity search using an OpenAI-compatible embedding model. + +### Step 1: Connect to the database + +```python +from pytidb import TiDBClient + +tidb_client = TiDBClient.connect( + host="{gateway-region}.prod.aws.tidbcloud.com", + port=4000, + username="{prefix}.root", + password="{password}", + database="{database}", + ensure_db=True, +) +``` + +### Step 2: Define the embedding function + +To integrate with an OpenAI-compatible embedding service, initialize the `EmbeddingFunction` class and set the `model_name` parameter with the `openai/` prefix. + +```python +from pytidb.embeddings import EmbeddingFunction + +openai_like_embed = EmbeddingFunction( + model_name="openai/{model_name}", + api_base="{your-api-base}", + api_key="{your-api-key}", +) +``` + +The parameters are: + +- `model_name`: Specifies the model to use. Use the format `openai/{model_name}`. +- `api_base`: The base URL of your OpenAI-compatible embedding API service. +- `api_key`: The API key used to authenticate with the embedding API service. 
+ +**Example: Use Ollama with the `nomic-embed-text` model** + +```python +openai_like_embed = EmbeddingFunction( + model_name="openai/nomic-embed-text", + api_base="http://localhost:11434/v1", +) +``` + +**Example: Use vLLM with the `intfloat/e5-mistral-7b-instruct` model** + +```python +openai_like_embed = EmbeddingFunction( + model_name="openai/intfloat/e5-mistral-7b-instruct", + api_base="http://localhost:8000/v1" +) +``` + +### Step 3: Create a vector table + +Create a table with a vector field that uses Ollama and the `nomic-embed-text` model. + +```python +from pytidb.schema import TableModel, Field +from pytidb.embeddings import EmbeddingFunction +from pytidb.datatype import TEXT + +openai_like_embed = EmbeddingFunction( + model_name="openai/nomic-embed-text", + api_base="{your-api-base}", +) + +class Document(TableModel): + __tablename__ = "sample_documents" + id: int = Field(primary_key=True) + content: str = Field(sa_type=TEXT) + embedding: list[float] = openai_like_embed.VectorField(source_field="content") + +table = tidb_client.create_table(schema=Document, if_exists="overwrite") +``` + +### Step 4: Insert data into the table + +Use the `table.insert()` or `table.bulk_insert()` API to add data: + +```python +documents = [ + Document(id=1, content="Java: Object-oriented language for cross-platform development."), + Document(id=2, content="Java coffee: Bold Indonesian beans with low acidity."), + Document(id=3, content="Java island: Densely populated, home to Jakarta."), + Document(id=4, content="Java's syntax is used in Android apps."), + Document(id=5, content="Dark roast Java beans enhance espresso blends."), +] +table.bulk_insert(documents) +``` + +With [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) enabled, TiDB automatically generates vector values when you insert data. 
+ +### Step 5: Search for similar documents + +Use the `table.search()` API to perform vector search: + +```python +results = table.search("How to start learning Java programming?") \ + .limit(2) \ + .to_list() +print(results) +``` + +With [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) enabled, TiDB automatically generates embeddings for query text during vector search. diff --git a/ai/integrations/tidb-mcp-claude-code.md b/ai/integrations/tidb-mcp-claude-code.md new file mode 100644 index 0000000000000..9db834959194c --- /dev/null +++ b/ai/integrations/tidb-mcp-claude-code.md @@ -0,0 +1,74 @@ +--- +title: Get started with Claude Code and TiDB MCP Server +summary: This guide shows you how to configure the TiDB MCP Server in Claude Code. +--- + +# Get Started with Claude Code and TiDB MCP Server + +This guide shows how to configure the TiDB MCP Server in Claude Code. + +## Prerequisites + +Before you begin, ensure you have the following: + +- **Claude Code**: Install it from [claude.com](https://claude.com/product/claude-code). +- **Python (>=3.10) and uv**: Ensure Python (3.10 or later) and `uv` are installed. Follow the [installation guide](https://docs.astral.sh/uv/getting-started/installation/) to install `uv`. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). + +## Connect to TiDB Cloud Starter (recommended) + +Use the TiDB Cloud console to generate a ready-to-run Claude Code command. + +1. Go to the [Clusters](https://tidbcloud.com/console/clusters) page, select your cluster, and then click **Use with AI Tools** in the upper-right corner. +2. In the **Access `your_cluster_name` with AI tools** dialog, select the **Branch** and **Database** that Claude Code should access. +3. Review the **Prerequisites** list in the dialog and install any missing dependencies. +4. 
Configure the root password: + + - If you have not set a password yet, click **Generate Password** and store it in a secure location (it is shown only once). + - If a password already exists, enter it in the **Enter the password for easy setup** field. + - If you forget the password, click **Reset password** in the **Prerequisites** section to generate a new one. + +5. Select the **Claude Code** tab, copy the setup command, and run it in your terminal. + +## Manual configuration (any TiDB cluster) + +If you prefer manual setup, use one of the following methods and replace the placeholders with your connection parameters. + +### Method 1: CLI command + +```bash +claude mcp add --transport stdio TiDB \ + --env TIDB_HOST='' \ + --env TIDB_PORT= \ + --env TIDB_USERNAME='' \ + --env TIDB_PASSWORD='' \ + --env TIDB_DATABASE='' \ + -- uvx --from 'pytidb[mcp]' 'tidb-mcp-server' +``` + +### Method 2: Project config file + +Add the following configuration to your project-level `.mcp.json` file. For details, see the [Claude Code MCP documentation](https://code.claude.com/docs/en/mcp#project-scope). + +```json +{ + "mcpServers": { + "TiDB": { + "type": "stdio", + "command": "uvx", + "args": ["--from", "pytidb[mcp]", "tidb-mcp-server"], + "env": { + "TIDB_HOST": "", + "TIDB_PORT": "", + "TIDB_USERNAME": "", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "" + } + } + } +} +``` + +## See also + +- [TiDB MCP Server](/ai/integrations/tidb-mcp-server.md) diff --git a/ai/integrations/tidb-mcp-claude-desktop.md b/ai/integrations/tidb-mcp-claude-desktop.md new file mode 100644 index 0000000000000..5845b7b67bd5b --- /dev/null +++ b/ai/integrations/tidb-mcp-claude-desktop.md @@ -0,0 +1,48 @@ +--- +title: Get started with Claude Desktop and TiDB MCP Server +summary: This guide shows you how to configure the TiDB MCP Server in Claude Desktop. +--- + +# Get started with Claude Desktop and TiDB MCP Server + +This guide shows how to configure the TiDB MCP Server in Claude Desktop. 
+ +## Prerequisites + +Before you begin, ensure you have the following: + +- **Claude Desktop**: Download and install Claude Desktop from [claude.ai](https://claude.ai/download). +- **Python (>=3.10) and uv**: Ensure Python (3.10 or later) and `uv` are installed. Follow the [installation guide](https://docs.astral.sh/uv/getting-started/installation/) to install `uv`. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). + +## Setup steps + +Follow the steps below to set up the TiDB MCP Server in Claude Desktop: + +1. Open the **Settings** dialog. +2. Click the **Developers** tab in the dialog. +3. Click the **Edit Config** button to open the MCP config file `claude_desktop_config.json`. +4. Copy the following configuration into the `claude_desktop_config.json` file. + + ```json + { + "mcpServers": { + "TiDB": { + "command": "uvx --from pytidb[mcp] tidb-mcp-server", + "env": { + "TIDB_HOST": "localhost", + "TIDB_PORT": "4000", + "TIDB_USERNAME": "root", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "test" + } + } + } + } + ``` + +5. Go to the [TiDB Cloud cluster page](https://tidbcloud.com/console/clusters) and navigate to the cluster you want to connect to. +6. Click **Connect** in the upper-right corner to get the connection parameters, and replace the `TIDB_HOST`, `TIDB_PORT`, `TIDB_USERNAME`, `TIDB_PASSWORD`, and `TIDB_DATABASE` values with your own. +7. Restart Claude Desktop. + +For more details, see [how to configure the MCP server in Claude Desktop](https://modelcontextprotocol.io/quickstart/user). \ No newline at end of file diff --git a/ai/integrations/tidb-mcp-cursor.md b/ai/integrations/tidb-mcp-cursor.md new file mode 100644 index 0000000000000..ad27fe67ffb08 --- /dev/null +++ b/ai/integrations/tidb-mcp-cursor.md @@ -0,0 +1,66 @@ +--- +title: Get started with Cursor and TiDB MCP Server +summary: This guide shows you how to configure the TiDB MCP Server in the Cursor editor. 
+--- + +# Get Started with Cursor and TiDB MCP Server + +This guide shows how to configure the TiDB MCP Server in the Cursor editor. + +For one-click installation, click the following button: + +

Install TiDB MCP Server

+ +## Prerequisites + +Before you begin, ensure you have the following: + +- **Cursor**: Download and install Cursor from [cursor.com](https://cursor.com). +- **Python (>=3.10) and uv**: Ensure Python (3.10 or later) and `uv` are installed. Follow the [installation guide](https://docs.astral.sh/uv/getting-started/installation/) to install `uv`. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). + +## Connect to TiDB Cloud Starter (recommended) + +Use the TiDB Cloud console to create a Cursor configuration with your cluster credentials. + +1. Go to the [Clusters](https://tidbcloud.com/console/clusters) page, select your cluster, and then click **Use with AI Tools** in the upper-right corner. +2. In the **Access `your_cluster_name` with AI tools** dialog, select the **Branch** and **Database** that Cursor should access. +3. Review the **Prerequisites** list in the dialog and install any missing dependencies. +4. Configure the root password: + + - If you have not set a password yet, click **Generate Password** and store it in a secure location (it is shown only once). + - If a password already exists, enter it in the **Enter the password for easy setup** field. + - If you forget the password, click **Reset password** in the **Prerequisites** section to generate a new one. + +5. Select the **Cursor** tab, click **Add to Cursor**, and then click **Install** in Cursor. 
+ +## Manual configuration (any TiDB cluster) + +If you prefer manual setup, add the following configuration to your `.cursor/mcp.json` file and replace the placeholders with your connection parameters: + +```json +{ + "mcpServers": { + "TiDB": { + "command": "uvx --from pytidb[mcp] tidb-mcp-server", + "env": { + "TIDB_HOST": "", + "TIDB_PORT": "", + "TIDB_USERNAME": "", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "" + } + } + } +} +``` + +For more details, see the [Model Context Protocol documentation](https://docs.cursor.com/context/model-context-protocol#configuring-mcp-servers). + +## Troubleshooting + +If you encounter issues installing the TiDB MCP Server, check the MCP logs in Cursor. + +1. Click **View** > **Output** in the main menu at the top of the editor. +2. Select **MCP** from the dropdown menu in the **Output** panel. +3. If you see errors like `[error] Could not start MCP server tidb-mcp-server: Error: spawn uvx ENOENT`, it means the `uvx` command might not exist in your system `$PATH` environment variable. For macOS users, you can install `uvx` by running `brew install uv`. diff --git a/ai/integrations/tidb-mcp-server.md b/ai/integrations/tidb-mcp-server.md new file mode 100644 index 0000000000000..241d8b48aea7d --- /dev/null +++ b/ai/integrations/tidb-mcp-server.md @@ -0,0 +1,163 @@ +--- +title: TiDB MCP Server +summary: Manage your TiDB databases using natural language instructions with the TiDB MCP Server. +--- + +# TiDB MCP Server + +TiDB MCP Server is an open-source tool that lets you interact with TiDB databases using natural language instructions. + +## Understanding MCP and TiDB MCP Server + +The [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) is a protocol that standardizes communication between LLMs and external tools. 
+ +MCP adopts a client-server architecture, allowing a host application to connect to multiple external servers: + +- **Hosts**: AI-powered applications, such as Claude Desktop or IDEs like Cursor, that initiate connections to MCP servers. + +- **Clients**: Components embedded within host applications that establish one-to-one connections with individual MCP servers. + +- **Servers**: External services, such as the **TiDB MCP Server**, which provide tools, context, and prompts to clients for interacting with external systems. + +The **TiDB MCP Server** is an MCP-compatible server that provides tools and context for MCP clients to interact with TiDB databases. + +## Prerequisites + +Before you begin, ensure you have the following: + +- **An MCP-compatible client**: For example, [Cursor](/ai/integrations/tidb-mcp-cursor.md) or [Claude Desktop](/ai/integrations/tidb-mcp-claude-desktop.md). +- **Python (>=3.10) and uv**: Ensure Python (3.10 or later) and `uv` are installed. Follow the [installation guide](https://docs.astral.sh/uv/getting-started/installation/) to install `uv`. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). + +## Supported MCP Clients + +Refer to the following guides for detailed examples of using the TiDB MCP Server with specific MCP clients: + +- [Cursor](/ai/integrations/tidb-mcp-cursor.md) +- [Claude Desktop](/ai/integrations/tidb-mcp-claude-desktop.md) + +If the preceding list does not include your MCP client, follow the setup steps below. + +## Setup steps + +The TiDB MCP Server supports two modes to integrate with MCP clients: + +- Standard Input/Output (STDIO) mode (default) +- Server-Sent Events (SSE) mode + +TiDB MCP Server uses STDIO mode by default, so you do not need to start a standalone server in advance. + +You can choose one of the modes to set up the TiDB MCP Server in your MCP client. 
+ +### STDIO Mode + +To set up the TiDB MCP Server in your MCP client using STDIO mode, take the following steps: + +1. Refer to your MCP client documentation to learn how to configure your MCP server. + +2. Go to your [TiDB Cloud clusters](https://tidbcloud.com/console/clusters) page and navigate to the overview page of your cluster. + +3. Click **Connect** on the cluster overview page to get the connection parameters. + +4. Configure the TiDB MCP Server with your connection parameters in the `mcpServers` section of your AI application’s configuration file. + + Example MCP configuration file: + + ```json + { + "mcpServers": { + "TiDB": { + "command": "uvx --from pytidb[mcp] tidb-mcp-server", + "env": { + "TIDB_HOST": "localhost", + "TIDB_PORT": "4000", + "TIDB_USERNAME": "root", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "test" + } + } + } + } + ``` + +### Server-Sent Events (SSE) Mode + +To set up the TiDB MCP Server in your MCP client using SSE mode, take the following steps: + +1. Refer to your MCP client documentation to learn how to configure an MCP server. + +2. Go to your [TiDB Cloud clusters](https://tidbcloud.com/console/clusters) page and select your cluster. + +3. Click **Connect** on the cluster page to get the connection parameters. + +4. Create a `.env` file with your connection parameters. + + Example `.env` file: + + ```bash + cat > .env <=3.10) and uv**: Ensure Python (3.10 or later) and `uv` are installed. Follow the [installation guide](https://docs.astral.sh/uv/getting-started/installation/) to install `uv`. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). + +## Connect to TiDB Cloud Starter (recommended) + +Use the TiDB Cloud console to generate a VS Code configuration. + +1. Go to the [Clusters](https://tidbcloud.com/console/clusters) page, select your cluster, and then click **Use with AI Tools** in the upper-right corner. +2. 
In the **Access `your_cluster_name` with AI tools** dialog, select the **Branch** and **Database** that VS Code should access. +3. Review the **Prerequisites** list in the dialog and install any missing dependencies. +4. Configure the root password: + + - If you have not set a password yet, click **Generate Password** and store it in a secure location (it is shown only once). + - If a password already exists, enter it in the **Enter the password for easy setup** field. + - If you forget the password, click **Reset password** in the **Prerequisites** section to generate a new one. + +5. Select the **VS Code** tab, click **Add to VS Code**, and then click **Install** in VS Code. + +## Manual configuration (any TiDB cluster) + +If you prefer manual setup, add the following configuration to your `.vscode/mcp.json` file and replace the placeholders with your connection parameters: + +```json +{ + "mcpServers": { + "TiDB": { + "type": "stdio", + "command": "uvx", + "args": ["--from", "pytidb[mcp]", "tidb-mcp-server"], + "env": { + "TIDB_HOST": "", + "TIDB_PORT": "", + "TIDB_USERNAME": "", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "" + } + } + } +} +``` + +## See also + +- [TiDB MCP Server](/ai/integrations/tidb-mcp-server.md) diff --git a/ai/integrations/tidb-mcp-windsurf.md b/ai/integrations/tidb-mcp-windsurf.md new file mode 100644 index 0000000000000..d7eeaa8e713a3 --- /dev/null +++ b/ai/integrations/tidb-mcp-windsurf.md @@ -0,0 +1,58 @@ +--- +title: Get started with Windsurf and TiDB MCP Server +summary: This guide shows you how to configure the TiDB MCP Server in Windsurf. +--- + +# Get Started with Windsurf and TiDB MCP Server + +This guide shows how to configure the TiDB MCP Server in Windsurf. + +## Prerequisites + +Before you begin, ensure you have the following: + +- **Windsurf**: Download and install Windsurf from [windsurf.com](https://windsurf.com). +- **Python (>=3.10) and uv**: Ensure Python (3.10 or later) and `uv` are installed. 
Follow the [installation guide](https://docs.astral.sh/uv/getting-started/installation/) to install `uv`. +- **A TiDB Cloud Starter cluster**: You can create a free TiDB cluster on [TiDB Cloud](https://tidbcloud.com/free-trial). + +## Connect to TiDB Cloud Starter (recommended) + +Use the TiDB Cloud console to gather the connection details, then update Windsurf's MCP configuration. + +1. Go to the [Clusters](https://tidbcloud.com/console/clusters) page, select your cluster, and then click **Use with AI Tools** in the upper-right corner. +2. In the **Access `your_cluster_name` with AI tools** dialog, select the **Branch** and **Database** that Windsurf should access. +3. Review the **Prerequisites** list in the dialog and install any missing dependencies. +4. Configure the root password: + + - If you have not set a password yet, click **Generate Password** and store it in a secure location (it is shown only once). + - If a password already exists, enter it in the **Enter the password for easy setup** field. + - If you forget the password, click **Reset password** in the **Prerequisites** section to generate a new one. + +5. Select the **Windsurf** tab and copy the provided connection values. +6. Update your `mcp_config.json` file using the copied values. For more information, see the [Windsurf MCP documentation](https://docs.windsurf.com/windsurf/cascade/mcp#adding-a-new-mcp-plugin). 
+ +## Manual configuration (any TiDB cluster) + +If you prefer manual setup, update your `mcp_config.json` file as follows and replace the placeholders with your connection parameters: + +```json +{ + "mcpServers": { + "TiDB": { + "command": "uvx", + "args": ["--from", "pytidb[mcp]", "tidb-mcp-server"], + "env": { + "TIDB_HOST": "", + "TIDB_PORT": "", + "TIDB_USERNAME": "", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "" + } + } + } +} +``` + +## See also + +- [TiDB MCP Server](/ai/integrations/tidb-mcp-server.md) diff --git a/ai/integrations/vector-search-auto-embedding-amazon-titan.md b/ai/integrations/vector-search-auto-embedding-amazon-titan.md new file mode 100644 index 0000000000000..5c002dbe8960f --- /dev/null +++ b/ai/integrations/vector-search-auto-embedding-amazon-titan.md @@ -0,0 +1,135 @@ +--- +title: Amazon Titan Embeddings +summary: Learn how to use Amazon Titan embedding models in TiDB Cloud. +aliases: ['/tidbcloud/vector-search-auto-embedding-amazon-titan/'] +--- + +# Amazon Titan Embeddings + +This document describes how to use Amazon Titan embedding models with [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) in TiDB Cloud to perform semantic searches with text queries. + +> **Note:** +> +> [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) is only available on {{{ .starter }}} clusters hosted on AWS. + +## Available models + +TiDB Cloud provides the following [Amazon Titan embedding model](https://docs.aws.amazon.com/bedrock/latest/userguide/titan-embedding-models.html) natively. No API key is required. 
+ +**Amazon Titan Text Embedding V2 model** + +- Name: `tidbcloud_free/amazon/titan-embed-text-v2` +- Dimensions: 1024 (default), 512, 256 +- Distance metric: Cosine, L2 +- Languages: English (100+ languages in preview) +- Typical use cases: RAG, document search, reranking, and classification +- Maximum input text tokens: 8,192 +- Maximum input text characters: 50,000 +- Price: Free +- Hosted by TiDB Cloud: ✅ +- Bring Your Own Key: ❌ + +For more information about this model, see [Amazon Bedrock documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/titan-embedding-models.html). + +## SQL usage example + +The following example shows how to use the Amazon Titan embedding model with Auto Embedding. + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(1024) GENERATED ALWAYS AS (EMBED_TEXT( + "tidbcloud_free/amazon/titan-embed-text-v2", + `content` + )) STORED +); + + +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to start learning Java programming?" + ) +LIMIT 2; +``` + +Result: + +``` ++------+----------------------------------------------------------------+ +| id | content | ++------+----------------------------------------------------------------+ +| 1 | Java: Object-oriented language for cross-platform development. | +| 4 | Java's syntax is used in Android apps. 
| ++------+----------------------------------------------------------------+ +``` + +## Options + +You can specify the following options via the `additional_json_options` parameter of the `EMBED_TEXT()` function: + +- `normalize` (optional): whether to normalize the output embedding. Defaults to `true`. +- `dimensions` (optional): the number of dimensions of the output embedding. Supported values: `1024` (default), `512`, and `256`. + +**Example: Use an alternative dimension** + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(512) GENERATED ALWAYS AS (EMBED_TEXT( + "tidbcloud_free/amazon/titan-embed-text-v2", + `content`, + '{"dimensions": 512}' + )) STORED +); + + +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to start learning Java programming?" + ) +LIMIT 2; +``` + +Result: + +``` ++------+----------------------------------------------------------------+ +| id | content | ++------+----------------------------------------------------------------+ +| 1 | Java: Object-oriented language for cross-platform development. | +| 4 | Java's syntax is used in Android apps. 
| ++------+----------------------------------------------------------------+ +``` + +## See also + +- [Auto Embedding Overview](/ai/integrations/vector-search-auto-embedding-overview.md) +- [Vector Search](/ai/concepts/vector-search-overview.md) +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) diff --git a/ai/integrations/vector-search-auto-embedding-cohere.md b/ai/integrations/vector-search-auto-embedding-cohere.md new file mode 100644 index 0000000000000..e099fb5c5be5e --- /dev/null +++ b/ai/integrations/vector-search-auto-embedding-cohere.md @@ -0,0 +1,341 @@ +--- +title: Cohere Embeddings +summary: Learn how to use Cohere embedding models in TiDB Cloud. +aliases: ['/tidbcloud/vector-search-auto-embedding-cohere/'] +--- + +# Cohere Embeddings + +This document describes how to use Cohere embedding models with [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) in TiDB Cloud to perform semantic searches with text queries. + +> **Note:** +> +> [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) is only available on {{{ .starter }}} clusters hosted on AWS. + +## Available models + +TiDB Cloud provides the following [Cohere](https://cohere.com/) embedding models natively. No API key is required. 
+ +**Cohere Embed v3 model** + +- Name: `tidbcloud_free/cohere/embed-english-v3` +- Dimensions: 1024 +- Distance metric: Cosine, L2 +- Languages: English +- Maximum input text tokens: 512 (about 4 characters per token) +- Maximum input text characters: 2,048 +- Price: Free +- Hosted by TiDB Cloud: ✅ `tidbcloud_free/cohere/embed-english-v3` +- Bring Your Own Key: ✅ `cohere/embed-english-v3.0` + +**Cohere Multilingual Embed v3 model** + +- Name: `tidbcloud_free/cohere/embed-multilingual-v3` +- Dimensions: 1024 +- Distance metric: Cosine, L2 +- Languages: 100+ languages +- Maximum input text tokens: 512 (about 4 characters per token) +- Maximum input text characters: 2,048 +- Price: Free +- Hosted by TiDB Cloud: ✅ `tidbcloud_free/cohere/embed-multilingual-v3` +- Bring Your Own Key: ✅ `cohere/embed-multilingual-v3.0` + +Alternatively, all Cohere models are available for use with the `cohere/` prefix if you bring your own Cohere API key (BYOK). For example: + +**Cohere Embed v4 model** + +- Name: `cohere/embed-v4.0` +- Dimensions: 256, 512, 1024, 1536 (default) +- Distance metric: Cosine, L2 +- Maximum input text tokens: 128,000 +- Price: Charged by Cohere +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ + +For a full list of Cohere models, see [Cohere Documentation](https://docs.cohere.com/docs/cohere-embed). + +## SQL usage example (TiDB Cloud hosted) + +The following example shows how to use a Cohere embedding model hosted by TiDB Cloud with Auto Embedding. + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(1024) GENERATED ALWAYS AS (EMBED_TEXT( + "tidbcloud_free/cohere/embed-multilingual-v3", + `content`, + '{"input_type": "search_document", "input_type@search": "search_query"}' + )) STORED +); +``` + +> **Note:** +> +> - For the Cohere embedding model, you must specify `input_type` in the `EMBED_TEXT()` function when defining the table. 
For example, `'{"input_type": "search_document", "input_type@search": "search_query"}'` means that `input_type` is set to `search_document` for data insertion and `search_query` is automatically applied during vector searches. +> - The `@search` suffix indicates that the field takes effect only during vector search queries, so you do not need to specify `input_type` again when writing a query. + +Insert and query data: + +```sql +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to start learning Java programming?" + ) +LIMIT 2; +``` + +Result: + +``` ++------+----------------------------------------------------------------+ +| id | content | ++------+----------------------------------------------------------------+ +| 1 | Java: Object-oriented language for cross-platform development. | +| 4 | Java's syntax is used in Android apps. | ++------+----------------------------------------------------------------+ +``` + +## Options (TiDB Cloud hosted) + +Both the **Embed v3** and **Multilingual Embed v3** models support the following options, which you can specify via the `additional_json_options` parameter of the `EMBED_TEXT()` function. + +- `input_type` (required): prepends special tokens to indicate the purpose of the embedding. You must use the same input type consistently when generating embeddings for the same task, otherwise embeddings will be mapped to different semantic spaces and become incompatible. The only exception is semantic search, where documents are embedded with `search_document` and queries are embedded with `search_query`. 
+ + - `search_document`: generates embeddings from documents to store in a vector database. + - `search_query`: generates embeddings from queries to search against stored embeddings in a vector database. + - `classification`: generates embeddings to be used as input for a text classifier. + - `clustering`: generates embeddings for clustering tasks. + +- `truncate` (optional): controls how the API handles inputs longer than the maximum token length. You can specify one of the following values: + + - `NONE` (default): returns an error when the input exceeds the maximum input token length. + - `START`: discards text from the beginning until the input fits. + - `END`: discards text from the end until the input fits. + +## Usage example (BYOK) + +This example shows how to create a vector table, insert documents, and run similarity search using Bring Your Own Key (BYOK) Cohere models. + +### Step 1: Connect to the database + + +
+ +```python +from pytidb import TiDBClient + +tidb_client = TiDBClient.connect( + host="{gateway-region}.prod.aws.tidbcloud.com", + port=4000, + username="{prefix}.root", + password="{password}", + database="{database}", + ensure_db=True, +) +``` + +
+
+ +```bash +mysql -h {gateway-region}.prod.aws.tidbcloud.com \ + -P 4000 \ + -u {prefix}.root \ + -p{password} \ + -D {database} +``` + +
+
+ +### Step 2: Configure the API key + +Create your API key from the [Cohere Dashboard](https://dashboard.cohere.com/api-keys) and bring your own key (BYOK) to use the embedding service. + + +
+ +Configure the API key for the Cohere embedding provider using the TiDB Client: + +```python +tidb_client.configure_embedding_provider( + provider="cohere", + api_key="{your-cohere-api-key}", +) +``` + +
+
+ +Set the API key for the Cohere embedding provider using SQL: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_COHERE_API_KEY = "{your-cohere-api-key}"; +``` + +
+
+ +### Step 3: Create a vector table + +Create a table with a vector field that uses the `cohere/embed-v4.0` model to generate 1536-dimensional vectors (default dimension): + + +
+ +```python +from pytidb.schema import TableModel, Field +from pytidb.embeddings import EmbeddingFunction +from pytidb.datatype import TEXT + +class Document(TableModel): + __tablename__ = "sample_documents" + id: int = Field(primary_key=True) + content: str = Field(sa_type=TEXT) + embedding: list[float] = EmbeddingFunction( + model_name="cohere/embed-v4.0" + ).VectorField(source_field="content") + +table = tidb_client.create_table(schema=Document, if_exists="overwrite") +``` + +
+
+ +```sql +CREATE TABLE sample_documents ( + `id` INT PRIMARY KEY, + `content` TEXT, + `embedding` VECTOR(1536) GENERATED ALWAYS AS (EMBED_TEXT( + "cohere/embed-v4.0", + `content` + )) STORED +); +``` + +
+
+ +### Step 4: Insert data into the table + + +
+ +Use the `table.insert()` or `table.bulk_insert()` API to add data: + +```python +documents = [ + Document(id=1, content="Python: High-level programming language for data science and web development."), + Document(id=2, content="Python snake: Non-venomous constrictor found in tropical regions."), + Document(id=3, content="Python framework: Django and Flask are popular web frameworks."), + Document(id=4, content="Python libraries: NumPy and Pandas for data analysis."), + Document(id=5, content="Python ecosystem: Rich collection of packages and tools."), +] +table.bulk_insert(documents) +``` + +
+
+ +Insert data using the `INSERT INTO` statement: + +```sql +INSERT INTO sample_documents (id, content) +VALUES + (1, "Python: High-level programming language for data science and web development."), + (2, "Python snake: Non-venomous constrictor found in tropical regions."), + (3, "Python framework: Django and Flask are popular web frameworks."), + (4, "Python libraries: NumPy and Pandas for data analysis."), + (5, "Python ecosystem: Rich collection of packages and tools."); +``` + +
+
+ +### Step 5: Search for similar documents + + +
+ +Use the `table.search()` API to perform vector search: + +```python +results = table.search("How to learn Python programming?") \ + .limit(2) \ + .to_list() +print(results) +``` + +
+
+ +Use the `VEC_EMBED_COSINE_DISTANCE` function to perform vector search based on cosine distance metric: + +```sql +SELECT + `id`, + `content`, + VEC_EMBED_COSINE_DISTANCE(embedding, "How to learn Python programming?") AS _distance +FROM sample_documents +ORDER BY _distance ASC +LIMIT 2; +``` + +
+
+ +## Options (BYOK) + +All [Cohere embedding options](https://docs.cohere.com/v2/reference/embed) are supported via the `additional_json_options` parameter of the `EMBED_TEXT()` function. + +**Example: Specify different `input_type` for search and insert operations** + +Use the `@search` suffix to indicate that the field takes effect only during vector search queries. + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(1024) GENERATED ALWAYS AS (EMBED_TEXT( + "cohere/embed-v4.0", + `content`, + '{"input_type": "search_document", "input_type@search": "search_query"}' + )) STORED +); +``` + +**Example: Use an alternative dimension** + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(512) GENERATED ALWAYS AS (EMBED_TEXT( + "cohere/embed-v4.0", + `content`, + '{"output_dimension": 512}' + )) STORED +); +``` + +For all available options, see [Cohere Documentation](https://docs.cohere.com/v2/reference/embed). + +## See also + +- [Auto Embedding Overview](/ai/integrations/vector-search-auto-embedding-overview.md) +- [Vector Search](/ai/concepts/vector-search-overview.md) +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) diff --git a/ai/integrations/vector-search-auto-embedding-gemini.md b/ai/integrations/vector-search-auto-embedding-gemini.md new file mode 100644 index 0000000000000..bd451c18164f1 --- /dev/null +++ b/ai/integrations/vector-search-auto-embedding-gemini.md @@ -0,0 +1,287 @@ +--- +title: Gemini Embeddings +summary: Learn how to use Google Gemini embedding models in TiDB Cloud. +aliases: ['/tidbcloud/vector-search-auto-embedding-gemini/'] +--- + +# Gemini Embeddings + +This document describes how to use Gemini embedding models with [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) in TiDB Cloud to perform semantic searches with text queries. 
+ +> **Note:** +> +> [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) is only available on {{{ .starter }}} clusters hosted on AWS. + +## Available models + +All Gemini models are available for use with the `gemini/` prefix if you bring your own Gemini API key (BYOK). For example: + +**gemini-embedding-001** + +- Name: `gemini/gemini-embedding-001` +- Dimensions: 128–3072 (default: 3072) +- Distance metric: Cosine, L2 +- Maximum input text tokens: 2,048 +- Price: Charged by Google +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ + +For a full list of available models, see [Gemini documentation](https://ai.google.dev/gemini-api/docs/embeddings). + +## Usage example + +This example shows how to create a vector table, insert documents, and run similarity search using Google Gemini embedding models. + +### Step 1: Connect to the database + + +
+ +```python +from pytidb import TiDBClient + +tidb_client = TiDBClient.connect( + host="{gateway-region}.prod.aws.tidbcloud.com", + port=4000, + username="{prefix}.root", + password="{password}", + database="{database}", + ensure_db=True, +) +``` + +
+
+ +```bash +mysql -h {gateway-region}.prod.aws.tidbcloud.com \ + -P 4000 \ + -u {prefix}.root \ + -p{password} \ + -D {database} +``` + +
+
+ +### Step 2: Configure the API key + +Create your API key from [Google AI Studio](https://aistudio.google.com/app/apikey) and bring your own key (BYOK) to use the embedding service. + + +
+ +Configure the API key for the Google Gemini embedding provider using the TiDB Client: + +```python +tidb_client.configure_embedding_provider( + provider="google_gemini", + api_key="{your-google-api-key}", +) +``` + +
+
+ +Set the API key for the Google Gemini embedding provider using SQL: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_GEMINI_API_KEY = "{your-google-api-key}"; +``` + +
+
+ +### Step 3: Create a vector table + +Create a table with a vector field that uses the `gemini-embedding-001` model to generate 3072-dimensional vectors (default): + + +
+ +```python +from pytidb.schema import TableModel, Field +from pytidb.embeddings import EmbeddingFunction +from pytidb.datatype import TEXT + +class Document(TableModel): + __tablename__ = "sample_documents" + id: int = Field(primary_key=True) + content: str = Field(sa_type=TEXT) + embedding: list[float] = EmbeddingFunction( + model_name="gemini/gemini-embedding-001" + ).VectorField(source_field="content") + +table = tidb_client.create_table(schema=Document, if_exists="overwrite") +``` + +
+
+ +```sql +CREATE TABLE sample_documents ( + `id` INT PRIMARY KEY, + `content` TEXT, + `embedding` VECTOR(3072) GENERATED ALWAYS AS (EMBED_TEXT( + "gemini/gemini-embedding-001", + `content` + )) STORED +); +``` + +
+
+ +### Step 4: Insert data into the table + + +
+ +Use the `table.insert()` or `table.bulk_insert()` API to add data: + +```python +documents = [ + Document(id=1, content="Java: Object-oriented language for cross-platform development."), + Document(id=2, content="Java coffee: Bold Indonesian beans with low acidity."), + Document(id=3, content="Java island: Densely populated, home to Jakarta."), + Document(id=4, content="Java's syntax is used in Android apps."), + Document(id=5, content="Dark roast Java beans enhance espresso blends."), +] +table.bulk_insert(documents) +``` + +
+
+ +Insert data using the `INSERT INTO` statement: + +```sql +INSERT INTO sample_documents (id, content) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); +``` + +
+
+ +### Step 5: Search for similar documents + + +
+ +Use the `table.search()` API to perform vector search: + +```python +results = table.search("How to start learning Java programming?") \ + .limit(2) \ + .to_list() +print(results) +``` + +
+
+ +Use the `VEC_EMBED_COSINE_DISTANCE` function to perform vector search based on cosine distance: + +```sql +SELECT + `id`, + `content`, + VEC_EMBED_COSINE_DISTANCE(embedding, "How to start learning Java programming?") AS _distance +FROM sample_documents +ORDER BY _distance ASC +LIMIT 2; +``` + +
+
+ +## Custom embedding dimensions + +The `gemini-embedding-001` model supports flexible dimensions through Matryoshka Representation Learning (MRL). You can specify the desired dimensions in your embedding function: + + +
+ +```python +# For 1536 dimensions +embedding: list[float] = EmbeddingFunction( + model_name="gemini/gemini-embedding-001", + dimensions=1536 +).VectorField(source_field="content") + +# For 768 dimensions +embedding: list[float] = EmbeddingFunction( + model_name="gemini/gemini-embedding-001", + dimensions=768 +).VectorField(source_field="content") +``` + +
+
+ +```sql +-- For 1536 dimensions +`embedding` VECTOR(1536) GENERATED ALWAYS AS (EMBED_TEXT( + "gemini/gemini-embedding-001", + `content`, + '{"embedding_config": {"output_dimensionality": 1536}}' +)) STORED + +-- For 768 dimensions +`embedding` VECTOR(768) GENERATED ALWAYS AS (EMBED_TEXT( + "gemini/gemini-embedding-001", + `content`, + '{"embedding_config": {"output_dimensionality": 768}}' +)) STORED +``` + +
+
+ +Choose dimensions based on your performance requirements and storage constraints. Higher dimensions can improve accuracy but require more storage and compute resources. + +## Options + +All [Gemini options](https://ai.google.dev/gemini-api/docs/embeddings) are supported via the `additional_json_options` parameter of the `EMBED_TEXT()` function. + +**Example: Specify the task type to improve quality** + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(1024) GENERATED ALWAYS AS (EMBED_TEXT( + "gemini/gemini-embedding-001", + `content`, + '{"task_type": "SEMANTIC_SIMILARITY"}' + )) STORED +); +``` + +**Example: Use an alternative dimension** + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(768) GENERATED ALWAYS AS (EMBED_TEXT( + "gemini/gemini-embedding-001", + `content`, + '{"output_dimensionality": 768}' + )) STORED +); +``` + +For all available options, see [Gemini documentation](https://ai.google.dev/gemini-api/docs/embeddings). + +## See also + +- [Auto Embedding Overview](/ai/integrations/vector-search-auto-embedding-overview.md) +- [Vector Search](/ai/concepts/vector-search-overview.md) +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) diff --git a/ai/integrations/vector-search-auto-embedding-huggingface.md b/ai/integrations/vector-search-auto-embedding-huggingface.md new file mode 100644 index 0000000000000..3853c54c73522 --- /dev/null +++ b/ai/integrations/vector-search-auto-embedding-huggingface.md @@ -0,0 +1,329 @@ +--- +title: Hugging Face Embeddings +summary: Learn how to use Hugging Face embedding models in TiDB Cloud. 
+aliases: ['/tidbcloud/vector-search-auto-embedding-huggingface/'] +--- + +# Hugging Face Embeddings + +This document describes how to use Hugging Face embedding models with [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) in TiDB Cloud to perform semantic searches with text queries. + +> **Note:** +> +> [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) is only available on {{{ .starter }}} clusters hosted on AWS. + +## Available models + +Hugging Face models are available for use with the `huggingface/` prefix if you bring your own [Hugging Face Inference API](https://huggingface.co/docs/inference-providers/index) key (BYOK). + +For your convenience, the following sections use several popular models as examples. For a full list of available models, see [Hugging Face models](https://huggingface.co/models?library=sentence-transformers&inference_provider=hf-inference&sort=trending). Note that not all models are available through the Hugging Face Inference API, and some might not work reliably.
+ +## multilingual-e5-large + +- Name: `huggingface/intfloat/multilingual-e5-large` +- Dimensions: 1024 +- Distance metric: Cosine, L2 +- Price: Charged by Hugging Face +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ +- Project home: + +Example: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_HUGGINGFACE_API_KEY = 'your-huggingface-api-key-here'; + +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(1024) GENERATED ALWAYS AS (EMBED_TEXT( + "huggingface/intfloat/multilingual-e5-large", + `content` + )) STORED +); + + +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to start learning Java programming?" 
+ ) +LIMIT 2; +``` + +## bge-m3 + +- Name: `huggingface/BAAI/bge-m3` +- Dimensions: 1024 +- Distance metric: Cosine, L2 +- Price: Charged by Hugging Face +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ +- Project home: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_HUGGINGFACE_API_KEY = 'your-huggingface-api-key-here'; + +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(1024) GENERATED ALWAYS AS (EMBED_TEXT( + "huggingface/BAAI/bge-m3", + `content` + )) STORED +); + + +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to start learning Java programming?" + ) +LIMIT 2; +``` + +## all-MiniLM-L6-v2 + +- Name: `huggingface/sentence-transformers/all-MiniLM-L6-v2` +- Dimensions: 384 +- Distance metric: Cosine, L2 +- Price: Charged by Hugging Face +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ +- Project home: + +Example: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_HUGGINGFACE_API_KEY = 'your-huggingface-api-key-here'; + +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(384) GENERATED ALWAYS AS (EMBED_TEXT( + "huggingface/sentence-transformers/all-MiniLM-L6-v2", + `content` + )) STORED +); + + +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to 
start learning Java programming?" + ) +LIMIT 2; +``` + +## all-mpnet-base-v2 + +- Name: `huggingface/sentence-transformers/all-mpnet-base-v2` +- Dimensions: 768 +- Distance metric: Cosine, L2 +- Price: Charged by Hugging Face +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ +- Project home: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_HUGGINGFACE_API_KEY = 'your-huggingface-api-key-here'; + +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(768) GENERATED ALWAYS AS (EMBED_TEXT( + "huggingface/sentence-transformers/all-mpnet-base-v2", + `content` + )) STORED +); + + +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to start learning Java programming?" + ) +LIMIT 2; +``` + +## Qwen3-Embedding-0.6B + +> **Note:** +> +> Hugging Face Inference API might be unstable for this model. 
+ +- Name: `huggingface/Qwen/Qwen3-Embedding-0.6B` +- Dimensions: 1024 +- Distance metric: Cosine, L2 +- Maximum input text tokens: 512 +- Price: Charged by Hugging Face +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ +- Project home: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_HUGGINGFACE_API_KEY = 'your-huggingface-api-key-here'; + +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(1024) GENERATED ALWAYS AS (EMBED_TEXT( + "huggingface/Qwen/Qwen3-Embedding-0.6B", + `content` + )) STORED +); + + +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to start learning Java programming?" + ) +LIMIT 2; +``` + +## Python usage example + +This example shows how to create a vector table, insert documents, and run similarity search using Hugging Face embedding models. + +### Step 1: Connect to the database + +```python +from pytidb import TiDBClient + +tidb_client = TiDBClient.connect( + host="{gateway-region}.prod.aws.tidbcloud.com", + port=4000, + username="{prefix}.root", + password="{password}", + database="{database}", + ensure_db=True, +) +``` + +### Step 2: Configure the API key + +If you're using a private model or need higher rate limits, you can configure your Hugging Face API token. 
You can create your token from the [Hugging Face Token Settings](https://huggingface.co/settings/tokens) page: + +Configure the API token for Hugging Face models using the TiDB Client: + +```python +tidb_client.configure_embedding_provider( + provider="huggingface", + api_key="{your-huggingface-token}", +) +``` + +### Step 3: Create a vector table + +Create a table with a vector field that uses a Hugging Face model to generate embeddings: + +```python +from pytidb.schema import TableModel, Field +from pytidb.embeddings import EmbeddingFunction +from pytidb.datatype import TEXT + +class Document(TableModel): + __tablename__ = "sample_documents" + id: int = Field(primary_key=True) + content: str = Field(sa_type=TEXT) + embedding: list[float] = EmbeddingFunction( + model_name="huggingface/sentence-transformers/all-MiniLM-L6-v2" + ).VectorField(source_field="content") + +table = tidb_client.create_table(schema=Document, if_exists="overwrite") +``` + +> **Tip:** +> +> The vector dimensions depend on the model you choose. For example, `huggingface/sentence-transformers/all-MiniLM-L6-v2` produces 384-dimensional vectors, while `huggingface/sentence-transformers/all-mpnet-base-v2` produces 768-dimensional vectors. 
+ +### Step 4: Insert data into the table + +Use the `table.insert()` or `table.bulk_insert()` API to add data: + +```python +documents = [ + Document(id=1, content="Machine learning algorithms can identify patterns in data."), + Document(id=2, content="Deep learning uses neural networks with multiple layers."), + Document(id=3, content="Natural language processing helps computers understand text."), + Document(id=4, content="Computer vision enables machines to interpret images."), + Document(id=5, content="Reinforcement learning learns through trial and error."), +] +table.bulk_insert(documents) +``` + +### Step 5: Search for similar documents + +Use the `table.search()` API to perform vector search: + +```python +results = table.search("How do neural networks work?") \ + .limit(3) \ + .to_list() + +for doc in results: + print(f"ID: {doc.id}, Content: {doc.content}") +``` + +## See also + +- [Auto Embedding Overview](/ai/integrations/vector-search-auto-embedding-overview.md) +- [Vector Search](/ai/concepts/vector-search-overview.md) +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) diff --git a/ai/integrations/vector-search-auto-embedding-jina-ai.md b/ai/integrations/vector-search-auto-embedding-jina-ai.md new file mode 100644 index 0000000000000..c6a3602bf9faf --- /dev/null +++ b/ai/integrations/vector-search-auto-embedding-jina-ai.md @@ -0,0 +1,265 @@ +--- +title: Jina AI Embeddings +summary: Learn how to use Jina AI embedding models in TiDB Cloud. +aliases: ['/tidbcloud/vector-search-auto-embedding-jina-ai/'] +--- + +# Jina AI Embeddings + +This document describes how to use [Jina AI embedding models](https://jina.ai/embeddings/) with [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) in TiDB Cloud to perform semantic searches with text queries. 
+ +> **Note:** +> +> [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) is only available on {{{ .starter }}} clusters hosted on AWS. + +## Available models + +Jina AI provides high-performance, multimodal, and multilingual long-context embeddings for search, RAG, and agent applications. + +All Jina AI models are available for use with the `jina_ai/` prefix if you bring your own Jina AI API key (BYOK). For example: + +**jina-embeddings-v4** + +- Name: `jina_ai/jina-embeddings-v4` +- Dimensions: 2048 +- Distance metric: Cosine, L2 +- Maximum input text tokens: 32,768 +- Price: Charged by Jina AI +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ + +**jina-embeddings-v3** + +- Name: `jina_ai/jina-embeddings-v3` +- Dimensions: 1024 +- Distance metric: Cosine, L2 +- Maximum input text tokens: 8,192 +- Price: Charged by Jina AI +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ + +For a full list of available models, see [Jina AI Documentation](https://jina.ai/embeddings/). + +## Usage example + +This example shows how to create a vector table, insert documents, and run a similarity search using Jina AI embedding models. + +### Step 1: Connect to the database + + +
+ +```python +from pytidb import TiDBClient + +tidb_client = TiDBClient.connect( + host="{gateway-region}.prod.aws.tidbcloud.com", + port=4000, + username="{prefix}.root", + password="{password}", + database="{database}", + ensure_db=True, +) +``` + +
+
+ +```bash +mysql -h {gateway-region}.prod.aws.tidbcloud.com \ + -P 4000 \ + -u {prefix}.root \ + -p{password} \ + -D {database} +``` + +
+
+ +### Step 2: Configure the API key + +Create your API key from the [Jina AI Platform](https://jina.ai/embeddings/) and bring your own key (BYOK) to use the embedding service. + + +
+ +Configure the API key for the Jina AI embedding provider using the TiDB Client: + +```python +tidb_client.configure_embedding_provider( + provider="jina_ai", + api_key="{your-jina-api-key}", +) +``` + +
+
+ +Set the API key for the Jina AI embedding provider using SQL: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_JINA_AI_API_KEY = "{your-jina-api-key}"; +``` + +
+
+ +### Step 3: Create a vector table + +Create a table with a vector field that uses the `jina_ai/jina-embeddings-v4` model to generate 2048-dimensional vectors: + + +
+ +```python +from pytidb.schema import TableModel, Field +from pytidb.embeddings import EmbeddingFunction +from pytidb.datatype import TEXT + +class Document(TableModel): + __tablename__ = "sample_documents" + id: int = Field(primary_key=True) + content: str = Field(sa_type=TEXT) + embedding: list[float] = EmbeddingFunction( + model_name="jina_ai/jina-embeddings-v4" + ).VectorField(source_field="content") + +table = tidb_client.create_table(schema=Document, if_exists="overwrite") +``` + +
+
+ +```sql +CREATE TABLE sample_documents ( + `id` INT PRIMARY KEY, + `content` TEXT, + `embedding` VECTOR(2048) GENERATED ALWAYS AS (EMBED_TEXT( + "jina_ai/jina-embeddings-v4", + `content` + )) STORED +); +``` + +
+
+ +### Step 4: Insert data into the table + + +
+ +Use the `table.insert()` or `table.bulk_insert()` API to add data: + +```python +documents = [ + Document(id=1, content="Java: Object-oriented language for cross-platform development."), + Document(id=2, content="Java coffee: Bold Indonesian beans with low acidity."), + Document(id=3, content="Java island: Densely populated, home to Jakarta."), + Document(id=4, content="Java's syntax is used in Android apps."), + Document(id=5, content="Dark roast Java beans enhance espresso blends."), +] +table.bulk_insert(documents) +``` + +
+
+ +Insert data using the `INSERT INTO` statement: + +```sql +INSERT INTO sample_documents (id, content) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); +``` + +
+
+ +### Step 5: Search for similar documents + + +
+ +Use the `table.search()` API to perform vector search: + +```python +results = table.search("How to start learning Java programming?") \ + .limit(2) \ + .to_list() +print(results) +``` + +
+
+ +Use the `VEC_EMBED_COSINE_DISTANCE` function to perform vector search based on cosine distance metric: + +```sql +SELECT + `id`, + `content`, + VEC_EMBED_COSINE_DISTANCE(embedding, "How to start learning Java programming?") AS _distance +FROM sample_documents +ORDER BY _distance ASC +LIMIT 2; +``` + +Result: + +``` ++------+----------------------------------------------------------------+ +| id | content | ++------+----------------------------------------------------------------+ +| 1 | Java: Object-oriented language for cross-platform development. | +| 4 | Java's syntax is used in Android apps. | ++------+----------------------------------------------------------------+ +``` + +
+
+ +## Options + +All [Jina AI options](https://jina.ai/embeddings/) are supported via the `additional_json_options` parameter of the `EMBED_TEXT()` function. + +**Example: Specify "downstream task" for better performance** + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(2048) GENERATED ALWAYS AS (EMBED_TEXT( + "jina_ai/jina-embeddings-v4", + `content`, + '{"task": "retrieval.passage", "task@search": "retrieval.query"}' + )) STORED +); +``` + +**Example: Use an alternative dimension** + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(768) GENERATED ALWAYS AS (EMBED_TEXT( + "jina_ai/jina-embeddings-v3", + `content`, + '{"dimensions":768}' + )) STORED +); +``` + +For all available options, see [Jina AI Documentation](https://jina.ai/embeddings/). + +## See also + +- [Auto Embedding Overview](/ai/integrations/vector-search-auto-embedding-overview.md) +- [Vector Search](/ai/concepts/vector-search-overview.md) +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) diff --git a/ai/integrations/vector-search-auto-embedding-nvidia-nim.md b/ai/integrations/vector-search-auto-embedding-nvidia-nim.md new file mode 100644 index 0000000000000..364bf76bbc7a2 --- /dev/null +++ b/ai/integrations/vector-search-auto-embedding-nvidia-nim.md @@ -0,0 +1,255 @@ +--- +title: NVIDIA NIM Embeddings +summary: Learn how to use NVIDIA NIM embedding models in TiDB Cloud. +aliases: ['/tidbcloud/vector-search-auto-embedding-nvidia-nim/'] +--- + +# NVIDIA NIM Embeddings + +This document describes how to use NVIDIA NIM embedding models with [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) in TiDB Cloud to perform semantic searches with text queries. + +> **Note:** +> +> [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) is only available on {{{ .starter }}} clusters hosted on AWS. 
+ +## Available models + +Embedding models hosted on NVIDIA NIM are available for use with the `nvidia_nim/` prefix if you bring your own [NVIDIA NIM API key](https://build.nvidia.com/settings/api-keys) (BYOK). + +For your convenience, the following section takes a popular model as an example to show how to use it with Auto Embedding. For a full list of available models, see [NVIDIA NIM Text-to-embedding Models](https://build.nvidia.com/models?filters=usecase%3Ausecase_text_to_embedding). + +## bge-m3 + +- Name: `nvidia_nim/baai/bge-m3` +- Dimensions: 1024 +- Distance metric: Cosine, L2 +- Maximum input text tokens: 8,192 +- Price: Charged by NVIDIA +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ +- Docs: + +Example: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_NVIDIA_NIM_API_KEY = 'your-nvidia-nim-api-key-here'; + +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(1024) GENERATED ALWAYS AS (EMBED_TEXT( + "nvidia_nim/baai/bge-m3", + `content` + )) STORED +); + +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to start learning Java programming?" + ) +LIMIT 2; +``` + +Result: + +``` ++------+----------------------------------------------------------------+ +| id | content | ++------+----------------------------------------------------------------+ +| 1 | Java: Object-oriented language for cross-platform development. | +| 4 | Java's syntax is used in Android apps. 
| ++------+----------------------------------------------------------------+ +``` + +## nv-embed-v1 + +This example shows how to create a vector table, insert documents, and run similarity search using the `nvidia/nv-embed-v1` model. + +### Step 1: Connect to the database + + +
+ +```python +from pytidb import TiDBClient + +tidb_client = TiDBClient.connect( + host="{gateway-region}.prod.aws.tidbcloud.com", + port=4000, + username="{prefix}.root", + password="{password}", + database="{database}", + ensure_db=True, +) +``` + +
+
+ +```bash +mysql -h {gateway-region}.prod.aws.tidbcloud.com \ + -P 4000 \ + -u {prefix}.root \ + -p{password} \ + -D {database} +``` + +
+
+ +### Step 2: Configure the API key + +If you're using NVIDIA NIM models that require authentication, you can configure your API key. You can get free access to NIM API endpoints through the [NVIDIA Developer Program](https://developer.nvidia.com/nim) or create your API key from the [NVIDIA Build Platform](https://build.nvidia.com/settings/api-keys): + + +
+ +Configure the API key for NVIDIA NIM models using the TiDB Client: + +```python +tidb_client.configure_embedding_provider( + provider="nvidia_nim", + api_key="{your-nvidia-api-key}", +) +``` + +
+
+ +Set the API key for NVIDIA NIM models using SQL: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_NVIDIA_NIM_API_KEY = "{your-nvidia-api-key}"; +``` + +
+
+ +### Step 3: Create a vector table + +Create a table with a vector field that uses an NVIDIA NIM model to generate embeddings: + + +
+
+```python
+from pytidb.schema import TableModel, Field
+from pytidb.embeddings import EmbeddingFunction
+from pytidb.datatype import TEXT
+
+class Document(TableModel):
+    __tablename__ = "sample_documents"
+    id: int = Field(primary_key=True)
+    content: str = Field(sa_type=TEXT)
+    embedding: list[float] = EmbeddingFunction(
+        model_name="nvidia_nim/nvidia/nv-embed-v1"
+    ).VectorField(source_field="content")
+
+table = tidb_client.create_table(schema=Document, if_exists="overwrite")
+```
+
+
+
+```sql
+CREATE TABLE sample_documents (
+    `id` INT PRIMARY KEY,
+    `content` TEXT,
+    `embedding` VECTOR(4096) GENERATED ALWAYS AS (EMBED_TEXT(
+        "nvidia_nim/nvidia/nv-embed-v1",
+        `content`
+    )) STORED
+);
+```
+
+
+ +### Step 4: Insert data into the table + + +
+ +Use the `table.insert()` or `table.bulk_insert()` API to add data: + +```python +documents = [ + Document(id=1, content="Machine learning algorithms can identify patterns in data."), + Document(id=2, content="Deep learning uses neural networks with multiple layers."), + Document(id=3, content="Natural language processing helps computers understand text."), + Document(id=4, content="Computer vision enables machines to interpret images."), + Document(id=5, content="Reinforcement learning learns through trial and error."), +] +table.bulk_insert(documents) +``` + +
+
+ +Insert data using the `INSERT INTO` statement: + +```sql +INSERT INTO sample_documents (id, content) +VALUES + (1, "Machine learning algorithms can identify patterns in data."), + (2, "Deep learning uses neural networks with multiple layers."), + (3, "Natural language processing helps computers understand text."), + (4, "Computer vision enables machines to interpret images."), + (5, "Reinforcement learning learns through trial and error."); +``` + +
+
+ +### Step 5: Search for similar documents + + +
+ +Use the `table.search()` API to perform vector search: + +```python +results = table.search("How do neural networks work?") \ + .limit(3) \ + .to_list() + +for doc in results: + print(f"ID: {doc.id}, Content: {doc.content}") +``` + +
+
+ +Use the `VEC_EMBED_COSINE_DISTANCE` function to perform vector search with cosine distance: + +```sql +SELECT + `id`, + `content`, + VEC_EMBED_COSINE_DISTANCE(embedding, "How do neural networks work?") AS _distance +FROM sample_documents +ORDER BY _distance ASC +LIMIT 3; +``` + +
+
+ +## See also + +- [Auto Embedding Overview](/ai/integrations/vector-search-auto-embedding-overview.md) +- [Vector Search](/ai/concepts/vector-search-overview.md) +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) diff --git a/ai/integrations/vector-search-auto-embedding-openai.md b/ai/integrations/vector-search-auto-embedding-openai.md new file mode 100644 index 0000000000000..c2b2f2034a658 --- /dev/null +++ b/ai/integrations/vector-search-auto-embedding-openai.md @@ -0,0 +1,297 @@ +--- +title: OpenAI Embeddings +summary: Learn how to use OpenAI embedding models in TiDB Cloud. +aliases: ['/tidbcloud/vector-search-auto-embedding-openai/'] +--- + +# OpenAI Embeddings + +This document describes how to use OpenAI embedding models with [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) in TiDB Cloud to perform semantic searches with text queries. + +> **Note:** +> +> [Auto Embedding](/ai/integrations/vector-search-auto-embedding-overview.md) is only available on {{{ .starter }}} clusters hosted on AWS. + +## Available models + +All OpenAI models are available for use with the `openai/` prefix if you bring your own OpenAI API key (BYOK). For example: + +**text-embedding-3-small** + +- Name: `openai/text-embedding-3-small` +- Dimensions: 512-1536 (default: 1536) +- Distance metric: Cosine, L2 +- Price: Charged by OpenAI +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ + +**text-embedding-3-large** + +- Name: `openai/text-embedding-3-large` +- Dimensions: 256-3072 (default: 3072) +- Distance metric: Cosine, L2 +- Price: Charged by OpenAI +- Hosted by TiDB Cloud: ❌ +- Bring Your Own Key: ✅ + +For a full list of available models, see [OpenAI Documentation](https://platform.openai.com/docs/guides/embeddings). 
+ +## Usage example + +This example shows how to create a vector table, insert documents, and run similarity search using OpenAI embedding models. + +You can integrate the OpenAI Embeddings API with TiDB using the AI SDK or native SQL functions for automatic embedding generation. + +### Step 1: Connect to the database + + +
+ +```python +from pytidb import TiDBClient + +tidb_client = TiDBClient.connect( + host="{gateway-region}.prod.aws.tidbcloud.com", + port=4000, + username="{prefix}.root", + password="{password}", + database="{database}", + ensure_db=True, +) +``` + +
+
+ +```bash +mysql -h {gateway-region}.prod.aws.tidbcloud.com \ + -P 4000 \ + -u {prefix}.root \ + -p{password} \ + -D {database} +``` + +
+
+ +### Step 2: Configure the API key + +Create an API key in the [OpenAI API Platform](https://platform.openai.com/api-keys) and bring your own key (BYOK) to use the embedding service. + + +
+ +Configure the API key for the OpenAI embedding provider using the TiDB Client: + +```python +tidb_client.configure_embedding_provider( + provider="openai", + api_key="{your-openai-api-key}", +) +``` + +
+
+ +Set the API key for the OpenAI embedding provider using SQL: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_OPENAI_API_KEY = "{your-openai-api-key}"; +``` + +
+
+ +### Step 3: Create a vector table + +Create a table with a vector field that uses the `openai/text-embedding-3-small` model to generate 1536-dimensional vectors: + + +
+ +```python +from pytidb.schema import TableModel, Field +from pytidb.embeddings import EmbeddingFunction +from pytidb.datatype import TEXT + +class Document(TableModel): + __tablename__ = "sample_documents" + id: int = Field(primary_key=True) + content: str = Field(sa_type=TEXT) + embedding: list[float] = EmbeddingFunction( + model_name="openai/text-embedding-3-small" + ).VectorField(source_field="content") + +table = tidb_client.create_table(schema=Document, if_exists="overwrite") +``` + +
+
+ +```sql +CREATE TABLE sample_documents ( + `id` INT PRIMARY KEY, + `content` TEXT, + `embedding` VECTOR(1536) GENERATED ALWAYS AS (EMBED_TEXT( + "openai/text-embedding-3-small", + `content` + )) STORED +); +``` + +
+
+ +### Step 4: Insert data into the table + + +
+ +Use the `table.insert()` or `table.bulk_insert()` API to add data: + +```python +documents = [ + Document(id=1, content="Java: Object-oriented language for cross-platform development."), + Document(id=2, content="Java coffee: Bold Indonesian beans with low acidity."), + Document(id=3, content="Java island: Densely populated, home to Jakarta."), + Document(id=4, content="Java's syntax is used in Android apps."), + Document(id=5, content="Dark roast Java beans enhance espresso blends."), +] +table.bulk_insert(documents) +``` + +
+
+ +Insert data using the `INSERT INTO` statement: + +```sql +INSERT INTO sample_documents (id, content) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); +``` + +
+
+ +### Step 5: Search for similar documents + + +
+ +Use the `table.search()` API to perform vector search: + +```python +results = table.search("How to start learning Java programming?") \ + .limit(2) \ + .to_list() +print(results) +``` + +
+
+ +Use the `VEC_EMBED_COSINE_DISTANCE` function to perform vector search with cosine distance: + +```sql +SELECT + `id`, + `content`, + VEC_EMBED_COSINE_DISTANCE(embedding, "How to start learning Java programming?") AS _distance +FROM sample_documents +ORDER BY _distance ASC +LIMIT 2; +``` + +Result: + +``` ++------+----------------------------------------------------------------+ +| id | content | ++------+----------------------------------------------------------------+ +| 1 | Java: Object-oriented language for cross-platform development. | +| 4 | Java's syntax is used in Android apps. | ++------+----------------------------------------------------------------+ +``` + +
+
+ +## Use Azure OpenAI + +To use OpenAI embedding models on Azure, set the global variable `TIDB_EXP_EMBED_OPENAI_API_BASE` to the URL of your Azure resource. For example: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_OPENAI_API_KEY = 'your-openai-api-key-here'; +SET @@GLOBAL.TIDB_EXP_EMBED_OPENAI_API_BASE = 'https://.openai.azure.com/openai/v1'; + +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(3072) GENERATED ALWAYS AS (EMBED_TEXT( + "openai/text-embedding-3-large", + `content` + )) STORED +); + +INSERT INTO sample + (`id`, `content`) +VALUES + (1, "Java: Object-oriented language for cross-platform development."), + (2, "Java coffee: Bold Indonesian beans with low acidity."), + (3, "Java island: Densely populated, home to Jakarta."), + (4, "Java's syntax is used in Android apps."), + (5, "Dark roast Java beans enhance espresso blends."); + +SELECT `id`, `content` FROM sample +ORDER BY + VEC_EMBED_COSINE_DISTANCE( + embedding, + "How to start learning Java programming?" + ) +LIMIT 2; +``` + +Even if your resource URL appears as `https://.cognitiveservices.azure.com/`, you still need to use `https://.openai.azure.com/openai/v1` as the API base to keep OpenAI-compatible request and response formats. + +To switch from Azure OpenAI to OpenAI directly, set `TIDB_EXP_EMBED_OPENAI_API_BASE` to an empty string: + +```sql +SET @@GLOBAL.TIDB_EXP_EMBED_OPENAI_API_BASE = ''; +``` + +> **Note:** +> +> - For security reasons, you can only set the API base to an Azure OpenAI URL or the OpenAI URL. Arbitrary base URLs are not allowed. +> - To use another OpenAI-compatible embedding service, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). + +## Options + +All [OpenAI embedding options](https://platform.openai.com/docs/api-reference/embeddings/create) are supported via the `additional_json_options` parameter of the `EMBED_TEXT()` function. 
+ +**Example: Use an alternative dimension for text-embedding-3-large** + +```sql +CREATE TABLE sample ( + `id` INT, + `content` TEXT, + `embedding` VECTOR(1024) GENERATED ALWAYS AS (EMBED_TEXT( + "openai/text-embedding-3-large", + `content`, + '{"dimensions": 1024}' + )) STORED +); +``` + +For all available options, see [OpenAI Documentation](https://platform.openai.com/docs/api-reference/embeddings/create). + +## See also + +- [Auto Embedding Overview](/ai/integrations/vector-search-auto-embedding-overview.md) +- [Vector Search](/ai/concepts/vector-search-overview.md) +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) \ No newline at end of file diff --git a/ai/integrations/vector-search-auto-embedding-overview.md b/ai/integrations/vector-search-auto-embedding-overview.md new file mode 100644 index 0000000000000..ecbbb2a8bc7ca --- /dev/null +++ b/ai/integrations/vector-search-auto-embedding-overview.md @@ -0,0 +1,205 @@ +--- +title: Auto Embedding Overview +summary: Learn how to use Auto Embedding to perform semantic searches with plain text instead of vectors. +aliases: ['/tidbcloud/vector-search-auto-embedding-overview/'] +--- + +# Auto Embedding Overview + +The Auto Embedding feature lets you perform vector searches directly with plain text, without providing your own vectors. With this feature, you can insert text data directly and perform semantic searches using text queries, while TiDB automatically converts the text into vectors behind the scenes. + +To use Auto Embedding, the basic workflow is as follows: + +1. **Define a table** with a text column and a generated vector column using `EMBED_TEXT()`. +2. **Insert text data** — vectors are generated and stored automatically. +3. **Query using text** — use `VEC_EMBED_COSINE_DISTANCE()` or `VEC_EMBED_L2_DISTANCE()` to find semantically similar content. 
+ +> **Note:** +> +> Auto Embedding is only available on {{{ .starter }}} clusters hosted on AWS. + +## Quick start example + +> **Tip:** +> +> For Python usage, see [Use Auto Embedding in Python](#use-auto-embedding-in-python). + +The following example shows how to use Auto Embedding with cosine distance to perform a semantic search. No API key is required in this example. + +```sql +-- Create a table with auto-embedding +-- The dimension of the vector column must match the dimension of the embedding model; +-- Otherwise, TiDB returns an error when inserting data. +CREATE TABLE documents ( + id INT PRIMARY KEY AUTO_INCREMENT, + content TEXT, + content_vector VECTOR(1024) GENERATED ALWAYS AS ( + EMBED_TEXT("tidbcloud_free/amazon/titan-embed-text-v2", content) + ) STORED +); + +-- Insert text data (vectors are generated automatically) +INSERT INTO documents (content) VALUES + ("Electric vehicles reduce air pollution in cities."), + ("Solar panels convert sunlight into renewable energy."), + ("Plant-based diets lower carbon footprints significantly."), + ("Deep learning algorithms improve medical diagnosis accuracy."), + ("Blockchain technology enhances data security systems."); + +-- Search for semantically similar content using text query +SELECT id, content FROM documents +ORDER BY VEC_EMBED_COSINE_DISTANCE( + content_vector, + "Renewable energy solutions for environmental protection" +) +LIMIT 3; +``` + +The output is as follows: + +``` ++----+--------------------------------------------------------------+ +| id | content | ++----+--------------------------------------------------------------+ +| 2 | Solar panels convert sunlight into renewable energy. | +| 1 | Electric vehicles reduce air pollution in cities. | +| 4 | Deep learning algorithms improve medical diagnosis accuracy. | ++----+--------------------------------------------------------------+ +``` + +The preceding example uses the Amazon Titan model. 
For other models, see [Available text embedding models](#available-text-embedding-models). + +## Auto Embedding + Vector index + +Auto Embedding is compatible with [Vector index](/ai/reference/vector-search-index.md) for better query performance. You can define a vector index on the generated vector column, and it will be used automatically: + +```sql +-- Create a table with auto-embedding and a vector index +CREATE TABLE documents ( + id INT PRIMARY KEY AUTO_INCREMENT, + content TEXT, + content_vector VECTOR(1024) GENERATED ALWAYS AS ( + EMBED_TEXT("tidbcloud_free/amazon/titan-embed-text-v2", content) + ) STORED, + VECTOR INDEX ((VEC_COSINE_DISTANCE(content_vector))) +); + +-- Insert text data (vectors are generated automatically) +INSERT INTO documents (content) VALUES + ("Electric vehicles reduce air pollution in cities."), + ("Solar panels convert sunlight into renewable energy."), + ("Plant-based diets lower carbon footprints significantly."), + ("Deep learning algorithms improve medical diagnosis accuracy."), + ("Blockchain technology enhances data security systems."); + +-- Search for semantically similar content with a text query on the vector index using the same VEC_EMBED_COSINE_DISTANCE() function +SELECT id, content FROM documents +ORDER BY VEC_EMBED_COSINE_DISTANCE( + content_vector, + "Renewable energy solutions for environmental protection" +) +LIMIT 3; +``` + +> **Note:** +> +> - When defining a vector index, use `VEC_COSINE_DISTANCE()` or `VEC_L2_DISTANCE()`. +> - When running queries, use `VEC_EMBED_COSINE_DISTANCE()` or `VEC_EMBED_L2_DISTANCE()`. + +## Available text embedding models + +TiDB Cloud supports various embedding models. 
Choose the one that best fits your needs: + +| Embedding model | Documentation | Hosted by TiDB Cloud 1 | BYOK 2 | +| --------------- | ----------------------------------------------------------------------------------- | --------------------------------- | ----------------- | +| Amazon Titan | [Amazon Titan Embeddings](/ai/integrations/vector-search-auto-embedding-amazon-titan.md) | ✅ | | +| Cohere | [Cohere Embeddings](/ai/integrations/vector-search-auto-embedding-cohere.md) | ✅ | ✅ | +| Jina AI | [Jina AI Embeddings](/ai/integrations/vector-search-auto-embedding-jina-ai.md) | | ✅ | +| OpenAI | [OpenAI Embeddings](/ai/integrations/vector-search-auto-embedding-openai.md) | | ✅ | +| Gemini | [Gemini Embeddings](/ai/integrations/vector-search-auto-embedding-gemini.md) | | ✅ | + +You can also use open-source embedding models through the following inference services that TiDB Cloud supports: + +| Embedding model | Documentation | Hosted by TiDB Cloud 1 | BYOK 2 | Example supported models | +| --------------------- | --------------------------------------------------------------------------------- | --------------------------------- | ----------------- | --------------------------------- | +| Hugging Face Inference | [Hugging Face Embeddings](/ai/integrations/vector-search-auto-embedding-huggingface.md) | | ✅ | `bge-m3`, `multilingual-e5-large` | +| NVIDIA NIM | [NVIDIA NIM Embeddings](/ai/integrations/vector-search-auto-embedding-nvidia-nim.md) | | ✅ | `bge-m3`, `nv-embed-v1` | + +​1 Hosted models are hosted by TiDB Cloud and do not require any API keys. Currently, these hosted models are free to use, but certain usage limits might be applied to keep them available to everyone. + +​2 BYOK (Bring Your Own Key) models require you to provide your own API keys from the corresponding embedding provider. TiDB Cloud does not charge for the usage of BYOK models. You are responsible for managing and monitoring the costs associated with using these models. 
+ +## How Auto Embedding works + +Auto Embedding uses the [`EMBED_TEXT()`](#embed_text) function to convert text into vector embeddings with your chosen embedding model. The generated vectors are stored in `VECTOR` columns and can be queried with plain text using [`VEC_EMBED_COSINE_DISTANCE()`](#vec_embed_cosine_distance) or [`VEC_EMBED_L2_DISTANCE()`](#vec_embed_l2_distance). + +Internally, [`VEC_EMBED_COSINE_DISTANCE()`](#vec_embed_cosine_distance) and [`VEC_EMBED_L2_DISTANCE()`](#vec_embed_l2_distance) are executed as [`VEC_COSINE_DISTANCE()`](/ai/reference/vector-search-functions-and-operators.md#vec_cosine_distance) and [`VEC_L2_DISTANCE()`](/ai/reference/vector-search-functions-and-operators.md#vec_l2_distance), with the text query automatically converted into a vector embedding. + +## Key functions + +### `EMBED_TEXT()` + +Converts text to vector embeddings: + +```sql +EMBED_TEXT("model_name", text_content[, additional_json_options]) +``` + +Use this function in `GENERATED ALWAYS AS` clauses to automatically generate embeddings when inserting or updating text data. + +### `VEC_EMBED_COSINE_DISTANCE()` + +Calculates cosine similarity between a stored vector in the vector column and a text query: + +```sql +VEC_EMBED_COSINE_DISTANCE(vector_column, "query_text") +``` + +Use this function in `ORDER BY` clauses to rank results by cosine distance. It uses the same calculation as [`VEC_COSINE_DISTANCE()`](/ai/reference/vector-search-functions-and-operators.md#vec_cosine_distance), but automatically generates the embedding for the query text. + +### `VEC_EMBED_L2_DISTANCE()` + +Calculates L2 (Euclidean) distance between a stored vector and a text query: + +```sql +VEC_EMBED_L2_DISTANCE(vector_column, "query_text") +``` + +Use this function in `ORDER BY` clauses to rank results by L2 distance. 
It uses the same calculation as [`VEC_L2_DISTANCE()`](/ai/reference/vector-search-functions-and-operators.md#vec_l2_distance), but automatically generates the embedding for the query text. + +## Use Auto Embedding in Python + +TiDB provides a unified interface for integrating with various embedding providers and models: + +- **Programmatic use**: Use the `EmbeddingFunction` class from the AI SDK to create embedding functions for specific providers or models. +- **SQL use**: Use the `EMBED_TEXT` function to generate embeddings directly from text data. + +Use the `EmbeddingFunction` class to work with different embedding providers and models. + + ```python + from pytidb.embeddings import EmbeddingFunction + + embed_func = EmbeddingFunction( + model_name="/", + ) + ``` + +**Parameters:** + +- `model_name` *(required)*: specifies the embedding model to use, in the format `{provider_name}/{model_name}`. + +- `dimensions` *(optional)*: the dimensionality of output vector embeddings. If not provided and the model lacks a default dimension, a test string is embedded during initialization to determine the actual dimension automatically. + +- `api_key` *(optional)*: the API key for accessing the embedding service. If not explicitly set, retrieves the key from the provider's default environment variable. + +- `api_base` *(optional)*: the base URL of the embedding API service. + +- `use_server` *(optional)*: whether to use TiDB Cloud's hosted embedding service. Defaults to `True` for TiDB Cloud Starter. + +- `multimodal` *(optional)*: whether to use a multimodal embedding model. When enabled, `use_server` is automatically set to `False`, and the embedding service is called client-side. 
+ +## See also + +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/tidb-cloud/vector-search-integrate-with-amazon-bedrock.md b/ai/integrations/vector-search-integrate-with-amazon-bedrock.md similarity index 86% rename from tidb-cloud/vector-search-integrate-with-amazon-bedrock.md rename to ai/integrations/vector-search-integrate-with-amazon-bedrock.md index 2799070fe8561..5329e26758c5d 100644 --- a/tidb-cloud/vector-search-integrate-with-amazon-bedrock.md +++ b/ai/integrations/vector-search-integrate-with-amazon-bedrock.md @@ -1,23 +1,21 @@ --- title: Integrate TiDB Vector Search with Amazon Bedrock summary: Learn how to integrate TiDB Vector Search with Amazon Bedrock to build a Retrieval-Augmented Generation (RAG) Q&A bot. +aliases: ['/tidbcloud/vector-search-integrate-with-amazon-bedrock/'] --- # Integrate TiDB Vector Search with Amazon Bedrock -This tutorial demonstrates how to integrate the [vector search](/vector-search/vector-search-overview.md) feature of TiDB with [Amazon Bedrock](https://aws.amazon.com/bedrock/) to build a Retrieval-Augmented Generation (RAG) Q&A bot. - - - > **Note:** > -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> This document is applicable to TiDB Cloud only and not applicable to TiDB Self-Managed. - +This tutorial demonstrates how to integrate [TiDB Vector Search](/ai/concepts/vector-search-overview.md) with [Amazon Bedrock](https://aws.amazon.com/bedrock/) to build a Retrieval-Augmented Generation (RAG) Q&A bot. 
-> **Note** +> **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-serverless), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). > **Tip** > @@ -31,7 +29,7 @@ To complete this tutorial, you need: - [Pip](https://pypi.org/project/pip/) installed - [AWS CLI](https://aws.amazon.com/cli/) installed - Ensure your AWS CLI profile is configured to a supported [Amazon Bedrock](https://aws.amazon.com/bedrock/) region for this tutorial. You can find the list of supported regions at [Amazon Bedrock Regions](https://docs.aws.amazon.com/bedrock/latest/userguide/models-regions.html). To switch to a supported region, run the following command: + Ensure your AWS CLI profile is configured to a supported [Amazon Bedrock](https://aws.amazon.com/bedrock/) region. You can find the list of supported regions at [Amazon Bedrock Regions](https://docs.aws.amazon.com/bedrock/latest/userguide/models-regions.html). 
To switch to a supported region, run the following command: ```shell aws configure set region @@ -39,7 +37,7 @@ To complete this tutorial, you need: - A TiDB Cloud Serverless cluster - Follow [creating a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md) to create your own TiDB Cloud cluster if you don't have one. + Follow [creating a {{{ .starter }}} cluster](/tidb-cloud/select-cluster-tier.md#starter) to create your own TiDB Cloud cluster if you don't have one. - An AWS account with the [required permissions for Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/security_iam_id-based-policy-examples.html) and access to the following models: @@ -50,7 +48,7 @@ To complete this tutorial, you need: ## Get started -This section provides step-by-step instructions for integrating TiDB Vector Search with Amazon Bedrock to build a RAG-based Q&A bot. +This section provides step-by-step instructions to integrate TiDB Vector Search with Amazon Bedrock to build a RAG-based Q&A bot. ### Step 1. 
Set the environment variables @@ -317,5 +315,5 @@ def save_entities_with_embedding(session, contents): ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) -- [Vector Search Index](/vector-search/vector-search-index.md) \ No newline at end of file +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/vector-search/vector-search-integrate-with-django-orm.md b/ai/integrations/vector-search-integrate-with-django-orm.md similarity index 78% rename from vector-search/vector-search-integrate-with-django-orm.md rename to ai/integrations/vector-search-integrate-with-django-orm.md index 4a676a1b595de..d81a46641c0d8 100644 --- a/vector-search/vector-search-integrate-with-django-orm.md +++ b/ai/integrations/vector-search-integrate-with-django-orm.md @@ -1,31 +1,17 @@ --- title: Integrate TiDB Vector Search with Django ORM summary: Learn how to integrate TiDB Vector Search with Django ORM to store embeddings and perform semantic search. +aliases: ['/tidb/stable/vector-search-integrate-with-django-orm/','/tidb/dev/vector-search-integrate-with-django-orm/','/tidbcloud/vector-search-integrate-with-django-orm/'] --- # Integrate TiDB Vector Search with Django ORM -This tutorial walks you through how to use [Django](https://www.djangoproject.com/) ORM to interact with the [TiDB Vector Search](/vector-search/vector-search-overview.md), store embeddings, and perform vector search queries. - - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
- - +This tutorial walks you through how to use the [Django](https://www.djangoproject.com/) ORM to interact with [TiDB Vector Search](/ai/concepts/vector-search-overview.md), store embeddings, and perform vector search queries. > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). ## Prerequisites @@ -35,26 +21,14 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads) installed. - A TiDB cluster. - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. -- Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
- - - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster of v8.4.0 or a later version. - - +- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. ## Run the sample app -You can quickly learn about how to integrate TiDB Vector Search with Django ORM by following the steps below. +You can quickly learn how to integrate TiDB Vector Search with Django ORM by following the steps below. ### Step 1. Clone the repository @@ -90,7 +64,7 @@ pip install Django django-tidb mysqlclient numpy python-dotenv If you encounter installation issues with mysqlclient, refer to the mysqlclient official documentation. -#### What is `django-tidb` +#### What is `django-tidb`? `django-tidb` is a TiDB dialect for Django, which enhances the Django ORM to support TiDB-specific features (for example, Vector Search) and resolves compatibility issues between TiDB and Django. @@ -149,7 +123,7 @@ For a {{{ .starter }}} cluster, take the following steps to obtain the cluster c ``` -
+
For a TiDB Self-Managed cluster, create a `.env` file in the root directory of your Python project. Copy the following content into the `.env` file, and modify the environment variable values according to the connection parameters of your TiDB cluster: @@ -257,7 +231,7 @@ Document.objects.create(content="tree", embedding=[1, 0, 0]) ### Search the nearest neighbor documents -TiDB Vector support the following distance functions: +TiDB Vector supports the following distance functions: - `L1Distance` - `L2Distance` @@ -284,5 +258,5 @@ results = Document.objects.annotate( ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) -- [Vector Search Index](/vector-search/vector-search-index.md) +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/vector-search/vector-search-integrate-with-jinaai-embedding.md b/ai/integrations/vector-search-integrate-with-jinaai-embedding.md similarity index 79% rename from vector-search/vector-search-integrate-with-jinaai-embedding.md rename to ai/integrations/vector-search-integrate-with-jinaai-embedding.md index 5f88e16096833..2dafff89ce87f 100644 --- a/vector-search/vector-search-integrate-with-jinaai-embedding.md +++ b/ai/integrations/vector-search-integrate-with-jinaai-embedding.md @@ -1,31 +1,17 @@ --- title: Integrate TiDB Vector Search with Jina AI Embeddings API summary: Learn how to integrate TiDB Vector Search with Jina AI Embeddings API to store embeddings and perform semantic search. 
+aliases: ['/tidb/stable/vector-search-integrate-with-jinaai-embedding/','/tidb/dev/vector-search-integrate-with-jinaai-embedding/','/tidbcloud/vector-search-integrate-with-jinaai-embedding/'] --- # Integrate TiDB Vector Search with Jina AI Embeddings API -This tutorial walks you through how to use [Jina AI](https://jina.ai/) to generate embeddings for text data, and then store the embeddings in TiDB vector storage and search similar texts based on embeddings. - - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - +This tutorial walks you through how to use [Jina AI](https://jina.ai/) to generate text embeddings, store them in TiDB, and search for similar text based on embeddings. > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
+> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). ## Prerequisites @@ -35,26 +21,14 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads) installed. - A TiDB cluster. - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. -- Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - - - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster of v8.4.0 or a later version. - - +- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. ## Run the sample app -You can quickly learn about how to integrate TiDB Vector Search with JinaAI Embedding by following the steps below. +You can quickly learn how to integrate TiDB Vector Search with Jina AI embeddings by following the steps below. ### Step 1. 
Clone the repository @@ -126,7 +100,7 @@ For a {{{ .starter }}} cluster, take the following steps to obtain the cluster c ```
-
+
For a TiDB Self-Managed cluster, set the environment variables for connecting to your TiDB cluster in your terminal as follows: @@ -278,7 +252,7 @@ with Session(engine) as session: ### Perform semantic search with Jina AI embeddings in TiDB -Generate the embedding for the query text via Jina AI embeddings API, and then search for the most relevant document based on the cosine distance between **the embedding of the query text** and **each embedding in the vector table**: +Generate an embedding for the query text via Jina AI embeddings API, and then search for the most relevant document based on the cosine distance between **the embedding of the query text** and **each embedding in the vector table**: ```python query = 'What is TiDB?' @@ -299,5 +273,5 @@ with Session(engine) as session: ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) -- [Vector Search Index](/vector-search/vector-search-index.md) +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/vector-search/vector-search-integrate-with-langchain.md b/ai/integrations/vector-search-integrate-with-langchain.md similarity index 90% rename from vector-search/vector-search-integrate-with-langchain.md rename to ai/integrations/vector-search-integrate-with-langchain.md index 580ff8f95b6a8..201904fb49fba 100644 --- a/vector-search/vector-search-integrate-with-langchain.md +++ b/ai/integrations/vector-search-integrate-with-langchain.md @@ -1,35 +1,21 @@ --- title: Integrate Vector Search with LangChain summary: Learn how to integrate TiDB Vector Search with LangChain. 
+aliases: ['/tidb/stable/vector-search-integrate-with-langchain/','/tidb/dev/vector-search-integrate-with-langchain/','/tidbcloud/vector-search-integrate-with-langchain/'] --- # Integrate Vector Search with LangChain -This tutorial demonstrates how to integrate the [vector search](/vector-search/vector-search-overview.md) feature of TiDB with [LangChain](https://python.langchain.com/). - - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - +This tutorial demonstrates how to integrate [TiDB Vector Search](/ai/concepts/vector-search-overview.md) with [LangChain](https://python.langchain.com/). > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). 
For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). > **Tip** > -> You can view the complete [sample code](https://github.com/langchain-ai/langchain/blob/master/docs/docs/integrations/vectorstores/tidb_vector.ipynb) on Jupyter Notebook, or run the sample code directly in the [Colab](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/integrations/vectorstores/tidb_vector.ipynb) online environment. +> You can view the complete [sample code](https://github.com/langchain-ai/langchain/blob/master/docs/docs/integrations/vectorstores/tidb_vector.ipynb) in Jupyter Notebook, or run it directly in the [Colab](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/integrations/vectorstores/tidb_vector.ipynb) online environment. ## Prerequisites @@ -40,22 +26,10 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads) installed. - A TiDB cluster. - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. -- Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - - - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster of v8.4.0 or a later version. 
- - +- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. ## Get started @@ -133,7 +107,7 @@ For a {{{ .starter }}} cluster, take the following steps to obtain the cluster c ```
-
+
This document uses [OpenAI](https://platform.openai.com/docs/introduction) as the embedding model provider. In this step, you need to provide the connection string obtained from the previous step and your [OpenAI API key](https://platform.openai.com/docs/quickstart/step-2-set-up-your-api-key). @@ -194,7 +168,7 @@ docs = text_splitter.split_documents(documents) ### Step 5. Embed and store document vectors -TiDB vector store supports both cosine distance (`consine`) and Euclidean distance (`l2`) for measuring similarity between vectors. The default strategy is cosine distance. +TiDB vector store supports both cosine distance (`cosine`) and Euclidean distance (`l2`) for measuring similarity between vectors. The default strategy is cosine distance. The following code creates a table named `embedded_documents` in TiDB, which is optimized for vector search. @@ -325,7 +299,7 @@ We’re securing commitments and supporting partners in South and Central Americ ### Use as a retriever -In Langchain, a [retriever](https://python.langchain.com/v0.2/docs/concepts/#retrievers) is an interface that retrieves documents in response to an unstructured query, providing more functionality than a vector store. The following code demonstrates how to use TiDB vector store as a retriever. +In LangChain, a [retriever](https://python.langchain.com/v0.2/docs/concepts/#retrievers) is an interface that retrieves documents for an unstructured query and provides more functionality than a vector store. The following code demonstrates how to use TiDB vector store as a retriever. 
```python retriever = vector_store.as_retriever( @@ -618,7 +592,7 @@ Alternatively, you can streamline the entire process using a single SQL query: ```python search_query = f""" SELECT - VEC_Cosine_Distance(se.embedding, :query_vector) as distance, + VEC_COSINE_DISTANCE(se.embedding, :query_vector) as distance, ar.*, se.document as airport_review FROM @@ -658,5 +632,5 @@ The expected output is as follows: ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) -- [Vector Search Index](/vector-search/vector-search-index.md) +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/vector-search/vector-search-integrate-with-llamaindex.md b/ai/integrations/vector-search-integrate-with-llamaindex.md similarity index 82% rename from vector-search/vector-search-integrate-with-llamaindex.md rename to ai/integrations/vector-search-integrate-with-llamaindex.md index 73c4977266a4e..b9a20381b6101 100644 --- a/vector-search/vector-search-integrate-with-llamaindex.md +++ b/ai/integrations/vector-search-integrate-with-llamaindex.md @@ -1,35 +1,21 @@ --- title: Integrate Vector Search with LlamaIndex summary: Learn how to integrate TiDB Vector Search with LlamaIndex. +aliases: ['/tidb/stable/vector-search-integrate-with-llamaindex/','/tidb/dev/vector-search-integrate-with-llamaindex/','/tidbcloud/vector-search-integrate-with-llamaindex/'] --- # Integrate Vector Search with LlamaIndex -This tutorial demonstrates how to integrate the [vector search](/vector-search/vector-search-overview.md) feature of TiDB with [LlamaIndex](https://www.llamaindex.ai). - - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
- - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - +This tutorial demonstrates how to integrate [TiDB Vector Search](/ai/concepts/vector-search-overview.md) with [LlamaIndex](https://www.llamaindex.ai). > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). > **Tip** > -> You can view the complete [sample code](https://github.com/run-llama/llama_index/blob/main/docs/docs/examples/vector_stores/TiDBVector.ipynb) on Jupyter Notebook, or run the sample code directly in the [Colab](https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/vector_stores/TiDBVector.ipynb) online environment. 
+> You can view the complete [sample code](https://github.com/run-llama/llama_index/blob/main/docs/docs/examples/vector_stores/TiDBVector.ipynb) in Jupyter Notebook, or run it directly in the [Colab](https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/vector_stores/TiDBVector.ipynb) online environment. ## Prerequisites @@ -40,22 +26,10 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads) installed. - A TiDB cluster. - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. -- Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - - - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster of v8.4.0 or a later version. - - +- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. ## Get started @@ -132,7 +106,7 @@ For a {{{ .starter }}} cluster, take the following steps to obtain the cluster c ```
-
+
This document uses [OpenAI](https://platform.openai.com/docs/introduction) as the embedding model provider. In this step, you need to provide the connection string of your TiDB cluster and your [OpenAI API key](https://platform.openai.com/docs/quickstart/step-2-set-up-your-api-key). @@ -200,7 +174,7 @@ The following code creates a table named `paul_graham_test` in TiDB, which is op ```python tidbvec = TiDBVectorStore( - connection_string=tidb_connection_url, + connection_string=tidb_connection_string, table_name="paul_graham_test", distance_strategy="cosine", vector_dimension=1536, @@ -322,7 +296,7 @@ Delete the first document from the index: tidbvec.delete(documents[0].doc_id) ``` -Check whether the documents had been deleted: +Check whether the documents have been deleted: ```python query_engine = index.as_query_engine() @@ -338,5 +312,5 @@ Empty Response ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) -- [Vector Search Index](/vector-search/vector-search-index.md) +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/vector-search/vector-search-integrate-with-peewee.md b/ai/integrations/vector-search-integrate-with-peewee.md similarity index 77% rename from vector-search/vector-search-integrate-with-peewee.md rename to ai/integrations/vector-search-integrate-with-peewee.md index b19e45cab9229..d30fa1effc5a8 100644 --- a/vector-search/vector-search-integrate-with-peewee.md +++ b/ai/integrations/vector-search-integrate-with-peewee.md @@ -1,31 +1,17 @@ --- title: Integrate TiDB Vector Search with peewee summary: Learn how to integrate TiDB Vector Search with peewee to store embeddings and perform semantic searches. 
+aliases: ['/tidb/stable/vector-search-integrate-with-peewee/','/tidb/dev/vector-search-integrate-with-peewee/','/tidbcloud/vector-search-integrate-with-peewee/'] --- # Integrate TiDB Vector Search with peewee -This tutorial walks you through how to use [peewee](https://docs.peewee-orm.com/) to interact with the [TiDB Vector Search](/vector-search/vector-search-overview.md), store embeddings, and perform vector search queries. - - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - +This tutorial walks you through how to use [peewee](https://docs.peewee-orm.com/) to interact with [TiDB Vector Search](/ai/concepts/vector-search-overview.md), store embeddings, and perform vector search queries. > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
+> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). ## Prerequisites @@ -35,26 +21,14 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads) installed. - A TiDB cluster. - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. -- Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - - - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster of v8.4.0 or a later version. - - +- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. ## Run the sample app -You can quickly learn about how to integrate TiDB Vector Search with peewee by following the steps below. +You can quickly learn how to integrate TiDB Vector Search with peewee by following the steps below. ### Step 1. 
Clone the repository @@ -139,7 +113,7 @@ For a {{{ .starter }}} cluster, take the following steps to obtain the cluster c ```
-
+
For a TiDB Self-Managed cluster, create a `.env` file in the root directory of your Python project. Copy the following content into the `.env` file, and modify the environment variable values according to the connection parameters of your TiDB cluster: @@ -194,7 +168,7 @@ You can refer to the following sample code snippets to develop your application. ### Create vector tables -#### Connect to TiDB cluster +#### Connect to a TiDB cluster ```python import os @@ -274,5 +248,5 @@ results = Document.select(Document, distance).where(distance_expression < 0.2).o ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) -- [Vector Search Index](/vector-search/vector-search-index.md) +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/vector-search/vector-search-integrate-with-sqlalchemy.md b/ai/integrations/vector-search-integrate-with-sqlalchemy.md similarity index 75% rename from vector-search/vector-search-integrate-with-sqlalchemy.md rename to ai/integrations/vector-search-integrate-with-sqlalchemy.md index 214dcb3232752..0432a3d9b844b 100644 --- a/vector-search/vector-search-integrate-with-sqlalchemy.md +++ b/ai/integrations/vector-search-integrate-with-sqlalchemy.md @@ -1,31 +1,17 @@ --- title: Integrate TiDB Vector Search with SQLAlchemy summary: Learn how to integrate TiDB Vector Search with SQLAlchemy to store embeddings and perform semantic searches. +aliases: ['/tidb/stable/vector-search-integrate-with-sqlalchemy/','/tidb/dev/vector-search-integrate-with-sqlalchemy/','/tidbcloud/vector-search-integrate-with-sqlalchemy/'] --- # Integrate TiDB Vector Search with SQLAlchemy -This tutorial walks you through how to use [SQLAlchemy](https://www.sqlalchemy.org/) to interact with [TiDB Vector Search](/vector-search/vector-search-overview.md), store embeddings, and perform vector search queries. 
- - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - +This tutorial walks you through how to use [SQLAlchemy](https://www.sqlalchemy.org/) to interact with [TiDB Vector Search](/ai/concepts/vector-search-overview.md), store embeddings, and perform vector search queries. > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). ## Prerequisites @@ -35,26 +21,14 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads) installed. - A TiDB cluster. 
- - -**If you don't have a TiDB cluster, you can create one as follows:** - -- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. -- Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - - - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster of v8.4.0 or a later version. - - +- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. ## Run the sample app -You can quickly learn about how to integrate TiDB Vector Search with SQLAlchemy by following the steps below. +You can quickly learn how to integrate TiDB Vector Search with SQLAlchemy by following the steps below. ### Step 1. Clone the repository @@ -127,7 +101,7 @@ For a {{{ .starter }}} cluster, take the following steps to obtain the cluster c ```
-
+
For a TiDB Self-Managed cluster, create a `.env` file in the root directory of your Python project. Copy the following content into the `.env` file, and modify the environment variable values according to the connection parameters of your TiDB cluster: @@ -179,7 +153,7 @@ You can refer to the following sample code snippets to develop your application. ### Create vector tables -#### Connect to TiDB cluster +#### Connect to a TiDB cluster ```python import os @@ -245,5 +219,5 @@ with Session(engine) as session: ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) -- [Vector Search Index](/vector-search/vector-search-index.md) +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/vector-search/vector-search-integration-overview.md b/ai/integrations/vector-search-integration-overview.md similarity index 50% rename from vector-search/vector-search-integration-overview.md rename to ai/integrations/vector-search-integration-overview.md index b14a9b512a074..1eafb078f0fbb 100644 --- a/vector-search/vector-search-integration-overview.md +++ b/ai/integrations/vector-search-integration-overview.md @@ -1,31 +1,17 @@ --- title: Vector Search Integration Overview summary: An overview of TiDB vector search integration, including supported AI frameworks, embedding models, and ORM libraries. +aliases: ['/tidb/stable/vector-search-integration-overview/','/tidb/dev/vector-search-integration-overview/','/tidbcloud/vector-search-integration-overview/'] --- # Vector Search Integration Overview This document provides an overview of TiDB vector search integration, including supported AI frameworks, embedding models, and Object Relational Mapping (ORM) libraries. - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. 
If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). 
## AI frameworks @@ -33,22 +19,22 @@ TiDB provides official support for the following AI frameworks, enabling you to | AI frameworks | Tutorial | |---------------|---------------------------------------------------------------------------------------------------| -| Langchain | [Integrate Vector Search with LangChain](/vector-search/vector-search-integrate-with-langchain.md) | -| LlamaIndex | [Integrate Vector Search with LlamaIndex](/vector-search/vector-search-integrate-with-llamaindex.md) | +| LangChain | [Integrate Vector Search with LangChain](/ai/integrations/vector-search-integrate-with-langchain.md) | +| LlamaIndex | [Integrate Vector Search with LlamaIndex](/ai/integrations/vector-search-integrate-with-llamaindex.md) | -Moreover, you can also use TiDB for various purposes, such as document storage and knowledge graph storage for AI applications. +You can also use TiDB for various tasks such as document storage and knowledge graph storage for AI applications. ## Embedding models and services TiDB Vector Search supports storing vectors of up to 16383 dimensions, which accommodates most embedding models. -You can either use self-deployed open-source embedding models or third-party embedding APIs provided by third-party embedding providers to generate vectors. +You can use either self-deployed open-source embedding models or third-party embedding APIs to generate vectors. The following table lists some mainstream embedding service providers and the corresponding integration tutorials. 
| Embedding service providers | Tutorial | |-----------------------------|---------------------------------------------------------------------------------------------------------------------| -| Jina AI | [Integrate Vector Search with Jina AI Embeddings API](/vector-search/vector-search-integrate-with-jinaai-embedding.md) | +| Jina AI | [Integrate Vector Search with Jina AI Embeddings API](/ai/integrations/vector-search-integrate-with-jinaai-embedding.md) | ## Object Relational Mapping (ORM) libraries @@ -58,7 +44,7 @@ The following table lists the supported ORM libraries and the corresponding inte | Language | ORM/Client | How to install | Tutorial | |----------|--------------------|-----------------------------------|----------| -| Python | TiDB Vector Client | `pip install tidb-vector[client]` | [Get Started with Vector Search Using Python](/vector-search/vector-search-get-started-using-python.md) | -| Python | SQLAlchemy | `pip install tidb-vector` | [Integrate TiDB Vector Search with SQLAlchemy](/vector-search/vector-search-integrate-with-sqlalchemy.md) -| Python | peewee | `pip install tidb-vector` | [Integrate TiDB Vector Search with peewee](/vector-search/vector-search-integrate-with-peewee.md) | -| Python | Django | `pip install django-tidb[vector]` | [Integrate TiDB Vector Search with Django](/vector-search/vector-search-integrate-with-django-orm.md) \ No newline at end of file +| Python | TiDB Vector Client | `pip install tidb-vector[client]` | [Get Started with Vector Search Using Python](/ai/quickstart-via-python.md) | +| Python | SQLAlchemy | `pip install tidb-vector` | [Integrate TiDB Vector Search with SQLAlchemy](/ai/integrations/vector-search-integrate-with-sqlalchemy.md) +| Python | peewee | `pip install tidb-vector` | [Integrate TiDB Vector Search with peewee](/ai/integrations/vector-search-integrate-with-peewee.md) | +| Python | Django | `pip install django-tidb[vector]` | [Integrate TiDB Vector Search with 
Django](/ai/integrations/vector-search-integrate-with-django-orm.md)
\ No newline at end of file
diff --git a/ai/quickstart-via-python.md b/ai/quickstart-via-python.md
new file mode 100644
index 0000000000000..a6edc20cdca37
--- /dev/null
+++ b/ai/quickstart-via-python.md
@@ -0,0 +1,245 @@
+---
+title: Get Started with TiDB + AI via Python
+summary: Learn how to get started with vector search in TiDB using the Python SDK.
+aliases: ['/tidb/stable/vector-search-get-started-using-python/','/tidb/dev/vector-search-get-started-using-python/','/tidbcloud/vector-search-get-started-using-python/']
+---
+
+# Get Started with TiDB + AI via Python
+
+This document demonstrates how to get started with [Vector Search](/ai/concepts/vector-search-overview.md) in TiDB using the Python SDK. Follow along to build your first AI application working with TiDB.
+
+By following this document, you will learn how to:
+
+- Connect to TiDB using the TiDB Python SDK.
+- Generate text embeddings with popular embedding models.
+- Store vectors in TiDB tables.
+- Perform semantic search using vector similarity.
+
+> **Note:**
+>
+> - The vector search feature is in beta and might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub.
+> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended).
+ +## Prerequisites + +- Go to [tidbcloud.com](https://tidbcloud.com/) to create a TiDB Cloud Starter cluster for free or using [tiup playground](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb/#deploy-a-local-test-cluster) to deploy a TiDB Self-Managed cluster for local testing. + +## Installation + +[pytidb](https://github.com/pingcap/pytidb) is the official Python SDK for TiDB, designed to help developers build AI applications efficiently. + +To install the Python SDK, run the following command: + +```bash +pip install pytidb +``` + +To use built-in embedding function, install the `models` extension (alternative): + +```bash +pip install "pytidb[models]" +``` + +## Connect to database + + +
+ +You can get these connection parameters from the [TiDB Cloud console](https://tidbcloud.com/clusters): + +1. Navigate to the [Clusters page](https://tidbcloud.com/clusters), and then click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed, with connection parameters listed. + +For example, if the connection parameters are displayed as follows: + +```text +HOST: gateway01.us-east-1.prod.shared.aws.tidbcloud.com +PORT: 4000 +USERNAME: 4EfqPF23YKBxaQb.root +PASSWORD: abcd1234 +DATABASE: test +CA: /etc/ssl/cert.pem +``` + +The corresponding Python code to connect to the TiDB Cloud Starter cluster would be as follows: + +```python +from pytidb import TiDBClient + +client = TiDBClient.connect( + host="gateway01.us-east-1.prod.shared.aws.tidbcloud.com", + port=4000, + username="4EfqPF23YKBxaQb.root", + password="abcd1234", + database="test", +) +``` + +> **Note:** +> +> The preceding example is for demonstration purposes only. You need to fill in the parameters with your own values and keep them secure. + +
+
+ +Here is a basic example for connecting to a self-managed TiDB cluster: + +```python +from pytidb import TiDBClient + +client = TiDBClient.connect( + host="localhost", + port=4000, + username="root", + password="", + database="test", + ensure_db=True, +) +``` + +> **Note:** +> +> Make sure to update the connection parameters according to your actual deployment. + +
+
+ +Once connected, you can use the `client` object to operate tables, query data, and more. + +## Create an embedding function + +When working with [embedding models](/ai/concepts/vector-search-overview.md#embedding-model), you can leverage the embedding function to automatically vectorize your data at both insertion and query stages. It natively supports popular embedding models like OpenAI, Jina AI, Hugging Face, Sentence Transformers, and others. + + +
+ +Go to [OpenAI platform](https://platform.openai.com/api-keys) to create your API key for embedding. + +```python +from pytidb.embeddings import EmbeddingFunction + +text_embed = EmbeddingFunction( + model_name="openai/text-embedding-3-small", + api_key="", +) +``` + +
+
+ +Go to [Jina AI](https://jina.ai/embeddings/) to create your API key for embedding. + +```python +from pytidb.embeddings import EmbeddingFunction + +text_embed = EmbeddingFunction( + model_name="jina/jina-embeddings-v3", + api_key="", +) +``` + +
+
+ +## Create a table + +As an example, create a table named `chunks` with the following columns: + +- `id` (int): the ID of the chunk. +- `text` (text): the text content of the chunk. +- `text_vec` (vector): the vector embeddings of the text. +- `user_id` (int): the ID of the user who created the chunk. + +```python hl_lines="6" +from pytidb.schema import TableModel, Field, VectorField + +class Chunk(TableModel): + id: int | None = Field(default=None, primary_key=True) + text: str = Field() + text_vec: list[float] = text_embed.VectorField(source_field="text") + user_id: int = Field() + +table = client.create_table(schema=Chunk, if_exists="overwrite") +``` + +Once created, you can use the `table` object to insert data, search data, and more. + +## Insert Data + +Now let's add some sample data to our table. + +```python +table.bulk_insert([ + # 👇 The text will be automatically embedded and populated into the `text_vec` field. + Chunk(text="PyTiDB is a Python library for developers to connect to TiDB.", user_id=2), + Chunk(text="LlamaIndex is a framework for building AI applications.", user_id=2), + Chunk(text="OpenAI is a company and platform that provides AI models service and tools.", user_id=3), +]) +``` + +## Search for nearest neighbors + +To search for nearest neighbors of a given query, you can use the `table.search()` method. This method performs a [vector search](/ai/guides/vector-search.md) by default. + +```python +table.search( + # 👇 Pass the query text directly, it will be embedded to a query vector automatically. + "A library for my artificial intelligence software" +) +.limit(3).to_list() +``` + +In this example, vector search compares the query vector with the stored vectors in the `text_vec` field of the `chunks` table and returns the top 3 most semantically relevant results based on similarity scores. + +The closer `_distance` means the more similar the two vectors are. 
+ +```json title="Expected output" +[ + { + 'id': 2, + 'text': 'LlamaIndex is a framework for building AI applications.', + 'text_vec': [...], + 'user_id': 2, + '_distance': 0.5719928358786761, + '_score': 0.4280071641213239 + }, + { + 'id': 3, + 'text': 'OpenAI is a company and platform that provides AI models service and tools.', + 'text_vec': [...], + 'user_id': 3, + '_distance': 0.603133726213383, + '_score': 0.396866273786617 + }, + { + 'id': 1, + 'text': 'PyTiDB is a Python library for developers to connect to TiDB.', + 'text_vec': [...], + 'user_id': 2, + '_distance': 0.6202191842385758, + '_score': 0.3797808157614242 + } +] +``` + +## Delete data + +To delete a specific row from the table, you can use the `table.delete()` method: + +```python +table.delete({ + "id": 1 +}) +``` + +## Drop table + +When you no longer need a table, you can drop it using the `client.drop_table()` method: + +```python +client.drop_table("chunks") +``` + +## Next steps + +- Learn more details about [Vector Search](/ai/guides/vector-search.md), [Full-Text Search](/ai/guides/vector-search-full-text-search-python.md) and [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) in TiDB. diff --git a/vector-search/vector-search-get-started-using-sql.md b/ai/quickstart-via-sql.md similarity index 65% rename from vector-search/vector-search-get-started-using-sql.md rename to ai/quickstart-via-sql.md index 94d494f5c83f3..eefe644429ead 100644 --- a/vector-search/vector-search-get-started-using-sql.md +++ b/ai/quickstart-via-sql.md @@ -1,62 +1,36 @@ --- -title: Get Started with Vector Search via SQL +title: Get Started with TiDB + AI via SQL summary: Learn how to quickly get started with Vector Search in TiDB using SQL statements to power your generative AI applications. 
+aliases: ['/tidb/stable/vector-search-get-started-using-sql/','/tidb/dev/vector-search-get-started-using-sql/','/tidbcloud/vector-search-get-started-using-sql/'] --- -# Get Started with Vector Search via SQL +# Get Started with TiDB + AI via SQL -TiDB extends MySQL syntax to support [Vector Search](/vector-search/vector-search-overview.md) and introduce new [Vector data types](/vector-search/vector-search-data-types.md) and several [vector functions](/vector-search/vector-search-functions-and-operators.md). +TiDB extends MySQL syntax to support [Vector Search](/ai/concepts/vector-search-overview.md) and introduce new [Vector data types](/ai/reference/vector-search-data-types.md) and several [vector functions](/ai/reference/vector-search-functions-and-operators.md). -This tutorial demonstrates how to get started with TiDB Vector Search just using SQL statements. You will learn how to use the [MySQL command-line client](https://dev.mysql.com/doc/refman/8.4/en/mysql.html) to complete the following operations: +This document demonstrates how to get started with TiDB Vector Search just using SQL statements. You will learn how to use the [MySQL command-line client](https://dev.mysql.com/doc/refman/8.4/en/mysql.html) to complete the following operations: - Connect to your TiDB cluster. - Create a vector table. - Store vector embeddings. - Perform vector search queries. - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
- - - > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta and might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). ## Prerequisites -To complete this tutorial, you need: +To complete steps in this document, you need: - [MySQL command-line client](https://dev.mysql.com/doc/refman/8.4/en/mysql.html) (MySQL CLI) installed on your machine. - A TiDB cluster. - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. -- Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - - - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster of v8.4.0 or a later version. - - +- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. ## Get started @@ -82,7 +56,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele ```
-
+
After your TiDB Self-Managed cluster is started, execute your cluster connection command in the terminal. @@ -98,7 +72,7 @@ mysql --comments --host 127.0.0.1 --port 4000 -u root ### Step 2. Create a vector table -When creating a table, you can define a column as a [vector](/vector-search/vector-search-overview.md#vector-embedding) column by specifying the `VECTOR` data type. +When creating a table, you can define a column as a [vector](/ai/concepts/vector-search-overview.md#vector-embedding) column by specifying the `VECTOR` data type. For example, to create a table `embedded_documents` with a three-dimensional `VECTOR` column, execute the following SQL statements using your MySQL CLI: @@ -121,7 +95,7 @@ Query OK, 0 rows affected (0.27 sec) ### Step 3. Insert vector embeddings to the table -Insert three documents with their [vector embeddings](/vector-search/vector-search-overview.md#vector-embedding) into the `embedded_documents` table: +Insert three documents with their [vector embeddings](/ai/concepts/vector-search-overview.md#vector-embedding) into the `embedded_documents` table: ```sql INSERT INTO embedded_documents @@ -142,7 +116,7 @@ Records: 3 Duplicates: 0 Warnings: 0 > > This example simplifies the dimensions of the vector embeddings and uses only 3-dimensional vectors for demonstration purposes. > -> In real-world applications, [embedding models](/vector-search/vector-search-overview.md#embedding-model) often produce vector embeddings with hundreds or thousands of dimensions. +> In real-world applications, [embedding models](/ai/concepts/vector-search-overview.md#embedding-model) often produce vector embeddings with hundreds or thousands of dimensions. ### Step 4. 
Query the vector table @@ -199,5 +173,5 @@ Therefore, according to the output, the swimming animal is most likely a fish, o ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) -- [Vector Search Index](/vector-search/vector-search-index.md) +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/tidb-cloud/vector-search-changelogs.md b/ai/reference/vector-search-changelogs.md similarity index 91% rename from tidb-cloud/vector-search-changelogs.md rename to ai/reference/vector-search-changelogs.md index 98c18b5252e25..8a7dc6a225072 100644 --- a/tidb-cloud/vector-search-changelogs.md +++ b/ai/reference/vector-search-changelogs.md @@ -1,6 +1,7 @@ --- title: Vector Search Changelogs summary: Learn about the new features, compatibility changes, improvements, and bug fixes for the TiDB vector search feature. +aliases: ['/tidbcloud/vector-search-changelogs/'] --- # Vector Search Changelogs diff --git a/vector-search/vector-search-data-types.md b/ai/reference/vector-search-data-types.md similarity index 82% rename from vector-search/vector-search-data-types.md rename to ai/reference/vector-search-data-types.md index 208581aa0a4af..16804a0257ea5 100644 --- a/vector-search/vector-search-data-types.md +++ b/ai/reference/vector-search-data-types.md @@ -1,31 +1,17 @@ --- title: Vector Data Types summary: Learn about the Vector data types in TiDB. +aliases: ['/tidb/stable/vector-search-data-types/','/tidb/dev/vector-search-data-types/','/tidbcloud/vector-search-data-types/'] --- # Vector Data Types A vector is a sequence of floating-point numbers, such as `[0.3, 0.5, -0.1, ...]`. TiDB offers Vector data types, specifically optimized for efficiently storing and querying vector embeddings widely used in AI applications. - - -> **Warning:** -> -> This feature is experimental. It is not recommended that you use it in the production environment. 
This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> This feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - > **Note:** > -> Vector data types are available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - Vector data types are in beta and might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - Vector data types are available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). The following Vector data types are currently available: @@ -34,7 +20,7 @@ The following Vector data types are currently available: Using vector data types provides the following advantages over using the [`JSON`](/data-type-json.md) type: -- Vector index support: You can build a [vector search index](/vector-search/vector-search-index.md) to speed up vector searching. +- Vector index support: You can build a [vector search index](/ai/reference/vector-search-index.md) to speed up vector searching. 
- Dimension enforcement: You can specify a dimension to forbid inserting vectors with different dimensions. - Optimized storage format: Vector data types are optimized for handling vector data, offering better space efficiency and performance compared to `JSON` types. @@ -73,9 +59,9 @@ In the following example, because dimension `3` is enforced for the `embedding` ERROR 1105 (HY000): vector has 2 dimensions, does not fit VECTOR(3) ``` -For available functions and operators over the vector data types, see [Vector Functions and Operators](/vector-search/vector-search-functions-and-operators.md). +For available functions and operators over the vector data types, see [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md). -For more information about building and using a vector search index, see [Vector Search Index](/vector-search/vector-search-index.md). +For more information about building and using a vector search index, see [Vector Search Index](/ai/reference/vector-search-index.md). ## Store vectors with different dimensions @@ -91,11 +77,11 @@ INSERT INTO vector_table VALUES (1, '[0.3, 0.5, -0.1]'); -- 3 dimensions vector, INSERT INTO vector_table VALUES (2, '[0.3, 0.5]'); -- 2 dimensions vector, OK ``` -However, note that you cannot build a [vector search index](/vector-search/vector-search-index.md) for this column, as vector distances can be only calculated between vectors with the same dimensions. +However, note that you cannot build a [vector search index](/ai/reference/vector-search-index.md) for this column, as vector distances can be only calculated between vectors with the same dimensions. ## Comparison -You can compare vector data types using [comparison operators](/functions-and-operators/operators.md) such as `=`, `!=`, `<`, `>`, `<=`, and `>=`. For a complete list of comparison operators and functions for vector data types, see [Vector Functions and Operators](/vector-search/vector-search-functions-and-operators.md). 
+You can compare vector data types using [comparison operators](/functions-and-operators/operators.md) such as `=`, `!=`, `<`, `>`, `<=`, and `>=`. For a complete list of comparison operators and functions for vector data types, see [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md). Vector data types are compared element-wise numerically. For example: @@ -239,7 +225,7 @@ You can also explicitly cast a vector to its string representation. Take using t 1 row in set (0.01 sec) ``` -For additional cast functions, see [Vector Functions and Operators](/vector-search/vector-search-functions-and-operators.md). +For additional cast functions, see [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md). ### Cast between Vector ⇔ other data types @@ -249,7 +235,7 @@ Note that vector data type columns stored in a table cannot be converted to othe ## Restrictions -For restrictions on vector data types, see [Vector search limitations](/vector-search/vector-search-limitations.md) and [Vector index restrictions](/vector-search/vector-search-index.md#restrictions). +For restrictions on vector data types, see [Vector search limitations](/ai/reference/vector-search-limitations.md) and [Vector index restrictions](/ai/reference/vector-search-index.md#restrictions). ## MySQL compatibility @@ -257,6 +243,6 @@ Vector data types are TiDB specific, and are not supported in MySQL. 
## See also -- [Vector Functions and Operators](/vector-search/vector-search-functions-and-operators.md) -- [Vector Search Index](/vector-search/vector-search-index.md) -- [Improve Vector Search Performance](/vector-search/vector-search-improve-performance.md) \ No newline at end of file +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) +- [Improve Vector Search Performance](/ai/reference/vector-search-improve-performance.md) diff --git a/vector-search/vector-search-functions-and-operators.md b/ai/reference/vector-search-functions-and-operators.md similarity index 89% rename from vector-search/vector-search-functions-and-operators.md rename to ai/reference/vector-search-functions-and-operators.md index 44fb7cff34d5d..4ecd1e840b88e 100644 --- a/vector-search/vector-search-functions-and-operators.md +++ b/ai/reference/vector-search-functions-and-operators.md @@ -1,39 +1,25 @@ --- title: Vector Functions and Operators summary: Learn about functions and operators available for Vector data types. +aliases: ['/tidb/stable/vector-search-functions-and-operators/','/tidb/dev/vector-search-functions-and-operators/','/tidbcloud/vector-search-functions-and-operators/'] --- # Vector Functions and Operators This document lists the functions and operators available for Vector data types. - - -> **Warning:** -> -> This feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> This feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
- - - > **Note:** > -> Vector data types and these vector functions are available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - Vector functions and operators are in beta and might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - Vector data types and these vector functions are available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). ## Vector functions -The following functions are designed specifically for [Vector data types](/vector-search/vector-search-data-types.md). +The following functions are designed specifically for [Vector data types](/ai/reference/vector-search-data-types.md). 
**Vector distance functions:** -| Function name | Description | Supported by [vector index](/vector-search/vector-search-index.md#vector-search-index) | +| Function name | Description | Supported by [vector index](/ai/reference/vector-search-index.md) | | ----------------------------------------------------------- | ---------------------------------------------------------------- |---------------------------| | [`VEC_L2_DISTANCE`](#vec_l2_distance) | Calculates L2 distance (Euclidean distance) between two vectors | Yes | | [`VEC_COSINE_DISTANCE`](#vec_cosine_distance) | Calculates the cosine distance between two vectors | Yes | @@ -51,7 +37,7 @@ The following functions are designed specifically for [Vector data types](/vecto ## Extended built-in functions and operators -The following built-in functions and operators are extended to support operations on [Vector data types](/vector-search/vector-search-data-types.md). +The following built-in functions and operators are extended to support operations on [Vector data types](/ai/reference/vector-search-data-types.md). **Arithmetic operators:** @@ -60,7 +46,7 @@ The following built-in functions and operators are extended to support operation | [`+`](https://dev.mysql.com/doc/refman/8.0/en/arithmetic-functions.html#operator_plus) | Vector element-wise addition operator | | [`-`](https://dev.mysql.com/doc/refman/8.0/en/arithmetic-functions.html#operator_minus) | Vector element-wise subtraction operator | -For more information about how vector arithmetic works, see [Vector Data Type | Arithmetic](/vector-search/vector-search-data-types.md#arithmetic). +For more information about how vector arithmetic works, see [Vector Data Type | Arithmetic](/ai/reference/vector-search-data-types.md#arithmetic). 
**Aggregate (GROUP BY) functions:** @@ -92,7 +78,7 @@ For more information about how vector arithmetic works, see [Vector Data Type | | [`!=`, `<>`](https://dev.mysql.com/doc/refman/8.0/en/comparison-operators.html#operator_not-equal) | Not equal operator | | [`NOT IN()`](https://dev.mysql.com/doc/refman/8.0/en/comparison-operators.html#operator_not-in) | Check whether a value is not within a set of values | -For more information about how vectors are compared, see [Vector Data Type | Comparison](/vector-search/vector-search-data-types.md#comparison). +For more information about how vectors are compared, see [Vector Data Type | Comparison](/ai/reference/vector-search-data-types.md#comparison). **Control flow functions:** @@ -110,7 +96,7 @@ For more information about how vectors are compared, see [Vector Data Type | Com | [`CAST()`](https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast) | Cast a value as a string or vector | | [`CONVERT()`](https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_convert) | Cast a value as a string | -For more information about how to use `CAST()`, see [Vector Data Type | Cast](/vector-search/vector-search-data-types.md#cast). +For more information about how to use `CAST()`, see [Vector Data Type | Cast](/ai/reference/vector-search-data-types.md#cast). 
## Full references @@ -328,4 +314,4 @@ The vector functions and the extended usage of built-in functions and operators ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) +- [Vector Data Types](/ai/reference/vector-search-data-types.md) diff --git a/vector-search/vector-search-improve-performance.md b/ai/reference/vector-search-improve-performance.md similarity index 59% rename from vector-search/vector-search-improve-performance.md rename to ai/reference/vector-search-improve-performance.md index 3ccc729b4f69b..324bf44e595fe 100644 --- a/vector-search/vector-search-improve-performance.md +++ b/ai/reference/vector-search-improve-performance.md @@ -1,39 +1,25 @@ --- title: Improve Vector Search Performance summary: Learn best practices for improving the performance of TiDB Vector Search. +aliases: ['/tidb/stable/vector-search-improve-performance/','/tidb/dev/vector-search-improve-performance/','/tidbcloud/vector-search-improve-performance/'] --- # Improve Vector Search Performance TiDB Vector Search enables you to perform Approximate Nearest Neighbor (ANN) queries that search for results similar to an image, document, or other input. To improve the query performance, review the following best practices. - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
- - - > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). ## Add vector search index for vector columns -The [vector search index](/vector-search/vector-search-index.md) dramatically improves the performance of vector search queries, usually by 10x or more, with a trade-off of only a small decrease of recall rate. +The [vector search index](/ai/reference/vector-search-index.md) dramatically improves the performance of vector search queries, usually by 10x or more, with a trade-off of only a small decrease of recall rate. ## Ensure vector indexes are fully built -After you insert a large volume of vector data, some of it might be in the Delta layer waiting for persistence. The vector index for such data will be built after the data is persisted. Until all vector data is indexed, vector search performance is suboptimal. To check the index build progress, see [View index build progress](/vector-search/vector-search-index.md#view-index-build-progress). 
+After you insert a large volume of vector data, some of the data might be in the Delta layer waiting for persistence. TiDB builds the vector index for such data after the data is persisted. Until all vector data is indexed, vector search performance is suboptimal. To check the index build progress, see [View index build progress](/ai/reference/vector-search-index.md#view-index-build-progress). ## Reduce vector dimensions or shorten embeddings diff --git a/vector-search/vector-search-index.md b/ai/reference/vector-search-index.md similarity index 89% rename from vector-search/vector-search-index.md rename to ai/reference/vector-search-index.md index 5cae74e4a1fb6..0e1b2c0378a58 100644 --- a/vector-search/vector-search-index.md +++ b/ai/reference/vector-search-index.md @@ -1,33 +1,19 @@ --- title: Vector Search Index summary: Learn how to build and use the vector search index to accelerate K-Nearest neighbors (KNN) queries in TiDB. +aliases: ['/tidb/stable/vector-search-index/','/tidb/dev/vector-search-index/','/tidbcloud/vector-search-index/'] --- # Vector Search Index -As described in the [Vector Search](/vector-search/vector-search-overview.md) document, vector search identifies the Top K-Nearest Neighbors (KNN) to a given vector by calculating the distance between the given vector and all vectors stored in the database. While this approach provides accurate results, it can be slow when the table contains a large number of vectors because it involves a full table scan. [^1] +As described in the [Vector Search](/ai/concepts/vector-search-overview.md) document, vector search identifies the Top K-Nearest Neighbors (KNN) to a given vector by calculating the distance between the given vector and all vectors stored in the database. While this approach provides accurate results, it can be slow when the table contains a large number of vectors because it involves a full table scan. 
[^1] To improve search efficiency, you can create vector search indexes in TiDB for approximate KNN (ANN) search. When using vector indexes for vector search, TiDB can greatly improve query performance with only a slight reduction in accuracy, generally maintaining a search recall rate above 90%. - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. +> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). 
Currently, TiDB supports the [HNSW (Hierarchical Navigable Small World)](https://en.wikipedia.org/wiki/Hierarchical_navigable_small_world) vector search index algorithm. @@ -41,13 +27,13 @@ Currently, TiDB supports the [HNSW (Hierarchical Navigable Small World)](https:/ - Directly dropping columns with vector search indexes is not supported. You can drop such a column by first dropping the vector search index on that column and then dropping the column itself. - Modifying the type of a column with a vector index is not supported. - Setting vector search indexes as [invisible](/sql-statements/sql-statement-alter-index.md) is not supported. -- Building vector search indexes on TiFlash nodes with [encryption at rest](https://docs.pingcap.com/tidb/stable/encryption-at-rest) enabled is not supported. +- Building vector search indexes on TiFlash nodes with [encryption at rest](/encryption-at-rest.md) enabled is not supported. ## Create the HNSW vector index [HNSW](https://en.wikipedia.org/wiki/Hierarchical_navigable_small_world) is one of the most popular vector indexing algorithms. The HNSW index provides good performance with relatively high accuracy, up to 98% in specific cases. 
-In TiDB, you can create an HNSW index for a column with a [vector data type](/vector-search/vector-search-data-types.md) in either of the following ways: +In TiDB, you can create an HNSW index for a column with a [vector data type](/ai/reference/vector-search-data-types.md) in either of the following ways: - When creating a table, use the following syntax to specify the vector column for the HNSW index: @@ -267,7 +253,7 @@ See [`EXPLAIN`](/sql-statements/sql-statement-explain.md), [`EXPLAIN ANALYZE`](/ ## See also -- [Improve Vector Search Performance](/vector-search/vector-search-improve-performance.md) -- [Vector Data Types](/vector-search/vector-search-data-types.md) +- [Improve Vector Search Performance](/ai/reference/vector-search-improve-performance.md) +- [Vector Data Types](/ai/reference/vector-search-data-types.md) [^1]: The explanation of KNN search is adapted from the [Approximate Nearest Neighbor Search Indexes](https://github.com/ClickHouse/ClickHouse/pull/50661/files#diff-7ebd9e71df96e74230c9a7e604fa7cb443be69ba5e23bf733fcecd4cc51b7576) document authored by [rschu1ze](https://github.com/rschu1ze) in ClickHouse documentation, licensed under the Apache License 2.0. diff --git a/ai/reference/vector-search-limitations.md b/ai/reference/vector-search-limitations.md new file mode 100644 index 0000000000000..56b6873318f73 --- /dev/null +++ b/ai/reference/vector-search-limitations.md @@ -0,0 +1,50 @@ +--- +title: Vector Search Limitations +summary: Learn the limitations of the TiDB vector search. +aliases: ['/tidb/stable/vector-search-limitations/','/tidb/dev/vector-search-limitations/','/tidbcloud/vector-search-limitations/'] +--- + +# Vector Search Limitations + +This document describes the known limitations of TiDB vector search. + +> **Note:** +> +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
+> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). + +## Vector data type limitations + +- Each [vector](/ai/reference/vector-search-data-types.md) supports up to 16383 dimensions. +- Vector data types cannot store `NaN`, `Infinity`, or `-Infinity` values. +- Vector data types cannot store double-precision floating-point numbers. If you insert or store double-precision floating-point numbers in vector columns, TiDB converts them to single-precision floating-point numbers. +- Vector columns cannot be used as primary keys or as part of a primary key. +- Vector columns cannot be used as unique indexes or as part of a unique index. +- Vector columns cannot be used as partition keys or as part of a partition key. +- Currently, TiDB does not support modifying a vector column to other data types (such as `JSON` and `VARCHAR`). + +## Vector index limitations + +See [Vector search restrictions](/ai/reference/vector-search-index.md#restrictions). + +## Compatibility with TiDB tools + +When using vector search, note the following compatibility issues: + +- TiDB Cloud features: + + - The [Data Migration feature in the TiDB Cloud console](/tidb-cloud/migrate-from-mysql-using-data-migration.md) does not support migrating or replicating MySQL vector data types to TiDB Cloud. + +- TiDB Self-Managed tools: + + - Make sure that you are using v8.4.0 or a later version of [BR](/br/backup-and-restore-overview.md) to back up and restore data. Restoring tables with vector data types to TiDB clusters earlier than v8.4.0 is not supported. 
+ - [TiDB Data Migration (DM)](/dm/dm-overview.md) does not support migrating or replicating MySQL vector data types to TiDB. + - When [TiCDC](/ticdc/ticdc-overview.md) replicates vector data to a downstream that does not support vector data types, it will change the vector data types to another type. For more information, see [Compatibility with vector data types](/ticdc/ticdc-compatibility.md#compatibility-with-vector-data-types). + +## Feedback + +We value your feedback and are always here to help: + +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/vector-search/vector-search-get-started-using-python.md b/ai/vector-search-get-started-using-python.md similarity index 77% rename from vector-search/vector-search-get-started-using-python.md rename to ai/vector-search-get-started-using-python.md index 9c6e6284c2105..df34d7b755107 100644 --- a/vector-search/vector-search-get-started-using-python.md +++ b/ai/vector-search-get-started-using-python.md @@ -1,33 +1,19 @@ --- title: Get Started with TiDB + AI via Python summary: Learn how to quickly develop an AI application that performs semantic search using Python and TiDB Vector Search. +aliases: ['/tidb/stable/vector-search-get-started-using-python/','/tidb/dev/vector-search-get-started-using-python/','/tidbcloud/vector-search-get-started-using-python/'] --- # Get Started with TiDB + AI via Python This tutorial demonstrates how to develop a simple AI application that provides **semantic search** features. Unlike traditional keyword search, semantic search intelligently understands the meaning behind your query and returns the most relevant result. 
For example, if you have documents titled "dog", "fish", and "tree", and you search for "a swimming animal", the application would identify "fish" as the most relevant result. -Throughout this tutorial, you will develop this AI application using [TiDB Vector Search](/vector-search/vector-search-overview.md), Python, [TiDB Vector SDK for Python](https://github.com/pingcap/tidb-vector-python), and AI models. - - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - +Throughout this tutorial, you will develop this AI application using [TiDB Vector Search](/ai/concepts/vector-search-overview.md), Python, [TiDB Vector SDK for Python](https://github.com/pingcap/tidb-vector-python), and AI models. > **Note:** > -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). +> - The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
+> - The vector search feature is available on [TiDB Self-Managed](/overview.md), [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter), [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential), and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). ## Prerequisites @@ -37,22 +23,10 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads) installed. - A TiDB cluster. - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. -- Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - - - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster of v8.4.0 or a later version. - - +- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. ## Get started @@ -77,7 +51,7 @@ pip install sqlalchemy pymysql sentence-transformers tidb-vector python-dotenv ``` - `tidb-vector`: the Python client for interacting with TiDB vector search. 
-- [`sentence-transformers`](https://sbert.net): a Python library that provides pre-trained models for generating [vector embeddings](/vector-search/vector-search-overview.md#vector-embedding) from text. +- [`sentence-transformers`](https://sbert.net): a Python library that provides pre-trained models for generating [vector embeddings](/ai/concepts/vector-search-overview.md#vector-embedding) from text. ### Step 3. Configure the connection string to the TiDB cluster @@ -118,7 +92,7 @@ For a {{{ .starter }}} cluster, take the following steps to obtain the cluster c ```
-
+
For a TiDB Self-Managed cluster, create a `.env` file in the root directory of your Python project. Copy the following content into the `.env` file, and modify the environment variable values according to the connection parameters of your TiDB cluster: @@ -143,7 +117,7 @@ The following are descriptions for each parameter: ### Step 4. Initialize the embedding model -An [embedding model](/vector-search/vector-search-overview.md#embedding-model) transforms data into [vector embeddings](/vector-search/vector-search-overview.md#vector-embedding). This example uses the pre-trained model [**msmarco-MiniLM-L12-cos-v5**](https://huggingface.co/sentence-transformers/msmarco-MiniLM-L12-cos-v5) for text embedding. This lightweight model, provided by the `sentence-transformers` library, transforms text data into 384-dimensional vector embeddings. +An [embedding model](/ai/concepts/vector-search-overview.md#embedding-model) transforms data into [vector embeddings](/ai/concepts/vector-search-overview.md#vector-embedding). This example uses the pre-trained model [**msmarco-MiniLM-L12-cos-v5**](https://huggingface.co/sentence-transformers/msmarco-MiniLM-L12-cos-v5) for text embedding. This lightweight model, provided by the `sentence-transformers` library, transforms text data into 384-dimensional vector embeddings. To set up the model, copy the following code into the `example.py` file. This code initializes a `SentenceTransformer` instance and defines a `text_to_embedding()` function for later use. 
@@ -255,5 +229,5 @@ Therefore, according to the output, the swimming animal is most likely a fish, o ## See also -- [Vector Data Types](/vector-search/vector-search-data-types.md) -- [Vector Search Index](/vector-search/vector-search-index.md) \ No newline at end of file +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Search Index](/ai/reference/vector-search-index.md) diff --git a/api/_index.md b/api/_index.md new file mode 100644 index 0000000000000..f71ff5e65332c --- /dev/null +++ b/api/_index.md @@ -0,0 +1,30 @@ +--- +title: TiDB API Overview +summary: Learn about the APIs available for TiDB Cloud and TiDB Self-Managed. +aliases: ['/tidbcloud/api-overview/'] +--- + +# TiDB API Overview + +TiDB provides various APIs for querying and operating clusters, managing data replication, monitoring system status, and more. This document provides an overview of the available APIs for both [TiDB Cloud](https://docs.pingcap.com/tidbcloud/) and [TiDB Self-Managed](https://docs.pingcap.com/tidb/stable/). + +## TiDB Cloud API (beta) + +[TiDB Cloud API](/api/tidb-cloud-api-overview.md) is a [REST interface](https://en.wikipedia.org/wiki/Representational_state_transfer) that provides you with programmatic access to manage administrative objects within TiDB Cloud, such as projects, clusters, backups, restores, imports, billings, and Data Service resources. + +| API | Description | +| --- | --- | +| [v1beta1](/api/tidb-cloud-api-v1beta1.md) | Manage TiDB Cloud Starter, Essential, and Dedicated clusters, as well as billing, Data Service, and IAM resources. | +| [v1beta](/api/tidb-cloud-api-v1beta.md) | Manage projects, clusters, backups, imports, and restores for TiDB Cloud. | + +## TiDB Self-Managed API + +TiDB Self-Managed provides various APIs for TiDB tools to help you manage cluster components, monitor system status, and control data replication workflows. 
+ +| API | Description | +| --- | --- | +| [TiProxy API](/tiproxy/tiproxy-api.md) | Access TiProxy configuration, health status, and monitoring data. | +| [Data Migration API](/dm/dm-open-api.md) | Manage DM-master and DM-worker nodes, data sources, and data replication tasks. | +| [Monitoring API](/tidb-monitoring-api.md) | Get TiDB server running status, table storage information, and TiKV cluster details. | +| [TiCDC API](/ticdc/ticdc-open-api-v2.md) | Query TiCDC node status and manage replication tasks, including creating, pausing, resuming, and updating operations. | +| [TiDB Operator API](https://github.com/pingcap/tidb-operator/blob/{{{.tidb-operator-version}}}/docs/api-references/docs.md) | Manage TiDB clusters on Kubernetes, including deployment, upgrades, scaling, backup, and failover. | diff --git a/api/dm-api-overview.md b/api/dm-api-overview.md new file mode 100644 index 0000000000000..db3a45dbf4c20 --- /dev/null +++ b/api/dm-api-overview.md @@ -0,0 +1,18 @@ +--- +title: Data Migration API Overview +summary: Learn the API of Data Migration (DM). +--- + +# Data Migration API Overview + +[TiDB Data Migration](/dm/dm-overview.md) (DM) is an integrated data migration task management platform that supports full data migration and incremental data replication from MySQL-compatible databases (such as MySQL, MariaDB, and Aurora MySQL) into TiDB. + +DM provides an OpenAPI for querying and operating the DM cluster, similar to the [dmctl tool](/dm/dmctl-introduction.md). + +You can use DM APIs to perform the following maintenance operations on the DM cluster: + +- [Cluster management](/dm/dm-open-api.md#apis-for-managing-clusters): Get information about or stop DM-master and DM-worker nodes. +- [Data source management](/dm/dm-open-api.md#apis-for-managing-data-sources): Create, update, delete, enable, or disable data sources, manage relay-log features, and change the bindings between your data source and DM-workers. 
+- [Replication task management](/dm/dm-open-api.md#apis-for-managing-replication-tasks): Create, update, delete, start, or stop replication tasks; manage schemas and migration rules. + +For more information about each API, including request parameters, response examples, and usage instructions, see [Maintain DM Clusters Using OpenAPI](/dm/dm-open-api.md). diff --git a/api/monitoring-api-overview.md b/api/monitoring-api-overview.md new file mode 100644 index 0000000000000..46983fc07730a --- /dev/null +++ b/api/monitoring-api-overview.md @@ -0,0 +1,15 @@ +--- +title: TiDB Monitoring API Overview +summary: Learn the API of TiDB monitoring services. +--- + +# TiDB Monitoring API Overview + +The TiDB monitoring framework uses two open-source projects: [Prometheus](https://prometheus.io) and [Grafana](https://grafana.com/grafana). TiDB uses Prometheus to store monitoring and performance metrics and Grafana to visualize these metrics. TiDB also provides the built-in [TiDB Dashboard](/dashboard/dashboard-intro.md) for monitoring and diagnosing TiDB clusters. + +You can use the following interfaces to monitor TiDB cluster status: + +- [Status interface](/tidb-monitoring-api.md#use-the-status-interface): monitor the [running status](/tidb-monitoring-api.md#running-status) of the current TiDB server and the [storage information](/tidb-monitoring-api.md#storage-information) of a table. +- [Metrics interface](/tidb-monitoring-api.md#use-the-metrics-interface): get detailed information about various operations in components and view these metrics using Grafana. + +For more information about each API, including request parameters, response examples, and usage instructions, see [TiDB Monitoring API](/tidb-monitoring-api.md). diff --git a/api/ticdc-api-overview.md b/api/ticdc-api-overview.md new file mode 100644 index 0000000000000..b25f6e5808e88 --- /dev/null +++ b/api/ticdc-api-overview.md @@ -0,0 +1,19 @@ +--- +title: TiCDC API Overview +summary: Learn the API of TiCDC. 
+--- + +# TiCDC API Overview + +[TiCDC](/ticdc/ticdc-overview.md) is a tool used to replicate incremental data from TiDB. Specifically, TiCDC pulls TiKV change logs, sorts captured data, and exports row-based incremental data to downstream databases. + +TiCDC provides the following two versions of APIs for querying and operating the TiCDC cluster: + +- [TiCDC OpenAPI v1](/ticdc/ticdc-open-api.md) +- [TiCDC OpenAPI v2](/ticdc/ticdc-open-api-v2.md) + +> **Note:** +> +> TiCDC OpenAPI v1 will be removed in the future. It is recommended to use TiCDC OpenAPI v2. + +For more information about each API, including request parameters, response examples, and usage instructions, see [TiCDC OpenAPI v1](/ticdc/ticdc-open-api.md) and [TiCDC OpenAPI v2](/ticdc/ticdc-open-api-v2.md). \ No newline at end of file diff --git a/api/tidb-cloud-api-overview.md b/api/tidb-cloud-api-overview.md new file mode 100644 index 0000000000000..a143bf587c295 --- /dev/null +++ b/api/tidb-cloud-api-overview.md @@ -0,0 +1,23 @@ +--- +title: TiDB Cloud API Overview +summary: Learn about what TiDB Cloud API is, its features, and how to use the API to manage your TiDB Cloud clusters. +--- + +# TiDB Cloud API Overview + +> **Note:** +> +> TiDB Cloud API is in beta. + +The TiDB Cloud API is a [REST interface](https://en.wikipedia.org/wiki/Representational_state_transfer) that provides you with programmatic access to manage administrative objects within TiDB Cloud. Through this API, you can automatically and efficiently manage resources such as projects, clusters, backups, restores, imports, billings, and resources in the [Data Service](https://docs.pingcap.com/tidbcloud/data-service-overview). + +The API has the following features: + +- **JSON entities.** All entities are expressed in JSON. +- **HTTPS-only.** You can only access the API via HTTPS, ensuring all the data sent over the network is encrypted with TLS. 
+- **Key-based access and digest authentication.** Before you access the TiDB Cloud API, you must generate an API key. For more information, see [API Key Management](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Authentication/API-key-management). All requests are authenticated through [HTTP Digest Authentication](https://en.wikipedia.org/wiki/Digest_access_authentication), ensuring the API key is never sent over the network. + +The TiDB Cloud API is available in two versions: + +- [v1beta1](/api/tidb-cloud-api-v1beta1.md): manage TiDB Cloud Starter, Essential, and Dedicated clusters, as well as billing, Data Service, and IAM resources. +- [v1beta](/api/tidb-cloud-api-v1beta.md): manage projects, clusters, backups, imports, and restores for TiDB Cloud. diff --git a/api/tidb-cloud-api-v1beta.md b/api/tidb-cloud-api-v1beta.md new file mode 100644 index 0000000000000..7afc9a05aff3e --- /dev/null +++ b/api/tidb-cloud-api-v1beta.md @@ -0,0 +1,16 @@ +--- +title: TiDB Cloud API v1beta Overview +summary: Learn about the v1beta API of TiDB Cloud. +--- + +# TiDB Cloud API v1beta Overview + +The [v1beta API](https://docs.pingcap.com/tidbcloud/api/v1beta) is a RESTful API that gives you programmatic access to manage administrative objects within TiDB Cloud. Through this API, you can automatically and efficiently manage resources such as projects, clusters, backups, restores, and imports. 
+ +Currently, you can use the following v1beta APIs to manage the resources in TiDB Cloud: + +- [Project](https://docs.pingcap.com/tidbcloud/api/v1beta/#tag/Project) +- [Cluster](https://docs.pingcap.com/tidbcloud/api/v1beta/#tag/Cluster) +- [Backup](https://docs.pingcap.com/tidbcloud/api/v1beta/#tag/Backup) +- [Import (Deprecated)](https://docs.pingcap.com/tidbcloud/api/v1beta/#tag/Import) +- [Restore](https://docs.pingcap.com/tidbcloud/api/v1beta/#tag/Restore) \ No newline at end of file diff --git a/api/tidb-cloud-api-v1beta1.md b/api/tidb-cloud-api-v1beta1.md new file mode 100644 index 0000000000000..d852357ec6d57 --- /dev/null +++ b/api/tidb-cloud-api-v1beta1.md @@ -0,0 +1,19 @@ +--- +title: TiDB Cloud API v1beta1 Overview +summary: Learn about the v1beta1 API of TiDB Cloud. +--- + +# TiDB Cloud API v1beta1 Overview + +The TiDB Cloud API v1beta1 is a RESTful API that gives you programmatic access to manage administrative objects within TiDB Cloud. Through this API, you can automatically and efficiently manage cluster-level resources (such as clusters and branches) and organization- or project-level resources (such as billing, Data Service, and IAM). + +Currently, you can use the following v1beta1 APIs to manage the resources in TiDB Cloud: + +- Cluster-level resources: + - [TiDB Cloud Starter or Essential Cluster](https://docs.pingcap.com/tidbcloud/api/v1beta1/serverless): manage clusters, branches, data export tasks, and data import tasks for TiDB Cloud Starter or Essential clusters. + - [TiDB Cloud Dedicated Cluster](https://docs.pingcap.com/tidbcloud/api/v1beta1/dedicated): manage clusters, regions, private endpoint connections, and data import tasks for TiDB Cloud Dedicated clusters. +- Organization or project-level resources: + - [Billing](https://docs.pingcap.com/tidbcloud/api/v1beta1/billing): manage billing for TiDB Cloud clusters. 
+ - [Data Service](https://docs.pingcap.com/tidbcloud/api/v1beta1/dataservice): manage resources in the Data Service for TiDB Cloud clusters. + - [IAM](https://docs.pingcap.com/tidbcloud/api/v1beta1/iam): manage API keys for TiDB Cloud clusters. + - [MSP (Deprecated)](https://docs.pingcap.com/tidbcloud/api/v1beta1/msp) \ No newline at end of file diff --git a/api/tidb-operator-api-overview.md b/api/tidb-operator-api-overview.md new file mode 100644 index 0000000000000..34b760fa72f31 --- /dev/null +++ b/api/tidb-operator-api-overview.md @@ -0,0 +1,20 @@ +--- +title: TiDB Operator API Overview +summary: Learn the API of TiDB Operator. +--- + +# TiDB Operator API Overview + +[TiDB Operator](https://docs.pingcap.com/tidb-in-kubernetes/stable/) is an automatic operation system for TiDB clusters on Kubernetes. It provides full life-cycle management for TiDB including deployment, upgrades, scaling, backup, failover, and configuration changes. With TiDB Operator, TiDB can run seamlessly in the Kubernetes clusters deployed on a public or private cloud. 
+ +To manage TiDB clusters on Kubernetes, you can use the following TiDB Operator APIs: + +- [Backup](https://github.com/pingcap/tidb-operator/blob/{{{.tidb-operator-version}}}/docs/api-references/docs.md#backup) +- [BackupSchedule](https://github.com/pingcap/tidb-operator/blob/{{{.tidb-operator-version}}}/docs/api-references/docs.md#backupschedule) +- [DMCluster](https://github.com/pingcap/tidb-operator/blob/{{{.tidb-operator-version}}}/docs/api-references/docs.md#dmcluster) +- [Restore](https://github.com/pingcap/tidb-operator/blob/{{{.tidb-operator-version}}}/docs/api-references/docs.md#restore) +- [TidbCluster](https://github.com/pingcap/tidb-operator/blob/{{{.tidb-operator-version}}}/docs/api-references/docs.md#tidbcluster) +- [TidbInitializer](https://github.com/pingcap/tidb-operator/blob/{{{.tidb-operator-version}}}/docs/api-references/docs.md#tidbinitializer) +- [TidbMonitor](https://github.com/pingcap/tidb-operator/blob/{{{.tidb-operator-version}}}/docs/api-references/docs.md#tidbmonitor) + +For more information, see [TiDB Operator API Document](https://github.com/pingcap/tidb-operator/blob/{{{.tidb-operator-version}}}/docs/api-references/docs.md). diff --git a/api/tiproxy-api-overview.md b/api/tiproxy-api-overview.md new file mode 100644 index 0000000000000..a295a39b6040a --- /dev/null +++ b/api/tiproxy-api-overview.md @@ -0,0 +1,19 @@ +--- +title: TiProxy API Overview +summary: Learn about the API for TiProxy. +--- + +# TiProxy API Overview + +[TiProxy](/tiproxy/tiproxy-overview.md) is the official proxy component of PingCAP. It is placed between the client and the TiDB server to provide load balancing, connection persistence, service discovery, and other features for TiDB. + +TiProxy is an optional component. You can also use a third-party proxy component or connect directly to the TiDB server without using a proxy. 
+ +You can use TiProxy APIs to perform the following operations on the TiProxy cluster: + +- [Get TiProxy configuration](/tiproxy/tiproxy-api.md#get-tiproxy-configuration) +- [Set TiProxy configuration](/tiproxy/tiproxy-api.md#set-tiproxy-configuration) +- [Get TiProxy health status](/tiproxy/tiproxy-api.md#get-tiproxy-health-status) +- [Get TiProxy monitoring data](/tiproxy/tiproxy-api.md#get-tiproxy-monitoring-data) + +For more information about each API, including request parameters, response examples, and usage instructions, see [TiProxy API](/tiproxy/tiproxy-api.md). \ No newline at end of file diff --git a/basic-features.md b/basic-features.md index f38b70991e627..811dcb8bad812 100644 --- a/basic-features.md +++ b/basic-features.md @@ -28,7 +28,7 @@ You can try out TiDB features on [TiDB Playground](https://play.tidbcloud.com/?u | [Date and time types](/data-type-date-and-time.md) | Y | Y | Y | Y | Y | Y | Y | | [String types](/data-type-string.md) | Y | Y | Y | Y | Y | Y | Y | | [JSON type](/data-type-json.md) | Y | Y | Y | Y | Y | E | E | -| [Vector types](/vector-search/vector-search-data-types.md) | E | N | N | N | N | N | N | +| [Vector types](/ai/reference/vector-search-data-types.md) | E | N | N | N | N | N | N | | [Control flow functions](/functions-and-operators/control-flow-functions.md) | Y | Y | Y | Y | Y | Y | Y | | [String functions](/functions-and-operators/string-functions.md) | Y | Y | Y | Y | Y | Y | Y | | [Numeric functions and operators](/functions-and-operators/numeric-functions-and-operators.md) | Y | Y | Y | Y | Y | Y | Y | @@ -36,7 +36,7 @@ You can try out TiDB features on [TiDB Playground](https://play.tidbcloud.com/?u | [Bit functions and operators](/functions-and-operators/bit-functions-and-operators.md) | Y | Y | Y | Y | Y | Y | Y | | [Cast functions and operators](/functions-and-operators/cast-functions-and-operators.md) | Y | Y | Y | Y | Y | Y | Y | | [Encryption and compression 
functions](/functions-and-operators/encryption-and-compression-functions.md) | Y | Y | Y | Y | Y | Y | Y | -| [Vector functions and operators](/vector-search/vector-search-functions-and-operators.md) | E | N | N | N | N | N | N | +| [Vector functions and operators](/ai/reference/vector-search-functions-and-operators.md) | E | N | N | N | N | N | N | | [Information functions](/functions-and-operators/information-functions.md) | Y | Y | Y | Y | Y | Y | Y | | [JSON functions](/functions-and-operators/json-functions.md) | Y | Y | Y | Y | Y | E | E | | [Aggregation functions](/functions-and-operators/aggregate-group-by-functions.md) | Y | Y | Y | Y | Y | Y | Y | @@ -67,7 +67,7 @@ You can try out TiDB features on [TiDB Playground](https://play.tidbcloud.com/?u | [Foreign key](/foreign-key.md) | Y | E | E | E | N | N | N | | [TiFlash late materialization](/tiflash/tiflash-late-materialization.md) | Y | Y | Y | Y | N | N | N | | [Global indexes](/global-indexes.md) | Y | N | N | N | N | N | N | -| [Vector indexes](/vector-search/vector-search-index.md) | E | N | N | N | N | N | N | +| [Vector indexes](/ai/reference/vector-search-index.md) | E | N | N | N | N | N | N | ## SQL statements @@ -98,7 +98,7 @@ You can try out TiDB features on [TiDB Playground](https://play.tidbcloud.com/?u | Advanced SQL features | 8.5 | 8.1 | 7.5 | 7.1 | 6.5 | 6.1 | 5.4 | |---|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| [Vector search](/vector-search/vector-search-overview.md) | E | N | N | N | N | N | N | +| [Vector search](/ai/concepts/vector-search-overview.md) | E | N | N | N | N | N | N | | [Prepared statement cache](/sql-prepared-plan-cache.md) | Y | Y | Y | Y | Y | Y | Y | | [Non-prepared statement cache](/sql-non-prepared-plan-cache.md) | Y | Y | Y | E | N | N | N | | [Instance-level execution plan cache](/system-variables.md#tidb_enable_instance_plan_cache-new-in-v840) | E | N | N | N | N | N | N | diff --git a/best-practices/_index.md b/best-practices/_index.md new file mode 100644 
index 0000000000000..19002ec9f2f67 --- /dev/null +++ b/best-practices/_index.md @@ -0,0 +1,58 @@ +--- +title: TiDB Best Practices +summary: Learn the best practices for deploying, configuring, and using TiDB effectively. +--- + +# TiDB Best Practices + +By following best practices for deploying, configuring, and using TiDB, you can optimize the performance, reliability, and scalability of your TiDB deployments. This document provides an overview of the best practices for using TiDB. + +## Overview + +Get started with basic principles and general recommendations for using TiDB effectively. + +| Best practice topic | Description | +| ------------------- | ----------- | +| [Use TiDB](/best-practices/tidb-best-practices.md) | A comprehensive overview of best practices for using TiDB. | + +## Schema design + +Learn best practices for designing schemas in TiDB, including managing DDL operations, choosing primary keys, and designing and maintaining indexes to balance performance, scalability, and maintainability. + +| Best practice topic | Description | +| ------------------- | ----------- | +| [Manage DDL](/best-practices/ddl-introduction.md) | Best practices for managing Data Definition Language (DDL) operations in TiDB. | +| [Use UUIDs as Primary Keys](/best-practices/uuid.md) | Best practices for storing and indexing UUIDs (Universally Unique Identifiers) efficiently when using UUIDs as primary keys. | +| [Optimize Multi-Column Indexes](/best-practices/multi-column-index-best-practices.md) | Best practices for designing and using multi-column indexes in TiDB to improve query performance. | +| [Manage Indexes and Identify Unused Indexes](/best-practices/index-management-best-practices.md) | Best practices for managing and optimizing indexes, identifying and removing unused indexes in TiDB to optimize performance. 
| + +## Deployment + +Explore recommended deployment patterns for different scenarios, such as deployment on public cloud and multi-data center setups, to ensure high availability and efficient resource usage. + +| Best practice topic | Description | +| ------------------- | ----------- | +| [Deploy TiDB on Public Cloud](/best-practices/best-practices-on-public-cloud.md) | Best practices for deploying TiDB on public cloud to maximize performance, cost efficiency, reliability, and scalability of your TiDB deployment. | +| [Three-Node Hybrid Deployment](/best-practices/three-nodes-hybrid-deployment.md) | Best practices for a cost-effective, hybrid three-node deployment while maintaining stability. | +| [Local Reads in Three-Data-Center Deployments](/best-practices/three-dc-local-read.md) | Best practices for reducing cross-center latency by using Stale Read. | + +## Operations + +Find operational best practices for running TiDB in production, such as traffic routing, load balancing, and monitoring, to ensure system stability and observability. + +| Best practice topic | Description | +| ------------------- | ----------- | +| [Use HAProxy for Load Balancing](/best-practices/haproxy-best-practices.md) | Best practices for configuring HAProxy to distribute application traffic across multiple TiDB nodes. | +| [Use Read-Only Storage Nodes](/best-practices/readonly-nodes.md) | Best practices for using read-only nodes to isolate analytical or heavy read workloads from OLTP traffic. | +| [Monitor TiDB Using Grafana](/best-practices/grafana-monitor-best-practices.md) | Best practices for using key metrics and dashboard configurations for proactive troubleshooting. | + +## Performance tuning + +Understand how to tune TiDB components such as TiKV and PD, and how to use features like read-only storage nodes to improve performance under different workloads. 
+ +| Best practice topic | Description | +| ------------------- | ----------- | +| [Handle Millions of Tables in SaaS Multi-Tenant Scenarios](/best-practices/saas-best-practices.md) | Best practices for using TiDB in SaaS (Software as a Service) multi-tenant environments, especially in scenarios where the number of tables in a single cluster exceeds one million. | +| [Handle High-Concurrency Writes](/best-practices/high-concurrency-best-practices.md) | Best practices for handling high-concurrency write-heavy workloads in TiDB to avoid write hotspots and optimize performance. | +| [Tune TiKV Performance with Massive Regions](/best-practices/massive-regions-best-practices.md) | Best practices for optimizing TiKV performance and reducing heartbeat overhead when managing millions of Regions. | +| [Tune PD Scheduling](/best-practices/pd-scheduling-best-practices.md) | Best practices for adjusting PD policies to balance load and speed up failure recovery. | diff --git a/best-practices-on-public-cloud.md b/best-practices/best-practices-on-public-cloud.md similarity index 98% rename from best-practices-on-public-cloud.md rename to best-practices/best-practices-on-public-cloud.md index c2e429c7e9d69..24654d4ed2e95 100644 --- a/best-practices-on-public-cloud.md +++ b/best-practices/best-practices-on-public-cloud.md @@ -1,9 +1,10 @@ --- title: TiDB Best Practices on Public Cloud summary: Learn about the best practices for deploying TiDB on public cloud. +aliases: ['/tidb/stable/best-practices-on-public-cloud/','/tidb/dev/best-practices-on-public-cloud/'] --- -# TiDB Best Practices on Public Cloud +# Best Practices for Deploying TiDB on Public Cloud Public cloud infrastructure has become an increasingly popular choice for deploying and managing TiDB. However, deploying TiDB on public cloud requires careful consideration of several critical factors, including performance tuning, cost optimization, reliability, and scalability. 
diff --git a/ddl-introduction.md b/best-practices/ddl-introduction.md similarity index 98% rename from ddl-introduction.md rename to best-practices/ddl-introduction.md index 5a6f252d36fce..9197894a26f2f 100644 --- a/ddl-introduction.md +++ b/best-practices/ddl-introduction.md @@ -1,6 +1,7 @@ --- title: Best Practices for DDL Execution in TiDB summary: Learn about how DDL statements are implemented in TiDB, the online change process, and best practices. +aliases: ['/tidb/stable/ddl-introduction/','/tidb/dev/ddl-introduction/','/tidbcloud/ddl-introduction/'] --- # Best Practices for DDL Execution in TiDB @@ -86,7 +87,7 @@ To improve the user experience of DDL execution, starting from v6.2.0, TiDB enab + DDL statements to be performed on the same table are mutually blocked. + `DROP DATABASE` and DDL statements that affect all objects in the database are mutually blocked. + Adding indexes and column type changes on different tables can be executed concurrently. -+ Starting from v8.2.0, [logical DDL statements](/ddl-introduction.md#types-of-ddl-statements) for different tables can be executed in parallel. ++ Starting from v8.2.0, [logical DDL statements](/best-practices/ddl-introduction.md#types-of-ddl-statements) for different tables can be executed in parallel. + In other cases, DDL can be executed based on the level of availability for concurrent DDL execution. Specifically, TiDB 6.2.0 has enhanced the DDL execution framework in the following aspects: diff --git a/best-practices/grafana-monitor-best-practices.md b/best-practices/grafana-monitor-best-practices.md index 6792c6f4121db..58a3392f9e652 100644 --- a/best-practices/grafana-monitor-best-practices.md +++ b/best-practices/grafana-monitor-best-practices.md @@ -1,7 +1,7 @@ --- title: Best Practices for Monitoring TiDB Using Grafana summary: Best Practices for Monitoring TiDB Using Grafana. Deploy a TiDB cluster using TiUP and add Grafana and Prometheus for monitoring. 
Use metrics to analyze cluster status and diagnose problems. Prometheus collects metrics from TiDB components, and Grafana displays them. Tips for efficient Grafana use include modifying query expressions, switching Y-axis scale, and using API for query results. The platform is powerful for analyzing and diagnosing TiDB cluster status. -aliases: ['/docs/dev/best-practices/grafana-monitor-best-practices/','/docs/dev/reference/best-practices/grafana-monitor/'] +aliases: ['/docs/dev/best-practices/grafana-monitor-best-practices/','/docs/dev/reference/best-practices/grafana-monitor/','/tidb/stable/grafana-monitor-best-practices/','/tidb/dev/grafana-monitor-best-practices/'] --- # Best Practices for Monitoring TiDB Using Grafana diff --git a/best-practices/haproxy-best-practices.md b/best-practices/haproxy-best-practices.md index 7f88aad6de4ab..0ae816e7eea26 100644 --- a/best-practices/haproxy-best-practices.md +++ b/best-practices/haproxy-best-practices.md @@ -1,7 +1,7 @@ --- title: Best Practices for Using HAProxy in TiDB summary: HAProxy is a free, open-source load balancer and proxy server for TCP and HTTP-based applications. It provides high availability, load balancing, health checks, sticky sessions, SSL support, and monitoring. To deploy HAProxy, ensure hardware and software requirements are met, then install and configure it. Use the latest stable version for best results. 
-aliases: ['/docs/dev/best-practices/haproxy-best-practices/','/docs/dev/reference/best-practices/haproxy/'] +aliases: ['/docs/dev/best-practices/haproxy-best-practices/','/docs/dev/reference/best-practices/haproxy/','/tidb/stable/haproxy-best-practices/','/tidb/dev/haproxy-best-practices/'] --- # Best Practices for Using HAProxy in TiDB diff --git a/best-practices/high-concurrency-best-practices.md b/best-practices/high-concurrency-best-practices.md index c5c8278b2258d..46eaf72f4388d 100644 --- a/best-practices/high-concurrency-best-practices.md +++ b/best-practices/high-concurrency-best-practices.md @@ -1,7 +1,7 @@ --- title: Best Practices for High-Concurrency Writes -summary: This document provides best practices for handling high-concurrency write-heavy workloads in TiDB. It addresses challenges and solutions for data distribution, hotspot cases, and complex hotspot problems. The article also discusses parameter configuration for optimizing performance. -aliases: ['/docs/dev/best-practices/high-concurrency-best-practices/','/docs/dev/reference/best-practices/high-concurrency/'] +summary: This document provides best practices for handling highly concurrent write-heavy workloads in TiDB. It addresses challenges and solutions for data distribution, hotspot cases, and complex hotspot problems. The article also discusses parameter configuration for optimizing performance.
+aliases: ['/tidb/stable/high-concurrency-best-practices/','/tidb/dev/high-concurrency-best-practices/','/docs/dev/best-practices/high-concurrency-best-practices/','/docs/dev/reference/best-practices/high-concurrency/'] --- # Best Practices for High-Concurrency Writes diff --git a/best-practices/index-management-best-practices.md b/best-practices/index-management-best-practices.md index 3410ede7fa8ac..48528336537b6 100644 --- a/best-practices/index-management-best-practices.md +++ b/best-practices/index-management-best-practices.md @@ -1,6 +1,7 @@ --- title: Best Practices for Managing Indexes and Identifying Unused Indexes summary: Learn the best practices for managing and optimizing indexes, identifying and removing unused indexes in TiDB. +aliases: ['/tidb/stable/index-management-best-practices/','/tidb/dev/index-management-best-practices/'] --- # Best Practices for Managing Indexes and Identifying Unused Indexes diff --git a/best-practices/massive-regions-best-practices.md b/best-practices/massive-regions-best-practices.md index 30f185135af1c..274c9c82f9596 100644 --- a/best-practices/massive-regions-best-practices.md +++ b/best-practices/massive-regions-best-practices.md @@ -1,7 +1,7 @@ --- title: Best Practices for Tuning TiKV Performance with Massive Regions summary: TiKV performance tuning involves reducing the number of Regions and messages, increasing Raftstore concurrency, enabling Hibernate Region and Region Merge, adjusting Raft base tick interval, increasing TiKV instances, and adjusting Region size. Other issues include slow PD leader switching and outdated PD routing information. 
-aliases: ['/docs/dev/best-practices/massive-regions-best-practices/','/docs/dev/reference/best-practices/massive-regions/'] +aliases: ['/docs/dev/best-practices/massive-regions-best-practices/','/docs/dev/reference/best-practices/massive-regions/','/tidb/stable/massive-regions-best-practices/','/tidb/dev/massive-regions-best-practices/'] --- # Best Practices for Tuning TiKV Performance with Massive Regions diff --git a/best-practices/multi-column-index-best-practices.md b/best-practices/multi-column-index-best-practices.md index c6a7dfe757749..59906be563d38 100644 --- a/best-practices/multi-column-index-best-practices.md +++ b/best-practices/multi-column-index-best-practices.md @@ -1,6 +1,7 @@ --- title: Best Practices for Optimizing Multi-Column Indexes summary: Learn how to use multi-column indexes effectively in TiDB and apply advanced optimization techniques. +aliases: ['/tidb/stable/multi-column-index-best-practices/','/tidb/dev/multi-column-index-best-practices/'] --- # Best Practices for Optimizing Multi-Column Indexes diff --git a/best-practices/pd-scheduling-best-practices.md b/best-practices/pd-scheduling-best-practices.md index 907135c26f981..d76b49fe6af3d 100644 --- a/best-practices/pd-scheduling-best-practices.md +++ b/best-practices/pd-scheduling-best-practices.md @@ -1,7 +1,7 @@ --- title: Best Practices for PD Scheduling summary: This document summarizes PD scheduling best practices, including scheduling process, load balancing, hot regions scheduling, cluster topology awareness, scale-down and failure recovery, region merge, query scheduling status, and control scheduling strategy. It also covers common scenarios such as uneven distribution of leaders/regions, slow node recovery, and troubleshooting TiKV nodes.
-aliases: ['/docs/dev/best-practices/pd-scheduling-best-practices/','/docs/dev/reference/best-practices/pd-scheduling/'] +aliases: ['/docs/dev/best-practices/pd-scheduling-best-practices/','/docs/dev/reference/best-practices/pd-scheduling/','/tidb/stable/pd-scheduling-best-practices/','/tidb/dev/pd-scheduling-best-practices/'] --- # Best Practices for PD Scheduling @@ -92,11 +92,11 @@ Cluster topology awareness enables PD to distribute replicas of a region as much The component to check region distribution is `replicaChecker`, which is similar to a scheduler except that it cannot be disabled. `replicaChecker` schedules based on the configuration of `location-labels`. For example, `[zone,rack,host]` defines a three-tier topology for a cluster. PD attempts to schedule region peers to different zones first, or to different racks when zones are insufficient (for example, 2 zones for 3 replicas), or to different hosts when racks are insufficient. -### Scale-down and failure recovery +### Scale-in and failure recovery -Scale-down refers to the process when you take a store offline and mark it as "offline" using a command. PD replicates the regions on the offline node to other nodes by scheduling. Failure recovery applies when stores failed and cannot be recovered. In this case, regions with peers distributed on the corresponding store might lose replicas, which requires PD to replenish on other nodes. +Scale-in refers to the process when you take a store offline and mark it as "offline" using a command. PD replicates the regions on the offline node to other nodes by scheduling. Failure recovery applies when stores failed and cannot be recovered. In this case, regions with peers distributed on the corresponding store might lose replicas, which requires PD to replenish on other nodes. -The processes of scale-down and failure recovery are basically the same. 
`replicaChecker` finds a region peer in abnormal states, and then generates an operator to replace the abnormal peer with a new one on a healthy store. +The processes of scale-in and failure recovery are basically the same. `replicaChecker` finds a region peer in abnormal states, and then generates an operator to replace the abnormal peer with a new one on a healthy store. ### Region merge diff --git a/best-practices/readonly-nodes.md b/best-practices/readonly-nodes.md index 80faadb4da59f..acfe4fbf66254 100644 --- a/best-practices/readonly-nodes.md +++ b/best-practices/readonly-nodes.md @@ -1,6 +1,7 @@ --- title: Best Practices for Read-Only Storage Nodes summary: This document introduces configuring read-only storage nodes for isolating high-tolerance delay loads from online services. Steps include marking TiKV nodes as read-only, using Placement Rules to store data on read-only nodes as learners, and using Follower Read to read data from read-only nodes. +aliases: ['/tidb/stable/readonly-nodes/','/tidb/dev/readonly-nodes/'] --- # Best Practices for Read-Only Storage Nodes diff --git a/best-practices/saas-best-practices.md b/best-practices/saas-best-practices.md index fbb291b961a17..b137e369bc3af 100644 --- a/best-practices/saas-best-practices.md +++ b/best-practices/saas-best-practices.md @@ -1,6 +1,7 @@ --- title: Best Practices for Handling Millions of Tables in SaaS Multi-Tenant Scenarios summary: Learn best practices for TiDB in SaaS (Software as a Service) multi-tenant scenarios, especially for environments where the number of tables in a single cluster exceeds one million. 
+aliases: ['/tidb/stable/saas-best-practices/','/tidb/dev/saas-best-practices/'] --- # Best Practices for Handling Millions of Tables in SaaS Multi-Tenant Scenarios diff --git a/best-practices/three-dc-local-read.md b/best-practices/three-dc-local-read.md index 7f0c2e4c9a2dd..afbf697d0dcf1 100644 --- a/best-practices/three-dc-local-read.md +++ b/best-practices/three-dc-local-read.md @@ -1,6 +1,7 @@ --- title: Best Practices for Local Reads in Three-Data-Center Deployments summary: TiDB's three data center deployment model can cause increased access latency due to cross-center data reads. To mitigate this, the Stale Read feature allows for local historical data access, reducing latency at the expense of real-time data availability. When using Stale Read in geo-distributed scenarios, TiDB accesses local replicas to avoid cross-center network latency. This is achieved by configuring the `zone` label and setting `tidb_replica_read` to `closest-replicas`. For more information on performing Stale Read, refer to the documentation. +aliases: ['/tidb/stable/three-dc-local-read/','/tidb/dev/three-dc-local-read/'] --- # Best Practices for Local Reads in Three-Data-Center Deployments diff --git a/best-practices/three-nodes-hybrid-deployment.md b/best-practices/three-nodes-hybrid-deployment.md index 52794404ba6d0..ca13fc6c9ec02 100644 --- a/best-practices/three-nodes-hybrid-deployment.md +++ b/best-practices/three-nodes-hybrid-deployment.md @@ -1,6 +1,7 @@ --- title: Best Practices for Three-Node Hybrid Deployment summary: TiDB cluster can be deployed in a cost-effective way on three machines. Best practices for this hybrid deployment include adjusting parameters for stability and performance. Limiting resource consumption and adjusting thread pool sizes are key to optimizing the cluster. Adjusting parameters for TiKV background tasks and TiDB execution operators is also important. 
+aliases: ['/tidb/stable/three-nodes-hybrid-deployment/','/tidb/dev/three-nodes-hybrid-deployment/'] --- # Best Practices for Three-Node Hybrid Deployment diff --git a/best-practices/tidb-best-practices.md b/best-practices/tidb-best-practices.md index 718e23f959167..a59395b85c770 100644 --- a/best-practices/tidb-best-practices.md +++ b/best-practices/tidb-best-practices.md @@ -1,7 +1,7 @@ --- title: TiDB Best Practices summary: This document summarizes best practices for using TiDB, covering SQL use and optimization tips for OLAP and OLTP scenarios, with a focus on TiDB-specific optimization options. It also recommends reading three blog posts introducing TiDB's technical principles before diving into the best practices. -aliases: ['/docs/dev/tidb-best-practices/'] +aliases: ['/docs/dev/tidb-best-practices/','/tidb/stable/tidb-best-practices/','/tidb/dev/tidb-best-practices/'] --- # TiDB Best Practices diff --git a/best-practices/uuid.md b/best-practices/uuid.md index 36d77654a6392..7d4ef00b19578 100644 --- a/best-practices/uuid.md +++ b/best-practices/uuid.md @@ -1,6 +1,7 @@ --- title: Best Practices for Using UUIDs as Primary Keys summary: UUIDs, when used as primary keys, offer benefits such as reduced network trips, support in most programming languages and databases, and protection against enumeration attacks. Storing UUIDs as binary in a `BINARY(16)` column is recommended. It's also advised to avoid setting the `swap_flag` with TiDB to prevent hotspots. MySQL compatibility is available for UUIDs. +aliases: ['/tidb/stable/uuid/','/tidb/dev/uuid/','/tidbcloud/uuid/'] --- # Best Practices for Using UUIDs as Primary Keys @@ -27,33 +28,13 @@ The textual UUID format looks like this: `ab06f63e-8fe7-11ec-a514-5405db7aad56`, The `UUID_TO_BIN()` function can be used with one argument, the UUID or with two arguments where the second argument is a `swap_flag`. 
- - It is recommended to not set the `swap_flag` with TiDB to avoid [hotspots](/best-practices/high-concurrency-best-practices.md). - - - - -It is recommended to not set the `swap_flag` with TiDB to avoid hotspots. - - - You can also explicitly set the [`CLUSTERED` option](/clustered-indexes.md) for UUID based primary keys to avoid hotspots. To demonstrate the effect of the `swap_flag`, here are two tables with an identical structure. The difference is that the data inserted into `uuid_demo_1` uses `UUID_TO_BIN(?, 0)` and `uuid_demo_2` uses `UUID_TO_BIN(?, 1)`. - - -In the screenshot of the [Key Visualizer](/dashboard/dashboard-key-visualizer.md) below, you can see that writes are concentrated in a single region of the `uuid_demo_2` table that has the order of the fields swapped in the binary format. - - - - - -In the screenshot of the [Key Visualizer](/tidb-cloud/tune-performance.md#key-visualizer) below, you can see that writes are concentrated in a single region of the `uuid_demo_2` table that has the order of the fields swapped in the binary format. - - +In the screenshot of the Key Visualizer below, you can see that writes are concentrated in a single region of the `uuid_demo_2` table that has the order of the fields swapped in the binary format. ![Key Visualizer](/media/best-practices/uuid_keyviz.png) @@ -73,6 +54,11 @@ CREATE TABLE `uuid_demo_2` ( ) ``` +For more information about Key Visualizer, see the following documentation: + +- [Key Visualizer](/dashboard/dashboard-key-visualizer.md) for TiDB Self-Managed +- [Key Visualizer](/tidb-cloud/tune-performance.md#key-visualizer) for TiDB Cloud + ## MySQL compatibility UUIDs can be used in MySQL as well. The `BIN_TO_UUID()` and `UUID_TO_BIN()` functions were introduced in MySQL 8.0. The `UUID()` function is available in earlier MySQL versions as well. 
diff --git a/br/backup-and-restore-overview.md b/br/backup-and-restore-overview.md index 00305ee8e8e41..2356b123b7a81 100644 --- a/br/backup-and-restore-overview.md +++ b/br/backup-and-restore-overview.md @@ -118,7 +118,7 @@ Backup and restore might go wrong when some TiDB features are enabled or disable | Global temporary tables | | Make sure that you are using v5.3.0 or a later version of BR to back up and restore data. Otherwise, an error occurs in the definition of the backed global temporary tables. | | TiDB Lightning Physical Import| | If the upstream database uses the physical import mode of TiDB Lightning, data cannot be backed up in log backup. It is recommended to perform a full backup after the data import. For more information, see [When the upstream database imports data using TiDB Lightning in the physical import mode, the log backup feature becomes unavailable. Why?](/faq/backup-and-restore-faq.md#when-the-upstream-database-imports-data-using-tidb-lightning-in-the-physical-import-mode-the-log-backup-feature-becomes-unavailable-why).| | TiCDC | | BR v8.2.0 and later: if the target cluster to be restored has a changefeed and the changefeed [CheckpointTS](/ticdc/ticdc-classic-architecture.md#checkpointts) is earlier than the BackupTS, BR does not perform the restoration. BR versions before v8.2.0: if the target cluster to be restored has any active TiCDC changefeeds, BR does not perform the restoration. | -| Vector search | | Make sure that you are using v8.4.0 or a later version of BR to back up and restore data. Restoring tables with [vector data types](/vector-search/vector-search-data-types.md) to TiDB clusters earlier than v8.4.0 is not supported. | +| Vector search | | Make sure that you are using v8.4.0 or a later version of BR to back up and restore data. Restoring tables with [vector data types](/ai/reference/vector-search-data-types.md) to TiDB clusters earlier than v8.4.0 is not supported. 
| ### Version compatibility diff --git a/data-type-default-values.md b/data-type-default-values.md index 426b757f75e27..a1a790b495fbd 100644 --- a/data-type-default-values.md +++ b/data-type-default-values.md @@ -51,7 +51,7 @@ TiDB supports specifying the following expressions as default values in the `DEF * [`NEXTVAL()`](/functions-and-operators/sequence-functions.md#nextval) * [`RAND()`](/functions-and-operators/numeric-functions-and-operators.md) * [`UUID()`](/functions-and-operators/miscellaneous-functions.md#uuid), [`UUID_TO_BIN()`](/functions-and-operators/miscellaneous-functions.md#uuid_to_bin) -* [`VEC_FROM_TEXT()`](/vector-search/vector-search-functions-and-operators.md#vec_from_text) +* [`VEC_FROM_TEXT()`](/ai/reference/vector-search-functions-and-operators.md#vec_from_text) TiDB supports assigning default values to `BLOB`, `TEXT`, and `JSON` data types. However, you can only use expressions, not literals, to define default values for these data types. The following is an example of `BLOB`: diff --git a/develop/_index.md b/develop/_index.md new file mode 100644 index 0000000000000..5ab9af68118f8 --- /dev/null +++ b/develop/_index.md @@ -0,0 +1,182 @@ +--- +title: Developer Guide Overview +summary: Introduce the overview of the developer guide for TiDB Cloud and TiDB Self-Managed. +aliases: ['/tidb/stable/dev-guide-overview/','/tidb/dev/dev-guide-overview/','/tidbcloud/dev-guide-overview/'] +--- + +# Developer Guide Overview + +[TiDB](https://github.com/pingcap/tidb) is an open-source distributed SQL database that supports Hybrid Transactional and Analytical Processing (HTAP) workloads. + +This guide helps application developers quickly learn how to connect to TiDB, design databases, write and query data, and build reliable, high-performance applications on TiDB. 
+ +> **Note:** +> +> This guide is written for application developers, but if you are interested in the inner workings of TiDB or want to get involved in TiDB development, read the [TiDB Kernel Development Guide](https://pingcap.github.io/tidb-dev-guide/) for more information about TiDB. + +## Guides by language and framework + +Build your application with the language you use by following the guides with sample codes. + + + + +Connect to TiDB over HTTPS from edge environments. + + + + +Connect Next.js with mysql2 to TiDB. + + + + +Connect to TiDB with Prisma ORM. + + + + +Connect to TiDB with TypeORM. + + + + +Connect to TiDB with Sequelize ORM. + + + + +Connect Node.js with mysql.js module to TiDB. + + + + +Connect Node.js with node-mysql2 module to TiDB. + + + + +Connect AWS Lambda Function with mysql2 to TiDB. + + + + + + + +Connect Django application with django-tidb to TiDB. + + + + +Connect to TiDB with the official MySQL package. + + + + +Connect to TiDB with PyMySQL package. + + + + +Connect to TiDB with mysqlclient package. + + + + +Connect to TiDB with SQLAlchemy ORM. + + + + +Connect to TiDB with Peewee ORM. + + + + + + + +Connect to TiDB with JDBC (MySQL Connector/J). + + + + +Connect to TiDB with MyBatis ORM. + + + + +Connect to TiDB with Hibernate ORM. + + + + +Connect Spring based application with Spring Data JPA to TiDB. + + + + + + + +Connect to TiDB with MySQL driver for Go. + + + + +Connect to TiDB with GORM. + + + + + + + +Connect Ruby on Rails application with Active Record ORM to TiDB. + + + + +Connect to TiDB with mysql2 driver. + + + + +In addition to these guides, PingCAP works with the community to support [third-party MySQL drivers, ORMs, and tools](/develop/dev-guide-third-party-support.md). + +## Use MySQL client software + +As TiDB is a MySQL-compatible database, you can use many familiar client software tools to connect to TiDB and manage your databases. Or, you can use our command line tool to connect and manage your databases. 
+ + + + +Connect and manage TiDB databases with MySQL Workbench. + + + + +Connect and manage TiDB databases with the SQLTools extension in VS Code. + + + + +Connect and manage TiDB databases with DBeaver. + + + + +Connect and manage TiDB databases with DataGrip by JetBrains. + + + + +## Additional resources + +Learn other topics about developing with TiDB. + +- Use TiDB Cloud CLI to develop, manage and deploy your applications. +- Explore popular service integrations with TiDB Cloud. +- Follow [TiDB database development reference](/develop/dev-guide-schema-design-overview.md) to design, interact with, optimize, and troubleshoot your data and schema. +- Follow the free online course [Introduction to TiDB](https://eng.edu.pingcap.com/catalog/info/id:203/?utm_source=docs-dev-guide). diff --git a/develop/dev-guide-aws-appflow-integration.md b/develop/dev-guide-aws-appflow-integration.md index 0068b66d24646..a826da95e3a69 100644 --- a/develop/dev-guide-aws-appflow-integration.md +++ b/develop/dev-guide-aws-appflow-integration.md @@ -1,6 +1,7 @@ --- title: Integrate TiDB with Amazon AppFlow summary: Introduce how to integrate TiDB with Amazon AppFlow step by step. +aliases: ['/tidb/stable/dev-guide-aws-appflow-integration/','/tidb/dev/dev-guide-aws-appflow-integration/','/tidbcloud/dev-guide-aws-appflow-integration/'] --- # Integrate TiDB with Amazon AppFlow @@ -249,14 +250,6 @@ test> SELECT * FROM sf_account; ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-bookshop-schema-design.md b/develop/dev-guide-bookshop-schema-design.md index b4b4000cbb682..3058b6b6cd833 100644 --- a/develop/dev-guide-bookshop-schema-design.md +++ b/develop/dev-guide-bookshop-schema-design.md @@ -1,6 +1,7 @@ --- title: Bookshop Example Application summary: Bookshop is an online bookstore app for buying and rating books. You can import table structures and data via TiUP or TiDB Cloud. Method 1 uses TiUP to quickly generate and import sample data, while Method 2 imports data from Amazon S3 to TiDB Cloud. The database tables include books, authors, users, ratings, book_authors, and orders. The database initialization script `dbinit.sql` creates the table structures for the Bookshop application. +aliases: ['/tidb/stable/dev-guide-bookshop-schema-design/','/tidb/dev/dev-guide-bookshop-schema-design/','/tidbcloud/dev-guide-bookshop-schema-design/'] --- # Bookshop Example Application @@ -11,32 +12,15 @@ To make your reading on the application developer guide more smoothly, we presen ## Import table structures and data - +To import table structures and data of the Bookshop application, choose one of the following import methods: -You can import Bookshop table structures and data either [via TiUP](#method-1-via-tiup-demo) or [via the import feature of TiDB Cloud](#method-2-via-tidb-cloud-import). +- [TiDB Self-Managed: via `tiup demo`](#tidb-self-managed-via-tiup-demo). +- [TiDB Cloud: via the Import feature](#tidb-cloud-via-the-import-feature). 
- - - - -For TiDB Cloud, you can skip [Method 1: Via `tiup demo`](#method-1-via-tiup-demo) and import Bookshop table structures [via the import feature of TiDB Cloud](#method-2-via-tidb-cloud-import). - - - -### Method 1: Via `tiup demo` - - +### TiDB Self-Managed: via `tiup demo` If your TiDB cluster is deployed using [TiUP](/tiup/tiup-reference.md#tiup-reference) or you can connect to your TiDB server, you can quickly generate and import sample data for the Bookshop application by running the following command: - - - - -If your TiDB cluster is deployed using [TiUP](https://docs.pingcap.com/tidb/stable/tiup-reference) or you can connect to your TiDB server, you can quickly generate and import sample data for the Bookshop application by running the following command: - - - ```shell tiup demo bookshop prepare ``` @@ -87,7 +71,7 @@ tiup demo bookshop prepare --users=200000 --books=500000 --authors=100000 --rati You can delete the original table structure through the `--drop-tables` parameter. For more parameter descriptions, run the `tiup demo bookshop --help` command. -### Method 2: Via TiDB Cloud Import +### TiDB Cloud: via the Import feature 1. Open the **Import** page for your target cluster. @@ -293,14 +277,6 @@ CREATE TABLE `bookshop`.`orders` ( ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-build-cluster-in-cloud.md b/develop/dev-guide-build-cluster-in-cloud.md index 18084ab8c28bb..d4f98b7415855 100644 --- a/develop/dev-guide-build-cluster-in-cloud.md +++ b/develop/dev-guide-build-cluster-in-cloud.md @@ -1,26 +1,17 @@ --- -title: Build a {{{ .starter }}} Cluster -summary: Learn how to build a {{{ .starter }}} cluster in TiDB Cloud and connect to it. +title: Create a {{{ .starter }}} Cluster +summary: Learn how to create a {{{ .starter }}} cluster and connect to it. +aliases: ['/tidb/stable/dev-guide-build-cluster-in-cloud/','/tidb/dev/dev-guide-build-cluster-in-cloud/','/tidbcloud/dev-guide-build-cluster-in-cloud/'] --- -# Build a {{{ .starter }}} Cluster - - +# Create a {{{ .starter }}} Cluster This document walks you through the quickest way to get started with TiDB. You will use [TiDB Cloud](https://www.pingcap.com/tidb-cloud) to create a {{{ .starter }}} cluster, connect to it, and run a sample application on it. If you need to run TiDB on your local machine, see [Starting TiDB Locally](/quick-start-with-tidb.md). - - - - -This document walks you through the quickest way to get started with TiDB Cloud. You will create a TiDB cluster, connect to it, and run a sample application on it. - - - ## Step 1. Create a {{{ .starter }}} cluster {#step-1-create-a-tidb-cloud-cluster} 1. If you do not have a TiDB Cloud account, click [here](https://tidbcloud.com/free-trial) to sign up for an account. @@ -41,22 +32,10 @@ This document walks you through the quickest way to get started with TiDB Cloud. 8. Click **Generate Password** to generate a random password. The generated password will not show again, so save your password in a secure location. If you do not set a root password, you cannot connect to the cluster. 
- - > **Note:** > > For [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter) clusters, when you connect to your cluster, you must include the prefix for your cluster in the user name and wrap the name with quotation marks. For more information, see [User name prefix](https://docs.pingcap.com/tidbcloud/select-cluster-tier#user-name-prefix). - - - - -> **Note:** -> -> For [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter) clusters, when you connect to your cluster, you must include the prefix for your cluster in the user name and wrap the name with quotation marks. For more information, see [User name prefix](/tidb-cloud/select-cluster-tier.md#user-name-prefix). - - - ## Step 2. Connect to a cluster 1. If the MySQL client is not installed, select your operating system and follow the steps below to install it. @@ -138,24 +117,11 @@ mysql Ver 15.1 Distrib 5.5.68-MariaDB, for Linux (x86_64) using readline 5.1 mysql --connect-timeout 15 -u '.root' -h -P 4000 -D test --ssl-mode=VERIFY_IDENTITY --ssl-ca=/etc/ssl/cert.pem -p ``` - - > **Note:** > > - When you connect to a {{{ .starter }}} cluster, you must [use the TLS connection](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-clusters). > - If you encounter problems when connecting to a {{{ .starter }}} cluster, you can read [Secure Connections to {{{ .starter }}} Clusters](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-clusters) for more information. - - - - -> **Note:** -> -> - When you connect to a {{{ .starter }}} cluster, you must [use the TLS connection](/tidb-cloud/secure-connections-to-serverless-clusters.md). -> - If you encounter problems when connecting to a {{{ .starter }}} cluster, you can read [Secure Connections to {{{ .starter }}} Clusters](/tidb-cloud/secure-connections-to-serverless-clusters.md) for more information. - - - 3. Fill in the password to sign in. ## Step 3. 
Execute a SQL statement @@ -180,14 +146,6 @@ If your actual output is similar to the expected output, congratulations, you ha ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-choose-driver-or-orm.md b/develop/dev-guide-choose-driver-or-orm.md index c57d0ece19d83..716c872243b35 100644 --- a/develop/dev-guide-choose-driver-or-orm.md +++ b/develop/dev-guide-choose-driver-or-orm.md @@ -1,9 +1,10 @@ --- -title: Choose Driver or ORM +title: Choose a Driver or ORM summary: Learn how to choose a driver or ORM framework to connect to TiDB. +aliases: ['/tidb/stable/dev-guide-choose-driver-or-orm/','/tidb/dev/dev-guide-choose-driver-or-orm/','/tidbcloud/dev-guide-choose-driver-or-orm/'] --- -# Choose Driver or ORM +# Choose a Driver or ORM > **Note:** > @@ -302,22 +303,8 @@ For an example of using peewee to build a TiDB application, see [Connect to TiDB
- - -After you have determined the driver or ORM, you can [connect to your TiDB cluster](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster). - - - ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-connect-to-tidb.md b/develop/dev-guide-connect-to-tidb.md index 5fc0cf7d6de4b..c28d37a0caa3b 100644 --- a/develop/dev-guide-connect-to-tidb.md +++ b/develop/dev-guide-connect-to-tidb.md @@ -1,155 +1,29 @@ --- title: Connect to TiDB -summary: Learn how to connect to TiDB. +summary: An overview of methods to connect to TiDB. +aliases: ['/tidb/stable/dev-guide-connect-to-tidb/','/tidb/dev/dev-guide-connect-to-tidb/'] --- # Connect to TiDB -TiDB is highly compatible with the MySQL protocol. For a full list of client link parameters, see [MySQL Client Options](https://dev.mysql.com/doc/refman/8.0/en/mysql-command-options.html). +TiDB is highly compatible with the MySQL protocol, so you can connect to it using most MySQL tools, drivers, and ORMs. 
-TiDB supports the [MySQL Client/Server Protocol](https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_PROTOCOL.html), which allows most client drivers and ORM frameworks to connect to TiDB just as they connect to MySQL. +- To execute SQL manually (for connectivity testing, debugging, or quick verification), start with [MySQL CLI tools](/develop/dev-guide-mysql-tools.md). -## MySQL +- To connect using a visual interface, refer to the documents of the following popular GUI tools: -You can choose to use MySQL Client or MySQL Shell based on your personal preferences. + - [JetBrains DataGrip](/develop/dev-guide-gui-datagrip.md) + - [DBeaver](/develop/dev-guide-gui-dbeaver.md) + - [VS Code](/develop/dev-guide-gui-vscode-sqltools.md) + - [MySQL Workbench](/develop/dev-guide-gui-mysql-workbench.md) + - [Navicat](/develop/dev-guide-gui-navicat.md) - +- To build applications on TiDB, [choose a driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) based on your programming language and framework. -
- -You can connect to TiDB using MySQL Client, which can be used as a command-line tool for TiDB. To install MySQL Client, follow the instructions below for YUM based Linux distributions. - -```shell -sudo yum install mysql -``` - -After the installation, you can connect to TiDB using the following command: - -```shell -mysql --host --port 4000 -u root -p --comments -``` - -The MySQL v9.0 client on macOS cannot correctly load the `mysql_native_password` plugin, causing the error `ERROR 2059 (HY000): Authentication plugin 'mysql_native_password' cannot be loaded` when connecting to TiDB. To address this issue, it is recommended to install and use the MySQL v8.0 client to connect to TiDB. Run the following commands to install it: - -```shell -brew install mysql-client@8.0 -brew unlink mysql -brew link mysql-client@8.0 -``` - -If you still encounter errors, you can specify the installation path of the MySQL v8.0 client to connect to TiDB. Run the following command: - -```shell -/opt/homebrew/opt/mysql-client@8.0/bin/mysql --comments --host ${YOUR_IP_ADDRESS} --port ${YOUR_PORT_NUMBER} -u ${your_user_name} -p -``` - -Replace `/opt/homebrew/opt/mysql-client@8.0/bin/mysql` in the preceding command with the installation path of the MySQL v8.0 client in your actual environment. - -
- -
- -You can connect to TiDB using MySQL Shell, which can be used as a command-line tool for TiDB. To install MySQL Shell, follow the instructions in the [MySQL Shell documentation](https://dev.mysql.com/doc/mysql-shell/8.0/en/mysql-shell-install.html). After the installation, you can connect to TiDB using the following command: - -```shell -mysqlsh --sql mysql://root@:4000 -``` - -
- -
- -## JDBC - -You can connect to TiDB using the [JDBC](https://dev.mysql.com/doc/connector-j/en/) driver. To do that, you need to create a `MysqlDataSource` or `MysqlConnectionPoolDataSource` object (both objects support the `DataSource` interface), and then set the connection string using the `setURL` function. - -For example: - -```java -MysqlDataSource mysqlDataSource = new MysqlDataSource(); -mysqlDataSource.setURL("jdbc:mysql://{host}:{port}/{database}?user={username}&password={password}"); -``` - -For more information on JDBC connections, see the [JDBC documentation](https://dev.mysql.com/doc/connector-j/en/) - -### Connection parameters - -| Parameter name | Description | -| :---: | :----------------------------: | -| `{username}` | A SQL user to connect to the TiDB cluster | -| `{password}` | The password of the SQL user | -| `{host}` | [Host](https://en.wikipedia.org/wiki/Host_(network)) of a TiDB node | -| `{port}` | Port that the TiDB node is listening on | -| `{database}` | Name of an existing database | - - - -For more information about TiDB SQL users, see [TiDB User Account Management](/user-account-management.md). - - - - - -For more information about TiDB SQL users, see [TiDB User Account Management](https://docs.pingcap.com/tidb/stable/user-account-management). - - - -## Hibernate - -You can connect to TiDB using the [Hibernate ORM](https://hibernate.org/orm/). To do that, you need to set `hibernate.connection.url` in the Hibernate configuration file to a legal TiDB connection string. 
- -For example, if you use a `hibernate.cfg.xml` configuration file, set `hibernate.connection.url` as follows: - -```xml - - - - - com.mysql.cj.jdbc.Driver - org.hibernate.dialect.TiDBDialect - jdbc:mysql://{host}:{port}/{database}?user={user}&password={password} - - -``` - -After the configuration is done, you can use the following command to read the configuration file and get the `SessionFactory` object: - -```java -SessionFactory sessionFactory = new Configuration().configure("hibernate.cfg.xml").buildSessionFactory(); -``` - -Note the following: - -- Because the `hibernate.cfg.xml` configuration file is in the XML format and `&` is a special character in XML, you need to change `&` to `&` when configuring the file. For example, you need to change the connection string `hibernate.connection.url` from `jdbc:mysql://{host}:{port}/{database}?user={user}&password={password}` to `jdbc:mysql://{host}:{ port}/{database}?user={user}&password={password}`. -- It is recommended that you use the `TiDB` dialect by setting `hibernate.dialect` to `org.hibernate.dialect.TiDBDialect`. -- Hibernate supports TiDB dialects starting from `6.0.0.Beta2`, so it is recommended that you use Hibernate `6.0.0.Beta2` or a later version to connect to TiDB. - -For more information about Hibernate connection parameters, see [Hibernate documentation](https://hibernate.org/orm/documentation). - -### Connection parameters - -| Parameter name | Description | -| :---: | :----------------------------: | -| `{username}` | A SQL user to connect to the TiDB cluster | -| `{password}` | The password of the SQL user | -| `{host}` | [Host](https://en.wikipedia.org/wiki/Host_(network)) of a TiDB node | -| `{port}` | Port that the TiDB node is listening on | -| `{database}` | Name of an existing database | - - - -For more information about TiDB SQL users, see [TiDB User Account Management](/user-account-management.md). 
- - - - - -For more information about TiDB SQL users, see [TiDB User Account Management](https://docs.pingcap.com/tidb/stable/user-account-management). - - +- To connect to {{{ .starter }}} or {{{ .essential }}} clusters from edge environments via HTTP, use the [TiDB Cloud Serverless Driver](/develop/serverless-driver.md). Note that the serverless driver is in beta and only applicable to {{{ .starter }}} or {{{ .essential }}} clusters. ## Need help? -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-connection-parameters.md b/develop/dev-guide-connection-parameters.md index 2cbf121056a10..14d8b74fcc3ba 100644 --- a/develop/dev-guide-connection-parameters.md +++ b/develop/dev-guide-connection-parameters.md @@ -1,23 +1,20 @@ --- -title: Connection Pools and Connection Parameters +title: Configure Connection Pools and Connection Parameters summary: This document explains how to configure connection pools and parameters for TiDB. It covers connection pool size, probe configuration, and formulas for optimal throughput. It also discusses JDBC API usage and MySQL Connector/J parameter configurations for performance optimization. 
+aliases: ['/tidb/stable/dev-guide-connection-parameters/','/tidb/dev/dev-guide-connection-parameters/','/tidbcloud/dev-guide-connection-parameters/'] --- -# Connection Pools and Connection Parameters +# Configure Connection Pools and Connection Parameters This document describes how to configure connection pools and connection parameters when you use a driver or ORM framework to connect to TiDB. - - -If you are interested in more tips about Java application development, see [Best Practices for Developing Java Applications with TiDB](/best-practices/java-app-best-practices.md#connection-pool) - - - - - -If you are interested in more tips about Java application development, see [Best Practices for Developing Java Applications with TiDB](https://docs.pingcap.com/tidb/stable/java-app-best-practices) - - +> **Tip:** +> +> In this document, the following sections are excerpted from [Best Practices for Developing Java Applications with TiDB](/develop/java-app-best-practices.md): +> +> - [Configure the number of connections](#configure-the-number-of-connections) +> - [Probe configuration](#probe-configuration) +> - [Connection parameters](#connection-parameters) ## Connection pool @@ -34,6 +31,38 @@ It is a common practice that the connection pool size is well adjusted according The application needs to return the connection after finishing using it. It is recommended that the application uses the corresponding connection pool monitoring (such as **metricRegistry**) to locate connection pool issues in time. +### Configure the lifetime of connections + +When a TiDB server shuts down, restarts for maintenance, or encounters unexpected issues such as hardware or network failures, your existing client connections might be reset, which can lead to application disruptions. To avoid such issues, it is recommended to close and recreate long-running database connections at least once a day. 
+ +Most connection pool libraries provide a parameter to control the maximum lifetime of a connection: + + +
+ +- **`maxLifetime`**: The maximum lifetime of a connection in the pool. + +
+ +
+ +- **`maxAge`**: The maximum lifetime of a connection in the pool. + +
+ +
+ +- **`maxConnectionAge`**: The maximum lifetime of a connection in the pool. + +
+ +
+ +- **`maxConnLifetimeMillis`**: The maximum lifetime of a connection in the pool. + +
+
+ ### Probe configuration The connection pool maintains persistent connections from clients to TiDB as follows: @@ -283,14 +312,6 @@ However, in an actual production environment, idle connections and SQL statement ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-create-database.md b/develop/dev-guide-create-database.md index 82bd9af01100d..17b4dff049f57 100644 --- a/develop/dev-guide-create-database.md +++ b/develop/dev-guide-create-database.md @@ -1,6 +1,7 @@ --- title: Create a Database summary: Learn steps, rules, and examples to create a database. +aliases: ['/tidb/stable/dev-guide-create-database/','/tidb/dev/dev-guide-create-database/','/tidbcloud/dev-guide-create-database/'] --- # Create a Database @@ -11,7 +12,7 @@ This document describes how to create a database using SQL and various programmi Before creating a database, do the following: -- [Build a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). +- [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). - Read [Schema Design Overview](/develop/dev-guide-schema-design-overview.md). 
## What is database @@ -83,14 +84,6 @@ After creating a database, you can add **tables** to it. For more information, s ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-create-secondary-indexes.md b/develop/dev-guide-create-secondary-indexes.md index d2ae7e3dc3b36..d6ffb927eab37 100644 --- a/develop/dev-guide-create-secondary-indexes.md +++ b/develop/dev-guide-create-secondary-indexes.md @@ -1,6 +1,7 @@ --- title: Create a Secondary Index summary: Learn steps, rules, and examples to create a secondary index. +aliases: ['/tidb/stable/dev-guide-create-secondary-indexes/','/tidb/dev/dev-guide-create-secondary-indexes/','/tidbcloud/dev-guide-create-secondary-indexes/'] --- # Create a Secondary Index @@ -11,7 +12,7 @@ This document describes how to create a secondary index using SQL and various pr Before creating a secondary index, do the following: -- [Build a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). +- [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). - Read [Schema Design Overview](/develop/dev-guide-schema-design-overview.md). 
- [Create a Database](/develop/dev-guide-create-database.md). - [Create a Table](/develop/dev-guide-create-table.md). @@ -20,18 +21,8 @@ Before creating a secondary index, do the following: A secondary index is a logical object in a TiDB cluster. You can simply regard it as a sorting type of data that TiDB uses to improve the query performance. In TiDB, creating a secondary index is an online operation, which does not block any data read and write operations on a table. For each index, TiDB creates references for each row in a table and sorts the references by selected columns instead of by data directly. - - For more information about secondary indexes, see [Secondary Indexes](/best-practices/tidb-best-practices.md#secondary-index). - - - - -For more information about secondary indexes, see [Secondary Indexes](https://docs.pingcap.com/tidb/stable/tidb-best-practices#secondary-index). - - - In TiDB, you can either [add a secondary index to an existing table](#add-a-secondary-index-to-an-existing-table) or [create a secondary index when creating a new table](#create-a-secondary-index-when-creating-a-new-table). ## Add a secondary index to an existing table @@ -146,17 +137,12 @@ In the output, **IndexRangeScan** is displayed instead of **TableFullScan**, whi The words such as **TableFullScan** and **IndexRangeScan** in the execution plan are [operators](/explain-overview.md#operator-overview) in TiDB. For more information about execution plans and operators, see [TiDB Query Execution Plan Overview](/explain-overview.md). - - -The execution plan does not return the same operator every time. This is because TiDB uses a **Cost-Based Optimization (CBO)** approach, in which an execution plan depends on both rules and data distribution. For more information about TiDB SQL performance, see [SQL Tuning Overview](/sql-tuning-overview.md). - - +The execution plan does not return the same operator every time. 
This is because TiDB uses a **Cost-Based Optimization (CBO)** approach, in which an execution plan depends on both rules and data distribution. - +For more information about SQL performance tuning, see the following documents: -The execution plan does not return the same operator every time. This is because TiDB uses a **Cost-Based Optimization (CBO)** approach, in which an execution plan depends on both rules and data distribution. For more information about TiDB SQL performance, see [SQL Tuning Overview](/tidb-cloud/tidb-cloud-sql-tuning-overview.md). - - +- [SQL Tuning Overview for TiDB Cloud](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) +- [SQL Tuning Overview for TiDB Self-Managed](/sql-tuning-overview.md) > **Note:** > @@ -186,14 +172,6 @@ After creating a database and adding tables and secondary indexes to it, you can ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-create-table.md b/develop/dev-guide-create-table.md index ff5cae4ba6e94..5d66ed9af5b1f 100644 --- a/develop/dev-guide-create-table.md +++ b/develop/dev-guide-create-table.md @@ -1,6 +1,7 @@ --- title: Create a Table summary: Learn the definitions, rules, and guidelines in table creation. +aliases: ['/tidb/stable/dev-guide-create-table/','/tidb/dev/dev-guide-create-table/','/tidbcloud/dev-guide-create-table/'] --- # Create a Table @@ -11,7 +12,7 @@ This document introduces how to create tables using the SQL statement and the re Before reading this document, make sure that the following tasks are completed: -- [Build a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). +- [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). - Read [Schema Design Overview](/develop/dev-guide-schema-design-overview.md). - [Create a Database](/develop/dev-guide-create-database.md). @@ -114,11 +115,7 @@ A table can be created without a **primary key** or with a non-integer **primary When the **primary key** of a table is an [integer type](/data-type-numeric.md#integer-types) and `AUTO_INCREMENT` is used, hotspots cannot be avoided by using `SHARD_ROW_ID_BITS`. If you need to avoid hotspots and do not need a continuous and incremental primary key, you can use [`AUTO_RANDOM`](/auto-random.md) instead of `AUTO_INCREMENT` to eliminate row ID continuity. - - -For more information on how to handle hotspot issues, refer to [Troubleshoot Hotspot Issues](/troubleshoot-hot-spot-issues.md). - - +For more information on how to handle hotspot issues in TiDB Self-Managed, see [Troubleshoot Hotspot Issues](/troubleshoot-hot-spot-issues.md). 
Following the [guidelines for selecting primary key](#guidelines-to-follow-when-selecting-primary-key), the following example shows how an `AUTO_RANDOM` primary key is defined in the `users` table. @@ -231,43 +228,17 @@ CREATE TABLE `bookshop`.`users` ( ## Use HTAP capabilities - - -> **Note:** -> -> The steps provided in this guide is **_ONLY_** for quick start in the test environment. For production environments, refer to [explore HTAP](/explore-htap.md). - - - - - > **Note:** > -> The steps provided in this guide is **_ONLY_** for quick start. For more instructions, refer to [Use an HTAP Cluster with TiFlash](/tiflash/tiflash-overview.md). - - +> The steps provided in this section are **_ONLY_** for quick start and testing purposes. For more information about HTAP usage in TiDB, see [explore HTAP](/explore-htap.md). Suppose that you want to perform OLAP analysis on the `ratings` table using the `bookshop` application, for example, to query **whether the rating of a book has a significant correlation with the time of the rating**, which is to analyze whether the user's rating of the book is objective or not. Then you need to query the `score` and `rated_at` fields of the entire `ratings` table. This operation is resource-intensive for an OLTP-only database. Or you can use some ETL or other data synchronization tools to export the data from the OLTP database to a dedicated OLAP database for analysis. In this scenario, TiDB, an **HTAP (Hybrid Transactional and Analytical Processing)** database that supports both OLTP and OLAP scenarios, is an ideal one-stop database solution. -### Replicate column-based data - - - -Currently, TiDB supports two data analysis engines, **TiFlash** and **TiSpark**. For the large data scenarios (100 T), **TiFlash MPP** is recommended as the primary solution for HTAP, and **TiSpark** as a complementary solution. 
+In TiDB, you can use the row-based storage engine [TiKV](/tikv-overview.md) for Online Transactional Processing (OLTP) and the columnar storage engine [TiFlash](/tiflash/tiflash-overview.md) for Online Analytical Processing (OLAP). After configuration, TiFlash can replicate data from TiKV in real time according to the Raft Learner consensus algorithm, which ensures that data is strongly consistent between TiKV and TiFlash. -To learn more about TiDB HTAP capabilities, refer to the following documents: [Quick Start with TiDB HTAP](/quick-start-with-htap.md) and [Explore HTAP](/explore-htap.md). - - - - - -To learn more about TiDB HTAP capabilities, see [TiDB Cloud HTAP Quick Start](/tidb-cloud/tidb-cloud-htap-quickstart.md) and [Use an HTAP Cluster with TiFlash](/tiflash/tiflash-overview.md). - - - -In this example, [TiFlash](https://docs.pingcap.com/tidb/stable/tiflash-overview) has been chosen as the data analysis engine for the `bookshop` database. +### Replicate column-based data TiFlash does not automatically replicate data after deployment. Therefore, you need to manually specify the tables to be replicated: @@ -292,7 +263,7 @@ ALTER TABLE `bookshop`.`ratings` SET TIFLASH REPLICA 1; > **Note:** > -> If your cluster does not contain **TiFlash** nodes, this SQL statement will report an error: `1105 - the tiflash replica count: 1 should be less than the total tiflash server count: 0`. You can use [Build a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-tidb-cloud-cluster) to create a {{{ .starter }}} cluster that includes **TiFlash**. +> If your cluster does not contain **TiFlash** nodes, this SQL statement will report an error: `1105 - the tiflash replica count: 1 should be less than the total tiflash server count: 0`. You can use [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-tidb-cloud-cluster) to create a {{{ .starter }}} cluster that includes **TiFlash**. 
Then you can go on to perform the following query: @@ -412,14 +383,6 @@ Note that all the tables that have been created in this document do not contain ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-delete-data.md b/develop/dev-guide-delete-data.md index 9b08a6c70fc12..dc80098e0b3cd 100644 --- a/develop/dev-guide-delete-data.md +++ b/develop/dev-guide-delete-data.md @@ -1,6 +1,7 @@ --- title: Delete Data summary: Learn about the SQL syntax, best practices, and examples for deleting data. 
+aliases: ['/tidb/stable/dev-guide-delete-data/','/tidb/dev/dev-guide-delete-data/','/tidbcloud/dev-guide-delete-data/'] --- # Delete Data @@ -11,7 +12,7 @@ This document describes how to use the [DELETE](/sql-statements/sql-statement-de Before reading this document, you need to prepare the following: -- [Build a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md) +- [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md) - Read [Schema Design Overview](/develop/dev-guide-schema-design-overview.md), [Create a Database](/develop/dev-guide-create-database.md), [Create a Table](/develop/dev-guide-create-table.md), and [Create Secondary Indexes](/develop/dev-guide-create-secondary-indexes.md) - [Insert Data](/develop/dev-guide-insert-data.md) @@ -36,18 +37,8 @@ The following are some best practices to follow when you delete data: - Always specify the `WHERE` clause in the `DELETE` statement. If the `WHERE` clause is not specified, TiDB will delete **_ALL ROWS_** in the table. - - - Use [bulk-delete](#bulk-delete) when you delete a large number of rows (for example, more than ten thousand), because TiDB limits the size of a single transaction ([txn-total-size-limit](/tidb-configuration-file.md#txn-total-size-limit), 100 MB by default). - - - - -- Use [bulk-delete](#bulk-delete) when you delete a large number of rows (for example, more than ten thousand), because TiDB limits the size of a single transaction to 100 MB by default. - - - - If you delete all the data in a table, do not use the `DELETE` statement. Instead, use the [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) statement. - For performance considerations, see [Performance Considerations](#performance-considerations). - In scenarios where large batches of data need to be deleted, [Non-Transactional bulk-delete](#non-transactional-bulk-delete) can significantly improve performance. 
However, this will lose the transactionality of the deletion and therefore **CANNOT** be rolled back. Make sure that you select the correct operation. @@ -170,21 +161,10 @@ with connection: - - -The `rated_at` field is of the `DATETIME` type in [Date and Time Types](/data-type-date-and-time.md). You can assume that it is stored as a literal quantity in TiDB, independent of the time zone. On the other hand, the `TIMESTAMP` type stores a timestamp and thus displays a different time string in a different [time zone](/configure-time-zone.md). - - - - - -The `rated_at` field is of the `DATETIME` type in [Date and Time Types](/data-type-date-and-time.md). You can assume that it is stored as a literal quantity in TiDB, independent of the time zone. On the other hand, the `TIMESTAMP` type stores a timestamp and thus displays a different time string in a different time zone. - - - > **Note:** > -> Like MySQL, the `TIMESTAMP` data type is affected by the [year 2038 problem](https://en.wikipedia.org/wiki/Year_2038_problem). It is recommended to use the `DATETIME` type if you store values larger than 2038. +> - The `rated_at` field is of the `DATETIME` type in [Date and Time Types](/data-type-date-and-time.md). You can assume that it is stored as a literal quantity in TiDB, independent of the time zone. On the other hand, the `TIMESTAMP` type stores a timestamp and thus displays a different time string in a different [time zone](/configure-time-zone.md). +> - Like MySQL, the `TIMESTAMP` data type is affected by the [year 2038 problem](https://en.wikipedia.org/wiki/Year_2038_problem). It is recommended to use the `DATETIME` type if you store values larger than 2038. ## Performance considerations @@ -204,18 +184,8 @@ TiDB uses [statistical information](/statistics.md) to determine index selection When you need to delete multiple rows of data from a table, you can choose the [`DELETE` example](#example) and use the `WHERE` clause to filter the data that needs to be deleted. 
- - However, if you need to delete a large number of rows (more than ten thousand), it is recommended that you delete the data in an iterative way, that is, deleting a portion of the data at each iteration until the deletion is completed. This is because TiDB limits the size of a single transaction ([`txn-total-size-limit`](/tidb-configuration-file.md#txn-total-size-limit), 100 MB by default). You can use loops in your programs or scripts to perform such operations. - - - - -However, if you need to delete a large number of rows (more than ten thousand), it is recommended that you delete the data in an iterative way, that is, deleting a portion of the data at each iteration until the deletion is completed. This is because TiDB limits the size of a single transaction to 100 MB by default. You can use loops in your programs or scripts to perform such operations. - - - This section provides an example of writing a script to handle an iterative delete operation that demonstrates how you should do a combination of `SELECT` and `DELETE` to complete a bulk-delete. ### Write a bulk-delete loop @@ -414,14 +384,6 @@ BATCH ON `rated_at` LIMIT 1000 DELETE FROM `ratings` WHERE `rated_at` >= "2022-0 ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-get-data-from-single-table.md b/develop/dev-guide-get-data-from-single-table.md index e74e5547de7a4..843840f3af4c7 100644 --- a/develop/dev-guide-get-data-from-single-table.md +++ b/develop/dev-guide-get-data-from-single-table.md @@ -1,6 +1,7 @@ --- title: Query Data from a Single Table summary: This document describes how to query data from a single table in a database. +aliases: ['/tidb/stable/dev-guide-get-data-from-single-table/','/tidb/dev/dev-guide-get-data-from-single-table/','/tidbcloud/dev-guide-get-data-from-single-table/'] --- @@ -15,31 +16,22 @@ The following content takes the [Bookshop](/develop/dev-guide-bookshop-schema-de Before querying data, make sure that you have completed the following steps: - - -1. Build a TiDB cluster (using [TiDB Cloud](/develop/dev-guide-build-cluster-in-cloud.md) or [TiUP](/production-deployment-using-tiup.md) is recommended). - - - - - -1. Build a TiDB cluster using [TiDB Cloud](/develop/dev-guide-build-cluster-in-cloud.md). - - + +
+1. [Create a TiDB Cloud cluster](/develop/dev-guide-build-cluster-in-cloud.md). 2. [Import table schema and sample data of the Bookshop application](/develop/dev-guide-bookshop-schema-design.md#import-table-structures-and-data). - - - 3. [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md). - - - +
+
-3. [Connect to TiDB](/tidb-cloud/connect-to-tidb-cluster.md). +1. [Deploy a TiDB Self-Managed cluster](/production-deployment-using-tiup.md). +2. [Import table schema and sample data of the Bookshop application](/develop/dev-guide-bookshop-schema-design.md#import-table-structures-and-data). +3. [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md). - +
+
## Execute a simple query @@ -122,20 +114,9 @@ public class AuthorDAO { } ``` - - -- After [connecting to TiDB using the JDBC driver](/develop/dev-guide-connect-to-tidb.md#jdbc), you can create a `Statement` object with `conn.createStatus()`. - - - - - -- After [connecting to TiDB using the JDBC driver](/develop/dev-guide-choose-driver-or-orm.md#java-drivers), you can create a `Statement` object with `conn.createStatus()`. - - +After [connecting to TiDB using the JDBC driver](/develop/dev-guide-sample-application-java-jdbc.md), you can create a `Statement` object with `conn.createStatement()`, and then call `stmt.executeQuery("query_sql")` to initiate a database query request to TiDB. -- Then call `stmt.executeQuery("query_sql")` to initiate a database query request to TiDB. -- The query results are stored in a `ResultSet` object. By traversing `ResultSet`, the returned results can be mapped to the `Author` object. +The query results are stored in a `ResultSet` object. By traversing `ResultSet`, the returned results can be mapped to the `Author` object.
@@ -399,14 +380,6 @@ In addition to the `COUNT` function, TiDB also supports other aggregate function ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-gui-datagrip.md b/develop/dev-guide-gui-datagrip.md index dedf061777b52..c2822821a5285 100644 --- a/develop/dev-guide-gui-datagrip.md +++ b/develop/dev-guide-gui-datagrip.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with JetBrains DataGrip summary: Learn how to connect to TiDB using JetBrains DataGrip. This tutorial also applies to the Database Tools and SQL plugin available in other JetBrains IDEs, such as IntelliJ, PhpStorm, and PyCharm. +aliases: ['/tidb/stable/dev-guide-gui-datagrip/','/tidb/dev/dev-guide-gui-datagrip/','/tidbcloud/dev-guide-gui-datagrip/'] --- # Connect to TiDB with JetBrains DataGrip @@ -25,23 +26,11 @@ To complete this tutorial, you need: - [DataGrip **2023.2.1** or later](https://www.jetbrains.com/datagrip/download/) or a non-community edition [JetBrains](https://www.jetbrains.com/) IDE. - A TiDB cluster. 
- - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Connect to TiDB Connect to your TiDB cluster depending on the TiDB deployment option you've selected. @@ -137,7 +126,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 10. Click **OK** to save the connection configuration.
-
+
1. Launch DataGrip and create a project to manage your connections. @@ -172,19 +161,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele ## Next steps - Learn more usage of DataGrip from [the documentation of DataGrip](https://www.jetbrains.com/help/datagrip/getting-started.html). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-gui-dbeaver.md b/develop/dev-guide-gui-dbeaver.md index 327538168e834..382d8a4519c98 100644 --- a/develop/dev-guide-gui-dbeaver.md +++ b/develop/dev-guide-gui-dbeaver.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with DBeaver summary: Learn how to connect to TiDB using DBeaver Community. +aliases: ['/tidb/stable/dev-guide-gui-dbeaver/','/tidb/dev/dev-guide-gui-dbeaver/','/tidbcloud/dev-guide-gui-dbeaver/'] --- # Connect to TiDB with DBeaver @@ -20,22 +21,24 @@ To complete this tutorial, you need: - [DBeaver Community **23.0.3** or higher](https://dbeaver.io/download/). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - +In addition, to connect to a {{{ .starter }}} or {{{ .essential }}} public endpoint from DBeaver on **Windows**, you must configure an additional SSL certificate (ISRG Root X1) as follows. Otherwise, the connection will fail. For other operating systems, you can skip these steps. 
-**If you don't have a TiDB cluster, you can create one as follows:** +1. Download the [ISRG Root X1 certificate](https://letsencrypt.org/certs/isrgrootx1.pem) and save it to a local path, such as `C:\certs\isrgrootx1.pem`. -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. +2. In DBeaver, edit your connection and go to the **SSL** tab: + + 1. Select **Use SSL**. + 2. In the **CA certificate** field, select the `isrgrootx1.pem` file you downloaded. + 3. Leave the other certificate fields empty. + +3. On the **Driver properties** tab, remove any existing `sslMode`, `useSSL`, or `requireSSL` entries to avoid SSL configuration conflicts. - +4. Click **Test Connection** to verify that the connection is successful. ## Connect to TiDB @@ -126,7 +129,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 7. Click **Finish** to save the connection configuration.
-
+
1. Launch DBeaver and click **New Database Connection** in the upper-left corner. In the **Connect to a database** dialog, select **TiDB** from the list, and then click **Next**. @@ -161,19 +164,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele ## Next steps - Learn more usage of DBeaver from [the documentation of DBeaver](https://github.com/dbeaver/dbeaver/wiki). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-gui-mysql-workbench.md b/develop/dev-guide-gui-mysql-workbench.md index cfdf810d7bb32..6053c12671b4a 100644 --- a/develop/dev-guide-gui-mysql-workbench.md +++ b/develop/dev-guide-gui-mysql-workbench.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with MySQL Workbench summary: Learn how to connect to TiDB using MySQL Workbench. +aliases: ['/tidb/stable/dev-guide-gui-mysql-workbench/','/tidb/dev/dev-guide-gui-mysql-workbench/','/tidbcloud/dev-guide-gui-mysql-workbench/'] --- # Connect to TiDB with MySQL Workbench @@ -25,23 +26,11 @@ To complete this tutorial, you need: - [MySQL Workbench](https://dev.mysql.com/downloads/workbench/) **8.0.31** or later versions. - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Connect to TiDB Connect to your TiDB cluster depending on the TiDB deployment option you have selected. @@ -124,7 +113,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 7. If the connection test is successful, you can see the **Successfully made the MySQL connection** message. Click **OK** to save the connection configuration.
-
+
1. Launch MySQL Workbench and click **+** near the **MySQL Connections** title. @@ -167,19 +156,11 @@ For more information, see [MySQL Workbench frequently asked questions](https://d ## Next steps - Learn more usage of MySQL Workbench from [the documentation of MySQL Workbench](https://dev.mysql.com/doc/workbench/en/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-gui-navicat.md b/develop/dev-guide-gui-navicat.md index f757be6f87dfd..ce548b7c7efea 100644 --- a/develop/dev-guide-gui-navicat.md +++ b/develop/dev-guide-gui-navicat.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with Navicat summary: Learn how to connect to TiDB using Navicat. +aliases: ['/tidb/stable/dev-guide-gui-navicat/','/tidb/dev/dev-guide-gui-navicat/','/tidbcloud/dev-guide-gui-navicat/'] --- # Connect to TiDB with Navicat @@ -21,23 +22,11 @@ To complete this tutorial, you need: - A paid account for Navicat Premium. - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Connect to TiDB Connect to your TiDB cluster depending on the TiDB deployment option you have selected. @@ -122,7 +111,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 9. If the connection test is successful, you can see the **Connection Successful** message. Click **OK** to finish the connection configuration.
-
+
1. Launch Navicat Premium, click **Connection** in the upper-left corner, select **PingCAP** from the **Vendor Filter** list, and double-click **TiDB** in the right panel. @@ -147,19 +136,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se ## Next steps -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-gui-vscode-sqltools.md b/develop/dev-guide-gui-vscode-sqltools.md index 66206847af020..c218073a963b8 100644 --- a/develop/dev-guide-gui-vscode-sqltools.md +++ b/develop/dev-guide-gui-vscode-sqltools.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with Visual Studio Code summary: Learn how to connect to TiDB using Visual Studio Code or GitHub Codespaces. +aliases: ['/tidb/stable/dev-guide-gui-vscode-sqltools/','/tidb/dev/dev-guide-gui-vscode-sqltools/','/tidbcloud/dev-guide-gui-vscode-sqltools/'] --- # Connect to TiDB with Visual Studio Code @@ -25,23 +26,11 @@ To complete this tutorial, you need: - On the **Extensions** tab of your VS Code, search for `mtxr.sqltools-driver-mysql` to get the **SQLTools MySQL/MariaDB/TiDB** extension, and then click **Install**. - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Connect to TiDB Connect to your TiDB cluster depending on the TiDB deployment option you have selected. @@ -148,7 +137,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se 7. If the connection test is successful, you can see the **Successfully connected!** message. Click **SAVE CONNECTION** to save the connection configuration.
-
+
1. Launch VS Code and select the **SQLTools** extension on the navigation pane. Under the **CONNECTIONS** section, click **Add New Connection** and select **TiDB** as the database driver. @@ -190,19 +179,11 @@ Connect to your TiDB cluster depending on the TiDB deployment option you have se - Learn more usage of Visual Studio Code from [the documentation of Visual Studio Code](https://code.visualstudio.com/docs). - Learn more usage of VS Code SQLTools extension from [the documentation](https://marketplace.visualstudio.com/items?itemName=mtxr.sqltools) and [GitHub repository](https://github.com/mtxr/vscode-sqltools) of SQLTools. -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? 
- - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-hybrid-oltp-and-olap-queries.md b/develop/dev-guide-hybrid-oltp-and-olap-queries.md index 4423e5872c383..450f015636626 100644 --- a/develop/dev-guide-hybrid-oltp-and-olap-queries.md +++ b/develop/dev-guide-hybrid-oltp-and-olap-queries.md @@ -1,6 +1,7 @@ --- title: HTAP Queries summary: Introduce the HTAP queries in TiDB. +aliases: ['/tidb/stable/dev-guide-hybrid-oltp-and-olap-queries/','/tidb/dev/dev-guide-hybrid-oltp-and-olap-queries/','/tidbcloud/dev-guide-hybrid-oltp-and-olap-queries/'] --- # HTAP Queries @@ -13,13 +14,13 @@ The [Create a table](/develop/dev-guide-create-table.md#use-htap-capabilities) s ## Data preparation -Before starting, you can import more sample data [via the `tiup demo` command](/develop/dev-guide-bookshop-schema-design.md#method-1-via-tiup-demo). For example: +Before starting, you can import more sample data [via the `tiup demo` command](/develop/dev-guide-bookshop-schema-design.md#tidb-self-managed-via-tiup-demo). 
For example: ```shell tiup demo bookshop prepare --users=200000 --books=500000 --authors=100000 --ratings=1000000 --orders=1000000 --host 127.0.0.1 --port 4000 --drop-tables ``` -Or you can [use the Import function of TiDB Cloud](/develop/dev-guide-bookshop-schema-design.md#method-2-via-tidb-cloud-import) to import the pre-prepared sample data. +Or you can [use the Import function of TiDB Cloud](/develop/dev-guide-bookshop-schema-design.md#tidb-cloud-via-the-import-feature) to import the pre-prepared sample data. ## Window functions @@ -246,32 +247,13 @@ For more information about how TiDB chooses to use TiFlash, see [Use TiDB to rea ## Read more - - -- [Quick Start with TiDB HTAP](/quick-start-with-htap.md) -- [Explore HTAP](/explore-htap.md) - - - - - -- [TiDB Cloud HTAP Quick Start](/tidb-cloud/tidb-cloud-htap-quickstart.md) - - - +- [HTAP Quick Start for TiDB Cloud](/tidb-cloud/tidb-cloud-htap-quickstart.md) +- [HTAP Quick Start for TiDB Self-Managed](/quick-start-with-htap.md) and [Explore HTAP for TiDB Self-Managed](/explore-htap.md) - [Window Functions](/functions-and-operators/window-functions.md) - [Use TiFlash](/tiflash/tiflash-overview.md#use-tiflash) ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-implicit-type-conversion.md b/develop/dev-guide-implicit-type-conversion.md index 13711c29e7317..91137e8e34f53 100644 --- a/develop/dev-guide-implicit-type-conversion.md +++ b/develop/dev-guide-implicit-type-conversion.md @@ -1,6 +1,7 @@ --- title: Avoid Implicit Type Conversions summary: Introduces the possible consequences of implicit type conversions in TiDB and ways to avoid them. +aliases: ['/tidb/stable/dev-guide-implicit-type-conversion/','/tidb/dev/dev-guide-implicit-type-conversion/','/tidbcloud/dev-guide-implicit-type-conversion/'] --- # Avoid Implicit Type Conversions @@ -79,14 +80,6 @@ SELECT * FROM `t1` WHERE `a` BETWEEN '12123123' AND '1111222211111111200000'; ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-index-best-practice.md b/develop/dev-guide-index-best-practice.md index efb3db870c936..3fd8f3556a365 100644 --- a/develop/dev-guide-index-best-practice.md +++ b/develop/dev-guide-index-best-practice.md @@ -1,6 +1,7 @@ --- title: Best Practices for Indexing summary: Learn some best practices for creating and using indexes in TiDB. +aliases: ['/tidb/stable/dev-guide-index-best-practice/','/tidb/dev/dev-guide-index-best-practice/','/tidbcloud/dev-guide-index-best-practice/'] --- @@ -153,14 +154,6 @@ CREATE TABLE `books` ( ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-insert-data.md b/develop/dev-guide-insert-data.md index eec758342a64f..7d5d332202fa1 100644 --- a/develop/dev-guide-insert-data.md +++ b/develop/dev-guide-insert-data.md @@ -1,6 +1,7 @@ --- title: Insert Data summary: Learn about how to insert data. 
+aliases: ['/tidb/stable/dev-guide-insert-data/','/tidb/dev/dev-guide-insert-data/','/tidbcloud/dev-guide-insert-data/'] --- @@ -13,7 +14,7 @@ This document describes how to insert data into TiDB by using the SQL language w Before reading this document, you need to prepare the following: -- [Build a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). +- [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). - Read [Schema Design Overview](/develop/dev-guide-schema-design-overview.md), [Create a Database](/develop/dev-guide-create-database.md), [Create a Table](/develop/dev-guide-create-table.md), and [Create Secondary Indexes](/develop/dev-guide-create-secondary-indexes.md) ## Insert rows @@ -234,33 +235,30 @@ If you need to quickly import a large amount of data into a TiDB cluster, it is The following are the recommended tools for bulk-insert: -- Data export: [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview). You can export MySQL or TiDB data to local or Amazon S3. + +
- +- Data export: use [Dumpling](/dumpling-overview.md) to export MySQL or TiDB data to local or your cloud storage. For a TiDB Cloud Starter or Essential cluster, you can also use the [Export](/tidb-cloud/serverless-export.md) feature in the [TiDB Cloud console](https://tidbcloud.com/) to export data more efficiently. +- Data import: use the [Import](/tidb-cloud/import-sample-data.md) feature in the [TiDB Cloud console](https://tidbcloud.com/). You can import Dumpling exported data, import a local CSV file, or [import CSV files from cloud storage into TiDB Cloud](/tidb-cloud/import-csv-files.md). +- Data replication: use the [TiDB Data Migration](/tidb-cloud/migrate-from-mysql-using-data-migration.md) feature in the [TiDB Cloud console](https://tidbcloud.com/). You can replicate MySQL-compatible databases to TiDB. It also supports merging and migrating the sharded instances and tables from the source databases. +- Data backup and restore: use the [Backup](/tidb-cloud/backup-and-restore.md) feature in the [TiDB Cloud console](https://tidbcloud.com/). Compared to Dumpling, backup and restore is more suitable for big data scenario. +
+
+ +- Data export: [Dumpling](/dumpling-overview.md). You can export MySQL or TiDB data to local or Amazon S3. - Data import: [TiDB Lightning](/tidb-lightning/tidb-lightning-overview.md). You can import **Dumpling** exported data, a **CSV** file, or [Migrate Data from Amazon Aurora to TiDB](/migrate-aurora-to-tidb.md). It also supports reading data from a local disk or Amazon S3 cloud disk. - Data replication: [TiDB Data Migration](/dm/dm-overview.md). You can replicate MySQL, MariaDB, and Amazon Aurora databases to TiDB. It also supports merging and migrating the sharded instances and tables from the source databases. - Data backup and restore: [Backup & Restore (BR)](/br/backup-and-restore-overview.md). Compared to **Dumpling**, **BR** is more suitable for **_big data_** scenario. - - - - -- Data import: [Create Import](/tidb-cloud/import-sample-data.md) page in the [TiDB Cloud console](https://tidbcloud.com/). You can import **Dumpling** exported data, import a local **CSV** file, or [Import CSV Files from Amazon S3 or GCS into TiDB Cloud](/tidb-cloud/import-csv-files.md). It also supports reading data from a local disk, Amazon S3 cloud disk, or GCS cloud disk. -- Data replication: [TiDB Data Migration](https://docs.pingcap.com/tidb/stable/dm-overview). You can replicate MySQL, MariaDB, and Amazon Aurora databases to TiDB. It also supports merging and migrating the sharded instances and tables from the source databases. -- Data backup and restore: [Backup](/tidb-cloud/backup-and-restore.md) page in the TiDB Cloud console. Compared to **Dumpling**, backup and restore is more suitable for **_big data_** scenario. - - +
+
## Avoid hotspots When designing a table, you need to consider if there is a large number of insert operations. If so, you need to avoid hotspots during table design. See the [Select primary key](/develop/dev-guide-create-table.md#select-primary-key) section and follow the [Rules when selecting primary key](/develop/dev-guide-create-table.md#guidelines-to-follow-when-selecting-primary-key). - - -For more information on how to handle hotspot issues, see [Troubleshoot Hotspot Issues](/troubleshoot-hot-spot-issues.md). - - +For more information on how to handle hotspot issues in TiDB Self-Managed, see [Troubleshoot Hotspot Issues](/troubleshoot-hot-spot-issues.md). ## Insert data to a table with the `AUTO_RANDOM` primary key @@ -305,14 +303,6 @@ In TiDB, HTAP capabilities save you from performing additional operations when i ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-join-tables.md b/develop/dev-guide-join-tables.md index 237dc9a8b4841..c53e61481cc3b 100644 --- a/develop/dev-guide-join-tables.md +++ b/develop/dev-guide-join-tables.md @@ -1,6 +1,7 @@ --- title: Multi-table Join Queries summary: This document describes how to use multi-table join queries. +aliases: ['/tidb/stable/dev-guide-join-tables/','/tidb/dev/dev-guide-join-tables/','/tidbcloud/dev-guide-join-tables/'] --- # Multi-table Join Queries @@ -256,14 +257,6 @@ For more information about the implementation details and limitations of this Jo ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-mysql-tools.md b/develop/dev-guide-mysql-tools.md new file mode 100644 index 0000000000000..75a4f07d72f17 --- /dev/null +++ b/develop/dev-guide-mysql-tools.md @@ -0,0 +1,64 @@ +--- +title: Connect to TiDB with MySQL Tools +summary: Learn how to connect to TiDB using MySQL tools. 
+--- + +# Connect to TiDB with MySQL Tools + +TiDB is highly compatible with the MySQL protocol. For a full list of client link parameters, see [MySQL Client Options](https://dev.mysql.com/doc/refman/8.0/en/mysql-command-options.html). + +TiDB supports the [MySQL Client/Server Protocol](https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_PROTOCOL.html), which allows most client drivers and ORM frameworks to connect to TiDB just as they connect to MySQL. + +You can choose to use MySQL Client or MySQL Shell based on your personal preferences. + + + +
+
+You can connect to TiDB using MySQL Client, which can be used as a command-line tool for TiDB. To install MySQL Client, follow the instructions below for YUM-based Linux distributions.
+
+```shell
+sudo yum install mysql
+```
+
+After the installation, you can connect to TiDB using the following command:
+
+```shell
+mysql --host ${YOUR_IP_ADDRESS} --port 4000 -u root -p --comments
+```
+
+The MySQL v9.0 client on macOS cannot correctly load the `mysql_native_password` plugin, causing the error `ERROR 2059 (HY000): Authentication plugin 'mysql_native_password' cannot be loaded` when connecting to TiDB. To address this issue, it is recommended to install and use the MySQL v8.0 client to connect to TiDB. Run the following commands to install it:
+
+```shell
+brew install mysql-client@8.0
+brew unlink mysql
+brew link mysql-client@8.0
+```
+
+If you still encounter errors, you can specify the installation path of the MySQL v8.0 client to connect to TiDB. Run the following command:
+
+```shell
+/opt/homebrew/opt/mysql-client@8.0/bin/mysql --comments --host ${YOUR_IP_ADDRESS} --port ${YOUR_PORT_NUMBER} -u ${your_user_name} -p
+```
+
+Replace `/opt/homebrew/opt/mysql-client@8.0/bin/mysql` in the preceding command with the installation path of the MySQL v8.0 client in your actual environment.
+
+ +
+
+You can connect to TiDB using MySQL Shell, which can be used as a command-line tool for TiDB. To install MySQL Shell, follow the instructions in the [MySQL Shell documentation](https://dev.mysql.com/doc/mysql-shell/8.0/en/mysql-shell-install.html). After the installation, you can connect to TiDB using the following command:
+
+```shell
+mysqlsh --sql mysql://root@${YOUR_IP_ADDRESS}:4000
+```
+
+ +
+ +## Need help? + +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-object-naming-guidelines.md b/develop/dev-guide-object-naming-guidelines.md index 8939b7b8fd48f..f854b0220f558 100644 --- a/develop/dev-guide-object-naming-guidelines.md +++ b/develop/dev-guide-object-naming-guidelines.md @@ -1,6 +1,7 @@ --- title: Object Naming Convention summary: Learn the object naming convention in TiDB. +aliases: ['/tidb/stable/dev-guide-object-naming-guidelines/','/tidb/dev/dev-guide-object-naming-guidelines/','/tidbcloud/dev-guide-object-naming-guidelines/'] --- # Object Naming Convention @@ -47,14 +48,6 @@ It is recommended to differentiate database names by business, product, or other ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-optimistic-and-pessimistic-transaction.md b/develop/dev-guide-optimistic-and-pessimistic-transaction.md index 7f441d257a54c..8efa3c53978c8 100644 --- a/develop/dev-guide-optimistic-and-pessimistic-transaction.md +++ b/develop/dev-guide-optimistic-and-pessimistic-transaction.md @@ -1,6 +1,7 @@ --- title: Optimistic Transactions and Pessimistic Transactions summary: Learn about optimistic and pessimistic transactions in TiDB. +aliases: ['/tidb/stable/dev-guide-optimistic-and-pessimistic-transaction/','/tidb/dev/dev-guide-optimistic-and-pessimistic-transaction/','/tidbcloud/dev-guide-optimistic-and-pessimistic-transaction/'] --- # Optimistic Transactions and Pessimistic Transactions @@ -1369,14 +1370,6 @@ mysql> SELECT * FROM users; ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-optimize-sql-best-practices.md b/develop/dev-guide-optimize-sql-best-practices.md index f4d8088297724..788c2616ec57a 100644 --- a/develop/dev-guide-optimize-sql-best-practices.md +++ b/develop/dev-guide-optimize-sql-best-practices.md @@ -1,6 +1,7 @@ --- title: Performance Tuning Best Practices summary: Introduces the best practices for tuning TiDB performance. +aliases: ['/tidb/stable/dev-guide-optimize-sql-best-practices/','/tidb/dev/dev-guide-optimize-sql-best-practices/','/tidbcloud/dev-guide-optimize-sql-best-practices/'] --- # Performance Tuning Best Practices @@ -151,56 +152,18 @@ SET @@global.tidb_ddl_reorg_batch_size = 128; ## Transaction conflicts - - For how to locate and resolve transaction conflicts, see [Troubleshoot Lock Conflicts](/troubleshoot-lock-conflicts.md). - - - - -For how to locate and resolve transaction conflicts, see [Troubleshoot Lock Conflicts](https://docs.pingcap.com/tidb/stable/troubleshoot-lock-conflicts). - - - ## Best practices for developing Java applications with TiDB - - -See [Best Practices for Developing Java Applications with TiDB](/best-practices/java-app-best-practices.md). - - - - - -See [Best Practices for Developing Java Applications with TiDB](https://docs.pingcap.com/tidb/stable/java-app-best-practices). - - +See [Best Practices for Developing Java Applications with TiDB](/develop/java-app-best-practices.md). ### See also - - - [Best Practices for High-Concurrency Writes](/best-practices/high-concurrency-best-practices.md) - - - - -- [Best Practices for High-Concurrency Writes](https://docs.pingcap.com/tidb/stable/high-concurrency-best-practices) - - - ## Need help? 
- - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-optimize-sql-overview.md b/develop/dev-guide-optimize-sql-overview.md index 9b6e30e4a0caf..522e1dfe624d7 100644 --- a/develop/dev-guide-optimize-sql-overview.md +++ b/develop/dev-guide-optimize-sql-overview.md @@ -1,6 +1,7 @@ --- title: Overview of Optimizing SQL Performance summary: Provides an overview of SQL performance tuning for TiDB application developers. +aliases: ['/tidb/stable/dev-guide-optimize-sql-overview/','/tidb/dev/dev-guide-optimize-sql-overview/','/tidbcloud/dev-guide-optimize-sql-overview/'] --- # Overview of Optimizing SQL Performance @@ -23,44 +24,16 @@ To get good SQL statement performance, you can follow these guidelines: After [tuning SQL performance](#sql-performance-tuning), if your application still cannot get good performance, you might need to check your schema design and data access patterns to avoid the following issues: - - * Transaction contention. For how to diagnose and resolve transaction contention, see [Troubleshoot Lock Conflicts](/troubleshoot-lock-conflicts.md). * Hot spots. 
For how to diagnose and resolve hot spots, see [Troubleshoot Hotspot Issues](/troubleshoot-hot-spot-issues.md). - - - - -* Transaction contention. For how to diagnose and resolve transaction contention, see [Troubleshoot Lock Conflicts](https://docs.pingcap.com/tidb/stable/troubleshoot-lock-conflicts). -* Hot spots. For how to diagnose and resolve hot spots, see [Troubleshoot Hotspot Issues](https://docs.pingcap.com/tidb/stable/troubleshoot-hot-spot-issues). - - - ### See also - - -* [SQL Performance Tuning](/sql-tuning-overview.md) - - - - - -* [SQL Performance Tuning](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) - - +* [SQL Performance Tuning for TiDB Cloud](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) +* [SQL Performance Tuning for TiDB Self-Managed](/sql-tuning-overview.md) ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-optimize-sql.md b/develop/dev-guide-optimize-sql.md index 750b6c12e5d38..ed8838230d9a4 100644 --- a/develop/dev-guide-optimize-sql.md +++ b/develop/dev-guide-optimize-sql.md @@ -1,6 +1,7 @@ --- title: SQL Performance Tuning summary: Introduces TiDB's SQL performance tuning scheme and analysis approach. 
+aliases: ['/tidb/stable/dev-guide-optimize-sql/','/tidb/dev/dev-guide-optimize-sql/','/tidbcloud/dev-guide-optimize-sql/'] --- # SQL Performance Tuning @@ -9,13 +10,13 @@ This document introduces some common reasons for slow SQL statements and techniq ## Before you begin -You can use [`tiup demo` import](/develop/dev-guide-bookshop-schema-design.md#method-1-via-tiup-demo) to prepare data: +You can use [`tiup demo` import](/develop/dev-guide-bookshop-schema-design.md#tidb-self-managed-via-tiup-demo) to prepare data: ```shell tiup demo bookshop prepare --host 127.0.0.1 --port 4000 --books 1000000 ``` -Or [using the Import feature of TiDB Cloud](/develop/dev-guide-bookshop-schema-design.md#method-2-via-tidb-cloud-import) to import the pre-prepared sample data. +Or [using the Import feature of TiDB Cloud](/develop/dev-guide-bookshop-schema-design.md#tidb-cloud-via-the-import-feature) to import the pre-prepared sample data. ## Issue: Full table scan @@ -247,14 +248,6 @@ See [JOIN Execution Plan](/explain-joins.md). ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-paginate-results.md b/develop/dev-guide-paginate-results.md index cb46b6f8c23c7..9514895046b77 100644 --- a/develop/dev-guide-paginate-results.md +++ b/develop/dev-guide-paginate-results.md @@ -1,6 +1,7 @@ --- title: Paginate Results summary: Introduce paginate result feature in TiDB. +aliases: ['/tidb/stable/dev-guide-paginate-results/','/tidb/dev/dev-guide-paginate-results/','/tidbcloud/dev-guide-paginate-results/'] --- # Paginate Results @@ -336,14 +337,6 @@ ORDER BY book_id, user_id; ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-playground-gitpod.md b/develop/dev-guide-playground-gitpod.md index 1a1cec665bc87..221cce4f1939d 100644 --- a/develop/dev-guide-playground-gitpod.md +++ b/develop/dev-guide-playground-gitpod.md @@ -1,6 +1,7 @@ --- title: Gitpod summary: Gitpod provides a complete, automated, and pre-configured cloud-native development environment. You can develop, run, and test code directly in the browser without any local configurations. 
+aliases: ['/tidb/stable/dev-guide-playground-gitpod/','/tidb/dev/dev-guide-playground-gitpod/','/tidbcloud/dev-guide-playground-gitpod/'] --- @@ -169,14 +170,6 @@ Gitpod provides a complete, automated, and pre-configured cloud-native developme ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-prepared-statement.md b/develop/dev-guide-prepared-statement.md index 1bc3c510ef640..7fe012330271f 100644 --- a/develop/dev-guide-prepared-statement.md +++ b/develop/dev-guide-prepared-statement.md @@ -1,6 +1,7 @@ --- title: Prepared Statements summary: Learn about how to use the TiDB prepared statements. +aliases: ['/tidb/stable/dev-guide-prepared-statement/','/tidb/dev/dev-guide-prepared-statement/','/tidbcloud/dev-guide-prepared-statement/'] --- # Prepared Statements @@ -227,14 +228,6 @@ For a complete example in Java, see: ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-proxysql-integration.md b/develop/dev-guide-proxysql-integration.md index 5a508b5c86dc2..ab316d81e3631 100644 --- a/develop/dev-guide-proxysql-integration.md +++ b/develop/dev-guide-proxysql-integration.md @@ -1,6 +1,7 @@ --- -title: ProxySQL Integration Guide +title: Integrate TiDB with ProxySQL summary: Learn how to integrate TiDB Cloud and TiDB (self-hosted) with ProxySQL. +aliases: ['/tidb/stable/dev-guide-proxysql-integration/','/tidb/dev/dev-guide-proxysql-integration/','/tidbcloud/dev-guide-proxysql-integration/'] --- # Integrate TiDB with ProxySQL @@ -10,7 +11,7 @@ This document provides a high-level introduction to ProxySQL, describes how to i If you are interested in learning more about TiDB and ProxySQL, you can find some useful links as follows: - [TiDB Cloud](https://docs.pingcap.com/tidbcloud) -- [TiDB Developer Guide](/develop/dev-guide-overview.md) +- [TiDB Developer Guide](https://docs.pingcap.com/developer/) - [ProxySQL Documentation](https://proxysql.com/documentation/) ## What is ProxySQL? @@ -1128,14 +1129,6 @@ Databases can be overloaded by high traffic, faulty code, or malicious spam. Wit ## Need help? 
- - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-aws-lambda.md b/develop/dev-guide-sample-application-aws-lambda.md index e844834dbb634..e3aad2705f354 100644 --- a/develop/dev-guide-sample-application-aws-lambda.md +++ b/develop/dev-guide-sample-application-aws-lambda.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with mysql2 in AWS Lambda Function summary: This article describes how to build a CRUD application using TiDB and mysql2 in AWS Lambda Function and provides a simple example code snippet. 
+aliases: ['/tidb/stable/dev-guide-sample-application-aws-lambda/','/tidb/dev/dev-guide-sample-application-aws-lambda/','/tidbcloud/dev-guide-sample-application-aws-lambda/'] --- # Connect to TiDB with mysql2 in AWS Lambda Function @@ -29,23 +30,11 @@ To complete this tutorial, you need: - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) - [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/install-sam-cli.html) - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - If you don't have an AWS account or a user, you can create them by following the steps in the [Getting Started with Lambda](https://docs.aws.amazon.com/lambda/latest/dg/getting-started.html) guide. ## Run the sample app to connect to TiDB @@ -119,7 +108,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele
-
+
Copy and paste the corresponding connection string into `env.json`. The following is an example: @@ -362,19 +351,11 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). - For more details on how to use TiDB in AWS Lambda Function, see our [TiDB-Lambda-integration/aws-lambda-bookstore Demo](https://github.com/pingcap/TiDB-Lambda-integration/blob/main/aws-lambda-bookstore/README.md). You can also use AWS API Gateway to build a RESTful API for your application. - Learn more usage of `mysql2` from [the documentation of `mysql2`](https://sidorares.github.io/node-mysql2/docs/documentation). - Learn more usage of AWS Lambda from [the AWS developer guide of `Lambda`](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? 
- - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-cs.md b/develop/dev-guide-sample-application-cs.md index fb7c781e45ac7..59e185143bdf8 100644 --- a/develop/dev-guide-sample-application-cs.md +++ b/develop/dev-guide-sample-application-cs.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with C# summary: Learn how to connect to TiDB using C#. This tutorial provides sample C# code snippets for interacting with TiDB. +aliases: ['/tidb/stable/dev-guide-sample-application-cs/','/tidb/dev/dev-guide-sample-application-cs/','/tidbcloud/dev-guide-sample-application-cs/'] --- # Connect to TiDB with C\# diff --git a/develop/dev-guide-sample-application-golang-gorm.md b/develop/dev-guide-sample-application-golang-gorm.md index fc45bb66ac81c..d247595afec84 100644 --- a/develop/dev-guide-sample-application-golang-gorm.md +++ b/develop/dev-guide-sample-application-golang-gorm.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with GORM summary: Learn how to connect to TiDB using GORM. This tutorial gives Golang sample code snippets that work with TiDB using GORM. 
+aliases: ['/tidb/stable/dev-guide-sample-application-golang-gorm/','/tidb/dev/dev-guide-sample-application-golang-gorm/','/tidbcloud/dev-guide-sample-application-golang-gorm/'] --- # Connect to TiDB with GORM @@ -25,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -141,7 +130,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -239,19 +228,11 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). ## Next steps - Learn more usage of GORM from [the documentation of GORM](https://gorm.io/docs/index.html) and the [TiDB section in the documentation of GORM](https://gorm.io/docs/connecting_to_the_database.html#TiDB). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-golang-sql-driver.md b/develop/dev-guide-sample-application-golang-sql-driver.md index a5aa138f2b42d..81cc97967f4c4 100644 --- a/develop/dev-guide-sample-application-golang-sql-driver.md +++ b/develop/dev-guide-sample-application-golang-sql-driver.md @@ -1,7 +1,7 @@ --- title: Connect to TiDB with Go-MySQL-Driver summary: Learn how to connect to TiDB using Go-MySQL-Driver. This tutorial gives Golang sample code snippets that work with TiDB using Go-MySQL-Driver. -aliases: ['/tidb/dev/dev-guide-outdated-for-go-sql-driver-mysql','/tidb/dev/dev-guide-outdated-for-gorm','/tidb/dev/dev-guide-sample-application-golang'] +aliases: ['/tidb/dev/dev-guide-outdated-for-go-sql-driver-mysql','/tidb/dev/dev-guide-outdated-for-gorm','/tidb/dev/dev-guide-sample-application-golang','/tidb/stable/dev-guide-sample-application-golang-sql-driver/','/tidb/dev/dev-guide-sample-application-golang-sql-driver/','/tidbcloud/dev-guide-sample-application-golang-sql-driver/'] --- # Connect to TiDB with Go-MySQL-Driver @@ -26,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -142,7 +130,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -290,19 +278,11 @@ Unless you need to write complex SQL statements, it is recommended to use [ORM]( ## Next steps - Learn more usage of Go-MySQL-Driver from [the documentation of Go-MySQL-Driver](https://github.com/go-sql-driver/mysql/blob/master/README.md). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-java-hibernate.md b/develop/dev-guide-sample-application-java-hibernate.md index b2ea70fe83e0f..52dc0b2a75687 100644 --- a/develop/dev-guide-sample-application-java-hibernate.md +++ b/develop/dev-guide-sample-application-java-hibernate.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with Hibernate summary: Learn how to connect to TiDB using Hibernate. This tutorial gives Java sample code snippets that work with TiDB using Hibernate. +aliases: ['/tidb/stable/dev-guide-sample-application-java-hibernate/','/tidb/dev/dev-guide-sample-application-java-hibernate/','/tidbcloud/dev-guide-sample-application-java-hibernate/'] --- # Connect to TiDB with Hibernate @@ -26,23 +27,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. 
- - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -142,7 +131,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `env.sh` file.
-
+
1. Run the following command to copy `env.sh.example` and rename it to `env.sh`: @@ -297,20 +286,12 @@ Without this setting, TiDB accepts the `CHECK` constraint syntax but does not en ## Next steps - Learn more usage of Hibernate from [the documentation of Hibernate](https://hibernate.org/orm/documentation). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. - Learn through the course for Java developers: [Working with TiDB from Java](https://eng.edu.pingcap.com/catalog/info/id:212). ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-java-jdbc.md b/develop/dev-guide-sample-application-java-jdbc.md index 6bdfb3650a205..dac1223ec6ba0 100644 --- a/develop/dev-guide-sample-application-java-jdbc.md +++ b/develop/dev-guide-sample-application-java-jdbc.md @@ -1,7 +1,7 @@ --- title: Connect to TiDB with JDBC summary: Learn how to connect to TiDB using JDBC. This tutorial gives Java sample code snippets that work with TiDB using JDBC. -aliases: ['/tidb/dev/sample-application-java','/tidb/dev/dev-guide-sample-application-java'] +aliases: ['/tidb/dev/sample-application-java','/tidb/dev/dev-guide-sample-application-java','/tidb/stable/dev-guide-sample-application-java-jdbc/','/tidb/dev/dev-guide-sample-application-java-jdbc/','/tidbcloud/dev-guide-sample-application-java-jdbc/'] --- # Connect to TiDB with JDBC @@ -14,24 +14,11 @@ In this tutorial, you can learn how to use TiDB and JDBC to accomplish the follo - Connect to your TiDB cluster using JDBC. - Build and run your application. Optionally, you can find [sample code snippets](#sample-code-snippets) for basic CRUD operations. - - > **Note:** > > - This tutorial works with {{{ .starter }}}, {{{ .essential }}}, TiDB Cloud Dedicated, and TiDB Self-Managed. 
> - Starting from TiDB v7.4, if `connectionCollation` is not configured, and `characterEncoding` is either not configured or set to `UTF-8` in the JDBC URL, the collation used in a JDBC connection depends on the JDBC driver version. For more information, see [Collation used in JDBC connections](/faq/sql-faq.md#collation-used-in-jdbc-connections). - - - - -> **Note:** -> -> - This tutorial works with {{{ .starter }}}, {{{ .essential }}}, TiDB Cloud Dedicated, and TiDB Self-Managed. -> - Starting from TiDB v7.4, if `connectionCollation` is not configured, and `characterEncoding` is either not configured or set to `UTF-8` in the JDBC URL, the collation used in a JDBC connection depends on the JDBC driver version. For more information, see [Collation used in JDBC connections](https://docs.pingcap.com/tidb/stable/sql-faq#collation-used-in-jdbc-connections). - - - ## Prerequisites To complete this tutorial, you need: @@ -41,27 +28,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -> **Note:** -> -> For security considerations, it is recommended that you use `VERIFY_IDENTITY` to establish TLS connections to TiDB clusters when connecting over the internet. {{{ .starter }}}, {{{ .essential }}}, and TiDB Cloud Dedicated use Subject Alternative Name (SAN) certificates, which require MySQL Connector/J version to be greater than or equal to [8.0.22](https://dev.mysql.com/doc/relnotes/connector-j/en/news-8-0-22.html). 
- -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -161,7 +132,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `env.sh` file.
-
+
1. Run the following command to copy `env.sh.example` and rename it to `env.sh`: @@ -314,20 +285,12 @@ Unless you need to write complex SQL statements, it is recommended to use [ORM]( ## Next steps - Learn more usage of MySQL Connector/J from [the documentation of MySQL Connector/J](https://dev.mysql.com/doc/connector-j/en/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. - Learn through the course for Java developers: [Working with TiDB from Java](https://eng.edu.pingcap.com/catalog/info/id:212). ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-java-mybatis.md b/develop/dev-guide-sample-application-java-mybatis.md index 97547a0aa9d18..bbbb47c91c5ae 100644 --- a/develop/dev-guide-sample-application-java-mybatis.md +++ b/develop/dev-guide-sample-application-java-mybatis.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with MyBatis summary: Learn how to connect to TiDB using MyBatis. This tutorial gives Java sample code snippets that work with TiDB using MyBatis. +aliases: ['/tidb/stable/dev-guide-sample-application-java-mybatis/','/tidb/dev/dev-guide-sample-application-java-mybatis/','/tidbcloud/dev-guide-sample-application-java-mybatis/'] --- # Connect to TiDB with MyBatis @@ -26,23 +27,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. 
- - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -142,7 +131,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `env.sh` file.
-
+
1. Run the following command to copy `env.sh.example` and rename it to `env.sh`: @@ -311,20 +300,12 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). ## Next steps - Learn more usage of MyBatis from [the documentation of MyBatis](http://www.mybatis.org/mybatis-3/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. - Learn through the course for Java developers: [Working with TiDB from Java](https://eng.edu.pingcap.com/catalog/info/id:212). ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-java-spring-boot.md b/develop/dev-guide-sample-application-java-spring-boot.md index a4ead3b29ff9d..3fa4d7221027a 100644 --- a/develop/dev-guide-sample-application-java-spring-boot.md +++ b/develop/dev-guide-sample-application-java-spring-boot.md @@ -1,7 +1,7 @@ --- title: Connect to TiDB with Spring Boot summary: Learn how to connect to TiDB using Spring Boot. This tutorial gives Java sample code snippets that work with TiDB using Spring Boot. -aliases: ['/tidbcloud/dev-guide-sample-application-spring-boot','/tidb/dev/dev-guide-sample-application-spring-boot'] +aliases: ['/tidbcloud/dev-guide-sample-application-spring-boot','/tidb/dev/dev-guide-sample-application-spring-boot','/tidb/stable/dev-guide-sample-application-java-spring-boot/','/tidb/dev/dev-guide-sample-application-java-spring-boot/','/tidbcloud/dev-guide-sample-application-java-spring-boot/'] --- # Connect to TiDB with Spring Boot @@ -27,23 +27,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
- Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -143,7 +131,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `env.sh` file.
-
+
1. Run the following command to copy `env.sh.example` and rename it to `env.sh`: @@ -265,20 +253,12 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). - [The documentation of Spring Data JPA](https://spring.io/projects/spring-data-jpa) - [The documentation of Hibernate](https://hibernate.org/orm/documentation) -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. - Learn through the course for Java developers: [Working with TiDB from Java](https://eng.edu.pingcap.com/catalog/info/id:212). ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-nextjs.md b/develop/dev-guide-sample-application-nextjs.md index aa22bc13b525c..00b0590b56a78 100644 --- a/develop/dev-guide-sample-application-nextjs.md +++ b/develop/dev-guide-sample-application-nextjs.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with mysql2 in Next.js summary: This article describes how to build a CRUD application using TiDB and mysql2 in Next.js and provides a simple example code snippet. +aliases: ['/tidb/stable/dev-guide-sample-application-nextjs/','/tidb/dev/dev-guide-sample-application-nextjs/','/tidbcloud/dev-guide-sample-application-nextjs/'] --- # Connect to TiDB with mysql2 in Next.js @@ -25,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. 
- - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -124,7 +113,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -278,19 +267,11 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). - For more details on how to build a complex application with ORM and Next.js, see [our Bookshop Demo](https://github.com/pingcap/tidb-prisma-vercel-demo). - Learn more usage of node-mysql2 driver from [the documentation of node-mysql2](https://sidorares.github.io/node-mysql2/docs/documentation). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-nodejs-mysql2.md b/develop/dev-guide-sample-application-nodejs-mysql2.md index 2967a1d7593aa..802703940c40a 100644 --- a/develop/dev-guide-sample-application-nodejs-mysql2.md +++ b/develop/dev-guide-sample-application-nodejs-mysql2.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with node-mysql2 summary: Learn how to connect to TiDB using node-mysql2. This tutorial gives Node.js sample code snippets that work with TiDB using node-mysql2. +aliases: ['/tidb/stable/dev-guide-sample-application-nodejs-mysql2/','/tidb/dev/dev-guide-sample-application-nodejs-mysql2/','/tidbcloud/dev-guide-sample-application-nodejs-mysql2/'] --- # Connect to TiDB with node-mysql2 @@ -27,19 +28,9 @@ To complete this tutorial, you need: **If you don't have a TiDB cluster, you can create one as follows:** - - - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -155,7 +146,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -306,19 +297,11 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). ## Next steps - Learn more usage of node-mysql2 driver from [the documentation of node-mysql2](https://github.com/sidorares/node-mysql2#readme). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-nodejs-mysqljs.md b/develop/dev-guide-sample-application-nodejs-mysqljs.md index c5fd4d59a4b5d..c945a789127b0 100644 --- a/develop/dev-guide-sample-application-nodejs-mysqljs.md +++ b/develop/dev-guide-sample-application-nodejs-mysqljs.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with mysql.js summary: Learn how to connect to TiDB using mysql.js. This tutorial gives Node.js sample code snippets that work with TiDB using mysql.js. +aliases: ['/tidb/stable/dev-guide-sample-application-nodejs-mysqljs/','/tidb/dev/dev-guide-sample-application-nodejs-mysqljs/','/tidbcloud/dev-guide-sample-application-nodejs-mysqljs/'] --- # Connect to TiDB with mysql.js @@ -27,19 +28,9 @@ To complete this tutorial, you need: **If you don't have a TiDB cluster, you can create one as follows:** - - - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -155,7 +146,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -329,19 +320,11 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). ## Next steps - Learn more usage of mysql.js driver from [the documentation of mysql.js](https://github.com/mysqljs/mysql#readme). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-nodejs-prisma.md b/develop/dev-guide-sample-application-nodejs-prisma.md index c86e6a3363a0a..570227bc4fe23 100644 --- a/develop/dev-guide-sample-application-nodejs-prisma.md +++ b/develop/dev-guide-sample-application-nodejs-prisma.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with Prisma summary: Learn how to connect to TiDB using Prisma. This tutorial gives Node.js sample code snippets that work with TiDB using Prisma. +aliases: ['/tidb/stable/dev-guide-sample-application-nodejs-prisma/','/tidb/dev/dev-guide-sample-application-nodejs-prisma/','/tidbcloud/dev-guide-sample-application-nodejs-prisma/'] --- # Connect to TiDB with Prisma @@ -27,19 +28,9 @@ To complete this tutorial, you need: **If you don't have a TiDB cluster, you can create one as follows:** - - - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. 
- - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -158,7 +149,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele ```
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -367,19 +358,11 @@ To check [referential integrity](https://en.wikipedia.org/wiki/Referential_integ ## Next steps - Learn more usage of the ORM framework Prisma driver from [the documentation of Prisma](https://www.prisma.io/docs). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-nodejs-sequelize.md b/develop/dev-guide-sample-application-nodejs-sequelize.md index bf8c042bb1ebf..389bda7918c3e 100644 --- a/develop/dev-guide-sample-application-nodejs-sequelize.md +++ b/develop/dev-guide-sample-application-nodejs-sequelize.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with Sequelize summary: Learn how to connect to TiDB using Sequelize. This tutorial gives Node.js sample code snippets that work with TiDB using Sequelize. +aliases: ['/tidb/stable/dev-guide-sample-application-nodejs-sequelize/','/tidb/dev/dev-guide-sample-application-nodejs-sequelize/','/tidbcloud/dev-guide-sample-application-nodejs-sequelize/'] --- # Connect to TiDB with Sequelize @@ -25,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -151,7 +140,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -320,19 +309,11 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). ## Next steps - Learn more usage of the ORM framework Sequelize driver from [the documentation of Sequelize](https://sequelize.org/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-nodejs-typeorm.md b/develop/dev-guide-sample-application-nodejs-typeorm.md index 05012e1800f60..5ee36cbec3364 100644 --- a/develop/dev-guide-sample-application-nodejs-typeorm.md +++ b/develop/dev-guide-sample-application-nodejs-typeorm.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with TypeORM summary: Learn how to connect to TiDB using TypeORM. This tutorial gives Node.js sample code snippets that work with TiDB using TypeORM. +aliases: ['/tidb/stable/dev-guide-sample-application-nodejs-typeorm/','/tidb/dev/dev-guide-sample-application-nodejs-typeorm/','/tidbcloud/dev-guide-sample-application-nodejs-typeorm/'] --- # Connect to TiDB with TypeORM @@ -27,19 +28,9 @@ To complete this tutorial, you need: **If you don't have a TiDB cluster, you can create one as follows:** - - - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -161,7 +152,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -366,19 +357,11 @@ For more information, refer to the [TypeORM FAQ](https://typeorm.io/relations-fa ## Next steps - Learn more usage of TypeORM from the [documentation of TypeORM](https://typeorm.io/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-python-django.md b/develop/dev-guide-sample-application-python-django.md index 368128bf2d2e4..64152b3c84e9c 100644 --- a/develop/dev-guide-sample-application-python-django.md +++ b/develop/dev-guide-sample-application-python-django.md @@ -1,7 +1,7 @@ --- title: Connect to TiDB with Django summary: Learn how to connect to TiDB using Django. This tutorial gives Python sample code snippets that work with TiDB using Django. -aliases: ['/tidb/dev/dev-guide-outdated-for-django'] +aliases: ['/tidb/dev/dev-guide-outdated-for-django','/tidb/stable/dev-guide-sample-application-python-django/','/tidb/dev/dev-guide-sample-application-python-django/','/tidbcloud/dev-guide-sample-application-python-django/'] --- # Connect to TiDB with Django @@ -26,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -160,7 +148,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -327,19 +315,11 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). ## Next steps - Learn more usage of Django from [the documentation of Django](https://www.djangoproject.com/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-python-mysql-connector.md b/develop/dev-guide-sample-application-python-mysql-connector.md index 3995a406d7046..e52f95719432d 100644 --- a/develop/dev-guide-sample-application-python-mysql-connector.md +++ b/develop/dev-guide-sample-application-python-mysql-connector.md @@ -1,7 +1,7 @@ --- title: Connect to TiDB with MySQL Connector/Python summary: Learn how to connect to TiDB using MySQL Connector/Python. This tutorial gives Python sample code snippets that work with TiDB using MySQL Connector/Python. -aliases: ['/tidb/dev/dev-guide-sample-application-python','/tidb/dev/dev-guide-outdated-for-python-mysql-connector'] +aliases: ['/tidb/dev/dev-guide-sample-application-python','/tidb/dev/dev-guide-outdated-for-python-mysql-connector','/tidb/stable/dev-guide-sample-application-python-mysql-connector/','/tidb/dev/dev-guide-sample-application-python-mysql-connector/','/tidbcloud/dev-guide-sample-application-python-mysql-connector/'] --- # Connect to TiDB with MySQL Connector/Python @@ -26,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. 
- - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -148,7 +136,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -278,19 +266,11 @@ Unless you need to write complex SQL statements, it is recommended to use [ORM]( ## Next steps - Learn more usage of mysql-connector-python from [the documentation of MySQL Connector/Python](https://dev.mysql.com/doc/connector-python/en/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-python-mysqlclient.md b/develop/dev-guide-sample-application-python-mysqlclient.md index 4ef648fa74432..a750fff3fab01 100644 --- a/develop/dev-guide-sample-application-python-mysqlclient.md +++ b/develop/dev-guide-sample-application-python-mysqlclient.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with mysqlclient summary: Learn how to connect to TiDB using mysqlclient. This tutorial gives Python sample code snippets that work with TiDB using mysqlclient. +aliases: ['/tidb/stable/dev-guide-sample-application-python-mysqlclient/','/tidb/dev/dev-guide-sample-application-python-mysqlclient/','/tidbcloud/dev-guide-sample-application-python-mysqlclient/'] --- # Connect to TiDB with mysqlclient @@ -25,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. 
- - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -151,7 +140,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -279,19 +268,11 @@ Unless you need to write complex SQL statements, it is recommended to use [ORM]( ## Next steps - Learn more usage of `mysqlclient` from [the documentation of mysqlclient](https://mysqlclient.readthedocs.io/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-python-peewee.md b/develop/dev-guide-sample-application-python-peewee.md index 7ba54b1ae3253..b8975396bec9e 100644 --- a/develop/dev-guide-sample-application-python-peewee.md +++ b/develop/dev-guide-sample-application-python-peewee.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with peewee summary: Learn how to connect to TiDB using peewee. This tutorial gives Python sample code snippets that work with TiDB using peewee. +aliases: ['/tidb/stable/dev-guide-sample-application-python-peewee/','/tidb/dev/dev-guide-sample-application-python-peewee/','/tidbcloud/dev-guide-sample-application-python-peewee/'] --- # Connect to TiDB with peewee @@ -25,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. 
- - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -151,7 +140,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -302,19 +291,11 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). ## Next steps - Learn more usage of peewee from [the documentation of peewee](https://docs.peewee-orm.com/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-python-pymysql.md b/develop/dev-guide-sample-application-python-pymysql.md index bb686aabbc68b..29a112ae97dde 100644 --- a/develop/dev-guide-sample-application-python-pymysql.md +++ b/develop/dev-guide-sample-application-python-pymysql.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with PyMySQL summary: Learn how to connect to TiDB using PyMySQL. This tutorial gives Python sample code snippets that work with TiDB using PyMySQL. +aliases: ['/tidb/stable/dev-guide-sample-application-python-pymysql/','/tidb/dev/dev-guide-sample-application-python-pymysql/','/tidbcloud/dev-guide-sample-application-python-pymysql/'] --- # Connect to TiDB with PyMySQL @@ -25,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -147,7 +136,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -282,19 +271,11 @@ Unless you need to write complex SQL statements, it is recommended to use [ORM]( ## Next steps - Learn more usage of PyMySQL from [the documentation of PyMySQL](https://pymysql.readthedocs.io). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-python-sqlalchemy.md b/develop/dev-guide-sample-application-python-sqlalchemy.md index abf1746933fba..4dfd249d35701 100644 --- a/develop/dev-guide-sample-application-python-sqlalchemy.md +++ b/develop/dev-guide-sample-application-python-sqlalchemy.md @@ -1,7 +1,7 @@ --- title: Connect to TiDB with SQLAlchemy summary: Learn how to connect to TiDB using SQLAlchemy. This tutorial gives Python sample code snippets that work with TiDB using SQLAlchemy. -aliases: ['/tidb/dev/dev-guide-outdated-for-sqlalchemy'] +aliases: ['/tidb/dev/dev-guide-outdated-for-sqlalchemy','/tidb/stable/dev-guide-sample-application-python-sqlalchemy/','/tidb/dev/dev-guide-sample-application-python-sqlalchemy/','/tidbcloud/dev-guide-sample-application-python-sqlalchemy/'] --- # Connect to TiDB with SQLAlchemy @@ -26,23 +26,11 @@ To complete this tutorial, you need: - [Git](https://git-scm.com/downloads). - A TiDB cluster. - - **If you don't have a TiDB cluster, you can create one as follows:** - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. 
- - - -**If you don't have a TiDB cluster, you can create one as follows:** - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -158,7 +146,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -296,19 +284,11 @@ For more information, refer to [Delete data](/develop/dev-guide-delete-data.md). ## Next steps - Learn more usage of SQLAlchemy from [the documentation of SQLAlchemy](https://www.sqlalchemy.org/). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-ruby-mysql2.md b/develop/dev-guide-sample-application-ruby-mysql2.md index d1d48b3862578..729bef4e323d0 100644 --- a/develop/dev-guide-sample-application-ruby-mysql2.md +++ b/develop/dev-guide-sample-application-ruby-mysql2.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with mysql2 summary: Learn how to connect to TiDB using Ruby mysql2. This tutorial gives Ruby sample code snippets that work with TiDB using mysql2 gem. +aliases: ['/tidb/stable/dev-guide-sample-application-ruby-mysql2/','/tidb/dev/dev-guide-sample-application-ruby-mysql2/','/tidbcloud/dev-guide-sample-application-ruby-mysql2/'] --- # Connect to TiDB with mysql2 @@ -28,19 +29,9 @@ To complete this tutorial, you need: **If you don't have a TiDB cluster, you can create one as follows:** - - - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. - - - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. 
-- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -156,7 +147,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -307,19 +298,11 @@ While it is possible to specify the CA certificate path manually, doing so might ## Next steps - Learn more usage of mysql2 driver from [the documentation of mysql2](https://github.com/brianmario/mysql2#readme). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sample-application-ruby-rails.md b/develop/dev-guide-sample-application-ruby-rails.md index 42d340820a9af..4f50604bb8677 100644 --- a/develop/dev-guide-sample-application-ruby-rails.md +++ b/develop/dev-guide-sample-application-ruby-rails.md @@ -1,6 +1,7 @@ --- title: Connect to TiDB with Rails framework and ActiveRecord ORM summary: Learn how to connect to TiDB using the Rails framework. This tutorial gives Ruby sample code snippets that work with TiDB using the Rails framework and ActiveRecord ORM. +aliases: ['/tidb/stable/dev-guide-sample-application-ruby-rails/','/tidb/dev/dev-guide-sample-application-ruby-rails/','/tidbcloud/dev-guide-sample-application-ruby-rails/'] --- # Connect to TiDB with Rails Framework and ActiveRecord ORM @@ -28,19 +29,9 @@ To complete this tutorial, you need: **If you don't have a TiDB cluster, you can create one as follows:** - - - (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. - Follow [Deploy a local test TiDB cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](/production-deployment-using-tiup.md) to create a local cluster. 
- - - -- (Recommended) Follow [Creating a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md) to create your own TiDB Cloud cluster. -- Follow [Deploy a local test TiDB cluster](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb#deploy-a-local-test-cluster) or [Deploy a production TiDB cluster](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup) to create a local cluster. - - - ## Run the sample app to connect to TiDB This section demonstrates how to run the sample application code and connect to TiDB. @@ -140,7 +131,7 @@ Connect to your TiDB cluster depending on the TiDB deployment option you've sele 6. Save the `.env` file.
-
+
1. Run the following command to copy `.env.example` and rename it to `.env`: @@ -281,19 +272,11 @@ While it is possible to specify the CA certificate path manually, this approach ## Next steps - Learn more usage of ActiveRecord ORM from [the documentation of ActiveRecord](https://guides.rubyonrails.org/active_record_basics.html). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). +- Learn the best practices for TiDB application development with the chapters in the [Developer guide](https://docs.pingcap.com/developer/), such as: [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Query data](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). - Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-schema-design-overview.md b/develop/dev-guide-schema-design-overview.md index b83fb6d735f4c..7a116f99da6e4 100644 --- a/develop/dev-guide-schema-design-overview.md +++ b/develop/dev-guide-schema-design-overview.md @@ -1,6 +1,7 @@ --- title: TiDB Database Schema Design Overview summary: Learn the basics on TiDB database schema design. +aliases: ['/tidb/stable/dev-guide-schema-design-overview/','/tidb/dev/dev-guide-schema-design-overview/','/tidbcloud/dev-guide-schema-design-overview/'] --- # TiDB Database Schema Design Overview @@ -47,18 +48,8 @@ There are two common types of indexes: #### Specialized indexes - - To improve query performance of various user scenarios, TiDB provides you with some specialized types of indexes. For details of each type, see [Indexing and constraints](/basic-features.md#indexing-and-constraints). - - - - -To improve query performance of various user scenarios, TiDB provides you with some specialized types of indexes. For details of each type, see [Indexing and constraints](https://docs.pingcap.com/tidb/stable/basic-features#indexing-and-constraints). 
- - - ### Other supported logical objects TiDB supports the following logical objects at the same level as **table**: @@ -69,18 +60,8 @@ TiDB supports the following logical objects at the same level as **table**: ## Access Control - - TiDB supports both user-based and role-based access control. To allow users to view, modify, or delete data objects and data schemas, you can either grant [privileges](/privilege-management.md) to [users](/user-account-management.md) directly or grant [privileges](/privilege-management.md) to users through [roles](/role-based-access-control.md). - - - - -TiDB supports both user-based and role-based access control. To allow users to view, modify, or delete data objects and data schemas, you can either grant [privileges](https://docs.pingcap.com/tidb/stable/privilege-management) to [users](https://docs.pingcap.com/tidb/stable/user-account-management) directly or grant [privileges](https://docs.pingcap.com/tidb/stable/privilege-management) to users through [roles](https://docs.pingcap.com/tidb/stable/role-based-access-control). - - - ## Database schema changes As a best practice, it is recommended that you use a [MySQL client](https://dev.mysql.com/doc/refman/8.0/en/mysql.html) or a GUI client instead of a driver or ORM to execute database schema changes. @@ -91,14 +72,6 @@ For more information, see [TiDB Limitations](/tidb-limitations.md). ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-sql-development-specification.md b/develop/dev-guide-sql-development-specification.md index c19f431c97150..400080c4d0f5a 100644 --- a/develop/dev-guide-sql-development-specification.md +++ b/develop/dev-guide-sql-development-specification.md @@ -1,6 +1,7 @@ --- title: SQL Development Specifications summary: Learn about the SQL development specifications for TiDB. +aliases: ['/tidb/stable/dev-guide-sql-development-specification/','/tidb/dev/dev-guide-sql-development-specification/','/tidbcloud/dev-guide-sql-development-specification/'] --- # SQL Development Specifications @@ -55,14 +56,6 @@ This document introduces some general development specifications for using SQL. ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-third-party-support.md b/develop/dev-guide-third-party-support.md index 01d67fdcf106a..9278fe02374ec 100644 --- a/develop/dev-guide-third-party-support.md +++ b/develop/dev-guide-third-party-support.md @@ -1,6 +1,7 @@ --- title: Third-Party Tools Supported by TiDB summary: Learn about third-party tools supported by TiDB. +aliases: ['/tidb/stable/dev-guide-third-party-support/','/tidb/dev/dev-guide-third-party-support/','/tidbcloud/dev-guide-third-party-support/'] --- # Third-Party Tools Supported by TiDB @@ -62,14 +63,6 @@ If you encounter problems when connecting to TiDB using the tools listed in this ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-third-party-tools-compatibility.md b/develop/dev-guide-third-party-tools-compatibility.md index 8e6c5f0bbf1ad..2b5fdfe7ce2bc 100644 --- a/develop/dev-guide-third-party-tools-compatibility.md +++ b/develop/dev-guide-third-party-tools-compatibility.md @@ -1,6 +1,7 @@ --- title: Known Incompatibility Issues with Third-Party Tools summary: Describes TiDB compatibility issues with third-party tools found during testing. +aliases: ['/tidb/stable/dev-guide-third-party-tools-compatibility/','/tidb/dev/dev-guide-third-party-tools-compatibility/','/tidbcloud/dev-guide-third-party-tools-compatibility/'] --- # Known Incompatibility Issues with Third-Party Tools @@ -40,17 +41,10 @@ MySQL maintains a series of [server status variables starting with `Com_`](https **Way to avoid** - +Do not use these variables. One common scenario is monitoring. TiDB is well observable and does not require querying from server status variables. For more information about monitoring services, refer to the following documentation: -Do not use these variables. One common scenario is monitoring. TiDB is well observable and does not require querying from server status variables. For custom monitoring tools, refer to [TiDB Monitoring Framework Overview](/tidb-monitoring-framework.md). - - - - - -Do not use these variables. One common scenario is monitoring. TiDB Cloud is well observable and does not require querying from server status variables. For more information about TiDB Cloud monitoring services, refer to [Monitor a TiDB Cluster](/tidb-cloud/monitor-tidb-cluster.md). - - +- TiDB Cloud documentation: [Monitor a TiDB Cluster](/tidb-cloud/monitor-tidb-cluster.md). +- TiDB Self-Managed documentation: [TiDB Monitoring Framework Overview](/tidb-monitoring-framework.md). 
### TiDB distinguishes between `TIMESTAMP` and `DATETIME` in error messages @@ -60,18 +54,8 @@ TiDB error messages distinguish between `TIMESTAMP` and `DATETIME`, while MySQL **Way to avoid** - - Do not use the error messages for string matching. Instead, use [Error Codes](/error-codes.md) for troubleshooting. - - - - -Do not use the error messages for string matching. Instead, use [Error Codes](https://docs.pingcap.com/tidb/stable/error-codes) for troubleshooting. - - - ### TiDB does not support the `CHECK TABLE` statement **Description** @@ -234,14 +218,6 @@ To allow the removal of the `AUTO_INCREMENT` attribute, set `@@tidb_allow_remove ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-tidb-basics.md b/develop/dev-guide-tidb-basics.md new file mode 100644 index 0000000000000..d5f8f10015c77 --- /dev/null +++ b/develop/dev-guide-tidb-basics.md @@ -0,0 +1,46 @@ +--- +title: TiDB basics for Developers +summary: Learn the basics of TiDB for developers, such as transaction mechanisms and how applications interact with TiDB. 
+--- + +# TiDB basics for Developers + +Before you start working with TiDB, you need to understand some important mechanisms of how TiDB works: + +- Read the [TiDB Transaction Overview](/transaction-overview.md) to understand how transactions work in TiDB, or check out the [Transaction Notes for Application Developers](/develop/dev-guide-transaction-overview.md) to learn about transaction knowledge required for application development. +- Understand [the way applications interact with TiDB](#the-way-applications-interact-with-tidb). +- To learn core components and concepts of building up the distributed database TiDB and TiDB Cloud, refer to the free online course [Introduction to TiDB](https://eng.edu.pingcap.com/catalog/info/id:203/?utm_source=docs-dev-guide). + +## TiDB transaction mechanisms + +TiDB supports distributed transactions and offers both [optimistic transaction](/optimistic-transaction.md) and [pessimistic transaction](/pessimistic-transaction.md) modes. The current version of TiDB uses the **pessimistic transaction** mode by default, which allows you to transact with TiDB as you would with a traditional monolithic database (for example, MySQL). + +You can start a transaction using [`BEGIN`](/sql-statements/sql-statement-begin.md), explicitly specify a **pessimistic transaction** using `BEGIN PESSIMISTIC`, or explicitly specify an **optimistic transaction** using `BEGIN OPTIMISTIC`. After that, you can either commit ([`COMMIT`](/sql-statements/sql-statement-commit.md)) or roll back ([`ROLLBACK`](/sql-statements/sql-statement-rollback.md)) the transaction. + +TiDB guarantees atomicity for all statements between the start of `BEGIN` and the end of `COMMIT` or `ROLLBACK`, that is, all statements that are executed during this period either succeed or fail as a whole. This is used to ensure data consistency you need for application development. + +If you are not sure what an **optimistic transaction** is, do **_NOT_** use it yet. 
Because **optimistic transactions** require that the application can correctly handle [all errors](https://docs.pingcap.com/tidb/v8.5/error-codes/) returned by the `COMMIT` statement. If you are not sure how your application handles them, use a **pessimistic transaction** instead. + +## The way applications interact with TiDB + +TiDB is highly compatible with the MySQL protocol and supports [most MySQL syntax and features](/mysql-compatibility.md), so most MySQL connection libraries are compatible with TiDB. If your application framework or language does not have an official adaptation from PingCAP, it is recommended that you use MySQL's client libraries. More and more third-party libraries are actively supporting TiDB's different features. + +Since TiDB is compatible with the MySQL protocol and MySQL syntax, most of the ORMs that support MySQL are also compatible with TiDB. + +## Read more + +- [Quick Start](/develop/dev-guide-build-cluster-in-cloud.md) +- [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) +- [Connect to TiDB](https://docs.pingcap.com/tidb/v8.5/dev-guide-connect-to-tidb/) +- [Database Schema Design](/develop/dev-guide-schema-design-overview.md) +- [Write Data](/develop/dev-guide-insert-data.md) +- [Read Data](/develop/dev-guide-get-data-from-single-table.md) +- [Transaction](/develop/dev-guide-transaction-overview.md) +- [Optimize](/develop/dev-guide-optimize-sql-overview.md) +- [Example Applications](/develop/dev-guide-sample-application-java-spring-boot.md) + +## Need help? + +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-tidb-crud-sql.md b/develop/dev-guide-tidb-crud-sql.md index ec4bc0e0122c0..01e9aafb708b2 100644 --- a/develop/dev-guide-tidb-crud-sql.md +++ b/develop/dev-guide-tidb-crud-sql.md @@ -1,6 +1,7 @@ --- title: CRUD SQL in TiDB summary: A brief introduction to TiDB's CRUD SQL. +aliases: ['/tidb/stable/dev-guide-tidb-crud-sql/','/tidb/dev/dev-guide-tidb-crud-sql/','/tidbcloud/dev-guide-tidb-crud-sql/'] --- # CRUD SQL in TiDB @@ -9,7 +10,7 @@ This document briefly introduces how to use TiDB's CRUD SQL. ## Before you start -Please make sure you are connected to a TiDB cluster. If not, refer to [Build a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-tidb-cloud-cluster) to create a {{{ .starter }}} cluster. +Please make sure you are connected to a TiDB cluster. If not, refer to [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-tidb-cloud-cluster) to create a {{{ .starter }}} cluster. ## Explore SQL with TiDB @@ -104,14 +105,6 @@ SELECT * FROM person WHERE id < 5; ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-timeouts-in-tidb.md b/develop/dev-guide-timeouts-in-tidb.md index bd2c3828e7d60..9c47b37ec5f3f 100644 --- a/develop/dev-guide-timeouts-in-tidb.md +++ b/develop/dev-guide-timeouts-in-tidb.md @@ -1,6 +1,7 @@ --- title: Timeouts in TiDB summary: Learn about timeouts in TiDB, and solutions for troubleshooting errors. +aliases: ['/tidb/stable/dev-guide-timeouts-in-tidb/','/tidb/dev/dev-guide-timeouts-in-tidb/','/tidbcloud/dev-guide-timeouts-in-tidb/'] --- # Timeouts in TiDB @@ -26,8 +27,6 @@ If you need longer read time temporarily in some cases, you can increase the ret Note that the system variable configuration takes effect globally and immediately. Increasing its value will increase the life time of all existing snapshots, and decreasing it will immediately shorten the life time of all snapshots. Too many MVCC versions will impact the performance of the TiDB cluster. So you need to change this variable back to the previous setting in time. - - > **Tip:** > > Specifically, when Dumpling is exporting data from TiDB (less than 1 TB), if the TiDB version is v4.0.0 or later and Dumpling can access the PD address and the [`INFORMATION_SCHEMA.CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) table of the TiDB cluster, Dumpling automatically adjusts the GC safe point to block GC without affecting the original cluster. @@ -41,25 +40,6 @@ Note that the system variable configuration takes effect globally and immediatel > > For more details, see [Manually set the TiDB GC time](/dumpling-overview.md#manually-set-the-tidb-gc-time). 
- - - - -> **Tip:** -> -> Specifically, when Dumpling is exporting data from TiDB (less than 1 TB), if the TiDB version is later than or equal to v4.0.0 and Dumpling can access the PD address of the TiDB cluster, Dumpling automatically extends the GC time without affecting the original cluster. -> -> However, in either of the following scenarios, Dumpling cannot automatically adjust the GC time: -> -> - The data size is very large (more than 1 TB). -> - Dumpling cannot connect directly to PD, for example, the TiDB cluster is on TiDB Cloud or on Kubernetes that is separated from Dumpling. -> -> In such scenarios, you must manually extend the GC time in advance to avoid export failure due to GC during the export process. -> -> For more details, see [Manually set the TiDB GC time](https://docs.pingcap.com/tidb/stable/dumpling-overview#manually-set-the-tidb-gc-time). - - - For more information about GC, see [GC Overview](/garbage-collection-overview.md). ## Transaction timeout @@ -76,26 +56,12 @@ TiDB also provides a system variable (`max_execution_time`, `0` by default, indi ## JDBC query timeout - - Starting from v6.1.0, when the [`enable-global-kill`](/tidb-configuration-file.md#enable-global-kill-new-in-v610) configuration item is set to its default value `true`, you can use the `setQueryTimeout()` method provided by MySQL JDBC to control the query timeout. > **Note:** > > When your TiDB version is earlier than v6.1.0 or [`enable-global-kill`](/tidb-configuration-file.md#enable-global-kill-new-in-v610) is set to `false`, `setQueryTimeout()` does not work for TiDB. This is because the client sends a `KILL` command to the database when it detects the query timeout. However, because the TiDB service is load balanced, TiDB does not execute the `KILL` command to avoid termination of the connection on a wrong TiDB node. In such cases, you can use `max_execution_time` to control query timeout. 
- - - - -Starting from v6.1.0, when the [`enable-global-kill`](https://docs.pingcap.com/tidb/stable/tidb-configuration-file/#enable-global-kill-new-in-v610) configuration item is set to its default value `true`, you can use the `setQueryTimeout()` method provided by MySQL JDBC to control the query timeout. - -> **Note:** -> -> When your TiDB version is earlier than v6.1.0 or [`enable-global-kill`](https://docs.pingcap.com/tidb/stable/tidb-configuration-file/#enable-global-kill-new-in-v610) is set to `false`, `setQueryTimeout()` does not work for TiDB. This is because the client sends a `KILL` command to the database when it detects the query timeout. However, because the TiDB service is load balanced, TiDB does not execute the `KILL` command to avoid termination of the connection on a wrong TiDB node. In such cases, you can use `max_execution_time` to control query timeout. - - - TiDB provides the following MySQL-compatible timeout control parameters. - **wait_timeout**, controls the non-interactive idle timeout for the connection to Java applications. Since TiDB v5.4, the default value of `wait_timeout` is `28800` seconds, which is 8 hours. For TiDB versions earlier than v5.4, the default value is `0`, which means the timeout is unlimited. @@ -109,14 +75,6 @@ However, in a real production environment, idle connections and indefinitely exe ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-transaction-overview.md b/develop/dev-guide-transaction-overview.md index aa32c042aa499..7fdd3b8bd5575 100644 --- a/develop/dev-guide-transaction-overview.md +++ b/develop/dev-guide-transaction-overview.md @@ -1,6 +1,7 @@ --- title: Transaction overview summary: A brief introduction to transactions in TiDB. +aliases: ['/tidb/stable/dev-guide-transaction-overview/','/tidb/dev/dev-guide-transaction-overview/','/tidbcloud/dev-guide-transaction-overview/'] --- # Transaction overview @@ -162,14 +163,6 @@ TiDB implements Snapshot Isolation (SI) level consistency, also known as "repeat ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-transaction-restraints.md b/develop/dev-guide-transaction-restraints.md index 997dc4f36e67c..f4e043ff46191 100644 --- a/develop/dev-guide-transaction-restraints.md +++ b/develop/dev-guide-transaction-restraints.md @@ -1,6 +1,7 @@ --- title: Transaction Restraints summary: Learn about transaction restraints in TiDB. +aliases: ['/tidb/stable/dev-guide-transaction-restraints/','/tidb/dev/dev-guide-transaction-restraints/','/tidbcloud/dev-guide-transaction-restraints/'] --- # Transaction Restraints @@ -731,14 +732,6 @@ This is a known incompatibility issue with MySQL. You can solve this issue by us ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-transaction-troubleshoot.md b/develop/dev-guide-transaction-troubleshoot.md index 000703d0c2679..840e476fc6070 100644 --- a/develop/dev-guide-transaction-troubleshoot.md +++ b/develop/dev-guide-transaction-troubleshoot.md @@ -1,6 +1,7 @@ --- title: Handle Transaction Errors summary: Learn about how to handle transaction errors, such as deadlocks and application retry errors. +aliases: ['/tidb/stable/dev-guide-transaction-troubleshoot/','/tidb/dev/dev-guide-transaction-troubleshoot/','/tidbcloud/dev-guide-transaction-troubleshoot/'] --- # Handle Transaction Errors @@ -91,18 +92,8 @@ Your retry logic must follow the following rules: - `Error 9007: Write conflict`: Write conflict error, usually caused by multiple transactions modifying the same row of data when the optimistic transaction mode is used. - `COMMIT` the transaction at the end of the try block. - - For more information about error codes, see [Error Codes and Troubleshooting](/error-codes.md). - - - - -For more information about error codes, see [Error Codes and Troubleshooting](https://docs.pingcap.com/tidb/stable/error-codes). - - - ```python while True: n++ @@ -129,42 +120,14 @@ while True: > > If you frequently encounter `Error 9007: Write conflict`, you may need to check your schema design and the data access patterns of your workload to find the root cause of the conflict and try to avoid conflicts by a better design. - - For information about how to troubleshoot and resolve transaction conflicts, see [Troubleshoot Lock Conflicts](/troubleshoot-lock-conflicts.md). - - - - -For information about how to troubleshoot and resolve transaction conflicts, see [Troubleshoot Lock Conflicts](https://docs.pingcap.com/tidb/stable/troubleshoot-lock-conflicts). 
- - - ## See also - - - [Troubleshoot Write Conflicts in Optimistic Transactions](/troubleshoot-write-conflicts.md) - - - - -- [Troubleshoot Write Conflicts in Optimistic Transactions](https://docs.pingcap.com/tidb/stable/troubleshoot-write-conflicts) - - - ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-troubleshoot-overview.md b/develop/dev-guide-troubleshoot-overview.md index 77428c7aaea03..1faa05406f8b6 100644 --- a/develop/dev-guide-troubleshoot-overview.md +++ b/develop/dev-guide-troubleshoot-overview.md @@ -1,6 +1,7 @@ --- title: SQL or Transaction Issues summary: Learn how to troubleshoot SQL or transaction issues that might occur during application development. 
+aliases: ['/tidb/stable/dev-guide-troubleshoot-overview/','/tidb/dev/dev-guide-troubleshoot-overview/','/tidbcloud/dev-guide-troubleshoot-overview/'] --- # SQL or Transaction Issues @@ -11,22 +12,27 @@ This document introduces problems that may occur during application development If you want to improve SQL query performance, follow the instructions in [SQL Performance Tuning](/develop/dev-guide-optimize-sql-overview.md) to solve performance problems such as full table scans and missing indexes. - - If you still have performance issues, see the following documents: -- [Analyze Slow Queries](/analyze-slow-queries.md) -- [Identify Expensive Queries Using Top SQL](/dashboard/top-sql.md) + -If you have questions about SQL operations, see [SQL FAQs](/faq/sql-faq.md). +
- +- [Slow Queries](/tidb-cloud/tune-performance.md#slow-query) +- [Statement Analysis](/tidb-cloud/tune-performance.md#statement-analysis) +- [Key Visualizer](/tidb-cloud/tune-performance.md#key-visualizer) - +
-If you have questions about SQL operations, see [SQL FAQs](https://docs.pingcap.com/tidb/stable/sql-faq). +
- +- [Analyze Slow Queries](/analyze-slow-queries.md) +- [Identify Expensive Queries Using Top SQL](/dashboard/top-sql.md) + +
+
+ +If you have questions about SQL operations, see [SQL FAQs](/faq/sql-faq.md). ## Troubleshoot transaction issues @@ -35,24 +41,11 @@ See [Handle transaction errors](/develop/dev-guide-transaction-troubleshoot.md). ## See also - [Unsupported features](/mysql-compatibility.md#unsupported-features) - - - -- [Cluster Management FAQs](/faq/manage-cluster-faq.md) -- [TiDB FAQs](/faq/tidb-faq.md) - - +- [FAQs for TiDB Cloud](/tidb-cloud/tidb-cloud-faq.md) +- [FAQs for TiDB Self-Managed](/faq/faq-overview.md) ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-unique-serial-number-generation.md b/develop/dev-guide-unique-serial-number-generation.md index aa4569dfa43b4..70b0a78aab587 100644 --- a/develop/dev-guide-unique-serial-number-generation.md +++ b/develop/dev-guide-unique-serial-number-generation.md @@ -1,6 +1,7 @@ --- title: Unique Serial Number Generation summary: Unique serial number generation solution for developers who generate their own unique IDs. 
+aliases: ['/tidb/stable/dev-guide-unique-serial-number-generation/','/tidb/dev/dev-guide-unique-serial-number-generation/','/tidbcloud/dev-guide-unique-serial-number-generation/'] --- # Unique Serial Number Generation @@ -52,14 +53,6 @@ Finally, note that the IDs generated by the above two solutions are not random e ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-unstable-result-set.md b/develop/dev-guide-unstable-result-set.md index 2b166f8b75d37..5414e48da6dd8 100644 --- a/develop/dev-guide-unstable-result-set.md +++ b/develop/dev-guide-unstable-result-set.md @@ -1,6 +1,7 @@ --- title: Unstable Result Set summary: Learn how to handle the error of an unstable result set. +aliases: ['/tidb/stable/dev-guide-unstable-result-set/','/tidb/dev/dev-guide-unstable-result-set/','/tidbcloud/dev-guide-unstable-result-set/'] --- # Unstable Result Set @@ -237,14 +238,6 @@ The returned result is related to the distribution of data on the storage node ( ## Need help? 
- - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-update-data.md b/develop/dev-guide-update-data.md index d0540b462296f..427aea2e0792f 100644 --- a/develop/dev-guide-update-data.md +++ b/develop/dev-guide-update-data.md @@ -1,6 +1,7 @@ --- title: Update Data summary: Learn about how to update data and batch update data. +aliases: ['/tidb/stable/dev-guide-update-data/','/tidb/dev/dev-guide-update-data/','/tidbcloud/dev-guide-update-data/'] --- # Update Data @@ -14,7 +15,7 @@ This document describes how to use the following SQL statements to update the da Before reading this document, you need to prepare the following: -- [Build a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). +- [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md). - Read [Schema Design Overview](/develop/dev-guide-schema-design-overview.md), [Create a Database](/develop/dev-guide-create-database.md), [Create a Table](/develop/dev-guide-create-table.md), and [Create Secondary Indexes](/develop/dev-guide-create-secondary-indexes.md). - If you want to `UPDATE` data, you need to [insert data](/develop/dev-guide-insert-data.md) first. 
@@ -50,19 +51,8 @@ For detailed information, see [UPDATE syntax](/sql-statements/sql-statement-upda The following are some best practices for updating data: - Always specify the `WHERE` clause in the `UPDATE` statement. If the `UPDATE` statement does not have a `WHERE` clause, TiDB will update **_ALL ROWS_** in the table. - - - - Use [bulk-update](#bulk-update) when you need to update a large number of rows (for example, more than ten thousand). Because TiDB limits the size of a single transaction ([txn-total-size-limit](/tidb-configuration-file.md#txn-total-size-limit), 100 MB by default), too many data updates at once will result in holding locks for too long ([pessimistic transactions](/pessimistic-transaction.md)) or cause conflicts ([optimistic transactions](/optimistic-transaction.md)). - - - - -- Use [bulk-update](#bulk-update) when you need to update a large number of rows (for example, more than ten thousand). Because TiDB limits the size of a single transaction to 100 MB by default, too many data updates at once will result in holding locks for too long ([pessimistic transactions](/pessimistic-transaction.md)) or cause conflicts ([optimistic transactions](/optimistic-transaction.md)). - - - ### `UPDATE` example Suppose an author changes her name to **Helen Haruki**. You need to change the [authors](/develop/dev-guide-bookshop-schema-design.md#authors-table) table. Assume that her unique `id` is **1**, and the filter should be: `id = 1`. @@ -163,18 +153,8 @@ VALUES (?, ?, ?, NOW()) ON DUPLICATE KEY UPDATE `score` = ?, `rated_at` = NOW()" When you need to update multiple rows of data in a table, you can [use `INSERT ON DUPLICATE KEY UPDATE`](#use-insert-on-duplicate-key-update) with the `WHERE` clause to filter the data that needs to be updated. 
- - However, if you need to update a large number of rows (for example, more than ten thousand), it is recommended that you update the data iteratively, that is, updating only a portion of the data at each iteration until the update is complete. This is because TiDB limits the size of a single transaction ([txn-total-size-limit](/tidb-configuration-file.md#txn-total-size-limit), 100 MB by default). Too many data updates at once will result in holding locks for too long ([pessimistic transactions](/pessimistic-transaction.md), or causing conflicts ([optimistic transactions](/optimistic-transaction.md)). You can use a loop in your program or script to complete the operation. - - - - -However, if you need to update a large number of rows (for example, more than ten thousand), it is recommended that you update the data iteratively, that is, updating only a portion of the data at each iteration until the update is complete. This is because TiDB limits the size of a single transaction to 100 MB by default. Too many data updates at once will result in holding locks for too long ([pessimistic transactions](/pessimistic-transaction.md), or causing conflicts ([optimistic transactions](/optimistic-transaction.md)). You can use a loop in your program or script to complete the operation. - - - This section provides examples of writing scripts to handle iterative updates. This example shows how a combination of `SELECT` and `UPDATE` should be done to complete a bulk-update. ### Write bulk-update loop @@ -450,14 +430,6 @@ In each iteration, `SELECT` queries in order of the primary key. It selects prim ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-use-common-table-expression.md b/develop/dev-guide-use-common-table-expression.md index 5ba101523e59e..9edba061697a8 100644 --- a/develop/dev-guide-use-common-table-expression.md +++ b/develop/dev-guide-use-common-table-expression.md @@ -1,6 +1,7 @@ --- title: Common Table Expression summary: Learn the CTE feature of TiDB, which help you write SQL statements more efficiently. +aliases: ['/tidb/stable/dev-guide-use-common-table-expression/','/tidb/dev/dev-guide-use-common-table-expression/','/tidbcloud/dev-guide-use-common-table-expression/'] --- # Common Table Expression @@ -217,14 +218,6 @@ The result is as follows: ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-use-follower-read.md b/develop/dev-guide-use-follower-read.md index 611730161374e..6eaf8fa7f0db4 100644 --- a/develop/dev-guide-use-follower-read.md +++ b/develop/dev-guide-use-follower-read.md @@ -1,6 +1,7 @@ --- title: Follower Read summary: Learn how to use Follower Read to optimize query performance. +aliases: ['/tidb/stable/dev-guide-use-follower-read/','/tidb/dev/dev-guide-use-follower-read/','/tidbcloud/dev-guide-use-follower-read/'] --- # Follower Read @@ -17,21 +18,12 @@ By default, TiDB only reads and writes data on the leader of the same Region. Wh ### Reduce read hotspots - +You can visually analyze whether your application has a hotspot Region by doing one of the following: -You can visually analyze whether your application has a hotspot Region on the [TiDB Dashboard Key Visualizer Page](/dashboard/dashboard-key-visualizer.md). You can check whether a read hotspot occurs by selecting the "metrics selection box" to `Read (bytes)` or `Read (keys)`. +- TiDB Cloud: navigate to the [Key Visualizer in the TiDB Cloud console](/tidb-cloud/tune-performance.md#key-visualizer), and then check whether a read hotspot occurs by selecting the "metrics selection box" to `Read (bytes)` or `Read (keys)`. +- TiDB Self-Managed: navigate to the [Key Visualizer in TiDB Dashboard](/dashboard/dashboard-key-visualizer.md), and then check whether a read hotspot occurs by selecting the "metrics selection box" to `Read (bytes)` or `Read (keys)`. -For more information about handling hotspot, see [TiDB Hotspot Problem Handling](/troubleshoot-hot-spot-issues.md). - - - - - -You can visually analyze whether your application has a hotspot Region on the [TiDB Cloud Key Visualizer Page](/tidb-cloud/tune-performance.md#key-visualizer). 
You can check whether a read hotspot occurs by selecting the "metrics selection box" to `Read (bytes)` or `Read (keys)`. - -For more information about handling hotspot, see [TiDB Hotspot Problem Handling](https://docs.pingcap.com/tidb/stable/troubleshoot-hot-spot-issues). - - +If hotspot issues do exist, you can troubleshoot them by referring to [Handle TiDB Hotspot Issues](/troubleshoot-hot-spot-issues.md), which helps to avoid hotspot generation at the application level. If read hotspots are unavoidable or the changing cost is very high, you can try using the Follower Read feature to better load the balance of reading requests to the follower Region. @@ -146,31 +138,12 @@ public static class AuthorDAO { ## Read more - [Follower Read](/follower-read.md) - - - - [Troubleshoot Hotspot Issues](/troubleshoot-hot-spot-issues.md) -- [TiDB Dashboard - Key Visualizer Page](/dashboard/dashboard-key-visualizer.md) - - - - - -- [Troubleshoot Hotspot Issues](https://docs.pingcap.com/tidb/stable/troubleshoot-hot-spot-issues) -- [TiDB Cloud Key Visualizer Page](/tidb-cloud/tune-performance.md#key-visualizer) - - +- [Key Visualizer in the TiDB Cloud console](/tidb-cloud/tune-performance.md#key-visualizer) +- [Key Visualizer in TiDB Dashboard for TiDB Self-Managed](/dashboard/dashboard-key-visualizer.md) ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). 
- - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-use-stale-read.md b/develop/dev-guide-use-stale-read.md index 9f374c5ac1a7a..e92e988b6ae89 100644 --- a/develop/dev-guide-use-stale-read.md +++ b/develop/dev-guide-use-stale-read.md @@ -1,6 +1,7 @@ --- title: Stale Read summary: Learn how to use Stale Read to accelerate queries under certain conditions. +aliases: ['/tidb/stable/dev-guide-use-stale-read/','/tidb/dev/dev-guide-use-stale-read/','/tidbcloud/dev-guide-use-stale-read/'] --- # Stale Read @@ -500,14 +501,6 @@ public static class StaleReadHelper{ ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). 
+- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-use-subqueries.md b/develop/dev-guide-use-subqueries.md index 9e6fe2c86b23d..7c775f38e7f6f 100644 --- a/develop/dev-guide-use-subqueries.md +++ b/develop/dev-guide-use-subqueries.md @@ -1,6 +1,7 @@ --- title: Subquery summary: Learn how to use subquery in TiDB. +aliases: ['/tidb/stable/dev-guide-use-subqueries/','/tidb/dev/dev-guide-use-subqueries/','/tidbcloud/dev-guide-use-subqueries/'] --- # Subquery @@ -131,14 +132,6 @@ As a best practice, in actual development, it is recommended to avoid querying t ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-use-temporary-tables.md b/develop/dev-guide-use-temporary-tables.md index f74168012decc..4ff583b2f7979 100644 --- a/develop/dev-guide-use-temporary-tables.md +++ b/develop/dev-guide-use-temporary-tables.md @@ -1,6 +1,7 @@ --- title: Temporary Tables summary: Learn how to create, view, query, and delete temporary tables. 
+aliases: ['/tidb/stable/dev-guide-use-temporary-tables/','/tidb/dev/dev-guide-use-temporary-tables/','/tidbcloud/dev-guide-use-temporary-tables/'] --- # Temporary Tables @@ -260,14 +261,6 @@ For limitations of temporary tables in TiDB, see [Compatibility restrictions wit ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). - - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) diff --git a/develop/dev-guide-use-views.md b/develop/dev-guide-use-views.md index 83f30d2141cbc..6ebb09088e9cf 100644 --- a/develop/dev-guide-use-views.md +++ b/develop/dev-guide-use-views.md @@ -1,6 +1,7 @@ --- title: Views summary: Learn how to use views in TiDB. +aliases: ['/tidb/stable/dev-guide-use-views/','/tidb/dev/dev-guide-use-views/','/tidbcloud/dev-guide-use-views/'] --- # Views @@ -125,14 +126,6 @@ For limitations of views in TiDB, see [Limitations of Views](/views.md#limitatio ## Need help? - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](/support.md). 
- - - - - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). - - \ No newline at end of file +- Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs). +- [Submit a support ticket for TiDB Cloud](https://tidb.support.pingcap.com/servicedesk/customer/portals) +- [Submit a support ticket for TiDB Self-Managed](/support.md) \ No newline at end of file diff --git a/develop/dev-guide-vector-search.md b/develop/dev-guide-vector-search.md new file mode 100644 index 0000000000000..c17526875cc46 --- /dev/null +++ b/develop/dev-guide-vector-search.md @@ -0,0 +1,61 @@ +--- +title: Vector Search +summary: Introduce the vector search feature in TiDB for developers, including concepts, tutorials, integrations, and reference documentation. +--- + +# Vector Search + +[Vector search](/ai/concepts/vector-search-overview.md) enables semantic similarity searches across diverse data types such as documents, images, audio, and video. By leveraging your MySQL expertise, you can build scalable AI applications with advanced search functionality. + +## Get started + +To get started with TiDB vector search, refer to the following tutorials: + +- [Get Started via Python](/ai/quickstart-via-python.md) +- [Get Started via SQL](/ai/quickstart-via-sql.md) + +## Auto Embedding + +The Auto Embedding feature lets you perform vector searches directly with plain text, without providing your own vectors. With this feature, you can insert text data directly and perform semantic searches using text queries, while TiDB automatically converts the text into vectors behind the scenes. 
+ +Currently, TiDB supports various embedding models, such as Amazon Titan, Cohere, Jina AI, OpenAI, Gemini, Hugging Face, and NVIDIA NIM. You can choose the one that best fits your needs. For more information, see [Auto Embedding Overview](/ai/integrations/vector-search-auto-embedding-overview.md). + +## Integrations + +To accelerate your development, you can integrate TiDB vector search with popular AI frameworks (such as LlamaIndex and LangChain), embedding services (such as Jina AI), and ORM libraries (such as SQLAlchemy, Peewee, and Django ORM). You can choose the one that best fits your needs. + +For more information, see [Vector Search Integration Overview](/ai/integrations/vector-search-integration-overview.md). + +## Text search + +Unlike vector search, which focuses on semantic similarity, full-text search lets you retrieve documents for exact keywords. + +To improve the retrieval quality in RAG scenarios, you can combine vector search with full-text search. + +| Scenario | Documentation | +|---------------|-------------| +| Perform keyword-based search using SQL. | [Full-Text Search with SQL](/ai/guides/vector-search-full-text-search-sql.md) | +| Implement full-text search in Python applications. | [Full-Text Search with Python](/ai/guides/vector-search-full-text-search-python.md) | +| Combine vector and full-text search for better results. | [Hybrid Search](/ai/guides/vector-search-hybrid-search.md) | + +## Improve performance + +To optimize the performance of your vector search queries, you can follow a series of best practices, such as adding vector indexes, monitoring index build progress, reducing dimensions, excluding vector columns, and warming up indexes. + +For more information about these best practices, see [Improve Vector Search Performance](/ai/reference/vector-search-improve-performance.md). 
+ +## Limitations + +Before implementing vector search, be aware of the following limitations: + +- Maximum 16383 dimensions per vector +- Vector columns cannot be primary keys, unique indexes, or partition keys +- No direct casting between vector and other data types (use string as intermediate) + +For a complete list, see [Vector Search Limitations](/ai/reference/vector-search-limitations.md). + +## Reference + +- [Vector Data Types](/ai/reference/vector-search-data-types.md) +- [Vector Functions and Operators](/ai/reference/vector-search-functions-and-operators.md) +- [Vector Index](/ai/reference/vector-search-index.md) diff --git a/tidb-cloud/dev-guide-wordpress.md b/develop/dev-guide-wordpress.md similarity index 94% rename from tidb-cloud/dev-guide-wordpress.md rename to develop/dev-guide-wordpress.md index cec0c09e79320..4c3a55a5f13a1 100644 --- a/tidb-cloud/dev-guide-wordpress.md +++ b/develop/dev-guide-wordpress.md @@ -1,9 +1,10 @@ --- -title: Connect to TiDB Cloud Serverless with WordPress -summary: Learn how to use TiDB Cloud Serverless to run WordPress. This tutorial gives step-by-step guidance to run WordPress + TiDB Cloud Serverless in a few minutes. +title: Integrate WordPress with {{{ .starter }}} +summary: Learn how to use {{{ .starter }}} to run WordPress. This tutorial gives step-by-step guidance to run WordPress + {{{ .starter }}} in a few minutes. +aliases: ['/tidbcloud/dev-guide-wordpress/'] --- -# Connect to TiDB Cloud Serverless with WordPress +# Integrate WordPress with {{{ .starter }}} TiDB is a MySQL-compatible database, TiDB Cloud Serverless is a fully managed TiDB offering, and [WordPress](https://github.com/WordPress) is a free, open-source content management system (CMS) that lets users create and manage websites. WordPress is written in PHP and uses a MySQL database. 
diff --git a/best-practices/java-app-best-practices.md b/develop/java-app-best-practices.md similarity index 99% rename from best-practices/java-app-best-practices.md rename to develop/java-app-best-practices.md index 6f43d359bf6a9..0de9d9c6369b0 100644 --- a/best-practices/java-app-best-practices.md +++ b/develop/java-app-best-practices.md @@ -1,7 +1,7 @@ --- title: Best Practices for Developing Java Applications with TiDB summary: This document introduces best practices for developing Java applications with TiDB, covering database-related components, JDBC usage, connection pool configuration, data access framework, Spring Transaction, and troubleshooting tools. TiDB is highly compatible with MySQL, so most MySQL-based Java application best practices also apply to TiDB. -aliases: ['/docs/dev/best-practices/java-app-best-practices/','/docs/dev/reference/best-practices/java-app/'] +aliases: ['/docs/dev/best-practices/java-app-best-practices/','/docs/dev/reference/best-practices/java-app/','/tidb/stable/java-app-best-practices/','/tidb/dev/java-app-best-practices/'] --- # Best Practices for Developing Java Applications with TiDB diff --git a/tidb-cloud/serverless-driver-drizzle-example.md b/develop/serverless-driver-drizzle-example.md similarity index 94% rename from tidb-cloud/serverless-driver-drizzle-example.md rename to develop/serverless-driver-drizzle-example.md index 22eb50cab7045..714f440d1733b 100644 --- a/tidb-cloud/serverless-driver-drizzle-example.md +++ b/develop/serverless-driver-drizzle-example.md @@ -1,11 +1,12 @@ --- title: TiDB Cloud Serverless Driver Drizzle Tutorial summary: Learn how to use TiDB Cloud serverless driver with Drizzle. +aliases: ['/tidbcloud/serverless-driver-drizzle-example/'] --- # TiDB Cloud Serverless Driver Drizzle Tutorial -[Drizzle ORM](https://orm.drizzle.team/) is a lightweight and performant TypeScript ORM with developer experience in mind. 
Starting from `drizzle-orm@0.31.2`, it supports [drizzle-orm/tidb-serverless](https://orm.drizzle.team/docs/get-started-mysql#tidb-serverless), enabling you to use Drizzle over HTTPS with [TiDB Cloud serverless driver](/tidb-cloud/serverless-driver.md). +[Drizzle ORM](https://orm.drizzle.team/) is a lightweight and performant TypeScript ORM with developer experience in mind. Starting from `drizzle-orm@0.31.2`, it supports [drizzle-orm/tidb-serverless](https://orm.drizzle.team/docs/get-started-mysql#tidb-serverless), enabling you to use Drizzle over HTTPS with [TiDB Cloud serverless driver](/develop/serverless-driver.md). This tutorial describes how to use TiDB Cloud serverless driver with Drizzle in Node.js environments and edge environments. @@ -93,7 +94,7 @@ To complete this tutorial, you need the following: 1. Create a table in your TiDB Cloud Serverless cluster. - You can use [SQL Editor in the TiDB Cloud console](/tidb-cloud/explore-data-with-chat2query.md) to execute SQL statements. Here is an example: + You can use [SQL Editor in the TiDB Cloud console](https://docs.pingcap.com/tidbcloud/explore-data-with-chat2query) to execute SQL statements. Here is an example: ```sql CREATE TABLE `test`.`users` ( @@ -203,7 +204,7 @@ To complete this tutorial, you need the following: 1. Create a table in your TiDB Cloud Serverless cluster. - You can use [SQL Editor in the TiDB Cloud console](/tidb-cloud/explore-data-with-chat2query.md) to execute SQL statements. Here is an example: + You can use [SQL Editor in the TiDB Cloud console](https://docs.pingcap.com/tidbcloud/explore-data-with-chat2query.md) to execute SQL statements. Here is an example: ```sql CREATE TABLE `test`.`users` ( @@ -269,4 +270,4 @@ To complete this tutorial, you need the following: ## What's next - Learn more about [Drizzle](https://orm.drizzle.team/docs/overview) and [drizzle-orm/tidb-serverless](https://orm.drizzle.team/docs/get-started-mysql#tidb-serverless). 
-- Learn how to [integrate TiDB Cloud with Vercel](/tidb-cloud/integrate-tidbcloud-with-vercel.md). +- Learn how to [integrate TiDB Cloud with Vercel](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-vercel). diff --git a/tidb-cloud/serverless-driver-kysely-example.md b/develop/serverless-driver-kysely-example.md similarity index 92% rename from tidb-cloud/serverless-driver-kysely-example.md rename to develop/serverless-driver-kysely-example.md index 38a9763ababeb..15cc345a95d46 100644 --- a/tidb-cloud/serverless-driver-kysely-example.md +++ b/develop/serverless-driver-kysely-example.md @@ -1,11 +1,12 @@ --- title: TiDB Cloud Serverless Driver Kysely Tutorial summary: Learn how to use TiDB Cloud serverless driver with Kysely. +aliases: ['/tidbcloud/serverless-driver-kysely-example/'] --- # TiDB Cloud Serverless Driver Kysely Tutorial -[Kysely](https://kysely.dev/docs/intro) is a type-safe and autocompletion-friendly TypeScript SQL query builder. TiDB Cloud offers [@tidbcloud/kysely](https://github.com/tidbcloud/kysely), enabling you to use Kysely over HTTPS with [TiDB Cloud serverless driver](/tidb-cloud/serverless-driver.md). Compared with the traditional TCP way, [@tidbcloud/kysely](https://github.com/tidbcloud/kysely) brings the following benefits: +[Kysely](https://kysely.dev/docs/intro) is a type-safe and autocompletion-friendly TypeScript SQL query builder. TiDB Cloud offers [@tidbcloud/kysely](https://github.com/tidbcloud/kysely), enabling you to use Kysely over HTTPS with [TiDB Cloud serverless driver](/develop/serverless-driver.md). Compared with the traditional TCP way, [@tidbcloud/kysely](https://github.com/tidbcloud/kysely) brings the following benefits: - Better performance in serverless environments. - Ability to use Kysely in edge environments. @@ -89,7 +90,7 @@ To complete this tutorial, you need the following: 1. Create a table in your TiDB Cloud Serverless cluster and insert some data. 
- You can use [SQL Editor in the TiDB Cloud console](/tidb-cloud/explore-data-with-chat2query.md) to execute SQL statements. Here is an example: + You can use [SQL Editor in the TiDB Cloud console](https://docs.pingcap.com/tidbcloud/explore-data-with-chat2query) to execute SQL statements. Here is an example: ```sql CREATE TABLE `test`.`person` ( @@ -201,7 +202,7 @@ mysql://[username]:[password]@[host]/[database] 1. Create a table in your TiDB Cloud Serverless cluster and insert some data. - You can use [SQL Editor in the TiDB Cloud console](/tidb-cloud/explore-data-with-chat2query.md) to execute SQL statements. Here is an example: + You can use [SQL Editor in the TiDB Cloud console](https://docs.pingcap.com/tidbcloud/explore-data-with-chat2query) to execute SQL statements. Here is an example: ```sql CREATE TABLE `test`.`person` ( @@ -296,4 +297,4 @@ mysql://[username]:[password]@[host]/[database] ## What's next - Learn more about [Kysely](https://kysely.dev/docs/intro) and [@tidbcloud/kysely](https://github.com/tidbcloud/kysely) -- Learn how to [integrate TiDB Cloud with Vercel](/tidb-cloud/integrate-tidbcloud-with-vercel.md) +- Learn how to [integrate TiDB Cloud with Vercel](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-vercel) diff --git a/tidb-cloud/serverless-driver-node-example.md b/develop/serverless-driver-node-example.md similarity index 98% rename from tidb-cloud/serverless-driver-node-example.md rename to develop/serverless-driver-node-example.md index e900f653bd4e9..5fc4d9b27012b 100644 --- a/tidb-cloud/serverless-driver-node-example.md +++ b/develop/serverless-driver-node-example.md @@ -1,6 +1,7 @@ --- title: TiDB Cloud Serverless Driver Node.js Tutorial summary: Learn how to use TiDB Cloud serverless driver in a local Node.js project. 
+aliases: ['/tidbcloud/serverless-driver-node-example/'] --- # TiDB Cloud Serverless Driver Node.js Tutorial diff --git a/tidb-cloud/serverless-driver-prisma-example.md b/develop/serverless-driver-prisma-example.md similarity index 95% rename from tidb-cloud/serverless-driver-prisma-example.md rename to develop/serverless-driver-prisma-example.md index 056985f603b4d..00a845ac42e07 100644 --- a/tidb-cloud/serverless-driver-prisma-example.md +++ b/develop/serverless-driver-prisma-example.md @@ -1,11 +1,12 @@ --- title: TiDB Cloud Serverless Driver Prisma Tutorial summary: Learn how to use TiDB Cloud serverless driver with Prisma ORM. +aliases: ['/tidbcloud/serverless-driver-prisma-example/'] --- # TiDB Cloud Serverless Driver Prisma Tutorial -[Prisma](https://www.prisma.io/docs) is an open source next-generation ORM (Object-Relational Mapping) that helps developers interact with their database in an intuitive, efficient, and safe way. TiDB Cloud offers [@tidbcloud/prisma-adapter](https://github.com/tidbcloud/prisma-adapter), enabling you to use [Prisma Client](https://www.prisma.io/docs/concepts/components/prisma-client) over HTTPS with [TiDB Cloud serverless driver](/tidb-cloud/serverless-driver.md). Compared with the traditional TCP way, [@tidbcloud/prisma-adapter](https://github.com/tidbcloud/prisma-adapter) brings the following benefits: +[Prisma](https://www.prisma.io/docs) is an open source next-generation ORM (Object-Relational Mapping) that helps developers interact with their database in an intuitive, efficient, and safe way. TiDB Cloud offers [@tidbcloud/prisma-adapter](https://github.com/tidbcloud/prisma-adapter), enabling you to use [Prisma Client](https://www.prisma.io/docs/concepts/components/prisma-client) over HTTPS with [TiDB Cloud serverless driver](/develop/serverless-driver.md). 
Compared with the traditional TCP way, [@tidbcloud/prisma-adapter](https://github.com/tidbcloud/prisma-adapter) brings the following benefits: - Better performance of Prisma Client in serverless environments - Ability to use Prisma Client in edge environments @@ -14,7 +15,7 @@ This tutorial describes how to use [@tidbcloud/prisma-adapter](https://github.co ## Install -You need to install both [@tidbcloud/prisma-adapter](https://github.com/tidbcloud/prisma-adapter) and [TiDB Cloud serverless driver](/tidb-cloud/serverless-driver.md). You can install them using [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) or your preferred package manager. +You need to install both [@tidbcloud/prisma-adapter](https://github.com/tidbcloud/prisma-adapter) and [TiDB Cloud serverless driver](/develop/serverless-driver.md). You can install them using [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) or your preferred package manager. Taking npm as an example, you can run the following commands for installation: diff --git a/tidb-cloud/serverless-driver.md b/develop/serverless-driver.md similarity index 92% rename from tidb-cloud/serverless-driver.md rename to develop/serverless-driver.md index c3ebd151db135..9245d4681f843 100644 --- a/tidb-cloud/serverless-driver.md +++ b/develop/serverless-driver.md @@ -1,7 +1,7 @@ --- title: TiDB Cloud Serverless Driver (Beta) -summary: Learn how to connect to TiDB Cloud Serverless from serverless and edge environments. -aliases: ['/tidbcloud/serverless-driver-config'] +summary: Learn how to connect to {{{ .starter }}} or {{{ .essential }}} from serverless and edge environments. 
+aliases: ['/tidbcloud/serverless-driver-config/','/tidbcloud/serverless-driver/','/tidb/stable/serverless-driver/','/tidb/dev/serverless-driver/'] --- # TiDB Cloud Serverless Driver (Beta) @@ -14,7 +14,7 @@ Traditional TCP-based MySQL drivers are not suitable for serverless functions du > **Note:** > -> If you prefer programming with RESTful API rather than SQL or ORM, you can use [Data Service (beta)](/tidb-cloud/data-service-overview.md). +> If you prefer programming with RESTful API rather than SQL or ORM, you can use [Data Service (beta)](https://docs.pingcap.com/tidbcloud/data-service-overview/). ## Install the serverless driver @@ -80,7 +80,7 @@ export async function GET(request: NextRequest) { } ``` -Learn more about [using TiDB Cloud serverless driver in Vercel](/tidb-cloud/integrate-tidbcloud-with-vercel.md). +Learn more about [using TiDB Cloud serverless driver in Vercel](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-vercel).
@@ -100,7 +100,7 @@ export default { }; ``` -Learn more about [using TiDB Cloud serverless driver in Cloudflare Workers](/tidb-cloud/integrate-tidbcloud-with-cloudflare.md). +Learn more about [using TiDB Cloud serverless driver in Cloudflare Workers](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-cloudflare).
@@ -116,7 +116,7 @@ export default async () => { } ``` -Learn more about [using TiDB Cloud serverless driver in Netlify](/tidb-cloud/integrate-tidbcloud-with-netlify.md#use-the-edge-function). +Learn more about [using TiDB Cloud serverless driver in Netlify](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-netlify#use-the-edge-function).
@@ -327,7 +327,10 @@ TiDB Cloud serverless driver has been integrated with the following ORMs: ## Pricing -The serverless driver itself is free, but accessing data with the driver generates [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit) and storage usage. The pricing follows the [TiDB Cloud Serverless pricing](https://www.pingcap.com/tidb-serverless-pricing-details/) model. +The serverless driver itself is free, but accessing data with the driver generates [Request Units (RUs)](https://docs.pingcap.com/tidbcloud/tidb-cloud-glossary#request-unit-ru) and storage usage. + +- For {{{ .starter }}} clusters, the pricing follows the [{{{ .starter }}} pricing](https://www.pingcap.com/tidb-cloud-starter-pricing-details/) model. +- For {{{ .essential }}} clusters, the pricing follows the [{{{ .essential }}} pricing](https://www.pingcap.com/tidb-cloud-essential-pricing-details/) model. ## Limitations @@ -335,8 +338,9 @@ Currently, using serverless driver has the following limitations: - Up to 10,000 rows can be fetched in a single query. - You can execute only a single SQL statement at a time. Multiple SQL statements in one query are not supported yet. -- Connection with [private endpoints](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) is not supported yet. +- Connection with [private endpoints](https://docs.pingcap.com/tidbcloud/set-up-private-endpoint-connections-serverless) is not supported yet. +- The server blocks requests from unauthorized browser origins via Cross-Origin Resource Sharing (CORS) to protect your credentials. As a result, you can use the serverless driver only from backend services. ## What's next -- Learn how to [use TiDB Cloud serverless driver in a local Node.js project](/tidb-cloud/serverless-driver-node-example.md). +- Learn how to [use TiDB Cloud serverless driver in a local Node.js project](/develop/serverless-driver-node-example.md). 
diff --git a/faq/sql-faq.md b/faq/sql-faq.md index f8ada1ade60d7..01cb50d5ac02f 100644 --- a/faq/sql-faq.md +++ b/faq/sql-faq.md @@ -247,7 +247,7 @@ SELECT column_name FROM table_name USE INDEX(index_name)WHERE where_conditio ## DDL Execution -This section lists issues related to DDL statement execution. For detailed explanations on the DDL execution principles, see [Execution Principles and Best Practices of DDL Statements](/ddl-introduction.md). +This section lists issues related to DDL statement execution. For detailed explanations on the DDL execution principles, see [Execution Principles and Best Practices of DDL Statements](/best-practices/ddl-introduction.md). ### How long does it take to perform various DDL operations? diff --git a/glossary.md b/glossary.md index d7850340d7fca..73295b3bbd579 100644 --- a/glossary.md +++ b/glossary.md @@ -96,7 +96,7 @@ For more information, see [Use Dumpling to Export Data](/dumpling-overview.md). ### Data Definition Language (DDL) -Data Definition Language (DDL) is a part of the SQL standard that deals with creating, modifying, and dropping tables and other objects. For more information, see [DDL Introduction](/ddl-introduction.md). +Data Definition Language (DDL) is a part of the SQL standard that deals with creating, modifying, and dropping tables and other objects. For more information, see [DDL Introduction](/best-practices/ddl-introduction.md). ### Data Migration (DM) @@ -412,4 +412,4 @@ Universally Unique Identifier (UUID) is a 128-bit (16-byte) generated ID used to ### Vector search -[Vector search](/vector-search/vector-search-overview.md) is a search method that prioritizes the meaning of your data to deliver relevant results. Unlike traditional full-text search, which relies on exact keyword matching and word frequency, vector search converts various data types (such as text, images, or audio) into high-dimensional vectors and queries based on the similarity between these vectors. 
This search method captures the semantic meaning and contextual information of the data, leading to a more precise understanding of user intent. Even when the search terms do not exactly match the content in the database, vector search can still provide results that align with the user's intent by analyzing the semantics of the data. +[Vector search](/ai/concepts/vector-search-overview.md) is a search method that prioritizes the meaning of your data to deliver relevant results. Unlike traditional full-text search, which relies on exact keyword matching and word frequency, vector search converts various data types (such as text, images, or audio) into high-dimensional vectors and queries based on the similarity between these vectors. This search method captures the semantic meaning and contextual information of the data, leading to a more precise understanding of user intent. Even when the search terms do not exactly match the content in the database, vector search can still provide results that align with the user's intent by analyzing the semantics of the data. 
diff --git a/media/tidb-cloud/changefeed/sink-to-cloud-storage-azure-signature.png b/media/tidb-cloud/changefeed/sink-to-cloud-storage-azure-signature.png new file mode 100644 index 0000000000000..fc29132fce434 Binary files /dev/null and b/media/tidb-cloud/changefeed/sink-to-cloud-storage-azure-signature.png differ diff --git a/media/tidb-cloud/import-data-csv-config.png b/media/tidb-cloud/import-data-csv-config.png index 0f769a5d3aa17..b40f9caa2805a 100644 Binary files a/media/tidb-cloud/import-data-csv-config.png and b/media/tidb-cloud/import-data-csv-config.png differ diff --git a/media/tidb-cloud/looker-studio-configure-connection.png b/media/tidb-cloud/looker-studio-configure-connection.png deleted file mode 100644 index 82f5055f897fa..0000000000000 Binary files a/media/tidb-cloud/looker-studio-configure-connection.png and /dev/null differ diff --git a/media/tidb-cloud/looker-studio-custom-query.png b/media/tidb-cloud/looker-studio-custom-query.png deleted file mode 100644 index 8426e2da3c71a..0000000000000 Binary files a/media/tidb-cloud/looker-studio-custom-query.png and /dev/null differ diff --git a/media/tidb-cloud/looker-studio-simple-chart.png b/media/tidb-cloud/looker-studio-simple-chart.png deleted file mode 100644 index 257f9b19203da..0000000000000 Binary files a/media/tidb-cloud/looker-studio-simple-chart.png and /dev/null differ diff --git a/media/tidb-cloud/op-to-cloud-get-role-arn.png b/media/tidb-cloud/op-to-cloud-get-role-arn.png deleted file mode 100644 index 286b97dbd231b..0000000000000 Binary files a/media/tidb-cloud/op-to-cloud-get-role-arn.png and /dev/null differ diff --git a/media/tidb-cloud/poc-points.png b/media/tidb-cloud/poc-points.png deleted file mode 100644 index e6148999738a2..0000000000000 Binary files a/media/tidb-cloud/poc-points.png and /dev/null differ diff --git a/media/tidb-cloud/private-endpoint/alicloud-private-endpoint-info.png b/media/tidb-cloud/private-endpoint/alicloud-private-endpoint-info.png new file mode 100644 
index 0000000000000..a46970c0d6fd8 Binary files /dev/null and b/media/tidb-cloud/private-endpoint/alicloud-private-endpoint-info.png differ diff --git a/media/tidb-cloud/regional-high-avaliability-alibaba-cloud.png b/media/tidb-cloud/regional-high-avaliability-alibaba-cloud.png new file mode 100644 index 0000000000000..3cab81f5ab46d Binary files /dev/null and b/media/tidb-cloud/regional-high-avaliability-alibaba-cloud.png differ diff --git a/media/tidb-cloud/regional-high-avaliability-aws.png b/media/tidb-cloud/regional-high-avaliability-aws.png new file mode 100644 index 0000000000000..622217e2fa083 Binary files /dev/null and b/media/tidb-cloud/regional-high-avaliability-aws.png differ diff --git a/media/tidb-cloud/serverless-regional-high-avaliability-aws.png b/media/tidb-cloud/serverless-regional-high-avaliability-aws.png deleted file mode 100644 index 368373b69bef2..0000000000000 Binary files a/media/tidb-cloud/serverless-regional-high-avaliability-aws.png and /dev/null differ diff --git a/media/tidb-cloud/serverless-zonal-high-avaliability-aws.png b/media/tidb-cloud/serverless-zonal-high-avaliability-aws.png deleted file mode 100644 index 2a2d5739640c6..0000000000000 Binary files a/media/tidb-cloud/serverless-zonal-high-avaliability-aws.png and /dev/null differ diff --git a/media/tidb-cloud/tiproxy-billing.png b/media/tidb-cloud/tiproxy-billing.png new file mode 100644 index 0000000000000..e5d74bc76f463 Binary files /dev/null and b/media/tidb-cloud/tiproxy-billing.png differ diff --git a/media/tidb-cloud/tiproxy-disable-tiproxy.png b/media/tidb-cloud/tiproxy-disable-tiproxy.png new file mode 100644 index 0000000000000..b901578259f4c Binary files /dev/null and b/media/tidb-cloud/tiproxy-disable-tiproxy.png differ diff --git a/media/tidb-cloud/tiproxy-enable-tiproxy.png b/media/tidb-cloud/tiproxy-enable-tiproxy.png new file mode 100644 index 0000000000000..662de1527b506 Binary files /dev/null and b/media/tidb-cloud/tiproxy-enable-tiproxy.png differ diff --git 
a/media/tidb-cloud/tiproxy-topology.png b/media/tidb-cloud/tiproxy-topology.png new file mode 100644 index 0000000000000..4a014e603b339 Binary files /dev/null and b/media/tidb-cloud/tiproxy-topology.png differ diff --git a/media/tidb-cloud/zonal-high-avaliability-alibaba-cloud.png b/media/tidb-cloud/zonal-high-avaliability-alibaba-cloud.png new file mode 100644 index 0000000000000..e5e49c20bbd83 Binary files /dev/null and b/media/tidb-cloud/zonal-high-avaliability-alibaba-cloud.png differ diff --git a/media/tidb-cloud/zonal-high-avaliability-aws.png b/media/tidb-cloud/zonal-high-avaliability-aws.png new file mode 100644 index 0000000000000..93a3fc0cb7761 Binary files /dev/null and b/media/tidb-cloud/zonal-high-avaliability-aws.png differ diff --git a/package-lock.json b/package-lock.json index fd224f072eb07..81ec024172e78 100644 --- a/package-lock.json +++ b/package-lock.json @@ -21,6 +21,116 @@ "micromark-extension-mdxjs": "^1.0.0", "octokit": "^3.1.0", "unist-util-visit": "^4.1.0" + }, + "devDependencies": { + "@breeswish-org/remark-lint-pingcap-docs-anchor": "1.1.2", + "markdown-link-check": "3.8.1", + "remark-cli": "9.0.0", + "remark-lint": "8.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.6.tgz", + "integrity": "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@breeswish-org/remark-lint-pingcap-docs-anchor": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@breeswish-org/remark-lint-pingcap-docs-anchor/-/remark-lint-pingcap-docs-anchor-1.1.2.tgz", + "integrity": "sha512-MGcmkEdXWAfHWusuIj4qRG6HXKICCGFK7ohp0mWEtwEXrMYeDeQt44rTVTC/HCaTi9qR9kv9pBopb+YIHemdrA==", + "dev": true, + "license": "MIT", + "dependencies": { + "didyoumean2": "^4.1.0", + "github-slugger": "^1.4.0", + "mdast-util-to-string": "^2.0.0", + "remark": "^13.0.0", + "unified": "^9.2.2", + "unified-lint-rule": "^1.0.5", + "unist-util-visit": "^2.0.3" + } + }, + "node_modules/@breeswish-org/remark-lint-pingcap-docs-anchor/node_modules/mdast-util-to-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", + "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@breeswish-org/remark-lint-pingcap-docs-anchor/node_modules/unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@breeswish-org/remark-lint-pingcap-docs-anchor/node_modules/unist-util-visit": { + 
"version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", + "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0", + "unist-util-visit-parents": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@breeswish-org/remark-lint-pingcap-docs-anchor/node_modules/unist-util-visit-parents": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", + "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, "node_modules/@octokit/app": { @@ -457,11 +567,122 @@ "node": ">=8" } }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + 
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/asn1": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": "~2.1.0" + } + }, + "node_modules/assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + 
"dev": true, + "license": "MIT" + }, "node_modules/asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, + "node_modules/aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/aws4": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.13.2.tgz", + "integrity": "sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==", + "dev": true, + "license": "MIT" + }, "node_modules/axios": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/axios/-/axios-1.4.0.tgz", @@ -472,16 +693,50 @@ "proxy-from-env": "^1.1.0" } }, + "node_modules/bail": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz", + "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, + "node_modules/bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tweetnacl": "^0.14.3" + } + }, 
"node_modules/before-after-hook": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==" }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/bottleneck": { "version": "2.19.5", "resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz", @@ -495,6 +750,19 @@ "balanced-match": "^1.0.0" } }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/btoa-lite": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/btoa-lite/-/btoa-lite-1.0.0.tgz", @@ -505,11 +773,52 @@ "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==" }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + 
"integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==", + "dev": true, + "license": "Apache-2.0" + }, "node_modules/ccount": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/ccount/-/ccount-2.0.1.tgz", "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==" }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, "node_modules/character-entities": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/character-entities/-/character-entities-2.0.1.tgz", @@ -530,6 +839,31 @@ "resolved": "https://registry.npmmirror.com/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==" }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + 
"normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, "node_modules/clean-stack": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", @@ -538,6 +872,33 @@ "node": ">=6" } }, + "node_modules/co": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/co/-/co-3.1.0.tgz", + "integrity": "sha512-CQsjCRiNObI8AtTsNIBDRMQ4oMR83CzEswHYahClvul7gKk+lDQiOKv+5qh7LQWf5sh6jkZNispz/QlsZxyNgA==", + "dev": true, + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, "node_modules/combined-stream": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", @@ -549,6 +910,59 @@ "node": ">= 0.8" } }, + "node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": 
"sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-2.0.0.tgz", + "integrity": "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A==", + "dev": true, + "engines": [ + "node >= 6.0" + ], + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.0.2", + "typedarray": "^0.0.6" + } + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", + "dev": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0" + }, + "engines": { + "node": ">=0.10" + } + }, "node_modules/debug": { "version": "4.3.4", "resolved": "https://registry.npmmirror.com/debug/-/debug-4.3.4.tgz", @@ -594,6 +1008,21 @@ "node": ">=6" } }, + "node_modules/didyoumean2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/didyoumean2/-/didyoumean2-4.2.0.tgz", + "integrity": "sha512-o8KZ9RERbXaPgvXklxuLwD4RotaV5trShsNXaA/y1h5e4u6qmtv5I6enJsst9l8R1b/eqFQFwfPAiTf+FgHAQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.2", + "leven": "^3.1.0", + "lodash.deburr": "^4.1.0" + }, + "engines": { + "node": ">=10.13" + } + }, "node_modules/diff": { "version": "5.0.0", "resolved": "https://registry.npmmirror.com/diff/-/diff-5.0.0.tgz", @@ -602,6 +1031,17 @@ "node": ">=0.3.1" } 
}, + "node_modules/ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, "node_modules/ecdsa-sig-formatter": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", @@ -610,6 +1050,23 @@ "safe-buffer": "^5.0.1" } }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, "node_modules/escape-string-regexp": { "version": "5.0.0", "resolved": "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", @@ -618,6 +1075,20 @@ "node": ">=12" } }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/estree-util-is-identifier-name": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-2.0.0.tgz", @@ -640,6 +1111,37 @@ "url": 
"https://opencollective.com/unified" } }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==", + "dev": true, + "engines": [ + "node >=0.6.0" + ], + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, "node_modules/fault": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/fault/-/fault-2.0.1.tgz", @@ -648,6 +1150,66 @@ "format": "^0.2.0" } }, + "node_modules/figgy-pudding": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz", + "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw==", + "deprecated": "This module is no longer supported.", + "dev": true, + "license": "ISC" + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": 
"sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/figures/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/follow-redirects": { "version": "1.15.2", "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", @@ -667,6 +1229,16 @@ } } }, + "node_modules/forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, "node_modules/form-data": { "version": "4.0.0", "resolved": 
"https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", @@ -693,6 +1265,38 @@ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "dev": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0" + } + }, + "node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==", + "dev": true, + "license": "ISC" + }, "node_modules/glob": { "version": "8.0.3", "resolved": "https://registry.npmjs.org/glob/-/glob-8.0.3.tgz", @@ -711,16 +1315,90 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + 
"dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, "engines": { - "node": ">=8" + "node": ">= 6" } }, - "node_modules/inflight": { - "version": "1.0.6", + "node_modules/har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=4" + } + }, + "node_modules/har-validator": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", + "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", + "deprecated": "this library is no longer supported", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.3", + "har-schema": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + }, + "engines": { + "node": ">=0.8", + "npm": ">=1.3.7" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": 
{ + "node": ">= 4" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/inflight": { + "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "dependencies": { @@ -733,6 +1411,23 @@ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-absolute-url": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz", + "integrity": "sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/is-alphabetical": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/is-alphabetical/-/is-alphabetical-2.0.1.tgz", @@ -747,6 +1442,26 @@ "is-decimal": "^2.0.0" } }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/is-buffer": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", @@ -774,11 +1489,71 @@ "resolved": "https://registry.npmmirror.com/is-decimal/-/is-decimal-2.0.1.tgz", "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==" }, + "node_modules/is-empty": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-empty/-/is-empty-1.2.0.tgz", + "integrity": "sha512-F2FnH/otLNJv0J6wc73A5Xo7oHLNnqplYqZhUu01tD54DIPvxIRSTSLkrUB/M0nHO4vo1O9PDfN4KoTxCzLh/w==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, 
"node_modules/is-hexadecimal": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==" }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/is-plain-object": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", @@ -787,6 +1562,115 @@ "node": ">=0.10.0" } }, + "node_modules/is-relative-url": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-relative-url/-/is-relative-url-3.0.0.tgz", + "integrity": "sha512-U1iSYRlY2GIMGuZx7gezlB5dp1Kheaym7zKzO1PV06mOihiWTXejLwm4poEJysPyXF+HtK/BEd0DVlcCh30pEA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-absolute-url": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==", + "dev": true, + "license": "MIT" + }, + "node_modules/isemail": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/isemail/-/isemail-3.2.0.tgz", + "integrity": 
"sha512-zKqkK+O+dGqevc93KNsbZ/TqTUFd46MwWjYOoMrjIMZ51eU7DtQG3Wmd9SQQT7i7RVnuTPEiYEWHU3MSbxC1Tg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "punycode": "2.x.x" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": 
"sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==", + "dev": true, + "license": "(AFL-2.1 OR BSD-3-Clause)" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", + "dev": true, + "license": "ISC" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/jsonwebtoken": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.1.tgz", @@ -802,6 +1686,22 @@ "npm": ">=6" } }, + "node_modules/jsprim": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", + "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.4.0", + "verror": "1.10.0" + }, + "engines": { + "node": ">=0.6.0" + } + }, "node_modules/jwa": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", @@ -829,11 +1729,90 @@ "node": ">=6" } }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + 
"integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/libnpmconfig": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/libnpmconfig/-/libnpmconfig-1.2.1.tgz", + "integrity": "sha512-9esX8rTQAHqarx6qeZqmGQKBNZR5OIbl/Ayr0qQDy3oXja2iFVQQI81R6GZ2a02bSNZ9p3YOGX1O6HHCb1X7kA==", + "deprecated": "This module is not used anymore. npm config is parsed by npm itself and by @npmcli/config", + "dev": true, + "license": "ISC", + "dependencies": { + "figgy-pudding": "^3.5.1", + "find-up": "^3.0.0", + "ini": "^1.3.5" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/link-check": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/link-check/-/link-check-4.5.4.tgz", + "integrity": "sha512-VdjiYrIBNHtqH7NEvIlF/4i0V9xQWkoBry+65DtmmyKyD5qBZ2U9fCJYx75SI5Ms4ILJzGlNNojPKbPMpg5Spg==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-relative-url": "^3.0.0", + "isemail": "^3.2.0", + "ms": "^2.1.2", + "request": "^2.88.2" + } + }, + "node_modules/load-plugin": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/load-plugin/-/load-plugin-3.0.0.tgz", + "integrity": "sha512-od7eKCCZ62ITvFf8nHHrIiYmgOHb4xVNDRDqxBWSaao5FZyyZVX8OmRCbwjDGPrSrgIulwPNyBsWCGnhiDC0oQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "libnpmconfig": "^1.0.0", + "resolve-from": "^5.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": 
"sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/lodash": { "version": "4.17.21", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, + "node_modules/lodash.deburr": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/lodash.deburr/-/lodash.deburr-4.1.0.tgz", + "integrity": "sha512-m/M1U1f3ddMCs6Hq2tAsYThTBDaAKFDX3dwDo97GEYzamXi9SqUpjWi/Rrj/gf3X2n8ktwgZrlP1z6E3v/IExQ==", + "dev": true, + "license": "MIT" + }, "node_modules/longest-streak": { "version": "3.0.1", "resolved": "https://registry.npmmirror.com/longest-streak/-/longest-streak-3.0.1.tgz", @@ -847,11 +1826,75 @@ "node": "14 || >=16.14" } }, + "node_modules/markdown-extensions": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-1.1.1.tgz", + "integrity": "sha512-WWC0ZuMzCyDHYCasEGs4IPvLyTGftYwh6wIEOULOF0HXcqZlhwRzrK0w2VUlxWA98xnvb/jszw4ZSkJ6ADpM6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/markdown-link-check": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/markdown-link-check/-/markdown-link-check-3.8.1.tgz", + "integrity": "sha512-R6k8ytdJZePDAdb8NT0NvrNvu6n25IwLPIoJ4guHWC5yqyTlnUpRT7j3XE4ioBXwqOhG/LlUcuckD621kZkl4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "async": "^3.2.0", + "chalk": "^4.0.0", + "commander": "^5.0.0", + "link-check": "^4.5.0", + "lodash": "^4.17.15", + "markdown-link-extractor": "^1.2.3", + "progress": "^2.0.3", + "request": "^2.88.2" + }, + "bin": { + "markdown-link-check": "markdown-link-check" + } + }, + "node_modules/markdown-link-extractor": { + "version": "1.3.1", + "resolved": 
"https://registry.npmjs.org/markdown-link-extractor/-/markdown-link-extractor-1.3.1.tgz", + "integrity": "sha512-IosNBtHXplzEq2n9WoSi83LNLCWgLnb+8Xq379Ct5xrLLzmqPUtc+A1oqo6Sd32YfKus9uLedFNSwFK1sCzoNQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "marked": "^4.0.10" + } + }, "node_modules/markdown-table": { "version": "3.0.2", "resolved": "https://registry.npmmirror.com/markdown-table/-/markdown-table-3.0.2.tgz", "integrity": "sha512-y8j3a5/DkJCmS5x4dMCQL+OR0+2EAq3DOtio1COSHsmW2BGXnNCK3v12hJt1LrUz5iZH5g0LmuYOjDdI+czghA==" }, + "node_modules/marked": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/marked/-/marked-4.3.0.tgz", + "integrity": "sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==", + "dev": true, + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/mdast-comment-marker": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/mdast-comment-marker/-/mdast-comment-marker-1.1.2.tgz", + "integrity": "sha512-vTFXtmbbF3rgnTh3Zl3irso4LtvwUq/jaDvT2D1JqTGAwaipcS7RpTxzi6KjoRqI9n2yuAhzLDAC8xVTF3XYVQ==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/mdast-util-find-and-replace": { "version": "2.1.0", "resolved": "https://registry.npmmirror.com/mdast-util-find-and-replace/-/mdast-util-find-and-replace-2.1.0.tgz", @@ -1512,6 +2555,16 @@ "node": ">=10" } }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/mri": { "version": "1.2.0", "resolved": "https://registry.npmmirror.com/mri/-/mri-1.2.0.tgz", @@ -1525,6 
+2578,26 @@ "resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/oauth-sign": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", + "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, "node_modules/octokit": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/octokit/-/octokit-3.1.0.tgz", @@ -1553,6 +2626,45 @@ "wrappy": "1" } }, + "node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": 
"sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/parse-entities": { "version": "4.0.0", "resolved": "https://registry.npmmirror.com/parse-entities/-/parse-entities-4.0.0.tgz", @@ -1568,44 +2680,664 @@ "is-hexadecimal": "^2.0.0" } }, - "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" - }, - "node_modules/sade": { - "version": "1.8.1", - "resolved": "https://registry.npmmirror.com/sade/-/sade-1.8.1.tgz", - "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "license": "MIT", "dependencies": { - "mri": "^1.1.0" + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" }, "engines": { - "node": ">=6" + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] + "node_modules/path-exists": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } }, - "node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" 
+ } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/psl": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", + "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "funding": { + "url": "https://github.com/sponsors/lupomontero" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/qs": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", + "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/remark": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/remark/-/remark-13.0.0.tgz", + "integrity": "sha512-HDz1+IKGtOyWN+QgBiAT0kn+2s6ovOxHyPAFGKVE81VSzJ+mq7RwHFledEvB5F1p4iJvOah/LOKdFuzvRnNLCA==", + "dev": true, + "license": "MIT", + "dependencies": { + "remark-parse": "^9.0.0", + "remark-stringify": "^9.0.0", + "unified": "^9.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-cli": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/remark-cli/-/remark-cli-9.0.0.tgz", + "integrity": "sha512-y6kCXdwZoMoh0Wo4Och1tDW50PmMc86gW6GpF08v9d+xUCEJE2wwXdQ+TnTaUamRnfFdU+fE+eNf2PJ53cyq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "markdown-extensions": "^1.1.0", + "remark": "^13.0.0", + "unified-args": "^8.0.0" + }, + "bin": { + "remark": "cli.js" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-lint": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/remark-lint/-/remark-lint-8.0.0.tgz", + "integrity": "sha512-ESI8qJQ/TIRjABDnqoFsTiZntu+FRifZ5fJ77yX63eIDijl/arvmDvT+tAf75/Nm5BFL4R2JFUtkHRGVjzYUsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "remark-message-control": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-message-control": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/remark-message-control/-/remark-message-control-6.0.0.tgz", + "integrity": "sha512-k9bt7BYc3G7YBdmeAhvd3VavrPa/XlKWR3CyHjr4sLO9xJyly8WHHT3Sp+8HPR8lEUv+/sZaffL7IjMLV0f6BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdast-comment-marker": "^1.0.0", + "unified-message-control": "^3.0.0" + }, + "funding": { + "type": "opencollective", + 
"url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-9.0.0.tgz", + "integrity": "sha512-geKatMwSzEXKHuzBNU1z676sGcDcFoChMK38TgdHJNAYfFtsfHDQG7MoJAjs6sgYMqyLduCYWDIWZIxiPeafEw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^0.8.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse/node_modules/character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-parse/node_modules/character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-parse/node_modules/character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-parse/node_modules/is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": 
"sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-parse/node_modules/is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-parse/node_modules/is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-parse/node_modules/is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-parse/node_modules/mdast-util-from-markdown": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-0.8.5.tgz", + "integrity": "sha512-2hkTXtYYnr+NubD/g6KGBS/0mFmBcifAsI0yIWRiRo0PjVs6SSOSOdtzbp6kSGnShDN6G5aWZpKQ2lWRy27mWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-string": "^2.0.0", + "micromark": "~2.11.0", + 
"parse-entities": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse/node_modules/mdast-util-to-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", + "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse/node_modules/micromark": { + "version": "2.11.4", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-2.11.4.tgz", + "integrity": "sha512-+WoovN/ppKolQOFIAajxi7Lu9kInbPxFuTBVEavFcL8eAfVstoc5MocPmqBeAdBOJV00uaVjegzH4+MA0DN/uA==", + "dev": true, + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "debug": "^4.0.0", + "parse-entities": "^2.0.0" + } + }, + "node_modules/remark-parse/node_modules/parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-parse/node_modules/unist-util-stringify-position": { + "version": "2.0.3", + "resolved": 
"https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-9.0.1.tgz", + "integrity": "sha512-mWmNg3ZtESvZS8fv5PTvaPckdL4iNlCHTt8/e/8oN08nArHRHjNZMKzA/YW3+p7/lYqIw4nx1XsjCBo/AxNChg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdast-util-to-markdown": "^0.6.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify/node_modules/character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-stringify/node_modules/character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-stringify/node_modules/character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": 
"sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-stringify/node_modules/is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-stringify/node_modules/is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-stringify/node_modules/is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-stringify/node_modules/is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + 
"node_modules/remark-stringify/node_modules/longest-streak": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-2.0.4.tgz", + "integrity": "sha512-vM6rUVCVUJJt33bnmHiZEvr7wPT78ztX7rojL+LW51bHtLh6HTjx84LA5W4+oa6aKEJA7jJu5LR6vQRBpA5DVg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-stringify/node_modules/mdast-util-to-markdown": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-0.6.5.tgz", + "integrity": "sha512-XeV9sDE7ZlOQvs45C9UKMtfTcctcaj/pGwH8YLbMHoMOXNNCn2LsqVQOqrF1+/NU8lKDAqozme9SCXWyo9oAcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "longest-streak": "^2.0.0", + "mdast-util-to-string": "^2.0.0", + "parse-entities": "^2.0.0", + "repeat-string": "^1.0.0", + "zwitch": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify/node_modules/mdast-util-to-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", + "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify/node_modules/parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": 
"^1.0.0", + "is-hexadecimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/remark-stringify/node_modules/zwitch": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz", + "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/request": { + "version": "2.88.2", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", + "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", + "deprecated": "request has been deprecated, see https://github.com/request/request/issues/3142", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.3", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.5.0", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/request/node_modules/form-data": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", + "integrity": 
"sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 0.12" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/sade": { + "version": "1.8.1", + "resolved": "https://registry.npmmirror.com/sade/-/sade-1.8.1.tgz", + "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", + "dependencies": { + "mri": "^1.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dependencies": { "lru-cache": "^6.0.0" @@ -1617,15 +3349,80 @@ 
"node": ">=10" } }, - "node_modules/semver/node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/sliced": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/sliced/-/sliced-1.0.1.tgz", + "integrity": "sha512-VZBmZP8WU3sMOZm1bdgTadsQbcscK0UM8oKxKVBs4XAhUo2Xxzm/OFMGBkPusxw9xL3Uy8LrzEqGqJhclsr0yA==", + "dev": true, + "license": "MIT" + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/sshpk": { + "version": "1.18.0", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz", + "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + }, + "bin": { + "sshpk-conv": "bin/sshpk-conv", + "sshpk-sign": "bin/sshpk-sign", + "sshpk-verify": "bin/sshpk-verify" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", "dependencies": { - "yallist": "^4.0.0" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" }, "engines": { - "node": ">=10" + "node": ">=8" } }, "node_modules/stringify-entities": { @@ -1637,6 +3434,465 @@ "character-entities-legacy": "^3.0.0" } }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/to-vfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/to-vfile/-/to-vfile-6.1.0.tgz", + "integrity": "sha512-BxX8EkCxOAZe+D/ToHdDsJcVI4HqQfmw0tCkp31zf3dNP/XWIAjU4CmeuSwsSoOzOTqHPOL0KUzyZqJplkD0Qw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-buffer": "^2.0.0", + "vfile": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/to-vfile/node_modules/unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/to-vfile/node_modules/vfile": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", + "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/to-vfile/node_modules/vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": 
"sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/tough-cookie": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", + "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "psl": "^1.1.28", + "punycode": "^2.1.1" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/trough": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz", + "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==", + "dev": true, + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==", + "dev": true, + "license": "Unlicense" + }, + "node_modules/typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": 
"sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/unified": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", + "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified-args": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/unified-args/-/unified-args-8.1.0.tgz", + "integrity": "sha512-t1HPS1cQPsVvt/6EtyWIbQGurza5684WGRigNghZRvzIdHm3LPgMdXPyGx0npORKzdiy5+urkF0rF5SXM8lBuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase": "^5.0.0", + "chalk": "^3.0.0", + "chokidar": "^3.0.0", + "fault": "^1.0.2", + "json5": "^2.0.0", + "minimist": "^1.2.0", + "text-table": "^0.2.0", + "unified-engine": "^8.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified-args/node_modules/chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/unified-args/node_modules/fault": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", + "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/unified-engine": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/unified-engine/-/unified-engine-8.2.0.tgz", + "integrity": "sha512-ZlMm62ejrf+tJHdyOjQfljszngQjRor95q2XZMGk6rpJUYi7ZIHY/EXEhOcj9PZkMKKdLIM+dqL4s0ceyk9wbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "concat-stream": "^2.0.0", + "debug": "^4.0.0", + "fault": "^1.0.0", + "figures": "^3.0.0", + "glob": "^7.0.3", + "ignore": "^5.0.0", + "is-buffer": "^2.0.0", + "is-empty": "^1.0.0", + "is-plain-obj": "^2.0.0", + "js-yaml": "^3.6.1", + "load-plugin": "^3.0.0", + "parse-json": "^5.0.0", + "to-vfile": "^6.0.0", + "trough": "^1.0.0", + "unist-util-inspect": "^5.0.0", + "vfile-reporter": "^6.0.0", + "vfile-statistics": "^1.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified-engine/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/unified-engine/node_modules/fault": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", + "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", + "dev": true, + "license": "MIT", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/unified-engine/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": 
"sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/unified-engine/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/unified-lint-rule": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/unified-lint-rule/-/unified-lint-rule-1.0.6.tgz", + "integrity": "sha512-YPK15YBFwnsVorDFG/u0cVVQN5G2a3V8zv5/N6KN3TCG+ajKtaALcy7u14DCSrJI+gZeyYquFL9cioJXOGXSvg==", + "dev": true, + "license": "MIT", + "dependencies": { + "wrapped": "^1.0.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified-message-control": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/unified-message-control/-/unified-message-control-3.0.3.tgz", + "integrity": "sha512-oY5z2n8ugjpNHXOmcgrw0pQeJzavHS0VjPBP21tOcm7rc2C+5Q+kW9j5+gqtf8vfW/8sabbsK5+P+9QPwwEHDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "unist-util-visit": "^2.0.0", + "vfile-location": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/unified-message-control/node_modules/unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified-message-control/node_modules/unist-util-visit": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", + "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0", + "unist-util-visit-parents": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified-message-control/node_modules/unist-util-visit-parents": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", + "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified-message-control/node_modules/vfile-location": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz", + "integrity": "sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/unified/node_modules/unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified/node_modules/vfile": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", + "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified/node_modules/vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-inspect": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/unist-util-inspect/-/unist-util-inspect-5.0.1.tgz", + "integrity": "sha512-fPNWewS593JSmg49HbnE86BJKuBi1/nMWhDSccBvbARfxezEuJV85EaARR9/VplveiwCoLm2kWq+DhP8TBaDpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-empty": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, 
"node_modules/unist-util-is": { "version": "5.1.1", "resolved": "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-5.1.1.tgz", @@ -1708,11 +3964,39 @@ "jsonwebtoken": "^9.0.0" } }, - "node_modules/universal-user-agent": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", - "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==" - }, + "node_modules/universal-user-agent": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", + "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==" + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. 
See https://v8.dev/blog/math-random for details.", + "dev": true, + "license": "MIT", + "bin": { + "uuid": "bin/uuid" + } + }, "node_modules/uvu": { "version": "0.5.3", "resolved": "https://registry.npmmirror.com/uvu/-/uvu-0.5.3.tgz", @@ -1730,6 +4014,21 @@ "node": ">=8" } }, + "node_modules/verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", + "dev": true, + "engines": [ + "node >=0.6.0" + ], + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, "node_modules/vfile": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.2.tgz", @@ -1767,6 +4066,95 @@ "unist-util-stringify-position": "^3.0.0" } }, + "node_modules/vfile-reporter": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/vfile-reporter/-/vfile-reporter-6.0.2.tgz", + "integrity": "sha512-GN2bH2gs4eLnw/4jPSgfBjo+XCuvnX9elHICJZjVD4+NM0nsUrMTvdjGY5Sc/XG69XVTgLwj7hknQVc6M9FukA==", + "dev": true, + "license": "MIT", + "dependencies": { + "repeat-string": "^1.5.0", + "string-width": "^4.0.0", + "supports-color": "^6.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-sort": "^2.1.2", + "vfile-statistics": "^1.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-reporter/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/vfile-reporter/node_modules/supports-color": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", + 
"integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/vfile-reporter/node_modules/unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-sort": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/vfile-sort/-/vfile-sort-2.2.2.tgz", + "integrity": "sha512-tAyUqD2R1l/7Rn7ixdGkhXLD3zsg+XLAeUDUhXearjfIcpL1Hcsj5hHpCoy/gvfK/Ws61+e972fm0F7up7hfYA==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-statistics": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/vfile-statistics/-/vfile-statistics-1.1.4.tgz", + "integrity": "sha512-lXhElVO0Rq3frgPvFBwahmed3X03vjPF8OcjKMy8+F1xU/3Q3QU3tKEDp743SFtb74PdF0UWpxPvtOP0GCLheA==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/wrapped": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wrapped/-/wrapped-1.0.1.tgz", + "integrity": "sha512-ZTKuqiTu3WXtL72UKCCnQLRax2IScKH7oQ+mvjbpvNE+NJxIWIemDqqM2GxNr4N16NCjOYpIgpin5pStM7kM5g==", + "dev": true, + "license": "MIT", + "dependencies": { + "co": "3.1.0", + "sliced": "^1.0.1" + } + }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -1784,6 +4172,79 @@ } 
}, "dependencies": { + "@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + } + }, + "@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true + }, + "@babel/runtime": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.6.tgz", + "integrity": "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==", + "dev": true + }, + "@breeswish-org/remark-lint-pingcap-docs-anchor": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@breeswish-org/remark-lint-pingcap-docs-anchor/-/remark-lint-pingcap-docs-anchor-1.1.2.tgz", + "integrity": "sha512-MGcmkEdXWAfHWusuIj4qRG6HXKICCGFK7ohp0mWEtwEXrMYeDeQt44rTVTC/HCaTi9qR9kv9pBopb+YIHemdrA==", + "dev": true, + "requires": { + "didyoumean2": "^4.1.0", + "github-slugger": "^1.4.0", + "mdast-util-to-string": "^2.0.0", + "remark": "^13.0.0", + "unified": "^9.2.2", + "unified-lint-rule": "^1.0.5", + "unist-util-visit": "^2.0.3" + }, + "dependencies": { + "mdast-util-to-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", + "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==", + "dev": true + }, + "unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": 
"sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "dev": true + }, + "unist-util-visit": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", + "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0", + "unist-util-visit-parents": "^3.0.0" + } + }, + "unist-util-visit-parents": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", + "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0" + } + } + } + }, "@octokit/app": { "version": "14.0.0", "resolved": "https://registry.npmjs.org/@octokit/app/-/app-14.0.0.tgz", @@ -2127,11 +4588,90 @@ "indent-string": "^4.0.0" } }, + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "requires": { + "color-convert": "^2.0.1" + } + }, + "anymatch": 
{ + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "requires": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + } + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "asn1": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "dev": true, + "requires": { + "safer-buffer": "~2.1.0" + } + }, + "assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "dev": true + }, + "async": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "dev": true + }, "asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, + "aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==", + "dev": true + }, + "aws4": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.13.2.tgz", + "integrity": 
"sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==", + "dev": true + }, "axios": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/axios/-/axios-1.4.0.tgz", @@ -2142,16 +4682,37 @@ "proxy-from-env": "^1.1.0" } }, + "bail": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz", + "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==", + "dev": true + }, "balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, + "bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "dev": true, + "requires": { + "tweetnacl": "^0.14.3" + } + }, "before-after-hook": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz", "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==" }, + "binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true + }, "bottleneck": { "version": "2.19.5", "resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz", @@ -2165,6 +4726,15 @@ "balanced-match": "^1.0.0" } }, + "braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "requires": { + "fill-range": 
"^7.1.1" + } + }, "btoa-lite": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/btoa-lite/-/btoa-lite-1.0.0.tgz", @@ -2175,11 +4745,39 @@ "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==" }, + "buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true + }, + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==", + "dev": true + }, "ccount": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/ccount/-/ccount-2.0.1.tgz", "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==" }, + "chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, "character-entities": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/character-entities/-/character-entities-2.0.1.tgz", @@ -2200,11 +4798,48 @@ "resolved": "https://registry.npmmirror.com/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", "integrity": 
"sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==" }, + "chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "requires": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "fsevents": "~2.3.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + } + }, "clean-stack": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==" }, + "co": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/co/-/co-3.1.0.tgz", + "integrity": "sha512-CQsjCRiNObI8AtTsNIBDRMQ4oMR83CzEswHYahClvul7gKk+lDQiOKv+5qh7LQWf5sh6jkZNispz/QlsZxyNgA==", + "dev": true + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, "combined-stream": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", @@ -2213,6 +4848,45 @@ "delayed-stream": "~1.0.0" } }, + "commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + 
"dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "concat-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-2.0.0.tgz", + "integrity": "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.0.2", + "typedarray": "^0.0.6" + } + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==", + "dev": true + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", + "dev": true, + "requires": { + "assert-plus": "^1.0.0" + } + }, "debug": { "version": "4.3.4", "resolved": "https://registry.npmmirror.com/debug/-/debug-4.3.4.tgz", @@ -2244,11 +4918,32 @@ "resolved": "https://registry.npmmirror.com/dequal/-/dequal-2.0.2.tgz", "integrity": "sha512-q9K8BlJVxK7hQYqa6XISGmBZbtQQWVXSrRrWreHC94rMt1QL/Impruc+7p2CYSYuVIUr+YCt6hjrs1kkdJRTug==" }, + "didyoumean2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/didyoumean2/-/didyoumean2-4.2.0.tgz", + "integrity": "sha512-o8KZ9RERbXaPgvXklxuLwD4RotaV5trShsNXaA/y1h5e4u6qmtv5I6enJsst9l8R1b/eqFQFwfPAiTf+FgHAQQ==", + "dev": true, + "requires": { + "@babel/runtime": "^7.10.2", + "leven": "^3.1.0", + "lodash.deburr": "^4.1.0" + } + }, "diff": { "version": "5.0.0", "resolved": "https://registry.npmmirror.com/diff/-/diff-5.0.0.tgz", "integrity": 
"sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==" }, + "ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", + "dev": true, + "requires": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, "ecdsa-sig-formatter": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", @@ -2257,11 +4952,32 @@ "safe-buffer": "^5.0.1" } }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "requires": { + "is-arrayish": "^0.2.1" + } + }, "escape-string-regexp": { "version": "5.0.0", "resolved": "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==" }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true + }, "estree-util-is-identifier-name": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-2.0.0.tgz", @@ -2276,6 +4992,30 @@ "@types/unist": "^2.0.0" } }, + "extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + 
"integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "dev": true + }, + "extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==", + "dev": true + }, + "fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, "fault": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/fault/-/fault-2.0.1.tgz", @@ -2284,11 +5024,58 @@ "format": "^0.2.0" } }, + "figgy-pudding": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz", + "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw==", + "dev": true + }, + "figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "requires": { + "escape-string-regexp": "^1.0.5" + }, + "dependencies": { + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true + } + } + }, + "fill-range": { + "version": "7.1.1", 
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "dev": true, + "requires": { + "locate-path": "^3.0.0" + } + }, "follow-redirects": { "version": "1.15.2", "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==" }, + "forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==", + "dev": true + }, "form-data": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", @@ -2309,6 +5096,28 @@ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" }, + "fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "optional": true + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "dev": true, + "requires": { + "assert-plus": "^1.0.0" + } + }, + "github-slugger": { + "version": "1.5.0", + "resolved": 
"https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==", + "dev": true + }, "glob": { "version": "8.0.3", "resolved": "https://registry.npmjs.org/glob/-/glob-8.0.3.tgz", @@ -2321,6 +5130,54 @@ "once": "^1.3.0" } }, + "glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "requires": { + "is-glob": "^4.0.1" + } + }, + "har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==", + "dev": true + }, + "har-validator": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", + "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", + "dev": true, + "requires": { + "ajv": "^6.12.3", + "har-schema": "^2.0.0" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==", + "dev": true, + "requires": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + } + }, + "ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": 
"sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true + }, "indent-string": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", @@ -2340,6 +5197,18 @@ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, + "ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true + }, + "is-absolute-url": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz", + "integrity": "sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==", + "dev": true + }, "is-alphabetical": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/is-alphabetical/-/is-alphabetical-2.0.1.tgz", @@ -2354,6 +5223,21 @@ "is-decimal": "^2.0.0" } }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "requires": { + "binary-extensions": "^2.0.0" + } + }, "is-buffer": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", @@ -2364,16 +5248,137 @@ "resolved": "https://registry.npmmirror.com/is-decimal/-/is-decimal-2.0.1.tgz", "integrity": 
"sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==" }, + "is-empty": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-empty/-/is-empty-1.2.0.tgz", + "integrity": "sha512-F2FnH/otLNJv0J6wc73A5Xo7oHLNnqplYqZhUu01tD54DIPvxIRSTSLkrUB/M0nHO4vo1O9PDfN4KoTxCzLh/w==", + "dev": true + }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true + }, + "is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "requires": { + "is-extglob": "^2.1.1" + } + }, "is-hexadecimal": { "version": "2.0.1", "resolved": "https://registry.npmmirror.com/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==" }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true + }, + "is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true + }, "is-plain-object": { "version": "5.0.0", "resolved": 
"https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==" }, + "is-relative-url": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-relative-url/-/is-relative-url-3.0.0.tgz", + "integrity": "sha512-U1iSYRlY2GIMGuZx7gezlB5dp1Kheaym7zKzO1PV06mOihiWTXejLwm4poEJysPyXF+HtK/BEd0DVlcCh30pEA==", + "dev": true, + "requires": { + "is-absolute-url": "^3.0.0" + } + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==", + "dev": true + }, + "isemail": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/isemail/-/isemail-3.2.0.tgz", + "integrity": "sha512-zKqkK+O+dGqevc93KNsbZ/TqTUFd46MwWjYOoMrjIMZ51eU7DtQG3Wmd9SQQT7i7RVnuTPEiYEWHU3MSbxC1Tg==", + "dev": true, + "requires": { + "punycode": "2.x.x" + } + }, + "isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==", + "dev": true + }, + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "js-yaml": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": 
"sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==", + "dev": true + }, + "json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "json-schema": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==", + "dev": true + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", + "dev": true + }, + "json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true + }, "jsonwebtoken": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.1.tgz", @@ -2385,6 +5390,18 @@ "semver": "^7.3.8" } }, + "jsprim": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", + "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.4.0", + "verror": "1.10.0" + } + }, 
"jwa": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", @@ -2409,11 +5426,72 @@ "resolved": "https://registry.npmmirror.com/kleur/-/kleur-4.1.4.tgz", "integrity": "sha512-8QADVssbrFjivHWQU7KkMgptGTl6WAcSdlbBPY4uNF+mWr6DGcKrvY2w4FQJoXch7+fKMjj0dRrL75vk3k23OA==" }, + "leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true + }, + "libnpmconfig": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/libnpmconfig/-/libnpmconfig-1.2.1.tgz", + "integrity": "sha512-9esX8rTQAHqarx6qeZqmGQKBNZR5OIbl/Ayr0qQDy3oXja2iFVQQI81R6GZ2a02bSNZ9p3YOGX1O6HHCb1X7kA==", + "dev": true, + "requires": { + "figgy-pudding": "^3.5.1", + "find-up": "^3.0.0", + "ini": "^1.3.5" + } + }, + "lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "link-check": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/link-check/-/link-check-4.5.4.tgz", + "integrity": "sha512-VdjiYrIBNHtqH7NEvIlF/4i0V9xQWkoBry+65DtmmyKyD5qBZ2U9fCJYx75SI5Ms4ILJzGlNNojPKbPMpg5Spg==", + "dev": true, + "requires": { + "is-relative-url": "^3.0.0", + "isemail": "^3.2.0", + "ms": "^2.1.2", + "request": "^2.88.2" + } + }, + "load-plugin": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/load-plugin/-/load-plugin-3.0.0.tgz", + "integrity": "sha512-od7eKCCZ62ITvFf8nHHrIiYmgOHb4xVNDRDqxBWSaao5FZyyZVX8OmRCbwjDGPrSrgIulwPNyBsWCGnhiDC0oQ==", + "dev": true, + "requires": { + "libnpmconfig": "^1.0.0", + "resolve-from": "^5.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": 
"sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "dev": true, + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, "lodash": { "version": "4.17.21", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, + "lodash.deburr": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/lodash.deburr/-/lodash.deburr-4.1.0.tgz", + "integrity": "sha512-m/M1U1f3ddMCs6Hq2tAsYThTBDaAKFDX3dwDo97GEYzamXi9SqUpjWi/Rrj/gf3X2n8ktwgZrlP1z6E3v/IExQ==", + "dev": true + }, "longest-streak": { "version": "3.0.1", "resolved": "https://registry.npmmirror.com/longest-streak/-/longest-streak-3.0.1.tgz", @@ -2424,11 +5502,54 @@ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.0.0.tgz", "integrity": "sha512-svTf/fzsKHffP42sujkO/Rjs37BCIsQVRCeNYIm9WN8rgT7ffoUnRtZCqU+6BqcSBdv8gwJeTz8knJpgACeQMw==" }, + "markdown-extensions": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-1.1.1.tgz", + "integrity": "sha512-WWC0ZuMzCyDHYCasEGs4IPvLyTGftYwh6wIEOULOF0HXcqZlhwRzrK0w2VUlxWA98xnvb/jszw4ZSkJ6ADpM6Q==", + "dev": true + }, + "markdown-link-check": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/markdown-link-check/-/markdown-link-check-3.8.1.tgz", + "integrity": "sha512-R6k8ytdJZePDAdb8NT0NvrNvu6n25IwLPIoJ4guHWC5yqyTlnUpRT7j3XE4ioBXwqOhG/LlUcuckD621kZkl4w==", + "dev": true, + "requires": { + "async": "^3.2.0", + "chalk": "^4.0.0", + "commander": "^5.0.0", + "link-check": "^4.5.0", + "lodash": "^4.17.15", + "markdown-link-extractor": "^1.2.3", + "progress": "^2.0.3", + "request": "^2.88.2" + } + }, + "markdown-link-extractor": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/markdown-link-extractor/-/markdown-link-extractor-1.3.1.tgz", + "integrity": 
"sha512-IosNBtHXplzEq2n9WoSi83LNLCWgLnb+8Xq379Ct5xrLLzmqPUtc+A1oqo6Sd32YfKus9uLedFNSwFK1sCzoNQ==", + "dev": true, + "requires": { + "marked": "^4.0.10" + } + }, "markdown-table": { "version": "3.0.2", "resolved": "https://registry.npmmirror.com/markdown-table/-/markdown-table-3.0.2.tgz", "integrity": "sha512-y8j3a5/DkJCmS5x4dMCQL+OR0+2EAq3DOtio1COSHsmW2BGXnNCK3v12hJt1LrUz5iZH5g0LmuYOjDdI+czghA==" }, + "marked": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/marked/-/marked-4.3.0.tgz", + "integrity": "sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==", + "dev": true + }, + "mdast-comment-marker": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/mdast-comment-marker/-/mdast-comment-marker-1.1.2.tgz", + "integrity": "sha512-vTFXtmbbF3rgnTh3Zl3irso4LtvwUq/jaDvT2D1JqTGAwaipcS7RpTxzi6KjoRqI9n2yuAhzLDAC8xVTF3XYVQ==", + "dev": true + }, "mdast-util-find-and-replace": { "version": "2.1.0", "resolved": "https://registry.npmmirror.com/mdast-util-find-and-replace/-/mdast-util-find-and-replace-2.1.0.tgz", @@ -3026,6 +6147,12 @@ "brace-expansion": "^2.0.1" } }, + "minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true + }, "mri": { "version": "1.2.0", "resolved": "https://registry.npmmirror.com/mri/-/mri-1.2.0.tgz", @@ -3036,6 +6163,18 @@ "resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, + "normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true + }, + "oauth-sign": { + "version": "0.9.0", + 
"resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", + "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", + "dev": true + }, "octokit": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/octokit/-/octokit-3.1.0.tgz", @@ -3058,9 +6197,33 @@ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "requires": { - "wrappy": "1" + "wrappy": "1" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "dev": true, + "requires": { + "p-limit": "^2.0.0" } }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, "parse-entities": { "version": "4.0.0", "resolved": "https://registry.npmmirror.com/parse-entities/-/parse-entities-4.0.0.tgz", @@ -3076,11 +6239,406 @@ "is-hexadecimal": "^2.0.0" } }, + "parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + } + }, + "path-exists": { + "version": 
"3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true + }, + "performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==", + "dev": true + }, + "picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true + }, + "picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true + }, + "progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true + }, "proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" }, + "psl": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", + "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", + "dev": true, + "requires": { + "punycode": 
"^2.3.1" + } + }, + "punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true + }, + "qs": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", + "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==", + "dev": true + }, + "readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + }, + "readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "requires": { + "picomatch": "^2.2.1" + } + }, + "remark": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/remark/-/remark-13.0.0.tgz", + "integrity": "sha512-HDz1+IKGtOyWN+QgBiAT0kn+2s6ovOxHyPAFGKVE81VSzJ+mq7RwHFledEvB5F1p4iJvOah/LOKdFuzvRnNLCA==", + "dev": true, + "requires": { + "remark-parse": "^9.0.0", + "remark-stringify": "^9.0.0", + "unified": "^9.1.0" + } + }, + "remark-cli": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/remark-cli/-/remark-cli-9.0.0.tgz", + "integrity": "sha512-y6kCXdwZoMoh0Wo4Och1tDW50PmMc86gW6GpF08v9d+xUCEJE2wwXdQ+TnTaUamRnfFdU+fE+eNf2PJ53cyq8g==", + "dev": true, + "requires": { + "markdown-extensions": "^1.1.0", + "remark": "^13.0.0", + "unified-args": "^8.0.0" + } + }, + "remark-lint": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/remark-lint/-/remark-lint-8.0.0.tgz", + "integrity": 
"sha512-ESI8qJQ/TIRjABDnqoFsTiZntu+FRifZ5fJ77yX63eIDijl/arvmDvT+tAf75/Nm5BFL4R2JFUtkHRGVjzYUsg==", + "dev": true, + "requires": { + "remark-message-control": "^6.0.0" + } + }, + "remark-message-control": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/remark-message-control/-/remark-message-control-6.0.0.tgz", + "integrity": "sha512-k9bt7BYc3G7YBdmeAhvd3VavrPa/XlKWR3CyHjr4sLO9xJyly8WHHT3Sp+8HPR8lEUv+/sZaffL7IjMLV0f6BA==", + "dev": true, + "requires": { + "mdast-comment-marker": "^1.0.0", + "unified-message-control": "^3.0.0" + } + }, + "remark-parse": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-9.0.0.tgz", + "integrity": "sha512-geKatMwSzEXKHuzBNU1z676sGcDcFoChMK38TgdHJNAYfFtsfHDQG7MoJAjs6sgYMqyLduCYWDIWZIxiPeafEw==", + "dev": true, + "requires": { + "mdast-util-from-markdown": "^0.8.0" + }, + "dependencies": { + "character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "dev": true + }, + "character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "dev": true + }, + "character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "dev": true + }, + "is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": 
"sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "dev": true + }, + "is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "dev": true, + "requires": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + } + }, + "is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "dev": true + }, + "is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "dev": true + }, + "mdast-util-from-markdown": { + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-0.8.5.tgz", + "integrity": "sha512-2hkTXtYYnr+NubD/g6KGBS/0mFmBcifAsI0yIWRiRo0PjVs6SSOSOdtzbp6kSGnShDN6G5aWZpKQ2lWRy27mWQ==", + "dev": true, + "requires": { + "@types/mdast": "^3.0.0", + "mdast-util-to-string": "^2.0.0", + "micromark": "~2.11.0", + "parse-entities": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + } + }, + "mdast-util-to-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", + "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==", + "dev": true + }, + "micromark": { + "version": "2.11.4", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-2.11.4.tgz", + "integrity": "sha512-+WoovN/ppKolQOFIAajxi7Lu9kInbPxFuTBVEavFcL8eAfVstoc5MocPmqBeAdBOJV00uaVjegzH4+MA0DN/uA==", + "dev": true, + "requires": 
{ + "debug": "^4.0.0", + "parse-entities": "^2.0.0" + } + }, + "parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "dev": true, + "requires": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + } + }, + "unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "requires": { + "@types/unist": "^2.0.2" + } + } + } + }, + "remark-stringify": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-9.0.1.tgz", + "integrity": "sha512-mWmNg3ZtESvZS8fv5PTvaPckdL4iNlCHTt8/e/8oN08nArHRHjNZMKzA/YW3+p7/lYqIw4nx1XsjCBo/AxNChg==", + "dev": true, + "requires": { + "mdast-util-to-markdown": "^0.6.0" + }, + "dependencies": { + "character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "dev": true + }, + "character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "dev": true + }, + "character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + 
"integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "dev": true + }, + "is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "dev": true + }, + "is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "dev": true, + "requires": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + } + }, + "is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "dev": true + }, + "is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "dev": true + }, + "longest-streak": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-2.0.4.tgz", + "integrity": "sha512-vM6rUVCVUJJt33bnmHiZEvr7wPT78ztX7rojL+LW51bHtLh6HTjx84LA5W4+oa6aKEJA7jJu5LR6vQRBpA5DVg==", + "dev": true + }, + "mdast-util-to-markdown": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-0.6.5.tgz", + "integrity": "sha512-XeV9sDE7ZlOQvs45C9UKMtfTcctcaj/pGwH8YLbMHoMOXNNCn2LsqVQOqrF1+/NU8lKDAqozme9SCXWyo9oAcQ==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "longest-streak": "^2.0.0", + "mdast-util-to-string": "^2.0.0", + "parse-entities": "^2.0.0", + "repeat-string": "^1.0.0", + "zwitch": "^1.0.0" + } 
+ }, + "mdast-util-to-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz", + "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==", + "dev": true + }, + "parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "dev": true, + "requires": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + } + }, + "zwitch": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz", + "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==", + "dev": true + } + } + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "dev": true + }, + "request": { + "version": "2.88.2", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", + "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", + "dev": true, + "requires": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.3", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + 
"tough-cookie": "~2.5.0", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + }, + "dependencies": { + "form-data": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", + "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", + "dev": true, + "requires": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + } + } + } + }, + "resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true + }, "sade": { "version": "1.8.1", "resolved": "https://registry.npmmirror.com/sade/-/sade-1.8.1.tgz", @@ -3094,6 +6652,12 @@ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, "semver": { "version": "7.5.4", "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", @@ -3112,6 +6676,55 @@ } } }, + "sliced": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/sliced/-/sliced-1.0.1.tgz", + "integrity": "sha512-VZBmZP8WU3sMOZm1bdgTadsQbcscK0UM8oKxKVBs4XAhUo2Xxzm/OFMGBkPusxw9xL3Uy8LrzEqGqJhclsr0yA==", + "dev": true + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true + }, + "sshpk": { + "version": "1.18.0", + "resolved": 
"https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz", + "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==", + "dev": true, + "requires": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + } + }, + "string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dev": true, + "requires": { + "safe-buffer": "~5.2.0" + } + }, + "string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, "stringify-entities": { "version": "4.0.2", "resolved": "https://registry.npmmirror.com/stringify-entities/-/stringify-entities-4.0.2.tgz", @@ -3121,6 +6734,335 @@ "character-entities-legacy": "^3.0.0" } }, + "strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "requires": { + "ansi-regex": "^5.0.1" + } + }, + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "text-table": { + "version": "0.2.0", + "resolved": 
"https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + }, + "to-vfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/to-vfile/-/to-vfile-6.1.0.tgz", + "integrity": "sha512-BxX8EkCxOAZe+D/ToHdDsJcVI4HqQfmw0tCkp31zf3dNP/XWIAjU4CmeuSwsSoOzOTqHPOL0KUzyZqJplkD0Qw==", + "dev": true, + "requires": { + "is-buffer": "^2.0.0", + "vfile": "^4.0.0" + }, + "dependencies": { + "unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "requires": { + "@types/unist": "^2.0.2" + } + }, + "vfile": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", + "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + } + }, + "vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + } + } + } + }, + "tough-cookie": { + "version": "2.5.0", + "resolved": 
"https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", + "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", + "dev": true, + "requires": { + "psl": "^1.1.28", + "punycode": "^2.1.1" + } + }, + "trough": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz", + "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==", + "dev": true + }, + "tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "dev": true, + "requires": { + "safe-buffer": "^5.0.1" + } + }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==", + "dev": true + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==", + "dev": true + }, + "unified": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz", + "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==", + "dev": true, + "requires": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^2.0.0", + "trough": "^1.0.0", + "vfile": "^4.0.0" + }, + "dependencies": { + "unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": 
"sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "requires": { + "@types/unist": "^2.0.2" + } + }, + "vfile": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz", + "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-message": "^2.0.0" + } + }, + "vfile-message": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz", + "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^2.0.0" + } + } + } + }, + "unified-args": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/unified-args/-/unified-args-8.1.0.tgz", + "integrity": "sha512-t1HPS1cQPsVvt/6EtyWIbQGurza5684WGRigNghZRvzIdHm3LPgMdXPyGx0npORKzdiy5+urkF0rF5SXM8lBuQ==", + "dev": true, + "requires": { + "camelcase": "^5.0.0", + "chalk": "^3.0.0", + "chokidar": "^3.0.0", + "fault": "^1.0.2", + "json5": "^2.0.0", + "minimist": "^1.2.0", + "text-table": "^0.2.0", + "unified-engine": "^8.0.0" + }, + "dependencies": { + "chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "fault": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", + "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", + "dev": true, + "requires": { + "format": "^0.2.0" + } + } + } + }, + 
"unified-engine": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/unified-engine/-/unified-engine-8.2.0.tgz", + "integrity": "sha512-ZlMm62ejrf+tJHdyOjQfljszngQjRor95q2XZMGk6rpJUYi7ZIHY/EXEhOcj9PZkMKKdLIM+dqL4s0ceyk9wbA==", + "dev": true, + "requires": { + "concat-stream": "^2.0.0", + "debug": "^4.0.0", + "fault": "^1.0.0", + "figures": "^3.0.0", + "glob": "^7.0.3", + "ignore": "^5.0.0", + "is-buffer": "^2.0.0", + "is-empty": "^1.0.0", + "is-plain-obj": "^2.0.0", + "js-yaml": "^3.6.1", + "load-plugin": "^3.0.0", + "parse-json": "^5.0.0", + "to-vfile": "^6.0.0", + "trough": "^1.0.0", + "unist-util-inspect": "^5.0.0", + "vfile-reporter": "^6.0.0", + "vfile-statistics": "^1.1.0" + }, + "dependencies": { + "brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "fault": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", + "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", + "dev": true, + "requires": { + "format": "^0.2.0" + } + }, + "glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + 
"requires": { + "brace-expansion": "^1.1.7" + } + } + } + }, + "unified-lint-rule": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/unified-lint-rule/-/unified-lint-rule-1.0.6.tgz", + "integrity": "sha512-YPK15YBFwnsVorDFG/u0cVVQN5G2a3V8zv5/N6KN3TCG+ajKtaALcy7u14DCSrJI+gZeyYquFL9cioJXOGXSvg==", + "dev": true, + "requires": { + "wrapped": "^1.0.1" + } + }, + "unified-message-control": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/unified-message-control/-/unified-message-control-3.0.3.tgz", + "integrity": "sha512-oY5z2n8ugjpNHXOmcgrw0pQeJzavHS0VjPBP21tOcm7rc2C+5Q+kW9j5+gqtf8vfW/8sabbsK5+P+9QPwwEHDA==", + "dev": true, + "requires": { + "unist-util-visit": "^2.0.0", + "vfile-location": "^3.0.0" + }, + "dependencies": { + "unist-util-is": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz", + "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==", + "dev": true + }, + "unist-util-visit": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz", + "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0", + "unist-util-visit-parents": "^3.0.0" + } + }, + "unist-util-visit-parents": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz", + "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==", + "dev": true, + "requires": { + "@types/unist": "^2.0.0", + "unist-util-is": "^4.0.0" + } + }, + "vfile-location": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz", + "integrity": 
"sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==", + "dev": true + } + } + }, + "unist-util-inspect": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/unist-util-inspect/-/unist-util-inspect-5.0.1.tgz", + "integrity": "sha512-fPNWewS593JSmg49HbnE86BJKuBi1/nMWhDSccBvbARfxezEuJV85EaARR9/VplveiwCoLm2kWq+DhP8TBaDpw==", + "dev": true, + "requires": { + "is-empty": "^1.0.0" + } + }, "unist-util-is": { "version": "5.1.1", "resolved": "https://registry.npmmirror.com/unist-util-is/-/unist-util-is-5.1.1.tgz", @@ -3195,6 +7137,27 @@ "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz", "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==" }, + "uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "requires": { + "punycode": "^2.1.0" + } + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true + }, + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "dev": true + }, "uvu": { "version": "0.5.3", "resolved": "https://registry.npmmirror.com/uvu/-/uvu-0.5.3.tgz", @@ -3206,6 +7169,17 @@ "sade": "^1.7.3" } }, + "verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", + "dev": true, + "requires": { + "assert-plus": 
"^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, "vfile": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.2.tgz", @@ -3235,6 +7209,68 @@ "unist-util-stringify-position": "^3.0.0" } }, + "vfile-reporter": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/vfile-reporter/-/vfile-reporter-6.0.2.tgz", + "integrity": "sha512-GN2bH2gs4eLnw/4jPSgfBjo+XCuvnX9elHICJZjVD4+NM0nsUrMTvdjGY5Sc/XG69XVTgLwj7hknQVc6M9FukA==", + "dev": true, + "requires": { + "repeat-string": "^1.5.0", + "string-width": "^4.0.0", + "supports-color": "^6.0.0", + "unist-util-stringify-position": "^2.0.0", + "vfile-sort": "^2.1.2", + "vfile-statistics": "^1.1.0" + }, + "dependencies": { + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true + }, + "supports-color": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", + "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } + }, + "unist-util-stringify-position": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz", + "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==", + "dev": true, + "requires": { + "@types/unist": "^2.0.2" + } + } + } + }, + "vfile-sort": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/vfile-sort/-/vfile-sort-2.2.2.tgz", + "integrity": "sha512-tAyUqD2R1l/7Rn7ixdGkhXLD3zsg+XLAeUDUhXearjfIcpL1Hcsj5hHpCoy/gvfK/Ws61+e972fm0F7up7hfYA==", + "dev": true + }, + "vfile-statistics": { + "version": "1.1.4", + "resolved": 
"https://registry.npmjs.org/vfile-statistics/-/vfile-statistics-1.1.4.tgz", + "integrity": "sha512-lXhElVO0Rq3frgPvFBwahmed3X03vjPF8OcjKMy8+F1xU/3Q3QU3tKEDp743SFtb74PdF0UWpxPvtOP0GCLheA==", + "dev": true + }, + "wrapped": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wrapped/-/wrapped-1.0.1.tgz", + "integrity": "sha512-ZTKuqiTu3WXtL72UKCCnQLRax2IScKH7oQ+mvjbpvNE+NJxIWIemDqqM2GxNr4N16NCjOYpIgpin5pStM7kM5g==", + "dev": true, + "requires": { + "co": "3.1.0", + "sliced": "^1.0.1" + } + }, "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", diff --git a/package.json b/package.json index 7367d12460c5a..4e5b303151bdf 100644 --- a/package.json +++ b/package.json @@ -17,5 +17,11 @@ "micromark-extension-mdxjs": "^1.0.0", "octokit": "^3.1.0", "unist-util-visit": "^4.1.0" + }, + "devDependencies": { + "@breeswish-org/remark-lint-pingcap-docs-anchor": "1.1.2", + "markdown-link-check": "3.8.1", + "remark-cli": "9.0.0", + "remark-lint": "8.0.0" } } diff --git a/performance-tuning-practices.md b/performance-tuning-practices.md index 1704b228e6f9d..7d31749bf1106 100644 --- a/performance-tuning-practices.md +++ b/performance-tuning-practices.md @@ -13,7 +13,7 @@ This document describes how to use these features together to analyze and compar > > [Top SQL](/dashboard/top-sql.md) and [Continuous Profiling](/dashboard/continuous-profiling.md) are not enabled by default. You need to enable them in advance. -By running the same application with different JDBC configurations in these scenarios, this document shows you how the overall system performance is affected by different interactions between applications and databases, so that you can apply [Best Practices for Developing Java Applications with TiDB](/best-practices/java-app-best-practices.md) for better performance. 
+By running the same application with different JDBC configurations in these scenarios, this document shows you how the overall system performance is affected by different interactions between applications and databases, so that you can apply [Best Practices for Developing Java Applications with TiDB](/develop/java-app-best-practices.md) for better performance. ## Environment description diff --git a/quick-start-with-tidb.md b/quick-start-with-tidb.md index d9ce6a384e4cd..a1e01172ce1b3 100644 --- a/quick-start-with-tidb.md +++ b/quick-start-with-tidb.md @@ -493,8 +493,8 @@ If you are ready to deploy a TiDB cluster for the production environment, here a If you are an application developer and want to quickly build an application using TiDB, here are the next steps: -- [Developer Guide Overview](/develop/dev-guide-overview.md) -- [Build a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md) +- [Developer Guide Overview](https://docs.pingcap.com/developer/) +- [Create a {{{ .starter }}} Cluster](/develop/dev-guide-build-cluster-in-cloud.md) - [Example Applications](/develop/dev-guide-sample-application-java-jdbc.md) If you are looking for an analytics solution with TiFlash, here are the next steps: diff --git a/releases/release-notes.md b/releases/_index.md similarity index 99% rename from releases/release-notes.md rename to releases/_index.md index 538b931062af9..f4ebc4ba0011f 100644 --- a/releases/release-notes.md +++ b/releases/_index.md @@ -2,6 +2,7 @@ title: Release Notes aliases: ['/docs/dev/releases/release-notes/','/docs/dev/releases/rn/'] summary: TiDB has released multiple versions, including 8.5.0, 8.4.0-DMR, 8.3.0-DMR, 8.2.0-DMR, 8.1.0, 8.0.0-DMR, 7.6.0-DMR, 7.5.1, 7.5.0, 7.4.0-DMR, 7.3.0-DMR, 7.2.0-DMR, 7.1.4, 7.1.3, 7.1.2, 7.1.1, 7.1.0, 7.0.0-DMR, 6.6.0-DMR, 6.5.9, 6.5.8, 6.5.7, 6.5.6, 6.5.5, 6.5.4, 6.5.3, 6.5.2, 6.5.1, 6.5.0, 6.4.0-DMR, 6.3.0-DMR, 6.2.0-DMR, 6.1.7, 6.1.6, 6.1.5, 6.1.4, 6.1.3, 6.1.2, 6.1.1, 6.1.0, 6.0.0-DMR, 5.4.3, 
5.4.2, 5.4.1, 5.4.0, 5.3.4, 5.3.3, 5.3.2, 5.3.1, 5.3.0, 5.2.4, 5.2.3, 5.2.2, 5.2.1, 5.2.0, 5.1.5, 5.1.4, 5.1.3, 5.1.2, 5.1.1, 5.1.0, 5.0.6, 5.0.5, 5.0.4, 5.0.3, 5.0.2, 5.0.1, 5.0.0, 5.0.0-rc, 4.0.16, 4.0.15, 4.0.14, 4.0.13, 4.0.12, 4.0.11, 4.0.10, 4.0.9, 4.0.8, 4.0.7, 4.0.6, 4.0.5, 4.0.4, 4.0.3, 4.0.2, 4.0.1, 4.0.0, 4.0.0-rc.2, 4.0.0-rc.1, 4.0.0-rc, 4.0.0-beta.2, 4.0.0-beta.1, 4.0.0-beta, 3.1.2, 3.1.1, 3.1.0, 3.1.0-rc, 3.1.0-beta.2, 3.1.0-beta.1, 3.1.0-beta, 3.0.20, 3.0.19, 3.0.18, 3.0.17, 3.0.16, 3.0.15, 3.0.14, 3.0.13, 3.0.12, 3.0.11, 3.0.10, 3.0.9, 3.0.8, 3.0.7, 3.0.6, 3.0.5, 3.0.4, 3.0.3, 3.0.2, 3.0.1, 3.0.0, 3.0.0-rc.3, 3.0.0-rc.2, 3.0.0-rc.1, 3.0.0-beta.1, 3.0.0-beta, 2.1.19, 2.1.18, 2.1.17, 2.1.16, 2.1.15, 2.1.14, 2.1.13, 2.1.12, 2.1.11, 2.1.10, 2.1.9, 2.1.8, 2.1.7, 2.1.6, 2.1.5, 2.1.4, 2.1.3, 2.1.2, 2.1.1, 2.1.0, 2.1.0-rc.5, 2.1.0-rc.4, 2.1.0-rc.3, 2.1.0-rc.2, 2.1.0-rc.1, 2.1.0-beta, 2.0.11, 2.0.10, 2.0.9, 2.0.8, 2.0.7, 2.0.6, 2.0.5, 2.0.4, 2.0.3, 2.0.2, 2.0.1, 2.0.0, 2.0.0-rc.5, 2.0.0-rc.4, 2.0.0-rc.3, 2.0.0-rc.1, 1.1.0-beta, 1.1.0-alpha, 1.0.8, 1.0.7, 1.0.6, 1.0.5, 1.0.4, 1.0.3, 1.0.2, 1.0.1, 1.0.0, Pre-GA, rc4, rc3, rc2, rc1. +aliases: ['/tidb/stable/release-notes/','/tidb/v8.5/release-notes/'] --- # TiDB Release Notes diff --git a/releases/release-5.1.0.md b/releases/release-5.1.0.md index 608bad96342f1..b7de8833254e0 100644 --- a/releases/release-5.1.0.md +++ b/releases/release-5.1.0.md @@ -23,7 +23,7 @@ In v5.1, the key new features or improvements are as follows: > **Note:** > -> When upgrading from an earlier TiDB version to v5.1, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Notes](/releases/release-notes.md) for the corresponding version. +> When upgrading from an earlier TiDB version to v5.1, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Notes](/releases/_index.md) for the corresponding version. 
### System variables diff --git a/releases/release-5.2.0.md b/releases/release-5.2.0.md index 40f4df5a0374a..d3cbda1ce1ff5 100644 --- a/releases/release-5.2.0.md +++ b/releases/release-5.2.0.md @@ -27,7 +27,7 @@ In v5.2, the key new features and improvements are as follows: > **Note:** > -> When upgrading from an earlier TiDB version to v5.2, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Note](/releases/release-notes.md) for the corresponding version. +> When upgrading from an earlier TiDB version to v5.2, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Note](/releases/_index.md) for the corresponding version. ### System variables diff --git a/releases/release-5.3.0.md b/releases/release-5.3.0.md index 2e7f95dab811b..0e0e636902fee 100644 --- a/releases/release-5.3.0.md +++ b/releases/release-5.3.0.md @@ -26,7 +26,7 @@ In v5.3, the key new features or improvements are as follows: > **Note:** > -> When upgrading from an earlier TiDB version to v5.3.0, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Notes](/releases/release-notes.md) of the corresponding version. +> When upgrading from an earlier TiDB version to v5.3.0, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Notes](/releases/_index.md) of the corresponding version. 
### System variables diff --git a/releases/release-5.4.0.md b/releases/release-5.4.0.md index 832ac4884d262..7babe523ea4f9 100644 --- a/releases/release-5.4.0.md +++ b/releases/release-5.4.0.md @@ -27,7 +27,7 @@ In v5.4, the key new features or improvements are as follows: > **Note:** > -> When upgrading from an earlier TiDB version to v5.4.0, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Notes](/releases/release-notes.md) of the corresponding version. +> When upgrading from an earlier TiDB version to v5.4.0, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Notes](/releases/_index.md) of the corresponding version. ### System variables diff --git a/releases/release-6.0.0-dmr.md b/releases/release-6.0.0-dmr.md index 8ceb69dcc9b65..2d70511c8cfad 100644 --- a/releases/release-6.0.0-dmr.md +++ b/releases/release-6.0.0-dmr.md @@ -283,7 +283,7 @@ TiDB v6.0.0 is a DMR, and its version is 6.0.0-DMR. > **Note:** > -> When upgrading from an earlier TiDB version to v6.0.0, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Notes](/releases/release-notes.md) of the corresponding version. +> When upgrading from an earlier TiDB version to v6.0.0, if you want to know the compatibility change notes of all intermediate versions, you can check the [Release Notes](/releases/_index.md) of the corresponding version. ### System variables diff --git a/releases/release-7.0.0.md b/releases/release-7.0.0.md index 53cf17f13a1fc..603c9dc39cc30 100644 --- a/releases/release-7.0.0.md +++ b/releases/release-7.0.0.md @@ -86,7 +86,7 @@ In v7.0.0-DMR, the key new features and improvements are as follows: Starting from TiDB v7.0.0, Fast Online DDL and PITR are fully compatible. 
When restoring cluster data through PITR, the index operations added via Fast Online DDL during log backup will be automatically replayed to achieve compatibility. - For more information, see [documentation](/ddl-introduction.md). + For more information, see [documentation](/best-practices/ddl-introduction.md). * TiFlash supports null-aware semi join and null-aware anti semi join operators [#6674](https://github.com/pingcap/tiflash/issues/6674) @[gengliqi](https://github.com/gengliqi) @@ -142,7 +142,7 @@ In v7.0.0-DMR, the key new features and improvements are as follows: TiDB v6.5.0 supports creating ordinary secondary indexes via Fast Online DDL. TiDB v7.0.0 supports creating unique indexes via Fast Online DDL. Compared to v6.1.0, adding unique indexes to large tables is expected to be several times faster with improved performance. - For more information, see [documentation](/ddl-introduction.md). + For more information, see [documentation](/best-practices/ddl-introduction.md). ### Reliability @@ -168,7 +168,7 @@ In v7.0.0-DMR, the key new features and improvements are as follows: TiDB v7.0.0 introduces a checkpoint mechanism for [Fast Online DDL](/system-variables.md#tidb_ddl_enable_fast_reorg-new-in-v630), which significantly improves its fault tolerance and automatic recovery capabilities. By periodically recording and synchronizing the DDL progress, ongoing DDL operations can continue to be executed in Fast Online DDL mode even if there is a TiDB DDL Owner failure or switch. This makes the execution of DDL more stable and efficient. - For more information, see [documentation](/ddl-introduction.md). + For more information, see [documentation](/best-practices/ddl-introduction.md). 
* TiFlash supports spilling to disk [#6528](https://github.com/pingcap/tiflash/issues/6528) @[windtalker](https://github.com/windtalker) diff --git a/releases/release-7.1.0.md b/releases/release-7.1.0.md index e53525e28fc6e..458675ff000dc 100644 --- a/releases/release-7.1.0.md +++ b/releases/release-7.1.0.md @@ -156,7 +156,7 @@ Compared with the previous LTS 6.5.0, 7.1.0 not only includes new features, impr * Support the checkpoint mechanism for Fast Online DDL to improve fault tolerance and automatic recovery capability [#42164](https://github.com/pingcap/tidb/issues/42164) @[tangenta](https://github.com/tangenta) - TiDB v7.1.0 introduces a checkpoint mechanism for [Fast Online DDL](/ddl-introduction.md), which significantly improves the fault tolerance and automatic recovery capability of Fast Online DDL. Even if the TiDB owner node is restarted or changed due to failures, TiDB can still recover progress from checkpoints that are automatically updated on a regular basis, making the DDL execution more stable and efficient. + TiDB v7.1.0 introduces a checkpoint mechanism for [Fast Online DDL](/best-practices/ddl-introduction.md), which significantly improves the fault tolerance and automatic recovery capability of Fast Online DDL. Even if the TiDB owner node is restarted or changed due to failures, TiDB can still recover progress from checkpoints that are automatically updated on a regular basis, making the DDL execution more stable and efficient. For more information, see [documentation](/system-variables.md#tidb_ddl_enable_fast_reorg-new-in-v630). diff --git a/releases/release-7.2.0.md b/releases/release-7.2.0.md index aa2d03d35c080..6c98ccf0f5026 100644 --- a/releases/release-7.2.0.md +++ b/releases/release-7.2.0.md @@ -146,7 +146,7 @@ Quick access: [Quick start](https://docs-archive.pingcap.com/tidb/v7.2/quick-sta ADMIN RESUME DDL JOBS 1,2; ``` - For more information, see [documentation](/ddl-introduction.md#ddl-related-commands). 
+ For more information, see [documentation](/best-practices/ddl-introduction.md#ddl-related-commands). ### Data migration diff --git a/releases/release-7.5.0.md b/releases/release-7.5.0.md index d39ef73b9b7dd..8b09b93c7370c 100644 --- a/releases/release-7.5.0.md +++ b/releases/release-7.5.0.md @@ -108,7 +108,7 @@ Compared with the previous LTS 7.1.0, 7.5.0 includes new features, improvements, ADMIN RESUME DDL JOBS 1,2; ``` - For more information, see [documentation](/ddl-introduction.md#ddl-related-commands). + For more information, see [documentation](/best-practices/ddl-introduction.md#ddl-related-commands). * BR supports backing up and restoring statistics [#48008](https://github.com/pingcap/tidb/issues/48008) @[Leavrth](https://github.com/Leavrth) diff --git a/releases/release-8.2.0.md b/releases/release-8.2.0.md index fc13b08813506..a6b47a2cf8d0b 100644 --- a/releases/release-8.2.0.md +++ b/releases/release-8.2.0.md @@ -226,7 +226,7 @@ Quick access: [Quick start](https://docs.pingcap.com/tidb/v8.2/quick-start-with- + TiDB - - Support parallel execution of [logical DDL statements (General DDL)](/ddl-introduction.md#types-of-ddl-statements). Compared with v8.1.0, when you use 10 sessions to submit different DDL statements concurrently, the performance is improved by 3 to 6 times [#53246](https://github.com/pingcap/tidb/issues/53246) @[D3Hunter](https://github.com/D3Hunter) + - Support parallel execution of [logical DDL statements (General DDL)](/best-practices/ddl-introduction.md#types-of-ddl-statements). 
Compared with v8.1.0, when you use 10 sessions to submit different DDL statements concurrently, the performance is improved by 3 to 6 times [#53246](https://github.com/pingcap/tidb/issues/53246) @[D3Hunter](https://github.com/D3Hunter) - Improve the logic of matching multi-column indexes using expressions like `((a = 1 and b = 2 and c > 3) or (a = 4 and b = 5 and c > 6)) and d > 3` to produce a more accurate `Range` [#41598](https://github.com/pingcap/tidb/issues/41598) @[ghazalfamilyusa](https://github.com/ghazalfamilyusa) - Optimize the performance of obtaining data distribution information when performing simple queries on tables with large data volumes [#53850](https://github.com/pingcap/tidb/issues/53850) @[you06](https://github.com/you06) - The aggregated result set can be used as an inner table for IndexJoin, allowing more complex queries to be matched to IndexJoin, thus improving query efficiency through indexing [#37068](https://github.com/pingcap/tidb/issues/37068) @[elsa0520](https://github.com/elsa0520) diff --git a/releases/release-8.4.0.md b/releases/release-8.4.0.md index 08673baab6045..8e828909c45a9 100644 --- a/releases/release-8.4.0.md +++ b/releases/release-8.4.0.md @@ -200,13 +200,13 @@ Quick access: [Quick start](https://docs.pingcap.com/tidb/v8.4/quick-start-with- Vector search is a search method based on data semantics, which provides more relevant search results. As one of the core functions of AI and large language models (LLMs), vector search can be used in various scenarios such as Retrieval-Augmented Generation (RAG), semantic search, and recommendation systems. - Starting from v8.4.0, TiDB supports [vector data types](/vector-search/vector-search-data-types.md) and [vector search indexes](/vector-search/vector-search-index.md), offering powerful vector search capabilities. 
TiDB vector data types support up to 16,383 dimensions and support various [distance functions](/vector-search/vector-search-functions-and-operators.md#vector-functions), including L2 distance (Euclidean distance), cosine distance, negative inner product, and L1 distance (Manhattan distance). + Starting from v8.4.0, TiDB supports [vector data types](/ai/reference/vector-search-data-types.md) and [vector search indexes](/ai/reference/vector-search-index.md), offering powerful vector search capabilities. TiDB vector data types support up to 16,383 dimensions and support various [distance functions](/ai/reference/vector-search-functions-and-operators.md#vector-functions), including L2 distance (Euclidean distance), cosine distance, negative inner product, and L1 distance (Manhattan distance). To start vector search, you only need to create a table with vector data types, insert vector data, and then perform a query of vector data. You can also perform mixed queries of vector data and traditional relational data. - To enhance the performance of vector search, you can create and use [vector search indexes](/vector-search/vector-search-index.md). Note that TiDB vector search indexes rely on TiFlash. Before using vector search indexes, make sure that TiFlash nodes are deployed in your TiDB cluster. + To enhance the performance of vector search, you can create and use [vector search indexes](/ai/reference/vector-search-index.md). Note that TiDB vector search indexes rely on TiFlash. Before using vector search indexes, make sure that TiFlash nodes are deployed in your TiDB cluster. - For more information, see [documentation](/vector-search/vector-search-overview.md). + For more information, see [documentation](/ai/concepts/vector-search-overview.md). 
### DB operations diff --git a/scripts/verify-internal-links-in-toc.js b/scripts/verify-internal-links-in-toc.js new file mode 100644 index 0000000000000..8f9a1431c7a4b --- /dev/null +++ b/scripts/verify-internal-links-in-toc.js @@ -0,0 +1,344 @@ +import * as fs from "fs"; +import path from "path"; +import { fileURLToPath } from "url"; +import glob from "glob"; + +import { visit } from "unist-util-visit"; + +import { generateMdAstFromFile } from "./utils.js"; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); +const ROOT = path.resolve(__dirname, ".."); + +const SPECIAL_IMPLICIT_TARGETS = new Set(["_index.md", "_docHome.md"]); +const EXCLUDED_TOC_FILES = new Set(["TOC-pingkai.md"]); + +const CLOUD_TOC_FILES = [ + "TOC-tidb-cloud.md", + "TOC-tidb-cloud-premium.md", + "TOC-tidb-cloud-starter.md", + "TOC-tidb-cloud-essential.md", +]; + +const PREFIX_TO_TOC = [ + { prefix: "ai/", toc: "TOC-ai.md" }, + { prefix: "api/", toc: "TOC-api.md" }, + { prefix: "develop/", toc: "TOC-develop.md" }, + { prefix: "releases/", toc: "TOC-tidb-releases.md" }, + { prefix: "tidb-cloud/releases/", toc: "TOC-tidb-cloud-releases.md" }, + { prefix: "best-practices/", toc: "TOC-best-practices.md" }, +]; + +const sortByLocale = (left, right) => left.localeCompare(right); + +function isExternalUrl(url = "") { + return ( + url.startsWith("//") || url.includes("://") || url.startsWith("mailto:") + ); +} + +function stripQueryAndHash(url = "") { + const q = url.split("?")[0]; + const [p, hash] = q.split("#"); + return { path: p, hash: hash || "" }; +} + +function isInternalDocLink(url = "") { + if (!url) return false; + if (isExternalUrl(url)) return false; + if (!url.startsWith("/")) return false; + if (url.startsWith("/media/")) return false; + const { path: p } = stripQueryAndHash(url); + return p.endsWith(".md") || p.endsWith(".mdx"); +} + +function toTargetRel(url = "") { + return stripQueryAndHash(url).path.replace(/^\/+/, ""); +} + +function 
extractInternalDocTargetsFromUrls(urls = []) { + return urls + .filter((url) => isInternalDocLink(url)) + .map((url) => toTargetRel(url)); +} + +function extractInternalDocTargetsFromMarkdownFile(absPath) { + return extractInternalDocTargetsFromUrls(extractUrlsFromMarkdownFile(absPath)); +} + +function sortedValues(values = []) { + return [...values].sort(sortByLocale); +} + +function extractUrlsFromMarkdownFile(absPath) { + const buf = fs.readFileSync(absPath); + const ast = generateMdAstFromFile(buf); + const urls = []; + visit(ast, ["link", "definition"], (node) => { + if (typeof node.url === "string" && node.url.trim()) { + urls.push(node.url.trim()); + } + }); + return urls; +} + +function readTocFiles() { + const tocFiles = glob + .sync("TOC*.md", { cwd: ROOT, nodir: true }) + .filter((f) => !EXCLUDED_TOC_FILES.has(f)) + .sort(sortByLocale); + return tocFiles; +} + +function buildTocIndex(tocFiles) { + const tocToPages = new Map(); // tocFile -> Set(relPathWithoutLeadingSlash) + const anyTocPages = new Set(); + const pageToTocs = new Map(); // pageRel -> Set(tocFile) + + tocFiles.forEach((toc) => { + const tocAbs = path.join(ROOT, toc); + const pages = new Set(); + + extractInternalDocTargetsFromMarkdownFile(tocAbs).forEach((rel) => { + pages.add(rel); + anyTocPages.add(rel); + + const tocs = pageToTocs.get(rel) || new Set(); + tocs.add(toc); + pageToTocs.set(rel, tocs); + }); + + tocToPages.set(toc, pages); + }); + + const cloudTocPages = new Set( + CLOUD_TOC_FILES.flatMap((toc) => [...(tocToPages.get(toc) || new Set())]) + ); + + return { tocToPages, anyTocPages, pageToTocs, cloudTocPages }; +} + +function expectedSetForTarget(targetRel, tocToPages, anyTocPages, cloudTocPages) { + if ( + targetRel === "_index.md" || + targetRel.endsWith("/_index.md") || + targetRel === "_docHome.md" || + targetRel.endsWith("/_docHome.md") + ) { + return { ok: true }; + } + + if ( + targetRel.startsWith("tidb-cloud/") && + !targetRel.startsWith("tidb-cloud/releases/") + ) { 
+ return { + ok: cloudTocPages.has(targetRel), + expectedLabel: "any TiDB Cloud TOC", + }; + } + + const matchedPrefix = PREFIX_TO_TOC.find(({ prefix }) => + targetRel.startsWith(prefix) + ); + if (matchedPrefix) { + const set = tocToPages.get(matchedPrefix.toc) || new Set(); + return { ok: set.has(targetRel), expectedLabel: matchedPrefix.toc }; + } + + // Default: the target appears in any TOC*.md + return { ok: anyTocPages.has(targetRel), expectedLabel: "any TOC*.md" }; +} + +function main() { + process.chdir(ROOT); + + const verbose = + process.env.VERBOSE_TOC === "1" || + process.env.VERBOSE_TOC === "true" || + process.env.VERBOSE === "1" || + process.env.VERBOSE === "true"; + const maxMissing = + Number.parseInt(process.env.TOC_MAX_MISSING || "", 10) || 50; + const maxFiles = + Number.parseInt(process.env.TOC_MAX_FILES || "", 10) || 30; + const maxLinksPerFile = + Number.parseInt(process.env.TOC_MAX_LINKS_PER_FILE || "", 10) || 10; + + const tocFiles = readTocFiles(); + if (tocFiles.length === 0) { + console.error("TOC check error: no TOC*.md files found in repo root."); + process.exit(1); + } + + const { tocToPages, anyTocPages, pageToTocs, cloudTocPages } = + buildTocIndex(tocFiles); + const buildScopePages = sortedValues(anyTocPages); + + const missingScopePages = []; + const violations = []; + + buildScopePages.forEach((sourceRel) => { + const sourceAbs = path.join(ROOT, sourceRel); + if (!fs.existsSync(sourceAbs)) { + missingScopePages.push(sourceRel); + return; + } + + extractInternalDocTargetsFromMarkdownFile(sourceAbs) + .filter((targetRel) => + !SPECIAL_IMPLICIT_TARGETS.has(path.basename(targetRel)) + ) + .forEach((targetRel) => { + const { ok, expectedLabel } = expectedSetForTarget( + targetRel, + tocToPages, + anyTocPages, + cloudTocPages + ); + if (!ok) { + violations.push({ + sourceRel, + targetRel, + expectedLabel, + }); + } + }); + }); + + if (missingScopePages.length > 0) { + // Printed below in a grouped summary. 
+ } + + if (violations.length > 0) { + // Printed below in a grouped summary. + } + + if (missingScopePages.length > 0 || violations.length > 0) { + const byTarget = violations.reduce((groupedMap, violation) => { + const current = groupedMap.get(violation.targetRel) || { + targetRel: violation.targetRel, + expectedLabel: violation.expectedLabel, + sourceFiles: new Set(), + }; + current.sourceFiles.add(violation.sourceRel); + groupedMap.set(violation.targetRel, current); + return groupedMap; + }, new Map()); + + console.error("TOC check report: FAILED"); + console.error( + `- Scope: pages included by TOC*.md (excluding: ${[ + ...EXCLUDED_TOC_FILES, + ].join(", ") || "(none)"})` + ); + console.error(`- In-scope pages: ${buildScopePages.length}`); + console.error( + `- Missing in-scope pages (referenced by TOC but not on disk): ${missingScopePages.length}` + ); + console.error( + `- TOC membership violations: ${violations.length} links across ${byTarget.size} targets` + ); + console.error(""); + + if (missingScopePages.length > 0) { + console.error( + `=== Missing pages referenced by TOC*.md (${missingScopePages.length}) ===` + ); + console.error(""); + missingScopePages.slice(0, maxMissing).forEach((p) => { + const referencedBy = sortedValues(pageToTocs.get(p) || new Set()); + if (referencedBy.length > 0) { + console.error(`- ${p}`); + console.error(` referenced by: ${referencedBy.join(", ")}`); + } else { + console.error(`- ${p}`); + } + }); + if (!verbose && missingScopePages.length > maxMissing) { + console.error( + `- ... 
and ${missingScopePages.length - maxMissing} more (set TOC_MAX_MISSING or VERBOSE_TOC=1 to show more)` + ); + } + console.error(""); + } + + if (violations.length > 0) { + console.error(`=== TOC membership violations (grouped by target) ===`); + console.error(""); + + const targets = [...byTarget.values()].sort((a, b) => { + const diff = b.sourceFiles.size - a.sourceFiles.size; + if (diff !== 0) return diff; + return sortByLocale(a.targetRel, b.targetRel); + }); + const shownTargets = verbose ? targets : targets.slice(0, maxFiles); + + shownTargets.forEach((item, index) => { + const targetUrl = `/${item.targetRel}`; + const targetTocs = sortedValues( + pageToTocs.get(item.targetRel) || new Set() + ); + const sourceFiles = sortedValues(item.sourceFiles); + + console.error(`Target: ${targetUrl}`); + if (targetTocs.length === 0) { + if (item.expectedLabel === "any TOC*.md") { + console.error(`Issue: Missing from TOC index`); + console.error(`Expected TOC: any TOC*.md`); + } else { + console.error(`Issue: Missing from TOC index`); + console.error(`Expected TOC: ${item.expectedLabel}`); + } + } else { + console.error(`Issue: TOC mismatch`); + console.error(`Current TOC: ${targetTocs.join(", ")}`); + console.error(`Expected TOC: ${item.expectedLabel}`); + } + + console.error(""); + console.error(`Referenced in (${sourceFiles.length}):`); + const shownFiles = verbose + ? sourceFiles + : sourceFiles.slice(0, maxLinksPerFile); + shownFiles.forEach((sourceRel) => { + console.error(` - ${sourceRel}`); + }); + if (!verbose && sourceFiles.length > maxLinksPerFile) { + console.error( + ` - ... and ${sourceFiles.length - maxLinksPerFile} more (set TOC_MAX_LINKS_PER_FILE or VERBOSE_TOC=1)` + ); + } + + if (index < shownTargets.length - 1) { + console.error(""); + } + }); + + if (!verbose && targets.length > maxFiles) { + console.error(""); + console.error( + `... 
and ${targets.length - maxFiles} more target links (set TOC_MAX_FILES or VERBOSE_TOC=1)` + ); + } + + console.error("=== How to fix ==="); + console.error( + "- If the target page should be part of the site, add it to the expected TOC (per folder mapping)." + ); + console.error( + "- Otherwise, update the link to point to an in-scope page that is included by TOC." + ); + console.error(""); + } + + process.exit(1); + } + + console.log( + `TOC check report: OK. Checked ${buildScopePages.length} in-scope pages (from TOC*.md) and found no TOC membership violations.` + ); +} + +main(); diff --git a/scripts/verify-link-anchors.sh b/scripts/verify-link-anchors.sh index 77faf1fdcd30c..469e68b505ff8 100755 --- a/scripts/verify-link-anchors.sh +++ b/scripts/verify-link-anchors.sh @@ -7,8 +7,24 @@ ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd) cd $ROOT -npm install -g remark-cli@9.0.0 remark-lint@8.0.0 breeswish/remark-lint-pingcap-docs-anchor +REMARK_CMD=() +if [ -x "$ROOT/node_modules/.bin/remark" ]; then + # Prefer the repo-local pinned version (installed by `npm ci`). + REMARK_CMD=("$ROOT/node_modules/.bin/remark") +elif command -v remark >/dev/null 2>&1; then + # Fall back to a globally-installed version (less reproducible). + REMARK_CMD=(remark) +else + REMARK_CMD=(npx --no-install remark) +fi echo "info: checking links anchors under $ROOT directory..." -remark --ignore-path .gitignore -u lint -u remark-lint-pingcap-docs-anchor . --frail --quiet +"${REMARK_CMD[@]}" \ + --ignore-path .gitignore \ + --ignore-pattern '.*/**' \ + -u lint \ + -u @breeswish-org/remark-lint-pingcap-docs-anchor \ + . \ + --frail \ + --quiet diff --git a/scripts/verify-links.sh b/scripts/verify-links.sh index 119eb1fec53be..1cb3d7b156ac4 100755 --- a/scripts/verify-links.sh +++ b/scripts/verify-links.sh @@ -15,7 +15,17 @@ ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. 
&& pwd) cd $ROOT -npm install -g markdown-link-check@3.8.1 +MLC_CMD=() +if [ -x "$ROOT/node_modules/.bin/markdown-link-check" ]; then + # Prefer the repo-local pinned version (installed by `npm ci`). + MLC_CMD=("$ROOT/node_modules/.bin/markdown-link-check") +elif command -v markdown-link-check >/dev/null 2>&1; then + # Fall back to a globally-installed version (less reproducible). + MLC_CMD=(markdown-link-check) +else + # As a last resort, rely on `npx` without installing. + MLC_CMD=(npx --no-install markdown-link-check) +fi VERBOSE=${VERBOSE:-} CONFIG_TMP=$(mktemp) @@ -49,7 +59,7 @@ fi while read -r tasks; do for task in $tasks; do ( - output=$(markdown-link-check --config "$CONFIG_TMP" "$task" -q) + output=$("${MLC_CMD[@]}" --config "$CONFIG_TMP" "$task" -q) if [ $? -ne 0 ]; then printf "$output" >> $ERROR_REPORT fi @@ -59,7 +69,7 @@ while read -r tasks; do ) & done wait -done <<<"$(find "." -type f -not -path './node_modules/*' -name '*.md' | xargs -n 10)" +done <<<"$(find "." -type f -name '*.md' -not -path './node_modules/*' -not -path './tmp/*' -not -path './.*/*' | xargs -n 10)" error_files=$(cat $ERROR_REPORT | grep 'FILE: ' | wc -l) error_output=$(cat $ERROR_REPORT) diff --git a/sql-statements/sql-statement-admin-show-ddl.md b/sql-statements/sql-statement-admin-show-ddl.md index c62b7b558e2b1..7ce45927da1cf 100644 --- a/sql-statements/sql-statement-admin-show-ddl.md +++ b/sql-statements/sql-statement-admin-show-ddl.md @@ -69,7 +69,7 @@ The `ADMIN SHOW DDL JOBS` statement is used to view the 10 jobs in the current D - `add index`: for [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) operations. - `SCHEMA_STATE`: the current state of the schema object that the DDL operates on. If `JOB_TYPE` is `ADD INDEX`, it is the state of the index; if `JOB_TYPE` is `ADD COLUMN`, it is the state of the column; if `JOB_TYPE` is `CREATE TABLE`, it is the state of the table. Common states include the following: - `none`: indicates that it does not exist. 
Generally, after the `DROP` operation or after the `CREATE` operation fails and rolls back, it will become the `none` state. - - `delete only`, `write only`, `delete reorganization`, `write reorganization`: these four states are intermediate states. For their specific meanings, see [How the Online DDL Asynchronous Change Works in TiDB](/ddl-introduction.md#how-the-online-ddl-asynchronous-change-works-in-tidb). As the intermediate state conversion is fast, these states are generally not visible during operation. Only when performing `ADD INDEX` operation can the `write reorganization` state be seen, indicating that index data is being added. + - `delete only`, `write only`, `delete reorganization`, `write reorganization`: these four states are intermediate states. For their specific meanings, see [How the Online DDL Asynchronous Change Works in TiDB](/best-practices/ddl-introduction.md#how-the-online-ddl-asynchronous-change-works-in-tidb). As the intermediate state conversion is fast, these states are generally not visible during operation. Only when performing `ADD INDEX` operation can the `write reorganization` state be seen, indicating that index data is being added. - `public`: indicates that it exists and is available to users. Generally, after `CREATE TABLE` and `ADD INDEX` (or `ADD COLUMN`) operations are completed, it will become the `public` state, indicating that the newly created table, column, and index can be read and written normally. - `SCHEMA_ID`: the ID of the database where the DDL operation is performed. - `TABLE_ID`: the ID of the table where the DDL operation is performed. @@ -247,7 +247,7 @@ This statement is a TiDB extension to MySQL syntax. 
## See also -* [DDL introduction](/ddl-introduction.md) +* [DDL introduction](/best-practices/ddl-introduction.md) * [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) * [`ADMIN PAUSE DDL`](/sql-statements/sql-statement-admin-pause-ddl.md) * [`ADMIN RESUME DDL`](/sql-statements/sql-statement-admin-resume-ddl.md) diff --git a/system-variable-reference.md b/system-variable-reference.md index a38f2e6d53898..ef41aa77a6c3c 100644 --- a/system-variable-reference.md +++ b/system-variable-reference.md @@ -438,7 +438,7 @@ Referenced in: Referenced in: -- [Best Practices for Developing Java Applications with TiDB](/best-practices/java-app-best-practices.md) +- [Best Practices for Developing Java Applications with TiDB](/develop/java-app-best-practices.md) - [Limited SQL features on TiDB Cloud](https://docs.pingcap.com/tidbcloud/limited-sql-features) - [System Variables](/system-variables.md#interactive_timeout) - [TiDB Cluster Management FAQs](/faq/manage-cluster-faq.md) @@ -525,7 +525,7 @@ Referenced in: Referenced in: -- [Best Practices for Developing Java Applications with TiDB](/best-practices/java-app-best-practices.md) +- [Best Practices for Developing Java Applications with TiDB](/develop/java-app-best-practices.md) - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) - [Optimizer Hints](/optimizer-hints.md) - [SQL Plan Management (SPM)](/sql-plan-management.md) @@ -730,11 +730,10 @@ Referenced in: - [Connect to TiDB with PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) - [Connect to TiDB with SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) - [Connect to TiDB with peewee](/develop/dev-guide-sample-application-python-peewee.md) -- [Get Started with TiDB + AI via Python](/vector-search/vector-search-get-started-using-python.md) -- [Integrate TiDB Vector Search with Jina AI Embeddings API](/vector-search/vector-search-integrate-with-jinaai-embedding.md) -- [Integrate TiDB 
Vector Search with SQLAlchemy](/vector-search/vector-search-integrate-with-sqlalchemy.md) -- [Integrate Vector Search with LangChain](/vector-search/vector-search-integrate-with-langchain.md) -- [Integrate Vector Search with LlamaIndex](/vector-search/vector-search-integrate-with-llamaindex.md) +- [Integrate TiDB Vector Search with Jina AI Embeddings API](/ai/integrations/vector-search-integrate-with-jinaai-embedding.md) +- [Integrate TiDB Vector Search with SQLAlchemy](/ai/integrations/vector-search-integrate-with-sqlalchemy.md) +- [Integrate Vector Search with LangChain](/ai/integrations/vector-search-integrate-with-langchain.md) +- [Integrate Vector Search with LlamaIndex](/ai/integrations/vector-search-integrate-with-llamaindex.md) - [System Variables](/system-variables.md#ssl_ca) ### ssl_cert @@ -1221,7 +1220,7 @@ Referenced in: - [ADMIN ALTER DDL JOBS](/sql-statements/sql-statement-admin-alter-ddl.md) - [ADMIN SHOW DDL [JOBS|JOB QUERIES]](/sql-statements/sql-statement-admin-show-ddl.md) -- [Best Practices for DDL Execution in TiDB](/ddl-introduction.md) +- [Best Practices for DDL Execution in TiDB](/best-practices/ddl-introduction.md) - [CREATE INDEX](/sql-statements/sql-statement-create-index.md) - [Limited SQL features on TiDB Cloud](https://docs.pingcap.com/tidbcloud/limited-sql-features) - [Performance Tuning Best Practices](/develop/dev-guide-optimize-sql-best-practices.md) @@ -1266,7 +1265,7 @@ Referenced in: - [ADMIN ALTER DDL JOBS](/sql-statements/sql-statement-admin-alter-ddl.md) - [ADMIN SHOW DDL [JOBS|JOB QUERIES]](/sql-statements/sql-statement-admin-show-ddl.md) -- [Best Practices for DDL Execution in TiDB](/ddl-introduction.md) +- [Best Practices for DDL Execution in TiDB](/best-practices/ddl-introduction.md) - [CREATE INDEX](/sql-statements/sql-statement-create-index.md) - [Limited SQL features on TiDB Cloud](https://docs.pingcap.com/tidbcloud/limited-sql-features) - [Performance Tuning Best 
Practices](/develop/dev-guide-optimize-sql-best-practices.md) @@ -1689,7 +1688,7 @@ Referenced in: Referenced in: -- [Best Practices for Developing Java Applications with TiDB](/best-practices/java-app-best-practices.md) +- [Best Practices for Developing Java Applications with TiDB](/develop/java-app-best-practices.md) - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) - [System Variables](/system-variables.md#tidb_enable_lazy_cursor_fetch-new-in-v830) - [TiDB 8.3.0 Release Notes](/releases/release-8.3.0.md) @@ -2052,7 +2051,7 @@ Referenced in: Referenced in: -- [Best Practices for Developing Java Applications with TiDB](/best-practices/java-app-best-practices.md) +- [Best Practices for Developing Java Applications with TiDB](/develop/java-app-best-practices.md) - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) - [Enable Encryption for Disk Spill](/enable-disk-spill-encrypt.md) - [Explain Statements That Use Joins](/explain-joins.md) @@ -3545,7 +3544,8 @@ Referenced in: - [Optimizer Hints](/optimizer-hints.md) - [SHOW [GLOBAL|SESSION] VARIABLES](/sql-statements/sql-statement-show-variables.md) - [System Variables](/system-variables.md#tidb_replica_read-new-in-v40) -- [TiDB Best Practices on Public Cloud](/best-practices-on-public-cloud.md) +- [TiDB Best Practices on Public Cloud](/best-practices/best-practices-on-public-cloud.md) +- [TiDB 8.5.4 Release Notes](/releases/release-8.5.4.md) - [TiDB 8.5.3 Release Notes](/releases/release-8.5.3.md) - [TiDB 7.0.0 Release Notes](/releases/release-7.0.0.md) - [TiDB 6.6.0 Release Notes](/releases/release-6.6.0.md) @@ -4089,7 +4089,7 @@ Referenced in: - [Limited SQL features on TiDB Cloud](https://docs.pingcap.com/tidbcloud/limited-sql-features) - [System Variables](/system-variables.md#tidb_tso_client_batch_max_wait_time-new-in-v530) -- [TiDB Best Practices on Public Cloud](/best-practices-on-public-cloud.md) +- [TiDB Best Practices on Public 
Cloud](/best-practices/best-practices-on-public-cloud.md) - [TiDB 5.3 Release Notes](/releases/release-5.3.0.md) ### tidb_tso_client_rpc_mode @@ -4512,7 +4512,7 @@ Referenced in: Referenced in: -- [Best Practices for Developing Java Applications with TiDB](/best-practices/java-app-best-practices.md) +- [Best Practices for Developing Java Applications with TiDB](/develop/java-app-best-practices.md) - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) - [Limited SQL features on TiDB Cloud](https://docs.pingcap.com/tidbcloud/limited-sql-features) - [System Variables](/system-variables.md#wait_timeout) diff --git a/telemetry.md b/telemetry.md index 081b37db0a7ba..66e5e5e6b3a12 100644 --- a/telemetry.md +++ b/telemetry.md @@ -17,7 +17,7 @@ When the telemetry feature is enabled, TiUP and TiSpark collect usage informatio ## What is shared when telemetry is enabled? -The following sections describe the shared usage information in detail for TiUP and TiSpark. The usage details that get shared might change over time. These changes (if any) will be announced in [release notes](/releases/release-notes.md). +The following sections describe the shared usage information in detail for TiUP and TiSpark. The usage details that get shared might change over time. These changes (if any) will be announced in [release notes](/releases/_index.md). > **Note:** > diff --git a/ticdc/ticdc-compatibility.md b/ticdc/ticdc-compatibility.md index 112c0990f8dbb..1857238e04f41 100644 --- a/ticdc/ticdc-compatibility.md +++ b/ticdc/ticdc-compatibility.md @@ -85,7 +85,7 @@ If the upstream cluster contains a global temporary table, the downstream TiDB c ### Compatibility with vector data types -Starting from v8.4.0, TiCDC supports replicating tables with [vector data types](/vector-search/vector-search-data-types.md) to downstream (experimental). 
+Starting from v8.4.0, TiCDC supports replicating tables with [vector data types](/ai/reference/vector-search-data-types.md) to downstream (experimental). When the downstream is Kafka or a storage service (such as Amazon S3, GCS, Azure Blob Storage, or NFS), TiCDC converts vector data types into string types before writing to the downstream. diff --git a/tidb-cloud/ai-feature-concepts.md b/tidb-cloud/ai-feature-concepts.md index 7a573482168a2..48a60ef7a9ea6 100644 --- a/tidb-cloud/ai-feature-concepts.md +++ b/tidb-cloud/ai-feature-concepts.md @@ -13,7 +13,7 @@ This document highlights these AI features and how they enhance the TiDB experie Chat2Query is an AI-powered feature integrated into SQL Editor that assists users in generating, debugging, or rewriting SQL queries using natural language instructions. For more information, see [Explore your data with AI-assisted SQL Editor](/tidb-cloud/explore-data-with-chat2query.md). -In addition, TiDB Cloud provides a Chat2Query API for TiDB Cloud Serverless clusters. After it is enabled, TiDB Cloud will automatically create a system Data App called Chat2Query and a Chat2Data endpoint in Data Service. You can call this endpoint to let AI generate and execute SQL statements by providing instructions. For more information, see [Get started with Chat2Query API](/tidb-cloud/use-chat2query-api.md). +In addition, TiDB Cloud provides a Chat2Query API for {{{ .starter }}} clusters. After it is enabled, TiDB Cloud will automatically create a system Data App called Chat2Query and a Chat2Data endpoint in Data Service. You can call this endpoint to let AI generate and execute SQL statements by providing instructions. For more information, see [Get started with Chat2Query API](/tidb-cloud/use-chat2query-api.md). 
## Vector search (Beta) @@ -23,7 +23,7 @@ Unlike traditional full-text search, which relies on exact keyword matching and Even when the search terms do not exactly match the content in the database, vector search can still provide results that align with the user's intent by analyzing the semantics of the data. For example, a full-text search for "a swimming animal" only returns results containing these exact keywords. In contrast, vector search can return results for other swimming animals, such as fish or ducks, even if these results do not contain the exact keywords. -For more information, see [Vector Search (Beta) Overview](/vector-search/vector-search-overview.md). +For more information, see [Vector Search (Beta) Overview](/ai/concepts/vector-search-overview.md). ## AI integrations @@ -31,13 +31,13 @@ For more information, see [Vector Search (Beta) Overview](/vector-search/vector- TiDB provides official support for several popular AI frameworks, enabling you to easily integrate AI applications developed based on these frameworks with TiDB Vector Search. -For a list of supported AI frameworks, see [Vector Search Integration Overview](/vector-search/vector-search-integration-overview.md#ai-frameworks). +For a list of supported AI frameworks, see [Vector Search Integration Overview](/ai/integrations/vector-search-integration-overview.md#ai-frameworks). ### Embedding models and services A vector embedding, also known as an embedding, is a sequence of numbers that represents real-world objects in a high-dimensional space. It captures the meaning and context of unstructured data, such as documents, images, audio, and videos. -Embedding models are algorithms that transform data into [vector embeddings](/vector-search/vector-search-overview.md#vector-embedding). The choice of an appropriate embedding model is crucial for ensuring the accuracy and relevance of semantic search results. 
+Embedding models are algorithms that transform data into [vector embeddings](/ai/concepts/vector-search-overview.md#vector-embedding). The choice of an appropriate embedding model is crucial for ensuring the accuracy and relevance of semantic search results. TiDB Vector Search supports storing vectors of up to 16383 dimensions, which accommodates most embedding models. For unstructured text data, you can find top-performing text embedding models on the [Massive Text Embedding Benchmark (MTEB) Leaderboard](https://huggingface.co/spaces/mteb/leaderboard). @@ -47,4 +47,4 @@ Object Relational Mapping (ORM) libraries are tools that facilitate the interact TiDB lets you integrate vector search with ORM libraries to manage vector data alongside traditional relational data. This integration is particularly useful for applications that need to store and query vector embeddings generated by AI models. By using ORM libraries, developers can seamlessly interact with vector data stored in TiDB, leveraging the database's capabilities to perform complex vector operations like nearest neighbor search. -For a list of supported ORM libraries, see [Vector Search Integration Overview](/vector-search/vector-search-integration-overview.md#object-relational-mapping-orm-libraries). \ No newline at end of file +For a list of supported ORM libraries, see [Vector Search Integration Overview](/ai/integrations/vector-search-integration-overview.md#object-relational-mapping-orm-libraries). \ No newline at end of file diff --git a/tidb-cloud/api-overview.md b/tidb-cloud/api-overview.md deleted file mode 100644 index 48d316a12b4da..0000000000000 --- a/tidb-cloud/api-overview.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: TiDB Cloud API Overview -summary: Learn about what is TiDB Cloud API, its features, and how to use API to manage your TiDB Cloud clusters. ---- - -# TiDB Cloud API Overview (Beta) - -> **Note:** -> -> TiDB Cloud API is in beta. 
- -The TiDB Cloud API is a [REST interface](https://en.wikipedia.org/wiki/Representational_state_transfer) that provides you with programmatic access to manage administrative objects within TiDB Cloud. Through this API, you can automatically and efficiently manage resources such as Projects, Clusters, Backups, Restores, Imports, Billings, and resources in the [Data Service](/tidb-cloud/data-service-overview.md). - -The API has the following features: - -- **JSON entities.** All entities are expressed in JSON. -- **HTTPS-only.** You can only access the API via HTTPS, ensuring all the data sent over the network is encrypted with TLS. -- **Key-based access and digest authentication.** Before you access TiDB Cloud API, you must generate an API key, refer to [API Key Management](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Authentication/API-key-management). All requests are authenticated through [HTTP Digest Authentication](https://en.wikipedia.org/wiki/Digest_access_authentication), ensuring the API key is never sent over the network. 
- -To start using TiDB Cloud API, refer to the following resources in TiDB Cloud API Documentation: - -- [Get Started](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Get-Started) -- [Authentication](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Authentication) -- [Rate Limiting](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Rate-Limiting) -- API Full References - - v1beta1 - - [Billing](https://docs.pingcap.com/tidbcloud/api/v1beta1/billing) - - [Data Service](https://docs.pingcap.com/tidbcloud/api/v1beta1/dataservice) - - [IAM](https://docs.pingcap.com/tidbcloud/api/v1beta1/iam) - - [MSP (Deprecated)](https://docs.pingcap.com/tidbcloud/api/v1beta1/msp) - - [v1beta](https://docs.pingcap.com/tidbcloud/api/v1beta#tag/Project) -- [Changelog](https://docs.pingcap.com/tidbcloud/api/v1beta#section/API-Changelog) diff --git a/tidb-cloud/architecture-concepts.md b/tidb-cloud/architecture-concepts.md index b7112b8df3571..3cd1090afd695 100644 --- a/tidb-cloud/architecture-concepts.md +++ b/tidb-cloud/architecture-concepts.md @@ -5,23 +5,62 @@ summary: Learn about architecture concepts for TiDB Cloud. # Architecture -TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings the flexibility and power of [TiDB](https://docs.pingcap.com/tidb/stable/overview), an open-source HTAP (Hybrid Transactional and Analytical Processing) database, to AWS, Azure, and Google Cloud. + + +TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings the flexibility and power of [TiDB](https://docs.pingcap.com/tidb/stable/overview), an open-source HTAP (Hybrid Transactional and Analytical Processing) database, to Amazon Web Services (AWS), Google Cloud, Microsoft Azure, and Alibaba Cloud. 
+ + + + + +TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings the flexibility and power of [TiDB](https://docs.pingcap.com/tidb/stable/overview), an open-source HTAP (Hybrid Transactional and Analytical Processing) database, to Amazon Web Services (AWS), Google Cloud, and Microsoft Azure. + + TiDB is MySQL-compatible, making it easy to migrate and work with existing applications, while offering seamless scalability to handle everything from small workloads to massive, high-performance clusters. It supports both transactional (OLTP) and analytical (OLAP) workloads in one system, simplifying operations and enabling real-time insights. -TiDB Cloud provides two deployment options: **TiDB Cloud** **Serverless**, for auto-scaling, cost-efficient workloads, and **TiDB Cloud Dedicated**, for enterprise-grade applications with dedicated resources and advanced capabilities. TiDB Cloud makes it easy to scale your database, handle complex management tasks, and stay focused on developing reliable, high-performing applications. +TiDB Cloud makes it easy to scale your database, handle complex management tasks, and stay focused on developing reliable, high-performing applications. + + + +- For AWS, TiDB Cloud provides **{{{ .starter }}}** for auto-scaling, cost-efficient workloads, **{{{ .essential }}}** for production-ready workloads with provisioned capacity, and **{{{ .dedicated }}}** for enterprise-grade applications with dedicated resources and advanced capabilities. +- For Google Cloud and Azure, TiDB Cloud provides **{{{ .dedicated }}}** for enterprise-grade applications with dedicated resources and advanced capabilities. +- For Alibaba Cloud, TiDB Cloud provides **{{{ .starter }}}** for auto-scaling, cost-efficient workloads and **{{{ .essential }}}** for production-ready workloads with provisioned capacity. 
+ + + + + +- For AWS, TiDB Cloud provides **{{{ .starter }}}** for auto-scaling, cost-efficient workloads, **{{{ .essential }}}** for production-ready workloads with provisioned capacity, and **{{{ .dedicated }}}** for enterprise-grade applications with dedicated resources and advanced capabilities. +- For Google Cloud and Azure, TiDB Cloud provides **{{{ .dedicated }}}** for enterprise-grade applications with dedicated resources and advanced capabilities. + + + +## {{{ .starter }}} + +{{{ .starter }}} is a fully managed, multi-tenant TiDB offering. It delivers an instant, autoscaling MySQL-compatible database. + +The Starter cluster plan is ideal for those who are getting started with TiDB Cloud. It provides developers and small teams with the following features: + +- **No cost**: This plan is completely free, with no credit card required to get started. +- **Storage**: Provides an initial 5 GiB of row-based storage and 5 GiB of columnar storage. +- **Request Units**: Includes 50 million [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru) for database operations. -## TiDB Cloud Serverless +## {{{ .essential }}} -TiDB Cloud Serverless is a fully managed serverless solution that provides HTAP capabilities similar to traditional TiDB, while offering auto-scaling to alleviate users' burdens related to capacity planning and management complexities. It includes a free tier for basic usage, with consumption-based billing for any usage that exceeds the free limits. TiDB Cloud Serverless offers two types of high availability to address varying operational requirements. 
+For applications experiencing growing workloads and needing scalability in real time, the Essential cluster plan provides the flexibility and performance to keep pace with your business growth with the following features: -By default, clusters utilizing the Zonal High Availability option have all components located within the same availability zone, which results in lower network latency. +- **Enhanced capabilities**: includes all capabilities of the Starter plan, along with the capacity to handle larger and more complex workloads, as well as advanced security features. +- **Automatic scaling**: automatically adjusts storage and computing resources to efficiently meet changing workload demands. +- **High availability**: built-in fault tolerance and redundancy ensure your applications remain available and resilient, even during infrastructure failures. +- **Predictable pricing**: billed based on storage and Request Capacity Units (RCUs) of the compute resources, offering transparent, usage-based pricing that scales with your needs, so you only pay for what you use without surprises. -![TiDB Cloud Serverless zonal high availability](/media/tidb-cloud/serverless-zonal-high-avaliability-aws.png) +{{{ .essential }}} offers two types of high availability to address varying operational requirements. -For applications that require maximum infrastructure isolation and redundancy, the Regional High Availability option distributes nodes across multiple availability zones. +- By default, clusters utilizing the Zonal High Availability option have all components located within the same availability zone, which results in lower network latency. +- For applications that require maximum infrastructure isolation and redundancy, the Regional High Availability option distributes nodes across multiple availability zones. 
-![TiDB Cloud Serverless regional high availability](/media/tidb-cloud/serverless-regional-high-avaliability-aws.png) +For more information, see [High Availability in TiDB Cloud](/tidb-cloud/serverless-high-availability.md). ## TiDB Cloud Dedicated @@ -33,11 +72,11 @@ Built on isolated cloud resources such as VPCs, VMs, managed Kubernetes services ## TiDB Cloud console -The [TiDB Cloud console](https://tidbcloud.com/) is the web-based management interface for both TiDB Cloud Serverless and TiDB Cloud Dedicated. It provides tools to manage clusters, import or migrate data, monitor performance metrics, configure backups, set up security controls, and integrate with other cloud services, all from a single, user-friendly platform. +The [TiDB Cloud console](https://tidbcloud.com/) is the web-based management interface for TiDB Cloud clusters. It provides tools to manage clusters, import or migrate data, monitor performance metrics, configure backups, set up security controls, and integrate with other cloud services, all from a single, user-friendly platform. ## TiDB Cloud CLI (Beta) -The TiDB Cloud CLI, `ticloud`, allows you to manage TiDB Cloud Serverless and TiDB Cloud Dedicated directly from your terminal with simple commands. You can perform tasks such as: +The TiDB Cloud CLI, `ticloud`, allows you to manage TiDB Cloud clusters directly from your terminal with simple commands. You can perform tasks such as: - Creating, deleting, and listing clusters. - Importing data into clusters. @@ -47,16 +86,16 @@ For more information, see [TiDB Cloud CLI Reference](/tidb-cloud/cli-reference.m ## TiDB Cloud API (Beta) -The TiDB Cloud API is a REST-based interface that provides programmatic access to manage resources across TiDB Cloud Serverless and TiDB Cloud Dedicated. 
It enables automated and efficient handling of tasks such as managing projects, clusters, backups, restores, data imports, billing, and other resources in [TiDB Cloud Data Service](/tidb-cloud/data-service-overview.md). +The TiDB Cloud API is a REST-based interface that provides programmatic access to manage resources across {{{ .starter }}} and TiDB Cloud Dedicated. It enables automated and efficient handling of tasks such as managing projects, clusters, backups, restores, data imports, billing, and other resources in [TiDB Cloud Data Service](/tidb-cloud/data-service-overview.md). -For more information, see [TiDB Cloud API Overview](/tidb-cloud/api-overview.md). +For more information, see [TiDB Cloud API Overview](https://docs.pingcap.com/api/tidb-cloud-api-overview). ## Nodes In TiDB Cloud, each cluster consists of TiDB, TiKV, and TiFlash nodes. - In a TiDB Cloud Dedicated cluster, you can fully manage the number and size of your dedicated TiDB, TiKV, and TiFlash nodes according to your performance requirements. For more information, see [Scalability](/tidb-cloud/scalability-concepts.md). -- In a TiDB Cloud Serverless cluster, the number and size of TiDB, TiKV, and TiFlash nodes are automatically managed. This ensures seamless scaling, eliminating the need for users to handle node configuration or management tasks. +- In a {{{ .starter }}} or {{{ .essential }}} cluster, the number and size of TiDB, TiKV, and TiFlash nodes are automatically managed. This ensures seamless scaling, eliminating the need for users to handle node configuration or management tasks. 
### TiDB node diff --git a/tidb-cloud/backup-and-restore-concepts.md b/tidb-cloud/backup-and-restore-concepts.md index 18feb0e0c511c..86ad3e67beeed 100644 --- a/tidb-cloud/backup-and-restore-concepts.md +++ b/tidb-cloud/backup-and-restore-concepts.md @@ -9,11 +9,11 @@ TiDB Cloud Backup & Restore features are designed to safeguard your data and ens ## Automatic backup -For both TiDB Cloud Serverless and TiDB Cloud Dedicated clusters, snapshot backups are taken automatically by default and stored according to your backup retention policy. +For TiDB Cloud clusters, snapshot backups are taken automatically by default and stored according to your backup retention policy. For more information, see the following: -- [Automatic backups for TiDB Cloud Serverless clusters](/tidb-cloud/backup-and-restore-serverless.md#automatic-backups) +- [Automatic backups for {{{ .starter }}} and {{{ .essential }}} clusters](/tidb-cloud/backup-and-restore-serverless.md#automatic-backups) - [Automatic backups for TiDB Cloud Dedicated clusters](/tidb-cloud/backup-and-restore.md#turn-on-auto-backup) ## Manual backup @@ -38,5 +38,6 @@ Point-in-time Restore is a feature that enables you to restore data of any point If you want to perform Point-in-time Restore, note the following: -- For TiDB Cloud Serverless clusters, Point-in-time Restore is available only for scalable clusters and not available for free clusters. For more information, see [Restore mode](/tidb-cloud/backup-and-restore-serverless.md#restore-mode). +- For {{{ .starter }}} clusters, Point-in-time Restore is not available. +- For {{{ .essential }}} clusters, you can restore to any time within the last 30 days. For more information, see [Restore mode](/tidb-cloud/backup-and-restore-serverless.md#restore-mode). - For TiDB Cloud Dedicated clusters, you need to [enable PITR](/tidb-cloud/backup-and-restore.md#turn-on-point-in-time-restore) in advance. 
diff --git a/tidb-cloud/backup-and-restore-serverless.md b/tidb-cloud/backup-and-restore-serverless.md index a2174fd50183f..789c850247d9b 100644 --- a/tidb-cloud/backup-and-restore-serverless.md +++ b/tidb-cloud/backup-and-restore-serverless.md @@ -1,16 +1,16 @@ --- -title: Back Up and Restore TiDB Cloud Serverless Data -summary: Learn how to back up and restore your TiDB Cloud Serverless cluster. +title: Back Up and Restore {{{ .starter }}} or Essential Data +summary: Learn how to back up and restore your {{{ .starter }}} or {{{ .essential }}} clusters. aliases: ['/tidbcloud/restore-deleted-tidb-cluster'] --- -# Back Up and Restore TiDB Cloud Serverless Data +# Back Up and Restore {{{ .starter }}} or Essential Data -This document describes how to back up and restore your TiDB Cloud Serverless cluster data on TiDB Cloud. +This document describes how to back up and restore your data on {{{ .starter }}} or {{{ .essential }}} clusters. > **Tip:** > -> To learn how to back up and restore TiDB Cloud Dedicated cluster data, see [Back Up and Restore TiDB Cloud Dedicated Data](/tidb-cloud/backup-and-restore.md). +> To learn how to back up and restore data on TiDB Cloud Dedicated clusters, see [Back Up and Restore TiDB Cloud Dedicated Data](/tidb-cloud/backup-and-restore.md). ## View the Backup page @@ -24,30 +24,33 @@ This document describes how to back up and restore your TiDB Cloud Serverless cl ## Automatic backups -TiDB Cloud Serverless automatically backs up your cluster data, allowing you to restore data from a backup snapshot to minimize data loss in the event of a disaster. +TiDB Cloud automatically backs up your cluster data, allowing you to restore data from a backup snapshot to minimize data loss in the event of a disaster. 
### Learn about the backup setting -Automatic backup settings vary between free clusters and scalable clusters, as shown in the following table: +Automatic backup settings vary between {{{ .starter }}} clusters and {{{ .essential }}} clusters, as shown in the following table: -| Backup setting | Free clusters | Scalable clusters | -|------------------|--------------|------------------| -| Backup Cycle | Daily | Daily | -| Backup Retention | 1 day | 14 days | -| Backup Time | Fixed time | Configurable | +| Backup setting | {{{ .starter }}} (free) | {{{ .starter }}} (with spending limit > 0) | {{{ .essential }}} | +|------------------|----------------------------|----------------------------|----------------------------| +| Backup Cycle | Daily | Daily | Daily | +| Backup Retention | 1 day | Up to 30 days | Up to 30 days | +| Backup Time | Fixed time | Configurable | Configurable | - **Backup Cycle** is the frequency at which backups are taken. - **Backup Retention** is the duration for which backups are retained. Expired backups cannot be restored. - + + - For a free {{{ .starter }}} cluster, the backup retention is 1 day. + - For a {{{ .starter }}} (with spending limit > 0) or {{{ .essential }}} cluster, you can configure the backup retention to any value between 1 and 30 days. The default retention is 14 days. + - **Backup Time** is the time when the backup starts to be scheduled. Note that the final backup time might fall behind the configured backup time. - - - Free clusters: the backup time is a randomly fixed time. - - Scalable clusters: you can configure the backup time to every half an hour. The default value is a randomly fixed time. + + - For a free {{{ .starter }}} cluster, the backup time is a randomly fixed time. + - For a {{{ .starter }}} (with spending limit > 0) or {{{ .essential }}} cluster, you can configure the backup time to every half an hour. The default value is a randomly fixed time. 
### Configure the backup setting -To set the backup time for a scalable cluster, perform the following steps: +To set the backup time for a {{{ .essential }}} cluster, perform the following steps: 1. Navigate to the [**Backup**](#view-the-backup-page) page of your cluster. @@ -59,53 +62,40 @@ To set the backup time for a scalable cluster, perform the following steps: ## Restore -TiDB Cloud Serverless clusters offer restore functionality to help recover data in case of accidental loss or corruption. +TiDB Cloud clusters offer restore functionality to help recover data in case of accidental loss or corruption. ### Restore mode -TiDB Cloud Serverless supports snapshot restore and point-in-time restore for your cluster. +TiDB Cloud supports snapshot restore and point-in-time restore for your cluster. - **Snapshot Restore**: restores your cluster from a specific backup snapshot. - **Point-in-Time Restore (beta)**: restores your cluster to a specific time. - - Free clusters: not supported. - - Scalable clusters: restores to any time within the last 14 days, but not before the cluster creation time or after the current time minus one minute. + - {{{ .starter }}} clusters: not supported. + - {{{ .essential }}} clusters: restores to any time within the backup retention, but not earlier than the cluster creation time or later than one minute before the current time. ### Restore destination -TiDB Cloud Serverless supports restoring in-place and restoring to a new cluster. - -**In-place restore** - -Restore to the current cluster will overwrite existing data. Note the following: - -- Existing connections will be terminated once the restore is started. -- The cluster will be unavailable, and new connections will be blocked during the restore process. -- Restore will affect tables in the `mysql` schema. Any changes to user credentials, permissions, or system variables will be reverted to their state at the backup time. 
- -**Restore to a new cluster** - -Create and restore to the new cluster. Note the following: - -- User credentials and permissions from the source cluster will not be restored to the new cluster. +TiDB Cloud supports restoring data to a new cluster. ### Restore timeout -The restore process typically completes within a few minutes. If the restore takes longer than three hours, it is automatically canceled. The outcome of a canceled restore depends on the destination: - -- **In-place restore**: the cluster status changes from **Restoring** to **Available**, and the cluster becomes accessible. -- **Restore to a new cluster**: the new cluster is deleted and the source cluster remains unchanged. +The restore process typically completes within a few minutes. If the restore takes longer than three hours, it is automatically canceled and the new cluster is deleted, while the source cluster remains unchanged. If the data is corrupted after a canceled restore and cannot be recovered, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for assistance. ### Perform the restore -To restore your TiDB Cloud Serverless cluster, follow these steps: +> **Note:** +> +> User credentials and permissions from the source cluster will not be restored to the new cluster. + +To restore your data to a new cluster, take the following steps: 1. Navigate to the [**Backup**](#view-the-backup-page) page of your cluster. -2. Click **Restore**. The setting window displays. +2. Click **Restore**. 3. In **Restore Mode**, you can choose to restore from a specific backup or any point in time. @@ -120,7 +110,7 @@ To restore your TiDB Cloud Serverless cluster, follow these steps:
- To restore to a specific point in time for a scalable cluster, take the following steps: + To restore to a specific point in time for a {{{ .essential }}} cluster, take the following steps: 1. Click **Point-in-Time Restore**. 2. Select the date and time you want to restore to. @@ -128,32 +118,18 @@ To restore your TiDB Cloud Serverless cluster, follow these steps:
-4. In **Destination**, you can choose to restore to a new cluster or restore in-place. - - -
- - To restore to a new cluster, take the following steps: - - 1. Click **Restore to a New Cluster**. - 2. Enter a name for the new cluster. - 3. Choose the cluster plan for the new cluster. - 4. If you choose a scalable cluster, set a monthly spending limit, and then configure advanced settings as needed. Otherwise, skip this step. +4. Enter a name for the new cluster. +5. Choose a plan for the new cluster and update the capacity as needed. -
-
- - To restore in-place, click **In-place Restore**. - -
-
+ - If you choose a {{{ .starter }}} cluster and need more resources than the [free quota](/tidb-cloud/select-cluster-tier.md#usage-quota), set a monthly spending limit. + - If you choose a {{{ .essential }}} cluster, set the minimum RCU and maximum RCU, and then configure advanced settings as needed. -5. Click **Restore** to begin the restore process. +6. Click **Restore** to begin the restore process. Once the restore process begins, the cluster status changes to **Restoring**. The cluster will remain unavailable until the restore is complete and the status changes to **Available**. ## Limitations - If a TiFlash replica is enabled, it will be unavailable for a period after the restore, because the data needs to be rebuilt in TiFlash. -- Manual backups are not supported for TiDB Cloud Serverless clusters. +- Manual backups are not supported for {{{ .starter }}} and {{{ .essential }}} clusters. - Clusters with more than 1 TiB of data do not support restoring to new clusters by default. Contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for assistance with larger datasets. diff --git a/tidb-cloud/backup-and-restore.md b/tidb-cloud/backup-and-restore.md index 92cfe6487a8a7..7d2620f538b25 100644 --- a/tidb-cloud/backup-and-restore.md +++ b/tidb-cloud/backup-and-restore.md @@ -10,7 +10,7 @@ This document describes how to back up and restore your TiDB Cloud Dedicated clu > **Tip** > -> To learn how to back up and restore TiDB Cloud Serverless cluster data, see [Back Up and Restore TiDB Cloud Serverless Data](/tidb-cloud/backup-and-restore-serverless.md). +> To learn how to back up and restore data on {{{ .starter }}} or {{{ .essential }}} clusters, see [Back Up and Restore {{{ .starter }}} or Essential Data](/tidb-cloud/backup-and-restore-serverless.md). ## Limitations @@ -176,6 +176,73 @@ To apply a manual backup to your TiDB Cloud Dedicated cluster, perform the follo 4. Click **Confirm**. Then your cluster data is backed up.
+### Export backups + +To export a specific backup to cloud storage, such as Amazon S3 or Google Cloud Storage, follow the steps for your target storage provider. + +> **Note:** +> +> Currently, this feature is only available upon request. To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for the export backups feature" in the **Description** field, and then click **Submit**. + + + +
+ +To export a backup to Amazon S3, perform the following steps: + +1. Navigate to the [**Backup**](#view-the-backup-page) page of your cluster. + +2. Locate the backup you want to export, and then click **...** > **Export** in the **Action** column. + +3. In the **Export Backup to Amazon S3** dialog, enter the **Folder URI** field, and then select a bucket region for the backup bucket. + +4. Click **Generate Command** to view the command for configuring permissions. + + - **With AWS CLI**: + + Execute the generated command on AWS to grant TiDB Cloud access to your Amazon S3 bucket. + + - **With AWS Console**: + + 1. Navigate to the [Amazon S3 console](https://console.aws.amazon.com/s3/). + 2. Open the target bucket details page, and then click the **Permissions** tab. + 3. Scroll to **Bucket policy**, and then click **Edit**. + 4. Copy the policy content from the generated command, and then paste it into the policy editor. + 5. Click **Save changes**. + +5. Click **Export** to start the export process. + +
+ +
+ +To export a backup to Google Cloud Storage, perform the following steps: + +1. Navigate to the [**Backup**](#view-the-backup-page) page of your cluster. + +2. Locate the backup you want to export, and then click **...** > **Export** in the **Action** column. + +3. In the **Export Backup to Google Cloud Storage** dialog, note down the **Google Cloud Service Account ID**, which is required for a later step. + +4. In the [Google Cloud console](https://console.cloud.google.com/), create a custom IAM role with the following permissions. If you use an existing role, verify that it has these permissions. + + - `storage.buckets.get` + - `storage.objects.list` + - `storage.objects.create` + - `storage.objects.delete` + +5. Go to **Cloud Storage** > **Buckets**, select the target bucket, and then click **Permissions** > **Grant Access**. + +6. In **New principals**, enter the **Service Account ID** from step 3, assign the role from step 4, and then click **Save**. + +7. Open the **Configuration** tab, copy the **gsutil URI**, and paste it into the **Export Path** field in the **Export Backup to Google Cloud Storage** dialog. To export to a subdirectory, append a path suffix to the URI. + +8. Click **Export** to start the export process. + +
+ +
+ ### Delete backups #### Delete backup files diff --git a/tidb-cloud/branch-github-integration.md b/tidb-cloud/branch-github-integration.md index c1a36d1a7760b..785592d9cdc1d 100644 --- a/tidb-cloud/branch-github-integration.md +++ b/tidb-cloud/branch-github-integration.md @@ -1,21 +1,21 @@ --- -title: Integrate TiDB Cloud Serverless Branching (Beta) with GitHub -summary: Learn how to integrate the TiDB Cloud Serverless branching feature with GitHub. +title: Integrate TiDB Cloud Branching (Beta) with GitHub +summary: Learn how to integrate the TiDB Cloud Branching feature with GitHub. --- -# Integrate TiDB Cloud Serverless Branching (Beta) with GitHub +# Integrate TiDB Cloud Branching (Beta) with GitHub > **Note:** > -> The integration is built upon [TiDB Cloud Serverless branching](/tidb-cloud/branch-overview.md). Make sure that you are familiar with TiDB Cloud Serverless branching before reading this document. +> The integration is built upon [TiDB Cloud Branching](/tidb-cloud/branch-overview.md). Make sure that you are familiar with TiDB Cloud Branching before reading this document. -If you use GitHub for application development, you can integrate TiDB Cloud Serverless branching into your GitHub CI/CD pipeline, which lets you automatically test your pull requests with branches without affecting the production database. +If you use GitHub for application development, you can integrate TiDB Cloud Branching into your GitHub CI/CD pipeline, which lets you automatically test your pull requests with branches without affecting the production database. -In the integration process, you will be prompted to install the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) GitHub App. The app can automatically manage TiDB Cloud Serverless branches according to pull requests in your GitHub repository. 
For example, when you create a pull request, the app will create a corresponding branch for your TiDB Cloud Serverless cluster, in which you can work on new features or bug fixes in isolation without affecting the production database. +In the integration process, you will be prompted to install the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) GitHub App. The app can automatically manage branches of your {{{ .starter }}} or {{{ .essential }}} cluster according to pull requests in your GitHub repository. For example, when you create a pull request, the app will create a corresponding branch for your cluster, in which you can work on new features or bug fixes in isolation without affecting the production database. This document covers the following topics: -1. How to integrate TiDB Cloud Serverless branching with GitHub +1. How to integrate TiDB Cloud Branching with GitHub 2. How does the TiDB Cloud Branching app work 3. How to build a branching-based CI workflow to test every pull request using branches rather than the production cluster @@ -25,13 +25,13 @@ Before the integration, make sure that you have the following: - A GitHub account - A GitHub repository for your application -- A [TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md) +- A [{{{ .starter }}} or {{{ .essential }}} cluster](/tidb-cloud/create-tidb-cluster-serverless.md) -## Integrate TiDB Cloud Serverless branching with your GitHub repository +## Integrate TiDB Cloud Branching with your GitHub repository {#integrate-branching-with-your-github-repository} -To integrate TiDB Cloud Serverless branching with your GitHub repository, take the following steps: +To integrate TiDB Cloud Branching with your GitHub repository, take the following steps: -1. 
In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target TiDB Cloud Serverless cluster to go to its overview page. +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target {{{ .starter }}} or {{{ .essential }}} cluster to go to its overview page. 2. Click **Branches** in the left navigation pane. @@ -48,18 +48,18 @@ To integrate TiDB Cloud Serverless branching with your GitHub repository, take t 5. Select your target repository in the **GitHub Repository** drop-down list. If the list is long, you can search the repository by typing the name. -6. Click **Connect** to connect between your TiDB Cloud Serverless cluster and your GitHub repository. +6. Click **Connect** to connect between your cluster and your GitHub repository. ## TiDB Cloud Branching app behaviors -After you connect your TiDB Cloud Serverless cluster to your GitHub repository, for each pull request in this repository, the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) GitHub App can automatically manage its corresponding TiDB Cloud Serverless branch. The following lists the default behaviors for pull request changes: +After you connect your {{{ .starter }}} or {{{ .essential }}} cluster to your GitHub repository, for each pull request in this repository, the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) GitHub App can automatically manage its corresponding branch for your cluster. 
The following lists the default behaviors for pull request changes: | Pull request changes | TiDB Cloud Branching app behaviors | |------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Create a pull request | When you create a pull request in the repository, the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) app creates a branch for your TiDB Cloud Serverless cluster. When `branch.mode` is set to `reset`, the branch name follows the `${github_branch_name}_${pr_id}` format. When `branch.mode` is set to `reserve`, the branch name follows the `${github_branch_name}_${pr_id}_${commit_sha}` format. Note that the number of branches has a [limit](/tidb-cloud/branch-overview.md#limitations-and-quotas). | -| Push new commits to a pull request | When `branch.mode` is set to `reset`, every time you push a new commit to a pull request in the repository, the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) app resets the TiDB Cloud Serverless branch. When `branch.mode` is set to `reserve`, the app creates a new branch for the latest commit. | +| Create a pull request | When you create a pull request in the repository, the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) app creates a branch for your cluster. When `branch.mode` is set to `reset`, the branch name follows the `${github_branch_name}_${pr_id}` format. When `branch.mode` is set to `reserve`, the branch name follows the `${github_branch_name}_${pr_id}_${commit_sha}` format. Note that the number of branches has a [limit](/tidb-cloud/branch-overview.md#limitations-and-quotas). 
| +| Push new commits to a pull request | When `branch.mode` is set to `reset`, every time you push a new commit to a pull request in the repository, the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) app resets the branch. When `branch.mode` is set to `reserve`, the app creates a new branch for the latest commit. | | Close or merge a pull request | When you close or merge a pull request, the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) app deletes the branch for this pull request. | | Reopen a pull request | When you reopen a pull request, the [TiDB Cloud Branching](https://github.com/apps/tidb-cloud-branching) app creates a branch for the lasted commit of the pull request. | @@ -113,7 +113,7 @@ github: **Type:** boolean. **Default:** `true`. -If it is set to `false`, the TiDB Cloud Branching app will not delete the TiDB Cloud Serverless branch when a pull request is closed or merged. +If it is set to `false`, the TiDB Cloud Branching app will not delete the branch for your {{{ .starter }}} or {{{ .essential }}} cluster when a pull request is closed or merged. ```yaml github: @@ -123,21 +123,21 @@ github: ## Create a branching CI workflow -One of the best practices for using branches is to create a branching CI workflow. With the workflow, you can test your code using a TiDB Cloud Serverless branch instead of using the production cluster before merging the pull request. You can find a live demo [here](https://github.com/shiyuhang0/tidbcloud-branch-gorm-example). +One of the best practices for using branches is to create a branching CI workflow. With the workflow, you can test your code using a branch of your cluster instead of using the production cluster before merging the pull request. You can find a live demo [here](https://github.com/shiyuhang0/tidbcloud-branch-gorm-example). Here are the main steps to create the workflow: -1. 
[Integrate TiDB Cloud Serverless branching with your GitHub repository](#integrate-tidb-cloud-serverless-branching-with-your-github-repository). +1. [Integrate TiDB Cloud Branching with your GitHub repository](#integrate-branching-with-your-github-repository). 2. Get the branch connection information. - You can use the [wait-for-tidbcloud-branch](https://github.com/tidbcloud/wait-for-tidbcloud-branch) action to wait for the readiness of the TiDB Cloud Serverless branch and get the connection information of the branch. + You can use the [wait-for-tidbcloud-branch](https://github.com/tidbcloud/wait-for-tidbcloud-branch) action to wait for the readiness of the branch and get the connection information of the branch. - Example usage: + Taking the branch of a {{{ .starter }}} cluster as an example: ```yaml steps: - - name: Wait for TiDB Cloud Serverless branch to be ready + - name: Wait for {{{ .starter }}} branch to be ready uses: tidbcloud/wait-for-tidbcloud-branch@v0 id: wait-for-branch with: @@ -145,7 +145,7 @@ Here are the main steps to create the workflow: public-key: ${{ secrets.TIDB_CLOUD_API_PUBLIC_KEY }} private-key: ${{ secrets.TIDB_CLOUD_API_PRIVATE_KEY }} - - name: Test with TiDB Cloud Serverless branch + - name: Test with {{{ .starter }}} branch run: | echo "The host is ${{ steps.wait-for-branch.outputs.host }}" echo "The user is ${{ steps.wait-for-branch.outputs.user }}" diff --git a/tidb-cloud/branch-manage.md b/tidb-cloud/branch-manage.md index a17b4d0559a48..c0b8f59b06e29 100644 --- a/tidb-cloud/branch-manage.md +++ b/tidb-cloud/branch-manage.md @@ -1,11 +1,11 @@ --- -title: Manage TiDB Cloud Serverless Branches -summary: Learn How to manage TiDB Cloud Serverless branches. +title: Manage TiDB Cloud Branches +summary: Learn how to manage TiDB Cloud branches.
--- -# Manage TiDB Cloud Serverless Branches +# Manage TiDB Cloud Branches -This document describes how to manage TiDB Cloud Serverless branches using the [TiDB Cloud console](https://tidbcloud.com). To manage it using the TiDB Cloud CLI, see [`ticloud branch`](/tidb-cloud/ticloud-branch-create.md). +This document describes how to manage branches of your {{{ .starter }}} or {{{ .essential }}} cluster using the [TiDB Cloud console](https://tidbcloud.com). To manage it using the TiDB Cloud CLI, see [`ticloud branch`](/tidb-cloud/ticloud-branch-create.md). ## Required access @@ -18,11 +18,11 @@ For more information about permissions, see [User roles](/tidb-cloud/manage-user > **Note:** > -> You can only create branches for TiDB Cloud Serverless clusters that are created after July 5, 2023. See [Limitations and quotas](/tidb-cloud/branch-overview.md#limitations-and-quotas) for more limitations. +> You can only create branches for {{{ .starter }}} or {{{ .essential }}} clusters that are created after July 5, 2023. See [Limitations and quotas](/tidb-cloud/branch-overview.md#limitations-and-quotas) for more limitations. To create a branch, perform the following steps: -1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target TiDB Cloud Serverless cluster to go to its overview page. +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target {{{ .starter }}} or {{{ .essential }}} cluster to go to its overview page. 2. Click **Branches** in the left navigation pane. 3. In the upper-right corner of the **Branches** page, click **Create Branch**. A dialog is displayed. 
@@ -44,7 +44,7 @@ Depending on the data size in your cluster, the branch creation will be complete To view branches for your cluster, perform the following steps: -1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target TiDB Cloud Serverless cluster to go to its overview page. +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target {{{ .starter }}} or {{{ .essential }}} cluster to go to its overview page. 2. Click **Branches** in the left navigation pane. The branch list of the cluster is displayed in the right pane. @@ -53,7 +53,7 @@ To view branches for your cluster, perform the following steps: To connect to a branch, perform the following steps: -1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target TiDB Cloud Serverless cluster to go to its overview page. +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target {{{ .starter }}} or {{{ .essential }}} cluster to go to its overview page. 2. Click **Branches** in the left navigation pane. 3. In the row of your target branch to be connected, click **...** in the **Action** column. 4. Click **Connect** in the drop-down list. The dialog for the connection information is displayed. @@ -62,7 +62,7 @@ To connect to a branch, perform the following steps: Alternatively, you can get the connection string from the cluster overview page: -1. 
In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target TiDB Cloud Serverless cluster to go to its overview page. +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target {{{ .starter }}} or {{{ .essential }}} cluster to go to its overview page. 2. Click **Connect** in the upper-right corner. 3. Select the branch you want to connect to in the `Branch` drop-down list. 4. Click **Generate Password** or **Reset Password** to create or reset the root password. @@ -72,7 +72,7 @@ Alternatively, you can get the connection string from the cluster overview page: To delete a branch, perform the following steps: -1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target TiDB Cloud Serverless cluster to go to its overview page. +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target {{{ .starter }}} or {{{ .essential }}} cluster to go to its overview page. 2. Click **Branches** in the left navigation pane. 3. In the row of your target branch to be deleted, click **...** in the **Action** column. 4. Click **Delete** in the drop-down list. @@ -88,7 +88,7 @@ Resetting a branch synchronizes it with the latest data from its parent. To reset a branch, perform the following steps: -1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target TiDB Cloud Serverless cluster to go to its overview page. +1. 
In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target {{{ .starter }}} or {{{ .essential }}} cluster to go to its overview page. 2. Click **Branches** in the left navigation pane. 3. In the row of your target branch to be reset, click **...** in the **Action** column. 4. Click **Reset** in the drop-down list. @@ -96,4 +96,4 @@ To reset a branch, perform the following steps: ## What's next -- [Integrate TiDB Cloud Serverless branching into your GitHub CI/CD pipeline](/tidb-cloud/branch-github-integration.md) +- [Integrate TiDB Cloud Branching into your GitHub CI/CD pipeline](/tidb-cloud/branch-github-integration.md) diff --git a/tidb-cloud/branch-overview.md b/tidb-cloud/branch-overview.md index 3a573e9d97006..6b0871743a49c 100644 --- a/tidb-cloud/branch-overview.md +++ b/tidb-cloud/branch-overview.md @@ -1,19 +1,23 @@ --- -title: TiDB Cloud Serverless Branching (Beta) Overview -summary: Learn the concept of TiDB Cloud Serverless branches. +title: TiDB Cloud Branching (Beta) Overview +summary: Learn the concept of TiDB Cloud branches. --- -# TiDB Cloud Serverless Branching (Beta) Overview +# TiDB Cloud Branching (Beta) Overview -TiDB Cloud lets you create branches for TiDB Cloud Serverless clusters. A branch for a cluster is a separate instance that contains a diverged copy of data from the original cluster. It provides an isolated environment, allowing you to experiment freely without worrying about affecting the original cluster. +TiDB Cloud lets you create branches for {{{ .starter }}} and {{{ .essential }}} clusters. A branch for a cluster is a separate instance that contains a diverged copy of data from the original cluster. It provides an isolated environment, allowing you to experiment freely without worrying about affecting the original cluster. 
-With TiDB Cloud Serverless branches, developers can work in parallel, iterate rapidly on new features, troubleshoot issues without affecting the production database, and easily revert changes if needed. This feature streamlines the development and deployment process while ensuring a high level of stability and reliability for the production database. +With branches, developers can work in parallel, iterate rapidly on new features, troubleshoot issues without affecting the production database, and easily revert changes if needed. This feature streamlines the development and deployment process while ensuring a high level of stability and reliability for the production database. + +> **Note:** +> +> Currently, TiDB Cloud Branching is in beta and is not available on TiDB Cloud Dedicated clusters. ## Implementations When a branch for a cluster is created, the data in the branch diverges from the original cluster or its parent branch at a specific point in time. This means that subsequent changes made in either the parent or the branch will not be synchronized with each other. -To ensure fast and seamless branch creation, TiDB Cloud Serverless uses a copy-on-write technique for sharing data between the original cluster and its branches. This process usually completes within a few minutes and is imperceptible to users, ensuring that it does not affect the performance of your original cluster. +To ensure fast and seamless branch creation, TiDB Cloud uses a copy-on-write technique for sharing data between the original cluster and its branches. This process usually completes within a few minutes and is imperceptible to users, ensuring that it does not affect the performance of your original cluster. ## Scenarios @@ -33,18 +37,20 @@ You can create branches easily and quickly to get isolated data environments. Br ## Limitations and quotas -Currently, TiDB Cloud Serverless branches are in beta and free of charge. +Currently, TiDB Cloud branches are in beta and free of charge. 
+ +- For each organization in TiDB Cloud, you can create a maximum of five branches by default across all the clusters. The branches of a cluster will be created in the same region as the cluster, and you cannot create branches for a throttled cluster or a cluster larger than 100 GiB. -- For each organization in TiDB Cloud, you can create a maximum of five TiDB Cloud Serverless branches by default across all the clusters. The branches of a cluster will be created in the same region as the cluster, and you cannot create branches for a throttled cluster or a cluster larger than 100 GiB. +- For each branch of a free cluster, 10 GiB storage is allowed. For each branch of a cluster with spending limit > 0, 100 GiB storage is allowed. Once the storage is reached, the read and write operations on this branch will be throttled until you reduce the storage. -- For each branch of a free cluster, 10 GiB storage is allowed. For each branch of a scalable cluster, 100 GiB storage is allowed. Once the storage is reached, the read and write operations on this branch will be throttled until you reduce the storage. +- Branches are intended for short-term feature development and functional testing. Because branches lack auto-scaling capability, they are not suitable for performance testing. - If your cluster has tables with TiFlash replicas, these replicas will be temporarily unavailable in the new branch after you create it, because TiFlash needs to rebuild the replica data. - When [creating a branch](/tidb-cloud/branch-manage.md#create-a-branch) from a specific point in time: - - For branches of a free cluster, you can select any time within the last 24 hours. - - For branches of a scalable cluster, you can select any time within the last 14 days. + - For a free {{{ .starter }}} cluster, you can select any time within the last 24 hours. + - For a {{{ .starter }}} (with spending limit > 0) or {{{ .essential }}} cluster, you can select any time within the last 14 days. 
If you need more quotas, [contact TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). diff --git a/tidb-cloud/built-in-monitoring.md b/tidb-cloud/built-in-monitoring.md index d7dedb2887189..a747bc16dca91 100644 --- a/tidb-cloud/built-in-monitoring.md +++ b/tidb-cloud/built-in-monitoring.md @@ -21,7 +21,7 @@ To view the metrics on the **Metrics** page, take the following steps: ## Metrics retention policy -For TiDB Cloud Dedicated clusters and TiDB Cloud Serverless clusters, the metrics data is kept for 7 days. +For TiDB Cloud clusters, the metrics data is kept for 7 days. ## Metrics for TiDB Cloud Dedicated clusters @@ -60,6 +60,9 @@ The following sections illustrate the metrics on the **Metrics** page for TiDB C | Average / P99 Append Log Duration | avg, 99 | The average or the 99th percentile duration consumed by Raft to append logs. | | Average / P99 Commit Log Duration | avg, 99 | The average or the 99th percentile duration consumed by Raft to commit logs. | | Average / P99 Apply Log Duration | avg, 99 | The average or the 99th percentile duration consumed by Raft to apply logs. | +| Affected Rows | {SQL type} | Rows processed per second by SQL type. | +| Leader Count | {instance} | Number of Raft leader Regions hosted by TiKV nodes. | +| Region Count | {instance} | Total data Regions managed by TiKV nodes. | ### Server @@ -78,10 +81,14 @@ The following sections illustrate the metrics on the **Metrics** page for TiDB C | TiFlash Memory Usage | node, limit | The memory usage statistics or upper limit of each TiFlash node. | | TiFlash IO MBps | node-write, node-read | The total bytes of read and write in each TiFlash node. | | TiFlash Storage Usage | node, limit | The storage usage statistics or upper limit of each TiFlash node. | +| TiProxy CPU Usage | node | The CPU usage statistics of each TiProxy node. The upper limit is 100%. | +| TiProxy Connections | node | The number of connections on each TiProxy node. 
| +| TiProxy Throughput | node | The bytes transferred per second on each TiProxy node. | +| TiProxy Sessions Migration Reasons | reason | The number of session migrations that happen every minute and the reasons for them. | -## Metrics for TiDB Cloud Serverless clusters +## Metrics for {{{ .starter }}} and {{{ .essential }}} clusters -The **Metrics** page provides two tabs for metrics of TiDB Cloud Serverless clusters: +The **Metrics** page provides two tabs for metrics of {{{ .starter }}} and {{{ .essential }}} clusters: - **Cluster Status**: displays the cluster-level main metrics. - **Database Status**: displays the database-level main metrics. @@ -92,14 +99,17 @@ The following table illustrates the cluster-level main metrics under the **Clust | Metric name | Labels | Description | | :------------| :------| :-------------------------------------------- | -| Request Units | RU per second | The Request Unit (RU) is a unit of measurement used to track the resource consumption of a query or transaction. In addition to queries that you run, Request Units can be consumed by background activities, so when the QPS is 0, the Request Units per second might not be zero. | -| Used Storage Size | Row-based storage, Columnar storage | The size of the row store and the size of the column store. | +| Request Units | RU per second | The Request Unit (RU) is a unit of measurement used to track the resource consumption of a query or transaction in a {{{ .starter }}} cluster. Besides user queries, background activities can also consume RUs, so when QPS is 0, RU usage per second might still be nonzero.| +| Capacity vs Usage (RU/s) | Provisioned capacity (RCU), Consumed RU/s | The Request Capacity Units (RCUs) and the consumed Request Units (RU) per second in a {{{ .essential }}} cluster. | +| Used Storage Size | Row-based storage, Columnar storage | The size of row-based storage and columnar storage. This metric is displayed only when each storage type is 50 MiB or larger. 
| | Query Per Second | All, {SQL type} | The number of SQL statements executed per second, which are collected by SQL types, such as `SELECT`, `INSERT`, and `UPDATE`. | -| Average Query Duration | All, {SQL type} | The duration from receiving a request from the client to the TiDB Cloud Serverless cluster until the cluster executes the request and returns the result to the client. | +| Query Duration | Avg, P99, P99-{SQL type} | The duration from receiving a request from the client to the {{{ .starter }}} or {{{ .essential }}} cluster until the cluster executes the request and returns the result to the client. | | Failed Query | All | The number of SQL statement execution errors per second. | | Transaction Per Second | All | The number of transactions executed per second. | -| Average Transaction Duration | All | The average execution duration of transactions. | -| Total Connection | All | The number of connections to the TiDB Cloud Serverless cluster. | +| Transaction Duration | Avg, P99 | The execution duration of transactions. | +| Lock-wait | P95, P99 | Time spent by transactions waiting to acquire pessimistic locks. High values indicate contention on the same rows or keys. | +| Total Connection | All | The number of connections to the {{{ .starter }}} or {{{ .essential }}} cluster. | +| Idle Connection Duration | P99, P99(in-txn), P99(not-in-txn) | The time that connections remain idle while inside an open transaction. Long durations typically indicate slow application logic or long-running transactions. | ### Database Status diff --git a/tidb-cloud/changefeed-overview.md b/tidb-cloud/changefeed-overview.md index 0d8851c6efc80..30c7040105e70 100644 --- a/tidb-cloud/changefeed-overview.md +++ b/tidb-cloud/changefeed-overview.md @@ -9,21 +9,21 @@ TiDB Cloud changefeed helps you stream data from TiDB Cloud to other data servic > **Note:** > -> - Currently, TiDB Cloud only allows up to 100 changefeeds per cluster. 
+> - Currently, TiDB Cloud only allows up to 100 changefeeds per clusterinstance. > - Currently, TiDB Cloud only allows up to 100 table filter rules per changefeed. -> - For [TiDB Cloud Serverless clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless), the changefeed feature is unavailable. +> - For [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) and [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) clusters, the changefeed feature is unavailable. ## View the Changefeed page To access the changefeed feature, take the following steps: -1. In the [TiDB Cloud console](https://tidbcloud.com), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. +1. In the [TiDB Cloud console](https://tidbcloud.com), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project.navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page. > **Tip:** > > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. -2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Changefeed** in the left navigation pane. The changefeed page is displayed. +2. Click the name of your target clusterinstance to go to its overview page, and then click **Data** > **Changefeed** in the left navigation pane. The changefeed page is displayed. On the **Changefeed** page, you can create a changefeed, view a list of existing changefeeds, and operate the existing changefeeds (such as scaling, pausing, resuming, editing, and deleting a changefeed). 
@@ -36,14 +36,31 @@ To create a changefeed, refer to the tutorials: - [Sink to TiDB Cloud](/tidb-cloud/changefeed-sink-to-tidb-cloud.md) - [Sink to cloud storage](/tidb-cloud/changefeed-sink-to-cloud-storage.md) -## Query Changefeed RCUs +## Query changefeed capacity + + + +For TiDB Cloud Dedicated, you can query the TiCDC Replication Capacity Units (RCUs) of a changefeed. 1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. 2. Locate the corresponding changefeed you want to query, and click **...** > **View** in the **Action** column. 3. You can see the current TiCDC Replication Capacity Units (RCUs) in the **Specification** area of the page. + + + +For {{{ .premium }}}, you can query the TiCDC Changefeed Capacity Units (CCUs) of a changefeed. + +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB instance. +2. Locate the corresponding changefeed you want to query, and click **...** > **View** in the **Action** column. +3. You can see the current TiCDC Changefeed Capacity Units (CCUs) in the **Specification** area of the page. + + + ## Scale a changefeed + + You can change the TiCDC Replication Capacity Units (RCUs) of a changefeed by scaling up or down the changfeed. > **Note:** @@ -51,7 +68,14 @@ You can change the TiCDC Replication Capacity Units (RCUs) of a changefeed by sc > - To scale a changefeed for a cluster, make sure that all changefeeds for this cluster are created after March 28, 2023. > - If a cluster has changefeeds created before March 28, 2023, neither the existing changefeeds nor newly created changefeeds for this cluster support scaling up or down. -1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. + + + +You can change the TiCDC Changefeed Capacity Units (CCUs) of a changefeed by scaling up or down the changfeed. + + + +1. 
Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB clusterinstance. 2. Locate the corresponding changefeed you want to scale, and click **...** > **Scale Up/Down** in the **Action** column. 3. Select a new specification. 4. Click **Submit**. @@ -60,7 +84,7 @@ It takes about 10 minutes to complete the scaling process (during which the chan ## Pause or resume a changefeed -1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB clusterinstance. 2. Locate the corresponding changefeed you want to pause or resume, and click **...** > **Pause/Resume** in the **Action** column. ## Edit a changefeed @@ -69,7 +93,7 @@ It takes about 10 minutes to complete the scaling process (during which the chan > > TiDB Cloud currently only allows editing changefeeds in the paused status. -1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB clusterinstance. 2. Locate the changefeed you want to pause, and click **...** > **Pause** in the **Action** column. 3. When the changefeed status changes to `Paused`, click **...** > **Edit** to edit the corresponding changefeed. @@ -82,9 +106,16 @@ It takes about 10 minutes to complete the scaling process (during which the chan 4. After editing the configuration, click **...** > **Resume** to resume the corresponding changefeed. +## Duplicate a changefeed + +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB clusterinstance. +2. Locate the changefeed that you want to duplicate. In the **Action** column, click **...** > **Duplicate**. +3. TiDB Cloud automatically populates the new changefeed configuration with the original settings. You can review and modify the configuration as needed. +4. 
After confirming the configuration, click **Submit** to create and start the new changefeed. + ## Delete a changefeed -1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB clusterinstance. 2. Locate the corresponding changefeed you want to delete, and click **...** > **Delete** in the **Action** column. ## Changefeed billing diff --git a/tidb-cloud/changefeed-sink-to-apache-kafka.md b/tidb-cloud/changefeed-sink-to-apache-kafka.md index 900b01671d892..b1d8b0b6a48a2 100644 --- a/tidb-cloud/changefeed-sink-to-apache-kafka.md +++ b/tidb-cloud/changefeed-sink-to-apache-kafka.md @@ -7,17 +7,23 @@ summary: This document explains how to create a changefeed to stream data from T This document describes how to create a changefeed to stream data from TiDB Cloud to Apache Kafka. + + > **Note:** > -> - To use the changefeed feature, make sure that your TiDB Cloud Dedicated cluster version is v6.1.3 or later. -> - For [TiDB Cloud Serverless clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless), the changefeed feature is unavailable. +> To use the changefeed feature, make sure that your TiDB Cloud Dedicated cluster version is v6.1.3 or later. + + ## Restrictions -- For each TiDB Cloud cluster, you can create up to 100 changefeeds. +- For each TiDB Cloud clusterinstance, you can create up to 100 changefeeds. - Currently, TiDB Cloud does not support uploading self-signed TLS certificates to connect to Kafka brokers. - Because TiDB Cloud uses TiCDC to establish changefeeds, it has the same [restrictions as TiCDC](https://docs.pingcap.com/tidb/stable/ticdc-overview#unsupported-scenarios). - If the table to be replicated does not have a primary key or a non-null unique index, the absence of a unique constraint during replication could result in duplicated data being inserted downstream in some retry scenarios. 
+ + + - If you choose Private Link or Private Service Connect as the network connectivity method, ensure that your TiDB cluster version meets the following requirements: - For v6.5.x: version v6.5.9 or later @@ -30,6 +36,8 @@ This document describes how to create a changefeed to stream data from TiDB Clou - If you want to distribute changelogs by primary key or index value to Kafka partition with a specified index name, make sure the version of your TiDB cluster is v7.5.0 or later. - If you want to distribute changelogs by column value to Kafka partition, make sure the version of your TiDB cluster is v7.5.0 or later. + + ## Prerequisites Before creating a changefeed to stream data to Apache Kafka, you need to complete the following prerequisites: @@ -39,12 +47,14 @@ Before creating a changefeed to stream data to Apache Kafka, you need to complet ### Network -Ensure that your TiDB cluster can connect to the Apache Kafka service. You can choose one of the following connection methods: +Ensure that your TiDB clusterinstance can connect to the Apache Kafka service. You can choose one of the following connection methods: - Private Connect: ideal for avoiding VPC CIDR conflicts and meeting security compliance, but incurs additional [Private Data Link Cost](/tidb-cloud/tidb-cloud-billing-ticdc-rcu.md#private-data-link-cost). - VPC Peering: suitable as a cost-effective option, but requires managing potential VPC CIDR conflicts and security considerations. - Public IP: suitable for a quick setup. + +
@@ -52,23 +62,9 @@ Private Connect leverages **Private Link** or **Private Service Connect** techno TiDB Cloud currently supports Private Connect only for self-hosted Kafka. It does not support direct integration with MSK, Confluent Kafka, or other Kafka SaaS services. To connect to these Kafka SaaS services via Private Connect, you can deploy a [kafka-proxy](https://github.com/grepplabs/kafka-proxy) as an intermediary, effectively exposing the Kafka service as self-hosted Kafka. For a detailed example, see [Set Up Self-Hosted Kafka Private Service Connect by Kafka-proxy in Google Cloud](/tidb-cloud/setup-self-hosted-kafka-private-service-connect.md#set-up-self-hosted-kafka-private-service-connect-by-kafka-proxy). This setup is similar across all Kafka SaaS services. -- If your Apache Kafka service is hosted in AWS, follow [Set Up Self-Hosted Kafka Private Link Service in AWS](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md) to ensure that the network connection is properly configured. After setup, provide the following information in the TiDB Cloud console to create the changefeed: - - - The ID in Kafka Advertised Listener Pattern - - The Endpoint Service Name - - The Bootstrap Ports - -- If your Apache Kafka service is hosted in Google Cloud, follow [Set Up Self-Hosted Kafka Private Service Connect in Google Cloud](/tidb-cloud/setup-self-hosted-kafka-private-service-connect.md) to ensure that the network connection is properly configured. After setup, provide the following information in the TiDB Cloud console to create the changefeed: - - - The ID in Kafka Advertised Listener Pattern - - The Service Attachment - - The Bootstrap Ports - -- If your Apache Kafka service is hosted in Azure, follow [Set Up Self-Hosted Kafka Private Link Service in Azure](/tidb-cloud/setup-azure-self-hosted-kafka-private-link-service.md) to ensure that the network connection is properly configured. 
After setup, provide the following information in the TiDB Cloud console to create the changefeed: - - - The ID in Kafka Advertised Listener Pattern - - The Alias of Private Link Service - - The Bootstrap Ports +- If your Apache Kafka service is hosted on AWS, follow [Set Up Self-Hosted Kafka Private Link Service in AWS](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md) to configure the network connection and obtain the **Bootstrap Ports** information, and then follow [Set Up Private Endpoint for Changefeeds](/tidb-cloud/set-up-sink-private-endpoint.md) to create a private endpoint. +- If your Apache Kafka service is hosted on Google Cloud, follow [Set Up Self-Hosted Kafka Private Service Connect in Google Cloud](/tidb-cloud/setup-self-hosted-kafka-private-service-connect.md) to configure the network connection and obtain the **Bootstrap Ports** information, and then follow [Set Up Private Endpoint for Changefeeds](/tidb-cloud/set-up-sink-private-endpoint.md) to create a private endpoint. +- If your Apache Kafka service is hosted on Azure, follow [Set Up Self-Hosted Kafka Private Link Service in Azure](/tidb-cloud/setup-azure-self-hosted-kafka-private-link-service.md) to configure the network connection and obtain the **Bootstrap Ports** information, and then follow [Set Up Private Endpoint for Changefeeds](/tidb-cloud/set-up-sink-private-endpoint.md) to create a private endpoint.
@@ -101,6 +97,37 @@ It is **NOT** recommended to use Public IP in a production environment.
+
+ + + + +
+ +Private Connect leverages **Private Link** or **Private Service Connect** technologies from cloud providers to enable resources in your VPC to connect to services in other VPCs using private IP addresses, as if those services were hosted directly within your VPC. + +To create a private endpoint for changefeeds in your {{{ .premium }}} instances, follow [Set Up Private Endpoint for Changefeeds](/tidb-cloud/set-up-sink-private-endpoint.md). + +TiDB Cloud currently supports Private Connect only for self-hosted Kafka. It does not support direct integration with MSK, Confluent Kafka, or other Kafka SaaS services. To connect to these Kafka SaaS services via Private Connect, you can deploy a [kafka-proxy](https://github.com/grepplabs/kafka-proxy) as an intermediary, effectively exposing the Kafka service as self-hosted Kafka. + +If your Apache Kafka service is hosted on AWS, follow [Set Up Self-Hosted Kafka Private Link Service in AWS](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md) to configure the network connection and obtain the **Bootstrap Ports** information, and then follow [Set Up Private Endpoint for Changefeeds](/tidb-cloud/premium/set-up-sink-private-endpoint-premium.md) to create a private endpoint. + +
+
+ +If you want to provide Public IP access to your Apache Kafka service, assign Public IP addresses to all your Kafka brokers. + +It is **NOT** recommended to use Public IP in a production environment. + +
+ +
+ +Currently, the VPC Peering feature for {{{ .premium }}} instances is only available upon request. To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for VPC Peering for {{{ .premium }}} instance" in the **Description** field, and then click **Submit**. + +
+
+
### Kafka ACL authorization @@ -114,7 +141,7 @@ For example, if your Kafka cluster is in Confluent Cloud, you can see [Resources ## Step 1. Open the Changefeed page for Apache Kafka 1. Log in to the [TiDB Cloud console](https://tidbcloud.com). -2. Navigate to the cluster overview page of the target TiDB cluster, and then click **Data** > **Changefeed** in the left navigation pane. +2. Navigate to the overview page of the target TiDB clusterinstance, and then click **Data** > **Changefeed** in the left navigation pane. 3. Click **Create Changefeed**, and select **Kafka** as **Destination**. ## Step 2. Configure the changefeed target @@ -139,71 +166,85 @@ The steps vary depending on the connectivity method you select.
1. In **Connectivity Method**, select **Private Link**. -2. Authorize the [AWS Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-accounts) of TiDB Cloud to create an endpoint for your endpoint service. The AWS Principal is provided in the tip on the web page. -3. Make sure you select the same **Number of AZs** and **AZ IDs of Kafka Deployment**, and fill the same unique ID in **Kafka Advertised Listener Pattern** when you [Set Up Self-Hosted Kafka Private Link Service in AWS](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md) in the **Network** section. -4. Fill in the **Endpoint Service Name** which is configured in [Set Up Self-Hosted Kafka Private Link Service in AWS](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md). -5. Fill in the **Bootstrap Ports**. It is recommended that you set at least one port for one AZ. You can use commas `,` to separate multiple ports. -6. Select an **Authentication** option according to your Kafka authentication configuration. +2. In **Private Endpoint**, select the private endpoint that you created in the [Network](#network) section. Make sure the AZs of the private endpoint match the AZs of the Kafka deployment. +3. Fill in the **Bootstrap Ports** that you obtained from the [Network](#network) section. It is recommended that you set at least one port for one AZ. You can use commas `,` to separate multiple ports. +4. Select an **Authentication** option according to your Kafka authentication configuration. - If your Kafka does not require authentication, keep the default option **Disable**. - If your Kafka requires authentication, select the corresponding authentication type, and then fill in the **user name** and **password** of your Kafka account for authentication. +5. Select your **Kafka Version**. If you do not know which one to use, use **Kafka v2**. +6. Select a **Compression** type for the data in this changefeed. +7. 
Enable the **TLS Encryption** option if your Kafka has enabled TLS encryption and you want to use TLS encryption for the Kafka connection. +8. Click **Next** to test the network connection. If the test succeeds, you will be directed to the next page. -7. Select your **Kafka Version**. If you do not know which one to use, use **Kafka v2**. -8. Select a **Compression** type for the data in this changefeed. -9. Enable the **TLS Encryption** option if your Kafka has enabled TLS encryption and you want to use TLS encryption for the Kafka connection. -10. Click **Next** to test the network connection. If the test succeeds, you will be directed to the next page. -11. TiDB Cloud creates the endpoint for **Private Link**, which might take several minutes. -12. Once the endpoint is created, log in to your cloud provider console and accept the connection request. -13. Return to the [TiDB Cloud console](https://tidbcloud.com) to confirm that you have accepted the connection request. TiDB Cloud will test the connection and proceed to the next page if the test succeeds. +
+ + +
+ +1. In **Connectivity Method**, select **Private Link**. +2. In **Private Endpoint**, select the private endpoint that you created in the [Network](#network) section. Make sure the AZs of the private endpoint match the AZs of the Kafka deployment. +3. Fill in the **Bootstrap Ports** that you obtained from the [Network](#network) section. It is recommended that you set at least one port for one AZ. You can use commas `,` to separate multiple ports. +4. Select an **Authentication** option according to your Kafka authentication configuration. + + - If your Kafka does not require authentication, keep the default option **Disable**. + - If your Kafka requires authentication, select the corresponding authentication type, and then fill in the **user name** and **password** of your Kafka account for authentication. +5. Select your **Kafka Version**. If you do not know which one to use, use **Kafka v2**. +6. Select a **Compression** type for the data in this changefeed. +7. Enable the **TLS Encryption** option if your Kafka has enabled TLS encryption and you want to use TLS encryption for the Kafka connection. +8. Click **Next** to test the network connection. If the test succeeds, you will be directed to the next page.
+
+ +
1. In **Connectivity Method**, select **Private Service Connect**. -2. Ensure that you fill in the same unique ID in **Kafka Advertised Listener Pattern** when you [Set Up Self-Hosted Kafka Private Service Connect in Google Cloud](/tidb-cloud/setup-self-hosted-kafka-private-service-connect.md) in the **Network** section. -3. Fill in the **Service Attachment** that you have configured in [Setup Self Hosted Kafka Private Service Connect in Google Cloud](/tidb-cloud/setup-self-hosted-kafka-private-service-connect.md) -4. Fill in the **Bootstrap Ports**. It is recommended that you provide more than one port. You can use commas `,` to separate multiple ports. -5. Select an **Authentication** option according to your Kafka authentication configuration. +2. In **Private Endpoint**, select the private endpoint that you created in the [Network](#network) section. +3. Fill in the **Bootstrap Ports** that you obtained from the [Network](#network) section. It is recommended that you provide more than one port. You can use commas `,` to separate multiple ports. +4. Select an **Authentication** option according to your Kafka authentication configuration. - If your Kafka does not require authentication, keep the default option **Disable**. - If your Kafka requires authentication, select the corresponding authentication type, and then fill in the **user name** and **password** of your Kafka account for authentication. - -6. Select your **Kafka Version**. If you do not know which one to use, use **Kafka v2**. -7. Select a **Compression** type for the data in this changefeed. -8. Enable the **TLS Encryption** option if your Kafka has enabled TLS encryption and you want to use TLS encryption for the Kafka connection. -9. Click **Next** to test the network connection. If the test succeeds, you will be directed to the next page. -10. TiDB Cloud creates the endpoint for **Private Service Connect**, which might take several minutes. -11. 
Once the endpoint is created, log in to your cloud provider console and accept the connection request. -12. Return to the [TiDB Cloud console](https://tidbcloud.com) to confirm that you have accepted the connection request. TiDB Cloud will test the connection and proceed to the next page if the test succeeds. +5. Select your **Kafka Version**. If you do not know which one to use, use **Kafka v2**. +6. Select a **Compression** type for the data in this changefeed. +7. Enable the **TLS Encryption** option if your Kafka has enabled TLS encryption and you want to use TLS encryption for the Kafka connection. +8. Click **Next** to test the network connection. If the test succeeds, you will be directed to the next page. +9. TiDB Cloud creates the endpoint for **Private Service Connect**, which might take several minutes. +10. Once the endpoint is created, log in to your cloud provider console and accept the connection request. +11. Return to the [TiDB Cloud console](https://tidbcloud.com) to confirm that you have accepted the connection request. TiDB Cloud will test the connection and proceed to the next page if the test succeeds.
+
+ +
1. In **Connectivity Method**, select **Private Link**. -2. Authorize the Azure subscription of TiDB Cloud or allow anyone with your alias to access your Private Link service before creating the changefeed. The Azure subscription is provided in the **Reminders before proceeding** tip on the web page. For more information about the visibility of Private Link service, see [Control service exposure](https://learn.microsoft.com/en-us/azure/private-link/private-link-service-overview#control-service-exposure) in Azure documentation. -3. Make sure you fill in the same unique ID in **Kafka Advertised Listener Pattern** when you [Set Up Self-Hosted Kafka Private Link Service in Azure](/tidb-cloud/setup-azure-self-hosted-kafka-private-link-service.md) in the **Network** section. -4. Fill in the **Alias of Private Link Service** which is configured in [Set Up Self-Hosted Kafka Private Link Service in Azure](/tidb-cloud/setup-azure-self-hosted-kafka-private-link-service.md). -5. Fill in the **Bootstrap Ports**. It is recommended that you set at least one port for one AZ. You can use commas `,` to separate multiple ports. -6. Select an **Authentication** option according to your Kafka authentication configuration. +2. In **Private Endpoint**, select the private endpoint that you created in the [Network](#network) section. +3. Fill in the **Bootstrap Ports** that you obtained in the [Network](#network) section. It is recommended that you set at least one port for one AZ. You can use commas `,` to separate multiple ports. +4. Select an **Authentication** option according to your Kafka authentication configuration. - If your Kafka does not require authentication, keep the default option **Disable**. - If your Kafka requires authentication, select the corresponding authentication type, and then fill in the **user name** and **password** of your Kafka account for authentication. - -7. Select your **Kafka Version**. If you do not know which one to use, use **Kafka v2**. -8. 
Select a **Compression** type for the data in this changefeed. -9. Enable the **TLS Encryption** option if your Kafka has enabled TLS encryption and you want to use TLS encryption for the Kafka connection. -10. Click **Next** to test the network connection. If the test succeeds, you will be directed to the next page. -11. TiDB Cloud creates the endpoint for **Private Link**, which might take several minutes. -12. Once the endpoint is created, log in to the [Azure portal](https://portal.azure.com/) and accept the connection request. -13. Return to the [TiDB Cloud console](https://tidbcloud.com) to confirm that you have accepted the connection request. TiDB Cloud will test the connection and proceed to the next page if the test succeeds. +5. Select your **Kafka Version**. If you do not know which one to use, use **Kafka v2**. +6. Select a **Compression** type for the data in this changefeed. +7. Enable the **TLS Encryption** option if your Kafka has enabled TLS encryption and you want to use TLS encryption for the Kafka connection. +8. Click **Next** to test the network connection. If the test succeeds, you will be directed to the next page. +9. TiDB Cloud creates the endpoint for **Private Link**, which might take several minutes. +10. Once the endpoint is created, log in to the [Azure portal](https://portal.azure.com/) and accept the connection request. +11. Return to the [TiDB Cloud console](https://tidbcloud.com) to confirm that you have accepted the connection request. TiDB Cloud will test the connection and proceed to the next page if the test succeeds.
+
## Step 3. Set the changefeed 1. Customize **Table Filter** to filter the tables that you want to replicate. For the rule syntax, refer to [table filter rules](/table-filter.md). + - **Case Sensitive**: you can set whether the matching of database and table names in filter rules is case-sensitive. By default, matching is case-insensitive. - **Filter Rules**: you can set filter rules in this column. By default, there is a rule `*.*`, which stands for replicating all tables. When you add a new rule, TiDB Cloud queries all the tables in TiDB and displays only the tables that match the rules in the box on the right. You can add up to 100 filter rules. - **Tables with valid keys**: this column displays the tables that have valid keys, including primary keys or unique indexes. - **Tables without valid keys**: this column shows tables that lack primary keys or unique keys. These tables present a challenge during replication because the absence of a unique identifier can result in inconsistent data when the downstream handles duplicate events. To ensure data consistency, it is recommended to add unique keys or primary keys to these tables before initiating the replication. Alternatively, you can add filter rules to exclude these tables. For example, you can exclude the table `test.tbl1` by using the rule `"!test.tbl1"`. @@ -211,7 +252,13 @@ The steps vary depending on the connectivity method you select. 2. Customize **Event Filter** to filter the events that you want to replicate. - **Tables matching**: you can set which tables the event filter will be applied to in this column. The rule syntax is the same as that used for the preceding **Table Filter** area. You can add up to 10 event filter rules per changefeed. - - **Ignored events**: you can set which types of events the event filter will exclude from the changefeed. 
+ - **Event Filter**: you can use the following event filters to exclude specific events from the changefeed: + - **Ignore event**: excludes specified event types. + - **Ignore SQL**: excludes DDL events that match specified expressions. For example, `^drop` excludes statements starting with `DROP`, and `add column` excludes statements containing `ADD COLUMN`. + - **Ignore insert value expression**: excludes `INSERT` statements that meet specific conditions. For example, `id >= 100` excludes `INSERT` statements where `id` is greater than or equal to 100. + - **Ignore update new value expression**: excludes `UPDATE` statements where the new value matches a specified condition. For example, `gender = 'male'` excludes updates that result in `gender` being `male`. + - **Ignore update old value expression**: excludes `UPDATE` statements where the old value matches a specified condition. For example, `age < 18` excludes updates where the old value of `age` is less than 18. + - **Ignore delete value expression**: excludes `DELETE` statements that meet a specified condition. For example, `name = 'john'` excludes `DELETE` statements where `name` is `'john'`. 3. Customize **Column Selector** to select columns from events and send only the data changes related to those columns to the downstream. @@ -234,7 +281,7 @@ The steps vary depending on the connectivity method you select. 6. If you select **Avro** as your data format, you will see some Avro-specific configurations on the page. You can fill in these configurations as follows: - In the **Decimal** and **Unsigned BigInt** configurations, specify how TiDB Cloud handles the decimal and unsigned bigint data types in Kafka messages. - - In the **Schema Registry** area, fill in your schema registry endpoint. If you enable **HTTP Authentication**, the fields for user name and password are displayed and automatically filled in with your TiDB cluster endpoint and password. 
+ - In the **Schema Registry** area, fill in your schema registry endpoint. If you enable **HTTP Authentication**, the fields for user name and password are displayed and automatically filled in with your TiDB cluster or instance endpoint and password. 7. In the **Topic Distribution** area, select a distribution mode, and then fill in the topic name configurations according to the mode. @@ -281,11 +328,13 @@ The steps vary depending on the connectivity method you select. - **Replication Factor**: controls how many Kafka servers each Kafka message is replicated to. The valid value ranges from [`min.insync.replicas`](https://kafka.apache.org/33/documentation.html#brokerconfigs_min.insync.replicas) to the number of Kafka brokers. - **Partition Number**: controls how many partitions exist in a topic. The valid value range is `[1, 10 * the number of Kafka brokers]`. -10. Click **Next**. +10. In the **Split Event** area, choose whether to split `UPDATE` events into separate `DELETE` and `INSERT` events or keep them as raw `UPDATE` events. For more information, see [Split primary or unique key UPDATE events for non-MySQL sinks](https://docs.pingcap.com/tidb/stable/ticdc-split-update-behavior/#split-primary-or-unique-key-update-events-for-non-mysql-sinks). + +11. Click **Next**. ## Step 4. Configure your changefeed specification -1. In the **Changefeed Specification** area, specify the number of Replication Capacity Units (RCUs) to be used by the changefeed. +1. In the **Changefeed Specification** area, specify the number of Replication Capacity Units (RCUs) or Changefeed Capacity Units (CCUs) to be used by the changefeed. 2. In the **Changefeed Name** area, specify a name for the changefeed. 3. Click **Next** to check the configurations you set and go to the next page. 
diff --git a/tidb-cloud/changefeed-sink-to-apache-pulsar.md b/tidb-cloud/changefeed-sink-to-apache-pulsar.md index 4a37ba2957d0e..229fcb8e5c7bb 100644 --- a/tidb-cloud/changefeed-sink-to-apache-pulsar.md +++ b/tidb-cloud/changefeed-sink-to-apache-pulsar.md @@ -10,7 +10,7 @@ This document describes how to create a changefeed to stream data from TiDB Clou > **Note:** > > - To replicate data to Apache Pulsar using the changefeed feature, make sure that your TiDB Cloud Dedicated cluster version is v7.5.1 or later. -> - For [TiDB Cloud Serverless clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless), the changefeed feature is unavailable. +> - For [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) and [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) clusters, the changefeed feature is unavailable. ## Restrictions @@ -109,6 +109,7 @@ For more information, see [How to create a topic](https://pulsar.apache.org/docs 1. Customize **Table Filter** to filter the tables that you want to replicate. For the rule syntax, refer to [table filter rules](/table-filter.md). + - **Case Sensitive**: you can set whether the matching of database and table names in filter rules is case-sensitive. By default, matching is case-insensitive. - **Filter Rules**: you can set filter rules in this column. By default, there is a rule `*.*`, which stands for replicating all tables. When you add a new rule, TiDB Cloud queries all the tables in TiDB and displays only the tables that match the rules in the box on the right. You can add up to 100 filter rules. - **Tables with valid keys**: this column displays the tables that have valid keys, including primary keys or unique indexes. - **Tables without valid keys**: this column shows tables that lack primary keys or unique keys. These tables present a challenge during replication because the absence of a unique identifier can result in inconsistent data when the downstream handles duplicate events. 
To ensure data consistency, it is recommended to add unique keys or primary keys to these tables before initiating the replication. Alternatively, you can add filter rules to exclude these tables. For example, you can exclude the table `test.tbl1` by using the rule `"!test.tbl1"`. @@ -116,7 +117,13 @@ For more information, see [How to create a topic](https://pulsar.apache.org/docs 2. Customize **Event Filter** to filter the events that you want to replicate. - **Tables matching**: you can set which tables the event filter will be applied to in this column. The rule syntax is the same as that used for the preceding **Table Filter** area. You can add up to 10 event filter rules per changefeed. - - **Ignored events**: you can set which types of events the event filter will exclude from the changefeed. + - **Event Filter**: you can use the following event filters to exclude specific events from the changefeed: + - **Ignore event**: excludes specified event types. + - **Ignore SQL**: excludes DDL events that match specified expressions. For example, `^drop` excludes statements starting with `DROP`, and `add column` excludes statements containing `ADD COLUMN`. + - **Ignore insert value expression**: excludes `INSERT` statements that meet specific conditions. For example, `id >= 100` excludes `INSERT` statements where `id` is greater than or equal to 100. + - **Ignore update new value expression**: excludes `UPDATE` statements where the new value matches a specified condition. For example, `gender = 'male'` excludes updates that result in `gender` being `male`. + - **Ignore update old value expression**: excludes `UPDATE` statements where the old value matches a specified condition. For example, `age < 18` excludes updates where the old value of `age` is less than 18. + - **Ignore delete value expression**: excludes `DELETE` statements that meet a specified condition. For example, `name = 'john'` excludes `DELETE` statements where `name` is `'john'`. 3. 
In the **Start Replication Position** area, select the starting point for the changefeed to replicate data to Pulsar: @@ -174,7 +181,9 @@ For more information, see [How to create a topic](https://pulsar.apache.org/docs If you want the changefeed to send Pulsar messages of a table to different partitions, choose this distribution method. The specified column values of a row changelog will determine which partition the changelog is sent to. This distribution method ensures orderliness in each partition and guarantees that changelogs with the same column values are sent to the same partition. -7. Click **Next**. +7. In the **Split Event** area, choose whether to split `UPDATE` events into separate `DELETE` and `INSERT` events or keep as raw `UPDATE` events. For more information, see [Split primary or unique key UPDATE events for non-MySQL sinks](https://docs.pingcap.com/tidb/stable/ticdc-split-update-behavior/#split-primary-or-unique-key-update-events-for-non-mysql-sinks). + +8. Click **Next**. ## Step 4. Configure specification and review diff --git a/tidb-cloud/changefeed-sink-to-cloud-storage.md b/tidb-cloud/changefeed-sink-to-cloud-storage.md index 91616d3f28de4..30cbff2fe4b79 100644 --- a/tidb-cloud/changefeed-sink-to-cloud-storage.md +++ b/tidb-cloud/changefeed-sink-to-cloud-storage.md @@ -1,16 +1,16 @@ --- title: Sink to Cloud Storage -summary: This document explains how to create a changefeed to stream data from TiDB Cloud to Amazon S3 or GCS. It includes restrictions, configuration steps for the destination, replication, and specification, as well as starting the replication process. +summary: This document explains how to create a changefeed to stream data from TiDB Cloud to Amazon S3, Google Cloud Storage (GCS), or Azure Blob Storage. It includes restrictions, configuration steps for the destination, replication, and specification, as well as starting the replication process. 
--- # Sink to Cloud Storage -This document describes how to create a changefeed to stream data from TiDB Cloud to cloud storage. Currently, Amazon S3 and GCS are supported. +This document describes how to create a changefeed to stream data from TiDB Cloud to cloud storage. Currently, Amazon S3, Google Cloud Storage (GCS), and Azure Blob Storage are supported. > **Note:** > > - To stream data to cloud storage, make sure that your TiDB cluster version is v7.1.1 or later. To upgrade your TiDB Cloud Dedicated cluster to v7.1.1 or later, [contact TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). -> - For [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters, the changefeed feature is unavailable. +> - For [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) and [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) clusters, the changefeed feature is unavailable. ## Restrictions @@ -20,7 +20,7 @@ This document describes how to create a changefeed to stream data from TiDB Clou ## Step 1. Configure destination -Navigate to the cluster overview page of the target TiDB cluster. Click **Data** > **Changefeed** in the left navigation pane, click **Create Changefeed**, and select **Amazon S3** or **GCS** as the destination. The configuration process varies depend on the destination you choose. +Navigate to the cluster overview page of the target TiDB cluster. Click **Data** > **Changefeed** in the left navigation pane, click **Create Changefeed**, and select **Amazon S3**, **GCS**, or **Azure Blob Storage** as the destination. The configuration process varies depending on the destination you choose.
@@ -84,10 +84,50 @@ For **GCS**, before filling **GCS Endpoint**, you need to first grant the GCS bu 7. In the TiDB Cloud console, go to the Changefeed's **Configure Destination** page, and fill in the **bucket gsutil URI** field. +
+
+ + +For **Azure Blob Storage**, you must configure the container and get a SAS token in the Azure portal first. Take the following steps: + +1. In the [Azure portal](https://portal.azure.com/), create a container to store changefeed data. + + 1. In the left navigation pane, click **Storage Accounts**, and then select your storage account. + 2. In the storage account navigation menu, select **Data storage** > **Containers**, and then click **+ Container**. + 3. Enter a name for your new container, set the anonymous access level (the recommended level is **Private**), and then click **Create**. + +2. Get the URL of the target container. + + 1. In the container list, select your target container. + 2. Click **...** for the container, and then select **Container properties**. + 3. Save the **URL** value for later use, for example `https://<account-name>.blob.core.windows.net/<container-name>`. + +3. Generate a SAS token. + + 1. In the storage account navigation menu, select **Security + networking** > **Shared access signature**. + 2. In the **Allowed services** section, select **Blob**. + 3. In the **Allowed resource types** section, select **Container** and **Object**. + 4. In the **Allowed permissions** section, select **Read**, **Write**, **Delete**, **List**, and **Create**. + 5. Specify a validity period for the SAS token that is long enough to meet your needs. + + > **Note:** + > + > - The changefeed continuously writes events, so ensure the SAS token has a sufficiently long validity period. For security, it is recommended to replace the token every six to twelve months. + > - The generated SAS token cannot be revoked, so set its validity period carefully. + > - To ensure continuous availability, regenerate and update the SAS token before it expires. + + 6. Click **Generate SAS and connection string**, and then save the **SAS token**. + + ![Generate a SAS token](/media/tidb-cloud/changefeed/sink-to-cloud-storage-azure-signature.png) + +4. 
In the [TiDB Cloud console](https://tidbcloud.com/), go to the Changefeed's **Configure Destination** page, and fill in the following fields: + + - **Blob URL**: enter the container URL obtained in step 2. You can optionally add a prefix. + - **SAS Token**: enter the generated SAS token obtained in step 3. +
-Click **Next** to establish the connection from the TiDB Cloud Dedicated cluster to Amazon S3 or GCS. TiDB Cloud will automatically test and verify if the connection is successful. +Click **Next** to establish the connection from the TiDB Cloud Dedicated cluster to Amazon S3, GCS, or Azure Blob Storage. TiDB Cloud will automatically test and verify if the connection is successful. - If yes, you are directed to the next step of configuration. - If not, a connectivity error is displayed, and you need to handle the error. After the error is resolved, click **Next** to retry the connection. @@ -98,6 +138,7 @@ Click **Next** to establish the connection from the TiDB Cloud Dedicated cluster ![the table filter of changefeed](/media/tidb-cloud/changefeed/sink-to-s3-02-table-filter.jpg) + - **Case Sensitive**: you can set whether the matching of database and table names in filter rules is case-sensitive. By default, matching is case-insensitive. - **Filter Rules**: you can set filter rules in this column. By default, there is a rule `*.*`, which stands for replicating all tables. When you add a new rule, TiDB Cloud queries all the tables in TiDB and displays only the tables that match the rules in the box on the right. You can add up to 100 filter rules. - **Tables with valid keys**: this column displays the tables that have valid keys, including primary keys or unique indexes. - **Tables without valid keys**: this column shows tables that lack primary keys or unique keys. These tables present a challenge during replication because the absence of a unique identifier can result in inconsistent data when handling duplicate events downstream. To ensure data consistency, it is recommended to add unique keys or primary keys to these tables before initiating the replication. Alternatively, you can employ filter rules to exclude these tables. For example, you can exclude the table `test.tbl1` by using the rule `"!test.tbl1"`. 
@@ -105,7 +146,13 @@ Click **Next** to establish the connection from the TiDB Cloud Dedicated cluster 2. Customize **Event Filter** to filter the events that you want to replicate. - **Tables matching**: you can set which tables the event filter will be applied to in this column. The rule syntax is the same as that used for the preceding **Table Filter** area. You can add up to 10 event filter rules per changefeed. - - **Ignored events**: you can set which types of events the event filter will exclude from the changefeed. + - **Event Filter**: you can use the following event filters to exclude specific events from the changefeed: + - **Ignore event**: excludes specified event types. + - **Ignore SQL**: excludes DDL events that match specified expressions. For example, `^drop` excludes statements starting with `DROP`, and `add column` excludes statements containing `ADD COLUMN`. + - **Ignore insert value expression**: excludes `INSERT` statements that meet specific conditions. For example, `id >= 100` excludes `INSERT` statements where `id` is greater than or equal to 100. + - **Ignore update new value expression**: excludes `UPDATE` statements where the new value matches a specified condition. For example, `gender = 'male'` excludes updates that result in `gender` being `male`. + - **Ignore update old value expression**: excludes `UPDATE` statements where the old value matches a specified condition. For example, `age < 18` excludes updates where the old value of `age` is less than 18. + - **Ignore delete value expression**: excludes `DELETE` statements that meet a specified condition. For example, `name = 'john'` excludes `DELETE` statements where `name` is `'john'`. 3. 
In the **Start Replication Position** area, select one of the following replication positions: @@ -149,6 +196,8 @@ Click **Next** to establish the connection from the TiDB Cloud Dedicated cluster > > These two parameters will affect the quantity of objects generated in cloud storage for each individual database table. If there are a large number of tables, using the same configuration will increase the number of objects generated and subsequently raise the cost of invoking the cloud storage API. Therefore, it is recommended to configure these parameters appropriately based on your Recovery Point Objective (RPO) and cost requirements. +6. In the **Split Event** area, choose whether to split `UPDATE` events into separate `DELETE` and `INSERT` events or keep as raw `UPDATE` events. For more information, see [Split primary or unique key UPDATE events for non-MySQL sinks](https://docs.pingcap.com/tidb/stable/ticdc-split-update-behavior/#split-primary-or-unique-key-update-events-for-non-mysql-sinks). + ## Step 3. Configure specification Click **Next** to configure your changefeed specification. diff --git a/tidb-cloud/changefeed-sink-to-mysql.md b/tidb-cloud/changefeed-sink-to-mysql.md index 005bf470b737a..587c6c87bb48b 100644 --- a/tidb-cloud/changefeed-sink-to-mysql.md +++ b/tidb-cloud/changefeed-sink-to-mysql.md @@ -7,14 +7,17 @@ summary: This document explains how to stream data from TiDB Cloud to MySQL usin This document describes how to stream data from TiDB Cloud to MySQL using the **Sink to MySQL** changefeed. + + > **Note:** > -> - To use the changefeed feature, make sure that your TiDB Cloud Dedicated cluster version is v6.1.3 or later. -> - For [TiDB Cloud Serverless clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless), the changefeed feature is unavailable. +> To use the changefeed feature, make sure that your TiDB Cloud Dedicated cluster version is v6.1.3 or later. 
+ + ## Restrictions -- For each TiDB Cloud cluster, you can create up to 100 changefeeds. +- For each TiDB Cloud cluster or instance, you can create up to 100 changefeeds. - Because TiDB Cloud uses TiCDC to establish changefeeds, it has the same [restrictions as TiCDC](https://docs.pingcap.com/tidb/stable/ticdc-overview#unsupported-scenarios). - If the table to be replicated does not have a primary key or a non-null unique index, the absence of a unique constraint during replication could result in duplicated data being inserted downstream in some retry scenarios. @@ -28,7 +31,12 @@ Before creating a changefeed, you need to complete the following prerequisites: ### Network -Make sure that your TiDB Cluster can connect to the MySQL service. + + +Make sure that your TiDB Cloud cluster can connect to the MySQL service. + + +
If your MySQL service is in an AWS VPC that has no public internet access, take the following steps: @@ -48,12 +56,49 @@ If your MySQL service is in a Google Cloud VPC that has no public internet acces 2. [Set up a VPC peering connection](/tidb-cloud/set-up-vpc-peering-connections.md) between the VPC of the MySQL service and your TiDB cluster. 3. Modify the ingress firewall rules of the VPC where MySQL is located. - You must add [the CIDR of the region where your TiDB Cloud cluster is located](/tidb-cloud/set-up-vpc-peering-connections.md#prerequisite-set-a-cidr-for-a-region) to the ingress firewall rules. Doing so allows the traffic to flow from your TiDB Cluster to the MySQL endpoint. + You must add [the CIDR of the region where your TiDB Cloud cluster is located](/tidb-cloud/set-up-vpc-peering-connections.md#prerequisite-set-a-cidr-for-a-region) to the ingress firewall rules. Doing so allows the traffic to flow from your TiDB Cloud cluster to the MySQL endpoint. + +
+ +
+ +Private endpoints leverage **Private Link** or **Private Service Connect** technologies from cloud providers, enabling resources in your VPC to connect to services in other VPCs through private IP addresses, as if those services were hosted directly within your VPC. + +You can connect your TiDB Cloud cluster to your MySQL service securely through a private endpoint. If the private endpoint is not available for your MySQL service, follow [Set Up Private Endpoint for Changefeeds](/tidb-cloud/set-up-sink-private-endpoint.md) to create one. + +
+ +
+ +
+ + + +Make sure that your TiDB Cloud instance can connect to the MySQL service. + +> **Note:** +> +> Currently, this feature is only available upon request. To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for VPC Peering for {{{ .premium }}} instance" in the **Description** field, and then click **Submit**. + +Private endpoints leverage **Private Link** or **Private Service Connect** technologies from cloud providers, enabling resources in your VPC to connect to services in other VPCs through private IP addresses, as if those services were hosted directly within your VPC. + +You can connect your TiDB Cloud instance to your MySQL service securely through a private endpoint. If the private endpoint is not available for your MySQL service, follow [Set Up Private Endpoint for Changefeeds](/tidb-cloud/premium/set-up-sink-private-endpoint-premium.md) to create one. + + ### Load existing data (optional) + + The **Sink to MySQL** connector can only sink incremental data from your TiDB cluster to MySQL after a certain timestamp. If you already have data in your TiDB cluster, you can export and load the existing data of your TiDB cluster into MySQL before enabling **Sink to MySQL**. + + + +The **Sink to MySQL** connector can only sink incremental data from your TiDB instance to MySQL after a certain timestamp. If you already have data in your TiDB instance, you can export and load the existing data of your TiDB instance into MySQL before enabling **Sink to MySQL**. + + + To load the existing data: 1. Extend the [tidb_gc_life_time](https://docs.pingcap.com/tidb/stable/system-variables#tidb_gc_life_time-new-in-v50) to be longer than the total time of the following two operations, so that historical data during the time is not garbage collected by TiDB. 
@@ -69,7 +114,7 @@ To load the existing data: SET GLOBAL tidb_gc_life_time = '720h'; ``` -2. Use [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview) to export data from your TiDB cluster, then use community tools such as [mydumper/myloader](https://centminmod.com/mydumper.html) to load data to the MySQL service. +2. Use [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview) to export data from your TiDB instance, then use community tools such as [mydumper/myloader](https://centminmod.com/mydumper.html) to load data to the MySQL service. 3. From the [exported files of Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview#format-of-exported-files), get the start position of MySQL sink from the metadata file: @@ -91,50 +136,62 @@ If you do not load the existing data, you need to create corresponding target ta After completing the prerequisites, you can sink your data to MySQL. -1. Navigate to the cluster overview page of the target TiDB cluster, and then click **Data** > **Changefeed** in the left navigation pane. +1. Navigate to the overview page of the target TiDB instance, and then click **Data** > **Changefeed** in the left navigation pane. 2. Click **Create Changefeed**, and select **MySQL** as **Destination**. -3. Fill in the MySQL endpoints, user name, and password in **MySQL Connection**. +3. In **Connectivity Method**, choose the method to connect to your MySQL service. + + - If you choose **VPC Peering** or **Public IP**, fill in your MySQL endpoint. + - If you choose **Private Link**, select the private endpoint that you created in the [Network](#network) section, and then fill in the MySQL port for your MySQL service. + +4. In **Authentication**, fill in the MySQL user name and password of your MySQL service. -4. Click **Next** to test whether TiDB can connect to MySQL successfully: +5. 
Click **Next** to test whether TiDB can connect to MySQL successfully: - If yes, you are directed to the next step of configuration. - If not, a connectivity error is displayed, and you need to handle the error. After the error is resolved, click **Next** again. -5. Customize **Table Filter** to filter the tables that you want to replicate. For the rule syntax, refer to [table filter rules](/table-filter.md). +6. Customize **Table Filter** to filter the tables that you want to replicate. For the rule syntax, refer to [table filter rules](/table-filter.md). + - **Case Sensitive**: you can set whether the matching of database and table names in filter rules is case-sensitive. By default, matching is case-insensitive. - **Filter Rules**: you can set filter rules in this column. By default, there is a rule `*.*`, which stands for replicating all tables. When you add a new rule, TiDB Cloud queries all the tables in TiDB and displays only the tables that match the rules in the box on the right. You can add up to 100 filter rules. - **Tables with valid keys**: this column displays the tables that have valid keys, including primary keys or unique indexes. - **Tables without valid keys**: this column shows tables that lack primary keys or unique keys. These tables present a challenge during replication because the absence of a unique identifier can result in inconsistent data when the downstream handles duplicate events. To ensure data consistency, it is recommended to add unique keys or primary keys to these tables before initiating the replication. Alternatively, you can add filter rules to exclude these tables. For example, you can exclude the table `test.tbl1` by using the rule `"!test.tbl1"`. -6. Customize **Event Filter** to filter the events that you want to replicate. +7. Customize **Event Filter** to filter the events that you want to replicate. - **Tables matching**: you can set which tables the event filter will be applied to in this column. 
The rule syntax is the same as that used for the preceding **Table Filter** area. You can add up to 10 event filter rules per changefeed. - - **Ignored events**: you can set which types of events the event filter will exclude from the changefeed. + - **Event Filter**: you can use the following event filters to exclude specific events from the changefeed: + - **Ignore event**: excludes specified event types. + - **Ignore SQL**: excludes DDL events that match specified expressions. For example, `^drop` excludes statements starting with `DROP`, and `add column` excludes statements containing `ADD COLUMN`. + - **Ignore insert value expression**: excludes `INSERT` statements that meet specific conditions. For example, `id >= 100` excludes `INSERT` statements where `id` is greater than or equal to 100. + - **Ignore update new value expression**: excludes `UPDATE` statements where the new value matches a specified condition. For example, `gender = 'male'` excludes updates that result in `gender` being `male`. + - **Ignore update old value expression**: excludes `UPDATE` statements where the old value matches a specified condition. For example, `age < 18` excludes updates where the old value of `age` is less than 18. + - **Ignore delete value expression**: excludes `DELETE` statements that meet a specified condition. For example, `name = 'john'` excludes `DELETE` statements where `name` is `'john'`. -7. In **Start Replication Position**, configure the starting position for your MySQL sink. +8. In **Start Replication Position**, configure the starting position for your MySQL sink. - If you have [loaded the existing data](#load-existing-data-optional) using Dumpling, select **Start replication from a specific TSO** and fill in the TSO that you get from Dumpling exported metadata files. - - If you do not have any data in the upstream TiDB cluster, select **Start replication from now on**. 
 + - If you do not have any data in the upstream TiDB instance, select **Start replication from now on**. - Otherwise, you can customize the start time point by choosing **Start replication from a specific time**. -8. Click **Next** to configure your changefeed specification. +9. Click **Next** to configure your changefeed specification. - - In the **Changefeed Specification** area, specify the number of Replication Capacity Units (RCUs) to be used by the changefeed. + - In the **Changefeed Specification** area, specify the number of Changefeed Capacity Units (CCUs) to be used by the changefeed. - In the **Changefeed Name** area, specify a name for the changefeed. -9. Click **Next** to review the changefeed configuration. +10. Click **Next** to review the changefeed configuration. If you confirm that all configurations are correct, check the compliance of cross-region replication, and click **Create**. If you want to modify some configurations, click **Previous** to go back to the previous configuration page. -10. The sink starts soon, and you can see the status of the sink changes from **Creating** to **Running**. +11. The sink starts soon, and you can see the status of the sink changes from **Creating** to **Running**. Click the changefeed name, and you can see more details about the changefeed, such as the checkpoint, replication latency, and other metrics. -11. If you have [loaded the existing data](#load-existing-data-optional) using Dumpling, you need to restore the GC time to its original value (the default value is `10m`) after the sink is created: +12. 
If you have [loaded the existing data](#load-existing-data-optional) using Dumpling, you need to restore the GC time to its original value (the default value is `10m`) after the sink is created: {{< copyable "sql" >}} diff --git a/tidb-cloud/changefeed-sink-to-tidb-cloud.md b/tidb-cloud/changefeed-sink-to-tidb-cloud.md index 1412c1cdf7c59..32bfcea1e1f60 100644 --- a/tidb-cloud/changefeed-sink-to-tidb-cloud.md +++ b/tidb-cloud/changefeed-sink-to-tidb-cloud.md @@ -1,11 +1,11 @@ --- title: Sink to TiDB Cloud -summary: This document explains how to stream data from a TiDB Cloud Dedicated cluster to a TiDB Cloud Serverless cluster. There are restrictions on the number of changefeeds and regions available for the feature. Prerequisites include extending tidb_gc_life_time, backing up data, and obtaining the start position of TiDB Cloud sink. To create a TiDB Cloud sink, navigate to the cluster overview page, establish the connection, customize table and event filters, fill in the start replication position, specify the changefeed specification, review the configuration, and create the sink. Finally, restore tidb_gc_life_time to its original value. +summary: This document explains how to stream data from a TiDB Cloud Dedicated cluster to a {{{ .starter }}} or {{{ .essential }}} cluster. There are restrictions on the number of changefeeds and regions available for the feature. Prerequisites include extending tidb_gc_life_time, backing up data, and obtaining the start position of TiDB Cloud sink. To create a TiDB Cloud sink, navigate to the cluster overview page, establish the connection, customize table and event filters, fill in the start replication position, specify the changefeed specification, review the configuration, and create the sink. Finally, restore tidb_gc_life_time to its original value. --- # Sink to TiDB Cloud -This document describes how to stream data from a TiDB Cloud Dedicated cluster to a TiDB Cloud Serverless cluster. 
+This document describes how to stream data from a TiDB Cloud Dedicated cluster to a {{{ .starter }}} or {{{ .essential }}} cluster. > **Note:** > @@ -23,14 +23,14 @@ This document describes how to stream data from a TiDB Cloud Dedicated cluster t - AWS Singapore (ap-southeast-1) - AWS Tokyo (ap-northeast-1) -- The source TiDB Cloud Dedicated cluster and the destination TiDB Cloud Serverless cluster must be in the same project and the same region. -- The **Sink to TiDB Cloud** feature only supports network connection via private endpoints. When you create a changefeed to stream data from a TiDB Cloud Dedicated cluster to a TiDB Cloud Serverless cluster, TiDB Cloud will automatically set up the private endpoint connection between the two clusters. +- The source TiDB Cloud Dedicated cluster and the destination {{{ .starter }}} or {{{ .essential }}} cluster must be in the same project and the same region. +- The **Sink to TiDB Cloud** feature only supports network connection via private endpoints. When you create a changefeed to stream data from a TiDB Cloud Dedicated cluster to a {{{ .starter }}} or {{{ .essential }}} cluster, TiDB Cloud will automatically set up the private endpoint connection between the two clusters. ## Prerequisites -The **Sink to TiDB Cloud** connector can only sink incremental data from a TiDB Cloud Dedicated cluster to a TiDB Cloud Serverless cluster after a certain [TSO](https://docs.pingcap.com/tidb/stable/glossary#tso). +The **Sink to TiDB Cloud** connector can only sink incremental data from a TiDB Cloud Dedicated cluster to a {{{ .starter }}} or {{{ .essential }}} cluster after a certain [TSO](https://docs.pingcap.com/tidb/stable/glossary#tso). -Before creating a changefeed, you need to export existing data from the source TiDB Cloud Dedicated cluster and load the data to the destination TiDB Cloud Serverless cluster. 
+Before creating a changefeed, you need to export existing data from the source TiDB Cloud Dedicated cluster and load the data to the destination {{{ .starter }}} or {{{ .essential }}} cluster. 1. Extend the [tidb_gc_life_time](https://docs.pingcap.com/tidb/stable/system-variables#tidb_gc_life_time-new-in-v50) to be longer than the total time of the following two operations, so that historical data during the time is not garbage collected by TiDB. @@ -43,7 +43,7 @@ Before creating a changefeed, you need to export existing data from the source T SET GLOBAL tidb_gc_life_time = '720h'; ``` -2. Use [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview) to export data from your TiDB Cloud Dedicated cluster, then use [TiDB Cloud Serverless Import](/tidb-cloud/import-csv-files-serverless.md) to load data to the destination TiDB Cloud Serverless cluster. +2. Use [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview) to export data from your TiDB Cloud Dedicated cluster, then use [the import feature](/tidb-cloud/import-csv-files-serverless.md) to load data to the destination {{{ .starter }}} or {{{ .essential }}} cluster. 3. From the [exported files of Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview#format-of-exported-files), get the start position of TiDB Cloud sink from the metadata file: @@ -59,13 +59,13 @@ Before creating a changefeed, you need to export existing data from the source T ## Create a TiDB Cloud sink -After completing the prerequisites, you can sink your data to the destination TiDB Cloud Serverless cluster. +After completing the prerequisites, you can sink your data to the destination {{{ .starter }}} or {{{ .essential }}} cluster. 1. Navigate to the cluster overview page of the target TiDB cluster, and then click **Data** > **Changefeed** in the left navigation pane. 2. Click **Create Changefeed**, and select **TiDB Cloud** as the destination. -3. 
In the **TiDB Cloud Connection** area, select the destination TiDB Cloud Serverless cluster, and then fill in the user name and password of the destination cluster. +3. In the **TiDB Cloud Connection** area, select the destination {{{ .starter }}} or {{{ .essential }}} cluster, and then fill in the user name and password of the destination cluster. 4. Click **Next** to establish the connection between the two TiDB clusters and test whether the changefeed can connect them successfully: @@ -74,6 +74,7 @@ After completing the prerequisites, you can sink your data to the destination Ti 5. Customize **Table Filter** to filter the tables that you want to replicate. For the rule syntax, refer to [table filter rules](/table-filter.md). + - **Case Sensitive**: you can set whether the matching of database and table names in filter rules is case-sensitive. By default, matching is case-insensitive. - **Filter Rules**: you can set filter rules in this column. By default, there is a rule `*.*`, which stands for replicating all tables. When you add a new rule, TiDB Cloud queries all the tables in TiDB and displays only the tables that match the rules in the box on the right. You can add up to 100 filter rules. - **Tables with valid keys**: this column displays the tables that have valid keys, including primary keys or unique indexes. - **Tables without valid keys**: this column shows tables that lack primary keys or unique keys. These tables present a challenge during replication because the absence of a unique identifier can result in inconsistent data when the downstream handles duplicate events. To ensure data consistency, it is recommended to add unique keys or primary keys to these tables before initiating the replication. Alternatively, you can add filter rules to exclude these tables. For example, you can exclude the table `test.tbl1` by using the rule `"!test.tbl1"`. @@ -81,7 +82,13 @@ After completing the prerequisites, you can sink your data to the destination Ti 6. 
Customize **Event Filter** to filter the events that you want to replicate. - **Tables matching**: you can set which tables the event filter will be applied to in this column. The rule syntax is the same as that used for the preceding **Table Filter** area. You can add up to 10 event filter rules per changefeed. - - **Ignored events**: you can set which types of events the event filter will exclude from the changefeed. + - **Event Filter**: you can use the following event filters to exclude specific events from the changefeed: + - **Ignore event**: excludes specified event types. + - **Ignore SQL**: excludes DDL events that match specified expressions. For example, `^drop` excludes statements starting with `DROP`, and `add column` excludes statements containing `ADD COLUMN`. + - **Ignore insert value expression**: excludes `INSERT` statements that meet specific conditions. For example, `id >= 100` excludes `INSERT` statements where `id` is greater than or equal to 100. + - **Ignore update new value expression**: excludes `UPDATE` statements where the new value matches a specified condition. For example, `gender = 'male'` excludes updates that result in `gender` being `male`. + - **Ignore update old value expression**: excludes `UPDATE` statements where the old value matches a specified condition. For example, `age < 18` excludes updates where the old value of `age` is less than 18. + - **Ignore delete value expression**: excludes `DELETE` statements that meet a specified condition. For example, `name = 'john'` excludes `DELETE` statements where `name` is `'john'`. 7. In the **Start Replication Position** area, fill in the TSO that you get from Dumpling exported metadata files. diff --git a/tidb-cloud/cli-reference.md b/tidb-cloud/cli-reference.md index d49899c553385..ee3d40436dd22 100644 --- a/tidb-cloud/cli-reference.md +++ b/tidb-cloud/cli-reference.md @@ -7,7 +7,7 @@ summary: Provides an overview of TiDB Cloud CLI. > **Note:** > -> TiDB Cloud CLI is in beta. 
+> Currently, TiDB Cloud CLI is in beta and not applicable to TiDB Cloud Dedicated clusters. TiDB Cloud CLI is a command line interface, which allows you to operate TiDB Cloud from your terminal with a few lines of commands. In the TiDB Cloud CLI, you can easily manage your TiDB Cloud clusters, import data to your clusters, and perform more operations. @@ -24,18 +24,17 @@ To use the `ticloud` CLI in your terminal, run `ticloud [command] [subcommand]`. | Command | Subcommand | Description | |-----------------------|-----------------------------------------------------------------------|------------------------------------------------| | auth | login, logout, whoami | Login and logout | -| serverless (alias: s) | create, delete, describe, list, update, spending-limit, region, shell | Manage TiDB Cloud Serverless clusters | -| serverless branch | create, delete, describe, list, shell | Manage TiDB Cloud Serverless branches | -| serverless import | cancel, describe, list, start | Manage TiDB Cloud Serverless import tasks | -| serverless export | create, describe, list, cancel, download | Manage TiDB Cloud Serverless export tasks | -| serverless sql-user | create, list, delete, update | Manage TiDB Cloud Serverless SQL users | -| serverless audit-log | config, describe, filter-rule (alias: filter), download | Manage TiDB Cloud Serverless database audit logging | -| ai | - | Chat with TiDB Bot | +| serverless (alias: s) | create, delete, describe, list, update, spending-limit, region, shell | Manage {{{ .starter }}} or {{{ .essential }}} cluster | +| serverless branch | create, delete, describe, list, shell | Manage branches for your {{{ .starter }}} or {{{ .essential }}} cluster | +| serverless import | cancel, describe, list, start | Manage import tasks for your {{{ .starter }}} or {{{ .essential }}} cluster | +| serverless export | create, describe, list, cancel, download | Manage export tasks for your {{{ .starter }}} or {{{ .essential }}} cluster | +| serverless 
sql-user | create, list, delete, update | Manage SQL users for your {{{ .starter }}} or {{{ .essential }}} cluster | +| serverless audit-log | config, describe, filter-rule (alias: filter), download | Manage database audit logging for your {{{ .starter }}} or {{{ .essential }}} cluster | | completion | bash, fish, powershell, zsh | Generate completion script for specified shell | | config | create, delete, describe, edit, list, set, use | Configure user profiles | | project | list | Manage projects | -| upgrade | - | Update the CLI to the latest version | -| help | auth, config, serverless, ai, project, upgrade, help, completion | View help for any command | +| upgrade | - | Update the CLI to the latest version | +| help | auth, config, serverless, project, upgrade, help, completion | View help for any command | ## Command modes diff --git a/tidb-cloud/serverless-external-storage.md b/tidb-cloud/configure-external-storage-access.md similarity index 72% rename from tidb-cloud/serverless-external-storage.md rename to tidb-cloud/configure-external-storage-access.md index 0be2ff4a95580..3796e472fbd11 100644 --- a/tidb-cloud/serverless-external-storage.md +++ b/tidb-cloud/configure-external-storage-access.md @@ -1,17 +1,28 @@ --- -title: Configure TiDB Cloud Serverless External Storage Access -summary: Learn how to configure Amazon Simple Storage Service (Amazon S3) access. +title: Configure External Storage Access +summary: Learn how to configure cross-account access to an external storage such as Amazon Simple Storage Service (Amazon S3). +aliases: ['/tidbcloud/serverless-external-storage'] --- -# Configure External Storage Access for TiDB Cloud Serverless +# Configure External Storage Access -If you want to import data from or export data to an external storage in a TiDB Cloud Serverless cluster, you need to configure cross-account access. This document describes how to configure access to an external storage for TiDB Cloud Serverless clusters. 
+ + +If you want to import data from or export data to an external storage in a TiDB Cloud cluster, you need to configure cross-account access. This document describes how to configure access to an external storage for {{{ .starter }}} and {{{ .essential }}} clusters. + + + + +If you want to import data from or export data to an external storage in a TiDB Cloud instance, you need to configure cross-account access. This document describes how to configure access to an external storage for {{{ .premium }}} instances. + + If you need to configure these external storages for a TiDB Cloud Dedicated cluster, see [Configure External Storage Access for TiDB Cloud Dedicated](/tidb-cloud/dedicated-external-storage.md). ## Configure Amazon S3 access -To allow a TiDB Cloud Serverless cluster to access the source data in your Amazon S3 bucket, configure the bucket access for the cluster using either of the following methods: +To allow a TiDB Cloud instance to access the source data in your Amazon S3 bucket, configure the bucket access for the instance using either of the following methods: - [Use a Role ARN](#configure-amazon-s3-access-using-a-role-arn): use a Role ARN to access your Amazon S3 bucket. - [Use an AWS access key](#configure-amazon-s3-access-using-an-aws-access-key): use the access key of an IAM user to access your Amazon S3 bucket. @@ -22,13 +33,13 @@ It is recommended that you use [AWS CloudFormation](https://docs.aws.amazon.com/ > **Note:** > -> Role ARN access to Amazon S3 is only supported for clusters with AWS as the cloud provider. If you use a different cloud provider, use an AWS access key instead. For more information, see [Configure Amazon S3 access using an AWS access key](#configure-amazon-s3-access-using-an-aws-access-key). +> Role ARN access to Amazon S3 is only supported for instances with AWS as the cloud provider. If you use a different cloud provider, use an AWS access key instead. 
For more information, see [Configure Amazon S3 access using an AWS access key](#configure-amazon-s3-access-using-an-aws-access-key). -1. Open the **Import** page for your target cluster. +1. Open the **Import** page for your target instance. - 1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + 1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page. - 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. + 2. Click the name of your target instance to go to its overview page, and then click **Data** > **Import** in the left navigation pane. 2. Open the **Add New ARN** dialog. @@ -40,7 +51,7 @@ It is recommended that you use [AWS CloudFormation](https://docs.aws.amazon.com/ - If you want to export data to Amazon S3, open the **Add New ARN** dialog as follows: - 1. Click **Export data to...** > **Amazon S3**. If your cluster has neither imported nor exported any data before, click **Click here to export data to...** > **Amazon S3** at the bottom of the page. + 1. Click **Export data to...** > **Amazon S3**. If your instance has neither imported nor exported any data before, click **Click here to export data to...** > **Amazon S3** at the bottom of the page. 2. Fill in the **Folder URI** field. 3. Choose **AWS Role ARN** and click **Click here to create new one with AWS CloudFormation**. @@ -56,7 +67,7 @@ It is recommended that you use [AWS CloudFormation](https://docs.aws.amazon.com/ 5. After the CloudFormation stack is executed, you can click the **Outputs** tab and find the Role ARN value in the **Value** column. 
- ![img.png](/media/tidb-cloud/serverless-external-storage/serverless-role-arn.png) + ![Role ARN](/media/tidb-cloud/serverless-external-storage/serverless-role-arn.png) If you have any trouble creating a role ARN with AWS CloudFormation, you can take the following steps to create one manually: @@ -79,10 +90,10 @@ If you have any trouble creating a role ARN with AWS CloudFormation, you can tak 4. On the **Create policy** page, click the **JSON** tab. - 5. Configure the policy in the policy text field according to your needs. The following is an example that you can use to export data from and import data into a TiDB Cloud Serverless cluster. + 5. Configure the policy in the policy text field according to your needs. The following is an example that you can use to export data from and import data into a TiDB Cloud clusterinstance. - - Exporting data from a TiDB Cloud Serverless cluster needs the **s3:PutObject** and **s3:ListBucket** permissions. - - Importing data into a TiDB Cloud Serverless cluster needs the **s3:GetObject**, **s3:GetObjectVersion**, and **s3:ListBucket** permissions. + - Exporting data from a TiDB Cloud clusterinstance needs the **s3:PutObject** and **s3:ListBucket** permissions. + - Importing data into a TiDB Cloud clusterinstance needs the **s3:GetObject**, **s3:GetObjectVersion**, and **s3:ListBucket** permissions. ```json { @@ -150,7 +161,7 @@ If you have any trouble creating a role ARN with AWS CloudFormation, you can tak - In **Trusted entity type**, select **AWS account**. - In **An AWS account**, select **Another AWS account**, and then paste the TiDB Cloud account ID to the **Account ID** field. - - In **Options**, click **Require external ID (Best practice when a third party will assume this role)**, and then paste the TiDB Cloud External ID to the **External ID** field. 
If the role is created without a Require external ID, once the configuration is done for one TiDB cluster in a project, all TiDB clusters in that project can use the same Role ARN to access your Amazon S3 bucket. If the role is created with the account ID and external ID, only the corresponding TiDB cluster can access the bucket. + - In **Options**, click **Require external ID (Best practice when a third party will assume this role)**, and then paste the TiDB Cloud External ID to the **External ID** field. If the role is created without a Require external ID, once the configuration is done for one TiDB cluster in a project, all TiDB clusters in that project can use the same Role ARN to access your Amazon S3 bucket. If the role is created with the account ID and external ID, only the corresponding TiDB cluster can access the bucket. 3. Click **Next** to open the policy list, choose the policy you just created, and then click **Next**. @@ -178,9 +189,11 @@ Take the following steps to configure an access key: > > TiDB Cloud does not store your access keys. It is recommended that you [delete the access key](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) after the import or export is complete. + + ## Configure GCS access -To allow a TiDB Cloud Serverless cluster to access your GCS bucket, you need to configure the GCS access for the bucket. You can use a service account key to configure the bucket access: +To allow a TiDB Cloud cluster to access your GCS bucket, you need to configure the GCS access for the bucket. You can use a service account key to configure the bucket access: Take the following steps to configure a service account key: @@ -191,8 +204,8 @@ Take the following steps to configure a service account key: 3. Click **CREATE AND CONTINUE** to create the service account. 4. 
In the `Grant this service account access to project`, choose the [IAM roles](https://cloud.google.com/iam/docs/understanding-roles) with the needed permission. - - Exporting data from a TiDB Cloud Serverless cluster needs a role with `storage.objects.create` permission. - - Importing data into a TiDB Cloud Serverless cluster needs a role with `storage.buckets.get`, `storage.objects.get`, and `storage.objects.list` permissions. + - Exporting data from a TiDB Cloud cluster needs a role with `storage.objects.create` permission. + - Importing data into a TiDB Cloud cluster needs a role with `storage.buckets.get`, `storage.objects.get`, and `storage.objects.list` permissions. 5. Click **Continue** to go to the next step. 6. Optional: In the `Grant users access to this service account`, choose members that need to [attach the service account to other resources](https://cloud.google.com/iam/docs/attach-service-accounts). @@ -204,13 +217,17 @@ Take the following steps to configure a service account key: ![service-account-key](/media/tidb-cloud/serverless-external-storage/gcs-service-account-key.png) -3. Choose the default `JSON` key type, and then click **CREATE** to download the Google Cloud credentials file. The file contains the service account key that you need to use when configuring the GCS access for the TiDB Cloud Serverless cluster. +3. Choose the default `JSON` key type, and then click **CREATE** to download the Google Cloud credentials file. The file contains the service account key that you need to use when configuring the GCS access for the TiDB Cloud cluster. + + + + ## Configure Azure Blob Storage access -To allow TiDB Cloud Serverless to access your Azure Blob container, you need to create a service SAS token for the container. +To allow TiDB Cloud to access your Azure Blob container, you need to create a service SAS token for the container. 
-You can create a SAS token either using an [Azure ARM template](https://learn.microsoft.com/en-us/azure/azure-resource-manager/templates/overview) (recommended) or manual configuration. +You can create a SAS token either using an [Azure ARM template](https://learn.microsoft.com/en-us/azure/azure-resource-manager/templates/overview) (recommended) or manual configuration. To create a SAS token using an Azure ARM template, take the following steps: @@ -221,15 +238,15 @@ To create a SAS token using an Azure ARM template, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. 2. Open the **Generate New SAS Token via ARM Template Deployment** dialog. - + 1. Click **Export data to...** > **Azure Blob Storage**. If your cluster has neither imported nor exported any data before, click **Click here to export data to...** > **Azure Blob Storage** at the bottom of the page. - - 2. Scroll down to the **Azure Blob Storage Settings** area, and then click **Click here to create a new one with Azure ARM template** under the SAS Token field. - + + 2. Scroll down to the **Azure Blob Storage Settings** area, and then click **Click here to create a new one with Azure ARM template** under the SAS Token field. + 3. Create a SAS token with the Azure ARM template. 1. In the **Generate New SAS Token via ARM Template Deployment** dialog, click **Click to open the Azure Portal with the pre-configured ARM template**. - + 2. After logging in to Azure, you will be redirected to the Azure **Custom deployment** page. 3. Fill in the **Resource group** and **Storage Account Name** in the **Custom deployment** page. You can get all the information from the storage account overview page where the container is located. 
@@ -237,7 +254,7 @@ To create a SAS token using an Azure ARM template, take the following steps: ![azure-storage-account-overview](/media/tidb-cloud/serverless-external-storage/azure-storage-account-overview.png) 4. Click **Review + create** or **Next** to review the deployment. Click **Create** to start the deployment. - + 5. After it completes, you will be redirected to the deployment overview page. Navigate to the **Outputs** section to get the SAS token. If you have any trouble creating a SAS token with the Azure ARM template, take the following steps to create one manually: @@ -246,7 +263,7 @@ If you have any trouble creating a SAS token with the Azure ARM template, take t Click here to see details 1. On the [Azure Storage account](https://portal.azure.com/#browse/Microsoft.Storage%2FStorageAccounts) page, click your storage account to which the container belongs. - + 2. On your **Storage account** page, click the **Security+network**, and then click **Shared access signature**. ![sas-position](/media/tidb-cloud/serverless-external-storage/azure-sas-position.png) @@ -257,8 +274,8 @@ If you have any trouble creating a SAS token with the Azure ARM template, take t 2. In the **Allowed Resource types** section, choose **Container** and **Object**. 3. In the **Allowed permissions** section, choose the permission as needed. - - Exporting data from a TiDB Cloud Serverless cluster needs the **Read** and **Write** permissions. - - Importing data into a TiDB Cloud Serverless cluster needs the **Read** and **List** permissions. + - Exporting data from a TiDB Cloud cluster needs the **Read** and **Write** permissions. + - Importing data into a TiDB Cloud cluster needs the **Read** and **List** permissions. 4. Adjust **Start and expiry date/time** as needed. 5. You can keep the default values for other settings. 
@@ -269,26 +286,28 @@ If you have any trouble creating a SAS token with the Azure ARM template, take t + + ## Configure Alibaba Cloud Object Storage Service (OSS) access -To allow TiDB Cloud Serverless to access your Alibaba Cloud OSS bucket, you need to create an AccessKey pair for the bucket. +To allow TiDB Cloud to access your Alibaba Cloud OSS bucket, you need to create an AccessKey pair for the bucket. Take the following steps to configure an AccessKey pair: 1. Create a RAM user and get the AccessKey pair. For more information, see [Create a RAM user](https://www.alibabacloud.com/help/en/ram/user-guide/create-a-ram-user). - + In the **Access Mode** section, select **Using permanent AccessKey to access**. 2. Create a custom policy with the required permissions. For more information, see [Create custom policies](https://www.alibabacloud.com/help/en/ram/user-guide/create-a-custom-policy). - + - In the **Effect** section, select **Allow**. - In the **Service** section, select **Object Storage Service**. - In the **Action** section, select the permissions as needed. - - To import data into a TiDB Cloud Serverless cluster, grant **oss:GetObject**, **oss:GetBucketInfo**, and **oss:ListObjects** permissions. - To export data from a TiDB Cloud Serverless cluster, grant **oss:PutObject**, **oss:GetBucketInfo**, and **oss:ListBuckets** permissions. - + To import data into a TiDB Cloud cluster, grant **oss:GetObject**, **oss:GetBucketInfo**, and **oss:ListObjects** permissions. + + To export data from a TiDB Cloud cluster, grant `oss:PutObject` and `oss:GetBucketInfo` permissions. + - In the **Resource** section, select the bucket and the objects in the bucket. 3. Attach the custom policies to the RAM user. For more information, see [Grant permissions to a RAM user](https://www.alibabacloud.com/help/en/ram/user-guide/grant-permissions-to-the-ram-user). 
diff --git a/tidb-cloud/configure-ip-access-list.md b/tidb-cloud/configure-ip-access-list.md index ec4e9bdc929fc..e9ebd2256107f 100644 --- a/tidb-cloud/configure-ip-access-list.md +++ b/tidb-cloud/configure-ip-access-list.md @@ -9,7 +9,7 @@ For each TiDB Cloud Dedicated cluster in TiDB Cloud, you can configure an IP acc > **Note:** > -> This document applies to [**TiDB Cloud Dedicated**](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For instructions on configuring an IP access list for **TiDB Cloud Serverless**, see [Configure TiDB Cloud Serverless Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md). +> This document applies to [**TiDB Cloud Dedicated**](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). For instructions on configuring an IP access list for **{{{ .starter }}}** or **{{{ .essential }}}**, see [Configure {{{ .starter }}} or Essential Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md). To configure an IP access list for your TiDB Cloud Dedicated cluster, take the following steps: diff --git a/tidb-cloud/configure-maintenance-window.md b/tidb-cloud/configure-maintenance-window.md index b8275c35712a4..a1f0ecc11ea48 100644 --- a/tidb-cloud/configure-maintenance-window.md +++ b/tidb-cloud/configure-maintenance-window.md @@ -49,7 +49,7 @@ For every maintenance window, TiDB Cloud sends four email notifications to all p ## View and configure maintenance windows -Regular maintenance ensures that essential updates are performed to safeguard TiDB Cloud from security threats, performance issues, and unreliability. Therefore, the maintenance window is enabled by default and cannot be disabled. +Regular maintenance ensures that important updates are performed to safeguard TiDB Cloud from security threats, performance issues, and unreliability. Therefore, the maintenance window is enabled by default and cannot be disabled. 
> **Note:** > @@ -70,7 +70,7 @@ You can modify the start time to your preferred time or reschedule maintenance t 5. To reschedule a specific maintenance task, click **...** > **Reschedule** in the **Action** column, and choose a new time before the deadline. - If you need to reschedule the maintenance task beyond the deadline, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md#tidb-cloud-support) for assistance. + If you need to reschedule the maintenance task beyond the deadline, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for assistance. ## FAQs diff --git a/tidb-cloud/configure-security-settings.md b/tidb-cloud/configure-security-settings.md index 95b0ff69a50d6..3ccb82e4a5aa5 100644 --- a/tidb-cloud/configure-security-settings.md +++ b/tidb-cloud/configure-security-settings.md @@ -9,7 +9,7 @@ For TiDB Cloud Dedicated clusters, you can configure the root password and allow > **Note:** > -> For TiDB Cloud Serverless clusters, this document is inapplicable and you can refer to [TLS Connection to TiDB Cloud Serverless](/tidb-cloud/secure-connections-to-serverless-clusters.md) instead. +> For {{{ .starter }}} or {{{ .essential }}} clusters, this document is inapplicable and you can refer to [TLS Connection to {{{ .starter }}} or Essential](/tidb-cloud/secure-connections-to-serverless-clusters.md) instead. 1. In the TiDB Cloud console, navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. 
diff --git a/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md b/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md index 68fc79da05cbd..2c899def81f67 100644 --- a/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md +++ b/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md @@ -1,23 +1,23 @@ --- -title: Configure TiDB Cloud Serverless Firewall Rules for Public Endpoints -summary: Learn how to configure and manage firewall rules with public access to your TiDB Cloud Serverless cluster securely. +title: Configure {{{ .starter }}} or Essential Firewall Rules for Public Endpoints +summary: Learn how to configure and manage firewall rules with public access to your {{{ .starter }}} or {{{ .essential }}} cluster securely. --- -# Configure TiDB Cloud Serverless Firewall Rules for Public Endpoints +# Configure {{{ .starter }}} or Essential Firewall Rules for Public Endpoints -This document describes the public connectivity option for TiDB Cloud Serverless. You will learn key concepts for securely managing a TiDB Cloud Serverless cluster accessible via the internet. +This document describes the public connectivity option for {{{ .starter }}} and {{{ .essential }}} clusters. You will learn key concepts for securely managing a cluster accessible via the internet. > **Note:** > -> This document applies to **TiDB Cloud Serverless**. For instructions on configuring an IP access list for **TiDB Cloud Dedicated**, see [Configure an IP Access List for TiDB Cloud Dedicated](/tidb-cloud/configure-ip-access-list.md). +> This document applies to **{{{ .starter }}}** and **{{{ .essential }}}**. For instructions on configuring an IP access list for **TiDB Cloud Dedicated**, see [Configure an IP Access List for TiDB Cloud Dedicated](/tidb-cloud/configure-ip-access-list.md). ## Public endpoints -Configuring public access on your TiDB Cloud Serverless cluster allows the cluster access through a public endpoint. 
That is, the cluster is accessible through the internet. The public endpoint is a publicly resolvable DNS address. The term "authorized network" refers to a range of IP addresses you choose to permit access to your cluster. These permissions are enforced through **firewall rules**. +Configuring public access on your cluster allows the cluster access through a public endpoint. That is, the cluster is accessible through the internet. The public endpoint is a publicly resolvable DNS address. The term "authorized network" refers to a range of IP addresses you choose to permit access to your cluster. These permissions are enforced through **firewall rules**. ### Characteristics of public access -- Only specified IP addresses can access TiDB Cloud Serverless. +- Only specified IP addresses can access your cluster. - By default, all IP addresses (`0.0.0.0 - 255.255.255.255`) are allowed. - You can update allowed IP addresses after cluster creation. - Your cluster has a publicly resolvable DNS name. @@ -31,15 +31,15 @@ You can create a maximum of 200 IP firewall rules. ### Allow AWS access -You can enable access from **all AWS IP addresses** by referring to the official [AWS IP address list](https://docs.aws.amazon.com/vpc/latest/userguide/aws-ip-ranges.html). +If your {{{ .starter }}} cluster is hosted on AWS, you can enable access from **all AWS IP addresses** by referring to the official [AWS IP address list](https://docs.aws.amazon.com/vpc/latest/userguide/aws-ip-ranges.html). TiDB Cloud regularly updates this list and uses the reserved IP address **169.254.65.87** to represent all AWS IP addresses. ## Create and manage a firewall rule -This section describes how to manage firewall rules for a TiDB Cloud Serverless cluster. With a public endpoint, the connections to the TiDB Cloud Serverless cluster are restricted to the IP addresses specified in the firewall rules. 
+This section describes how to manage firewall rules for a {{{ .starter }}} or {{{ .essential }}} cluster. With a public endpoint, the connections to your cluster are restricted to the IP addresses specified in the firewall rules. -To add firewall rules to a TiDB Cloud Serverless cluster, take the following steps: +To add firewall rules to a {{{ .starter }}} or {{{ .essential }}} cluster, take the following steps: 1. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page, and then click the name of your target cluster to go to its overview page. @@ -51,8 +51,8 @@ To add firewall rules to a TiDB Cloud Serverless cluster, take the following ste > > In some situations, the IP address observed by the TiDB Cloud console differs from the IP address used when accessing the internet. Therefore, you might need to change the start and end IP addresses to make the rule function as expected. You can use a search engine or other online tool to check your own IP address. For example, search for "what is my IP." -4. Click **Add rule** to add more address ranges. In the displayed window, you can specify a single IP address or a range of IP addresses. If you want to limit the rule to a single IP address, type the same IP address in the **Start IP Address** and **End IP Address** fields. Opening the firewall enables administrators, users, and applications to access any database on your TiDB Cloud Serverless cluster to which they have valid credentials. Click **Submit** to add the firewall rule. +4. Click **Add rule** to add more address ranges. In the displayed window, you can specify a single IP address or a range of IP addresses. If you want to limit the rule to a single IP address, type the same IP address in the **Start IP Address** and **End IP Address** fields. Opening the firewall enables administrators, users, and applications to access any database on your cluster to which they have valid credentials. Click **Submit** to add the firewall rule. 
## What's next -- [Connect to TiDB Cloud Serverless via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md) \ No newline at end of file +- [Connect to {{{ .starter }}} or Essential via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md) \ No newline at end of file diff --git a/tidb-cloud/configure-sql-users.md b/tidb-cloud/configure-sql-users.md new file mode 100644 index 0000000000000..db6352b2396bd --- /dev/null +++ b/tidb-cloud/configure-sql-users.md @@ -0,0 +1,102 @@ +--- +title: Manage Database Users and Roles +summary: Learn how to manage database users and roles in the TiDB Cloud console. +--- + +# Manage Database Users and Roles + +This document describes how to manage database users and roles using the **SQL Users** page in the [TiDB Cloud console](https://tidbcloud.com/). + +> **Note:** +> +> - The **SQL Users** page is in beta and is only available upon request. To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for the SQL Users page" in the **Description** field, and then click **Submit**. +> - Database users and roles are independent of [organization and project users and roles](/tidb-cloud/manage-user-access.md). Database users are used to access databases in a TiDB cluster, while organization and project users are used to access organizations and projects in the [TiDB Cloud console](https://tidbcloud.com/). +> - In addition to the **SQL Users** page, you can also manage database users and roles by connecting to your cluster with a SQL client and writing SQL statements. For more information, see [TiDB User Account Management](https://docs.pingcap.com/tidb/dev/user-account-management). 
+ +## Roles of database users + +In TiDB Cloud, you can grant both a built-in role and multiple custom roles (if available) to a SQL user for role-based access control. + +- Built-in roles + + TiDB Cloud provides the following built-in roles to help you control the database access of SQL users. You can grant one of the built-in roles to a SQL user. + + - `Database Admin` + - `Database Read-Write` + - `Database Read-Only` + +- Custom roles + + In addition to a built-in role, if your cluster has custom roles that are created using the [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) statement, you can also grant these custom roles to a SQL user when you create or edit SQL users in the TiDB Cloud console. + +After a SQL user is granted both a built-in role and multiple custom roles, the user's permissions will be the union of all the permissions derived from these roles. + +## Prerequisites + +- To manage database users and roles using the **SQL Users** page, you must be in the `Organization Owner` role of your organization or the `Project Owner` role of your project. +- If you are in the `Project Data Access Read-Write` or `Project Data Access Read-Only` role of a project, you can only view database users on the **SQL Users** page of that project. + +## View SQL users + +To view SQL users of a cluster, take the following steps: + +1. On the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, click the name of your target cluster to go to its overview page. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. In the left navigation pane, click **Settings** > **SQL Users**. + +## Create a SQL user + +To create a SQL user for a cluster, take the following steps: + +1. Navigate to the [**SQL Users**](/tidb-cloud/configure-sql-users.md#view-sql-users) page of your cluster. + +2. Click **Create SQL User** in the upper-right corner. 
+ + A dialog for the SQL user configuration is displayed. + +3. In the dialog, provide the information of the SQL user as follows: + + 1. Enter the name of the SQL user. + 2. Either create a password for the SQL user or let TiDB Cloud automatically generate a password for the user. + 3. Grant roles to the SQL user. + + - **Built-in Role**: you need to select a built-in role for the SQL user in the **Built-in Role** drop-down list. + + - **Custom Role**: if your cluster has custom roles that are created using the [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) statement, you can grant custom roles to the SQL user by selecting the roles from the **Custom Role** drop-down list. Otherwise, the **Custom Role** drop-down list is invisible here. + + For each SQL user, you can grant a built-in role and multiple custom roles (if any). + +4. Click **Create**. + +## Edit a SQL user + +To edit the password or roles of a SQL user, take the following steps: + +1. Navigate to the [**SQL Users**](/tidb-cloud/configure-sql-users.md#view-sql-users) page of your cluster. + +2. In the row of the SQL user to be edited, click **...** in the **Action** column, and then click **Edit**. + + A dialog for the SQL user configuration is displayed. + +3. In the dialog, you can edit the user password and roles as needed, and then click **Update**. + + > **Note:** + > + > The roles of the default `.root` user do not support modification. You can only change the password. + +## Delete a SQL user + +To delete a SQL user, take the following steps: + +1. Navigate to the [**SQL Users**](/tidb-cloud/configure-sql-users.md#view-sql-users) page of your cluster. +2. In the row of the SQL user to be deleted, click **...** in the **Action** column, and then click **Delete**. + + > **Note:** + > + > The default `.root` user does not support deletion. + +3. Confirm the deletion. 
diff --git a/tidb-cloud/connect-to-tidb-cluster-serverless.md b/tidb-cloud/connect-to-tidb-cluster-serverless.md index 604786f4776c1..9dd93545e9e71 100644 --- a/tidb-cloud/connect-to-tidb-cluster-serverless.md +++ b/tidb-cloud/connect-to-tidb-cluster-serverless.md @@ -1,31 +1,32 @@ --- -title: Connect to Your TiDB Cloud Serverless Cluster -summary: Learn how to connect to your TiDB Cloud Serverless cluster via different methods. +title: Connect to Your {{{ .starter }}} or Essential Cluster +summary: Learn how to connect to your {{{ .starter }}} or {{{ .essential }}} cluster via different methods. --- -# Connect to Your TiDB Cloud Serverless Cluster +# Connect to Your {{{ .starter }}} or Essential Cluster -This document describes how to connect to your TiDB Cloud Serverless cluster. +This document describes how to connect to your {{{ .starter }}} or {{{ .essential }}} cluster. > **Tip:** > -> To learn how to connect to a TiDB Cloud Dedicated cluster, see [Connect to Your TiDB Cloud Dedicated Cluster](/tidb-cloud/connect-to-tidb-cluster.md). +> - To learn how to connect to a TiDB Cloud Dedicated cluster, see [Connect to Your TiDB Cloud Dedicated Cluster](/tidb-cloud/connect-to-tidb-cluster.md). +> - This document focuses on the network connection methods for {{{ .starter }}} and {{{ .essential }}}. To connect to TiDB via a specific tool, driver, or ORM, see [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md). -## Connection methods +## Network connection methods -After your TiDB Cloud Serverless cluster is created on TiDB Cloud, you can connect to it via one of the following methods: +After your {{{ .starter }}} or {{{ .essential }}} cluster is created on TiDB Cloud, you can connect to it via one of the following methods: - Direct connections - Direct connections mean the MySQL native connection system over TCP. 
You can connect to your TiDB Cloud Serverless cluster using any tool that supports MySQL connection, such as [MySQL client](https://dev.mysql.com/doc/refman/8.0/en/mysql.html). + Direct connections mean the MySQL native connection system over TCP. You can connect to your cluster using any tool that supports MySQL connection, such as [MySQL client](https://dev.mysql.com/doc/refman/8.0/en/mysql.html). - [Data Service (beta)](/tidb-cloud/data-service-overview.md) - TiDB Cloud provides a Data Service feature that enables you to connect to your TiDB Cloud Serverless cluster via an HTTPS request using a custom API endpoint. Unlike direct connections, Data Service accesses TiDB Cloud Serverless data via a RESTful API rather than raw SQL. + TiDB Cloud provides a Data Service feature that enables you to connect to your {{{ .starter }}} cluster hosted on AWS via an HTTPS request using a custom API endpoint. Unlike direct connections, Data Service accesses your cluster data via a RESTful API rather than raw SQL. -- [Serverless Driver (beta)](/tidb-cloud/serverless-driver.md) +- [Serverless Driver (beta)](/develop/serverless-driver.md) - TiDB Cloud provides a serverless driver for JavaScript, which allows you to connect to your TiDB Cloud Serverless cluster in edge environments with the same experience as direct connections. + TiDB Cloud provides a serverless driver for JavaScript, which allows you to connect to your {{{ .starter }}} or {{{ .essential }}} cluster in edge environments with the same experience as direct connections. 
In the preceding connection methods, you can choose your desired one based on your needs: @@ -37,7 +38,7 @@ In the preceding connection methods, you can choose your desired one based on yo ## Network -There are two network connection types for TiDB Cloud Serverless: +There are two network connection types for {{{ .starter }}} and {{{ .essential }}}: - [Private endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) (recommended) @@ -47,14 +48,14 @@ There are two network connection types for TiDB Cloud Serverless: The standard connection exposes a public endpoint, so you can connect to your TiDB cluster via a SQL client from your laptop. - TiDB Cloud Serverless requires [TLS connections](/tidb-cloud/secure-connections-to-serverless-clusters.md), which ensures the security of data transmission from your applications to TiDB clusters. + {{{ .starter }}} and {{{ .essential }}} require [TLS connections](/tidb-cloud/secure-connections-to-serverless-clusters.md), which ensures the security of data transmission from your applications to TiDB clusters. The following table shows the network you can use in different connection methods: | Connection method | Network | Description | |----------------------------|------------------------------|-------------------------------------------------------------------------------------------------------------------| | Direct connections | Public or private endpoint | Direct connections can be made via both public and private endpoints. | -| Data Service (beta) | / | Accessing TiDB Cloud Serverless via Data Service (beta) does not need to specify the network type. | +| Data Service (beta) | / | Accessing {{{ .starter }}} hosted on AWS via Data Service (beta) does not need to specify the network type. | | Serverless Driver (beta) | Public endpoint | Serverless Driver only supports connections via public endpoint. 
| ## What's next diff --git a/tidb-cloud/connect-to-tidb-cluster.md b/tidb-cloud/connect-to-tidb-cluster.md index 10cd319dfbe3d..92b06eb3e36c5 100644 --- a/tidb-cloud/connect-to-tidb-cluster.md +++ b/tidb-cloud/connect-to-tidb-cluster.md @@ -9,9 +9,10 @@ This document introduces the methods to connect to your TiDB Cloud Dedicated clu > **Tip:** > -> To learn how to connect to a TiDB Cloud Serverless cluster, see [Connect to Your TiDB Cloud Serverless Cluster](/tidb-cloud/connect-to-tidb-cluster-serverless.md). +> - To learn how to connect to a {{{ .starter }}} or {{{ .essential }}} cluster, see [Connect to Your {{{ .starter }}} or Essential Cluster](/tidb-cloud/connect-to-tidb-cluster-serverless.md). +> - This document focuses on the network connection methods for TiDB Cloud Dedicated. To connect to TiDB via a specific tool, driver, or ORM, see [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md). -After your TiDB Cloud Dedicated cluster is created on TiDB Cloud, you can connect to it via one of the following methods: +After your TiDB Cloud Dedicated cluster is created on TiDB Cloud, you can connect to it via one of the following network connection methods: - Direct connections diff --git a/tidb-cloud/connect-via-sql-shell.md b/tidb-cloud/connect-via-sql-shell.md index 5e34649839927..80427ab1e3a0f 100644 --- a/tidb-cloud/connect-via-sql-shell.md +++ b/tidb-cloud/connect-via-sql-shell.md @@ -9,7 +9,7 @@ In TiDB Cloud SQL Shell, you can try TiDB SQL, test out TiDB's compatibility wit > **Note:** > -> You cannot connect to [TiDB Cloud Serverless clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) using SQL Shell. To connect to your TiDB Cloud Serverless cluster, see [Connect to TiDB Cloud Serverless clusters](/tidb-cloud/connect-to-tidb-cluster-serverless.md). +> You cannot connect to [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) or [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) using SQL Shell. 
To connect to your {{{ .starter }}} or {{{ .essential }}} cluster, see [Connect to Your {{{ .starter }}} or Essential Cluster](/tidb-cloud/connect-to-tidb-cluster-serverless.md). To connect to your TiDB cluster using SQL shell, perform the following steps: diff --git a/tidb-cloud/connect-via-standard-connection-serverless.md b/tidb-cloud/connect-via-standard-connection-serverless.md index 540744211502c..7732dd82e4d8f 100644 --- a/tidb-cloud/connect-via-standard-connection-serverless.md +++ b/tidb-cloud/connect-via-standard-connection-serverless.md @@ -1,11 +1,11 @@ --- -title: Connect to TiDB Cloud Serverless via Public Endpoint -summary: Learn how to connect to your TiDB Cloud Serverless cluster via public endpoint. +title: Connect to {{{ .starter }}} or Essential via Public Endpoint +summary: Learn how to connect to your {{{ .starter }}} or {{{ .essential }}} cluster via public endpoint. --- -# Connect to TiDB Cloud Serverless via Public Endpoint +# Connect to {{{ .starter }}} or Essential via Public Endpoint -This document describes how to connect to your TiDB Cloud Serverless cluster via a public endpoint, using a SQL client from your computer, as well as how to disable a public endpoint. +This document describes how to connect to your {{{ .starter }}} or {{{ .essential }}} cluster via a public endpoint, using a SQL client from your computer, as well as how to disable a public endpoint. ## Connect via a public endpoint @@ -13,7 +13,7 @@ This document describes how to connect to your TiDB Cloud Serverless cluster via > > To learn how to connect to a TiDB Cloud Dedicated cluster via public endpoint, see [Connect to TiDB Cloud Dedicated via Public Connection](/tidb-cloud/connect-via-standard-connection.md). -To connect to a TiDB Cloud Serverless cluster via public endpoint, take the following steps: +To connect to a {{{ .starter }}} or {{{ .essential }}} cluster via public endpoint, take the following steps: 1. 
Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page, and then click the name of your target cluster to go to its overview page. @@ -21,12 +21,28 @@ To connect to a TiDB Cloud Serverless cluster via public endpoint, take the foll 3. In the dialog, keep the default setting of the connection type as `Public`, and select your preferred connection method and operating system to get the corresponding connection string. + + + > **Note:** + > + > - Keeping the connection type as `Public` means the connection is via standard TLS connection. For more information, see [TLS Connection to {{{ .starter }}} or Essential](/tidb-cloud/secure-connections-to-serverless-clusters.md). + > - If you choose **Private Endpoint** in the **Connection Type** drop-down list, it means that the connection is via private endpoint. For more information, see the following documents: + > + > - [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) + > - [Connect to {{{ .starter }}} or Essential via Alibaba Cloud Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md) + + + + + > **Note:** > - > - Keeping the connection type as `Public` means the connection is via standard TLS connection. For more information, see [TLS Connection to TiDB Cloud Serverless](/tidb-cloud/secure-connections-to-serverless-clusters.md). - > - If you choose **Private Endpoint** in the **Connection Type** drop-down list, it means that the connection is via private endpoint. For more information, see [Connect to TiDB Cloud Serverless via Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). + > - Keeping the connection type as `Public` means the connection is via standard TLS connection. For more information, see [TLS Connection to {{{ .starter }}} or Essential](/tidb-cloud/secure-connections-to-serverless-clusters.md). 
+ > - If you choose **Private Endpoint** in the **Connection Type** drop-down list, it means that the connection is via private endpoint. For more information, see [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). + + -4. TiDB Cloud Serverless lets you create [branches](/tidb-cloud/branch-overview.md) for your cluster. After a branch is created, you can choose to connect to the branch via the **Branch** drop-down list. `main` represents the cluster itself. +4. TiDB Cloud lets you create [branches](/tidb-cloud/branch-overview.md) for your {{{ .starter }}} or {{{ .essential }}} cluster. After a branch is created, you can choose to connect to the branch via the **Branch** drop-down list. `main` represents the cluster itself. 5. If you have not set a password yet, click **Generate Password** to generate a random password. The generated password will not show again, so save your password in a secure location. @@ -34,12 +50,12 @@ To connect to a TiDB Cloud Serverless cluster via public endpoint, take the foll > **Note:** > - > When you connect to a TiDB Cloud Serverless cluster, you must include the prefix for your cluster in the user name and wrap the name with quotation marks. For more information, see [User name prefix](/tidb-cloud/select-cluster-tier.md#user-name-prefix). - > Your client IP must be in the allowed IP rules of the public endpoint of your cluster. For more information, see [Configure TiDB Cloud Serverless Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md). + > When you connect to a {{{ .starter }}} or {{{ .essential }}} cluster, you must include the prefix for your cluster in the user name and wrap the name with quotation marks. For more information, see [User name prefix](/tidb-cloud/select-cluster-tier.md#user-name-prefix). + > Your client IP must be in the allowed IP rules of the public endpoint of your cluster. 
For more information, see [Configure {{{ .starter }}} or Essential Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md). ## Disable a public endpoint -If you do not need to use a public endpoint of a TiDB Cloud Serverless cluster, you can disable it to prevent connections from the internet: +If you do not need to use a public endpoint of a {{{ .starter }}} or {{{ .essential }}} cluster, you can disable it to prevent connections from the internet: 1. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page, and then click the name of your target cluster to go to its overview page. diff --git a/tidb-cloud/connect-via-standard-connection.md b/tidb-cloud/connect-via-standard-connection.md index e19cecc7260d1..63e190a8c0261 100644 --- a/tidb-cloud/connect-via-standard-connection.md +++ b/tidb-cloud/connect-via-standard-connection.md @@ -9,7 +9,7 @@ This document describes how to connect to your TiDB Cloud Dedicated cluster via > **Tip:** > -> To learn how to connect to a TiDB Cloud Serverless cluster via public connection, see [Connect to TiDB Cloud Serverless via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md). +> To learn how to connect to a {{{ .starter }}} or {{{ .essential }}} cluster via public connection, see [Connect to {{{ .starter }}} or Essential via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md). 
## Prerequisite: Configure IP access list diff --git a/tidb-cloud/connected-ai-chat-in-im.md b/tidb-cloud/connected-ai-chat-in-im.md index 1c2eb3e30d283..210a0c44f0c1d 100644 --- a/tidb-cloud/connected-ai-chat-in-im.md +++ b/tidb-cloud/connected-ai-chat-in-im.md @@ -5,7 +5,7 @@ summary: Introduces detailed information about the AI chat in Instant Message (I # Connected: AI chat in IM -The AI chat in Instant Message (IM) powered by PingCAP, is an offering to invite TiDB AI assistant chatbot to specific IM channels for preliminary technical support and consultation. This service is based on Graph RAG (Retrieval-Augmented Generation) built on top of [TiDB Vector Search](/vector-search/vector-search-overview.md). +The AI chat in Instant Message (IM) powered by PingCAP, is an offering to invite TiDB AI assistant chatbot to specific IM channels for preliminary technical support and consultation. This service is based on Graph RAG (Retrieval-Augmented Generation) built on top of [TiDB Vector Search](/ai/concepts/vector-search-overview.md). ## Limitation diff --git a/tidb-cloud/connected-care-detail.md b/tidb-cloud/connected-care-detail.md index e037efa18774a..dedc759a424df 100644 --- a/tidb-cloud/connected-care-detail.md +++ b/tidb-cloud/connected-care-detail.md @@ -107,22 +107,22 @@ This document provides detailed information about the [Connected Care](/tidb-clo
- - - - + + + + - - - - + + + + - - - - + + + + diff --git a/tidb-cloud/connected-care-overview.md b/tidb-cloud/connected-care-overview.md index 65ea9988b206e..85cf367ff1c61 100644 --- a/tidb-cloud/connected-care-overview.md +++ b/tidb-cloud/connected-care-overview.md @@ -67,11 +67,11 @@ The support plans in the Connected Care services introduce a completely new set With these new features, the Connected Care services offer you better connectivity, more personalized support, and cost-effective solutions for different customer needs. -- New **Enterprise** and **Premium** plans: connect customers with modern communication tools and advanced AI capabilities through advanced monitoring service in Clinic, IM subscription for TiDB Cloud alerts, IM subscription for ticket updates, AI chat in IM, and IM interaction for support tickets. +- New **Enterprise** and **Premium** plans: provide modern communication tools and advanced AI capabilities through advanced monitoring service in Clinic, IM subscription for TiDB Cloud alerts, IM subscription for ticket updates, AI chat in IM, and IM interaction for support tickets. -- New **Developer** plan: customers benefit from access to the same community and [TiDB.AI](https://tidb.ai/) assistance as the **Basic** plan, while also enjoying direct connections with unlimited access to technical support. +- New **Developer** plan: provides access to the same community channels ([Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap) and [Discord](https://discord.com/invite/KVRZBR2DrG)) and [TiDB.AI](https://tidb.ai/) assistance as the **Basic** plan, along with direct connections and unlimited access to technical support. -- New **Basic** plan: customers will be guided to join the active community channels, where they can engage with other community members and interact with [TiDB.AI](https://tidb.ai/) for technical assistance. 
+- New **Basic** plan: you can join community channels ([Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap) and [Discord](https://discord.com/invite/KVRZBR2DrG)) to engage with other community members and use [TiDB.AI](https://tidb.ai/) for technical assistance. ## Transition to Connected Care diff --git a/tidb-cloud/create-tidb-cluster-serverless.md b/tidb-cloud/create-tidb-cluster-serverless.md index 20eb8ad07c946..a5ebf4a10d925 100644 --- a/tidb-cloud/create-tidb-cluster-serverless.md +++ b/tidb-cloud/create-tidb-cluster-serverless.md @@ -1,11 +1,11 @@ --- -title: Create a TiDB Cloud Serverless Cluster -summary: Learn how to create your TiDB Cloud Serverless cluster. +title: Create a {{{ .starter }}} or Essential Cluster +summary: Learn how to create a {{{ .starter }}} or {{{ .essential }}} cluster. --- -# Create a TiDB Cloud Serverless Cluster +# Create a {{{ .starter }}} or Essential Cluster -This document describes how to create a TiDB Cloud Serverless cluster in the [TiDB Cloud console](https://tidbcloud.com/). +This document describes how to create a {{{ .starter }}} or {{{ .essential }}} cluster in the [TiDB Cloud console](https://tidbcloud.com/). > **Tip:** > @@ -15,30 +15,54 @@ This document describes how to create a TiDB Cloud Serverless cluster in the [Ti If you do not have a TiDB Cloud account, click [here](https://tidbcloud.com/signup) to sign up for an account. + + +- You can either sign up with email and password so that you can manage your password using TiDB Cloud, or sign up with your Google, GitHub, or Microsoft account. +- For AWS Marketplace users, you can also sign up through AWS Marketplace. To do that, search for `TiDB Cloud` in [AWS Marketplace](https://aws.amazon.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- For Azure Marketplace users, you can also sign up through Azure Marketplace. 
To do that, search for `TiDB Cloud` in [Azure Marketplace](https://azuremarketplace.microsoft.com), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- For Google Cloud Marketplace users, you can also sign up through Google Cloud Marketplace. To do that, search for `TiDB Cloud` in [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- For Alibaba Cloud Marketplace users, you can also sign up through Alibaba Cloud Marketplace. To do that, search for `TiDB Cloud` in [Alibaba Cloud Marketplace](https://marketplace.alibabacloud.com/), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. + + + + + - You can either sign up with email and password so that you can manage your password using TiDB Cloud, or sign up with your Google, GitHub, or Microsoft account. - For AWS Marketplace users, you can also sign up through AWS Marketplace. To do that, search for `TiDB Cloud` in [AWS Marketplace](https://aws.amazon.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. - For Azure Marketplace users, you can also sign up through Azure Marketplace. To do that, search for `TiDB Cloud` in [Azure Marketplace](https://azuremarketplace.microsoft.com), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. - For Google Cloud Marketplace users, you can also sign up through Google Cloud Marketplace. To do that, search for `TiDB Cloud` in [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. 
+ + ## Steps -If you are in the `Organization Owner` or the `Project Owner` role, you can create a TiDB Cloud Serverless cluster as follows: +If you are in the `Organization Owner` or the `Project Owner` role, you can create a {{{ .starter }}} or {{{ .essential }}} cluster as follows: 1. Log in to the [TiDB Cloud console](https://tidbcloud.com/), and then navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page. 2. Click **Create Cluster**. -3. On the **Create Cluster** page, **Serverless** is selected by default. +3. Select a cluster plan. + + You can start with a **Starter** cluster and later upgrade to an **Essential** cluster as your needs grow. For more information, see [cluster plans](/tidb-cloud/select-cluster-tier.md). -4. The cloud provider of TiDB Cloud Serverless is AWS. You can select an AWS region where you want to host your cluster. +4. Choose a cloud provider and a region where you want to host your cluster. 5. Update the default cluster name if necessary. -6. Select a cluster plan. TiDB Cloud Serverless provides two [cluster plans](/tidb-cloud/select-cluster-tier.md#cluster-plans): **Free Cluster** and **Scalable Cluster**. You can start with a free cluster and later upgrade to a scalable cluster as your needs grow. To create a scalable cluster, you need to specify a **Monthly Spending Limit** and add a credit card. +6. Update the capacity of the cluster. + + - **Starter** plan: + + - You can update the spending limit for your cluster. If the spending limit is set to 0, the cluster remains free. If the spending limit is greater than 0, you need to add a credit card before creating the cluster. + + - By default, each organization can create up to five [free Starter clusters](/tidb-cloud/select-cluster-tier.md#starter). To create additional Starter clusters, you must add a credit card and specify a spending limit. 
+ + - **Essential** plan: + + - You must specify both a minimum and maximum number of Request Capacity Units (RCUs) for your cluster. - > **Note:** - > - > For each organization in TiDB Cloud, you can create a maximum of five [free clusters](/tidb-cloud/select-cluster-tier.md#free-cluster-plan) by default. To create more TiDB Cloud Serverless clusters, you need to add a credit card and create [scalable clusters](/tidb-cloud/select-cluster-tier.md#scalable-cluster-plan) for the usage. + - RCUs represent the compute resources provisioned for your workload. TiDB Cloud automatically scales your cluster within this range based on demand. 7. Click **Create**. @@ -46,7 +70,7 @@ If you are in the `Organization Owner` or the `Project Owner` role, you can crea ## What's next -After your cluster is created, follow the instructions in [Connect to TiDB Cloud Serverless via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md) to create a password for your cluster. +After your cluster is created, follow the instructions in [Connect to {{{ .starter }}} or Essential via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md) to create a password for your cluster. > **Note:** > diff --git a/tidb-cloud/create-tidb-cluster.md b/tidb-cloud/create-tidb-cluster.md index c582f52ea286c..ee95d7ee36930 100644 --- a/tidb-cloud/create-tidb-cluster.md +++ b/tidb-cloud/create-tidb-cluster.md @@ -9,7 +9,7 @@ This tutorial guides you through signing up and creating a TiDB Cloud Dedicated > **Tip:** > -> To learn how to create a TiDB Cloud Serverless cluster, see [Create a TiDB Cloud Serverless Cluster](/tidb-cloud/create-tidb-cluster-serverless.md). +> To learn how to create a {{{ .starter }}} or {{{ .essential }}} cluster, see [Create a {{{ .starter }}} or Essential Cluster](/tidb-cloud/create-tidb-cluster-serverless.md). ## Before you begin @@ -79,7 +79,11 @@ If you are in the `Organization Owner` or the `Project Owner` role, you can crea 6. Click **Create**. 
- Your TiDB Cloud cluster will be created in approximately 20 to 30 minutes. + Your TiDB Cloud cluster will be created in approximately 20 to 30 minutes. You will receive a notification from the TiDB Cloud console when the creation is complete. + + > **Note:** + > + > The cluster creation time can vary by region and might take longer than 30 minutes. If the process takes significantly longer than expected, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). ## Step 3. Set the root password diff --git a/tidb-cloud/csv-config-for-import-data.md b/tidb-cloud/csv-config-for-import-data.md index 80b11e055ef3a..a67ec6c53f013 100644 --- a/tidb-cloud/csv-config-for-import-data.md +++ b/tidb-cloud/csv-config-for-import-data.md @@ -9,7 +9,7 @@ This document introduces CSV configurations for the Import Data service on TiDB The following is the CSV Configuration window when you use the Import Data service on TiDB Cloud to import CSV files. For more information, see [Import CSV Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-csv-files.md). -![CSV Configurations](/media/tidb-cloud/import-data-csv-config.png) + ## Separator @@ -33,9 +33,15 @@ The following is the CSV Configuration window when you use the Import Data servi - Default: `"` -## Backslash escape +## Null Value -- Definition: whether to parse backslash inside fields as escape characters. If **Backslash escape** is `True`, the following sequences are recognized and converted: +- Definition: defines the string that represents a `NULL` value in the CSV file. + +- Default: `\N` + +## Backslash Escape + +- Definition: controls whether to parse backslashes within fields as escape characters. 
If **Backslash Escape** is enabled, the following sequences are recognized and converted: | Sequence | Converted to | |----------|--------------------------| @@ -65,12 +71,10 @@ The following is the CSV Configuration window when you use the Import Data servi `"{\"key1\": \"val1\", \"key2\":\"val2\" }"` -- Default: `True` - -## NULL value +- Default: Enabled -- Definition: defines the string that represents a `NULL` value in the CSV file. +## Skip Header -- Default: `\N` +- Definition: controls whether to skip the header row in the CSV file. If **Skip Header** is enabled, the first row of the CSV file will be skipped during import. -- Custom null values are not supported in the console. You can use the [TiDB Cloud CLI](/tidb-cloud/get-started-with-cli.md) instead. For more information, see [`ticloud serverless import start`](/tidb-cloud/ticloud-import-start.md). +- Default: Disabled \ No newline at end of file diff --git a/tidb-cloud/data-service-get-started.md b/tidb-cloud/data-service-get-started.md index 1548bb65f7a2e..5804f51ce04d6 100644 --- a/tidb-cloud/data-service-get-started.md +++ b/tidb-cloud/data-service-get-started.md @@ -17,7 +17,11 @@ This document introduces how to quickly get started with TiDB Cloud Data Service ## Before you begin -Before creating a Data App, make sure that you have created a [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster. If you do not have one, follow the steps in [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md) to create one. +Before creating a Data App, make sure that you have created a [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md) cluster hosted on AWS. If you do not have one, follow the steps in [Create a {{{ .starter }}} or Essential Cluster](/tidb-cloud/create-tidb-cluster-serverless.md) to create one. + +> **Note:** +> +> Data Service is available only for [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md) clusters hosted on AWS. 
To use Data Service in TiDB Cloud Dedicated clusters, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). ## Get started with a sample Data App diff --git a/tidb-cloud/data-service-manage-endpoint.md b/tidb-cloud/data-service-manage-endpoint.md index 84fd09a3ff20b..6f78f9ddc1d8d 100644 --- a/tidb-cloud/data-service-manage-endpoint.md +++ b/tidb-cloud/data-service-manage-endpoint.md @@ -44,7 +44,7 @@ In TiDB Cloud Data Service, you can generate one or multiple endpoints automatic For each operation you select, TiDB Cloud Data Service will generate a corresponding endpoint. If you select a batch operation (such as `POST (Batch Create)`), the generated endpoint lets you operate on multiple rows in a single request. - If the table you selected contains [vector data types](/vector-search/vector-search-data-types.md), you can enable the **Vector Search Operations** option and select a vector distance function to generate a vector search endpoint that automatically calculates vector distances based on your selected distance function. The supported [vector distance functions](/vector-search/vector-search-functions-and-operators.md) include the following: + If the table you selected contains [vector data types](/ai/reference/vector-search-data-types.md), you can enable the **Vector Search Operations** option and select a vector distance function to generate a vector search endpoint that automatically calculates vector distances based on your selected distance function. The supported [vector distance functions](/ai/reference/vector-search-functions-and-operators.md) include the following: - `VEC_L2_DISTANCE` (default): calculates the L2 distance (Euclidean distance) between two vectors. - `VEC_COSINE_DISTANCE`: calculates the cosine distance between two vectors. 
diff --git a/tidb-cloud/data-service-oas-with-nextjs.md b/tidb-cloud/data-service-oas-with-nextjs.md index 21357b96f4851..f06f3071f1429 100644 --- a/tidb-cloud/data-service-oas-with-nextjs.md +++ b/tidb-cloud/data-service-oas-with-nextjs.md @@ -11,12 +11,12 @@ This document introduces how to use the OpenAPI Specification of a [Data App](/t Before using OpenAPI Specification with Next.js, make sure that you have the following: -- A TiDB cluster. For more information, see [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md) or [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md). +- A TiDB cluster. For more information, see [Create a {{{ .starter }}} or Essential Cluster](/tidb-cloud/create-tidb-cluster-serverless.md) or [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md). - [Node.js](https://nodejs.org/en/download) - [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) - [yarn](https://yarnpkg.com/getting-started/install) -This document uses a TiDB Cloud Serverless cluster as an example. +This document uses a {{{ .starter }}} cluster as an example. ## Step 1. Prepare data diff --git a/tidb-cloud/data-service-overview.md b/tidb-cloud/data-service-overview.md index d77b7ef9b4887..d9bb808b9fc5e 100644 --- a/tidb-cloud/data-service-overview.md +++ b/tidb-cloud/data-service-overview.md @@ -11,7 +11,7 @@ Data Service enables you to access TiDB Cloud data via an HTTPS request using a > **Note:** > -> Data Service is available for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. To use Data Service in TiDB Cloud Dedicated clusters, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). +> Data Service is available only for {{{ .starter }}} clusters hosted on AWS. To use Data Service in TiDB Cloud Dedicated clusters, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). 
An endpoint in Data Service is a web API that you can customize to execute SQL statements. You can specify parameters for your SQL statements, such as the value used in the `WHERE` clause. When a client calls an endpoint and provides values for the parameters in a request URL, the endpoint executes the corresponding SQL statement with the provided parameters and returns the results as part of the HTTP response. diff --git a/tidb-cloud/dedicated-external-storage.md b/tidb-cloud/dedicated-external-storage.md index 093b78c9fc168..abcf80e4492b0 100644 --- a/tidb-cloud/dedicated-external-storage.md +++ b/tidb-cloud/dedicated-external-storage.md @@ -8,7 +8,7 @@ aliases: ['/tidb-cloud/config-s3-and-gcs-access'] If your source data is stored in Amazon S3 buckets, Azure Blob Storage containers, or Google Cloud Storage (GCS) buckets, before importing or migrating the data to TiDB Cloud, you need to configure cross-account access to the buckets. This document describes how to do this for TiDB Cloud Dedicated clusters. -If you need to configure these external storages for TiDB Cloud Serverless clusters, see [Configure External Storage Access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md). +If you need to configure these external storages for {{{ .starter }}} or {{{ .essential }}} clusters, see [Configure External Storage Access for {{{ .starter }}} or Essential](/tidb-cloud/configure-external-storage-access.md). 
## Configure Amazon S3 access diff --git a/tidb-cloud/_index.md b/tidb-cloud/dedicated/_index.md similarity index 89% rename from tidb-cloud/_index.md rename to tidb-cloud/dedicated/_index.md index dfd7eec3a3374..25b60eaa79ba5 100644 --- a/tidb-cloud/_index.md +++ b/tidb-cloud/dedicated/_index.md @@ -20,14 +20,12 @@ summary: TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings -[Try Out TiDB Cloud Serverless](https://docs.pingcap.com/tidbcloud/tidb-cloud-quickstart) +[Try Out TiDB Cloud](https://docs.pingcap.com/tidbcloud/tidb-cloud-quickstart) [Try Out TiDB + AI](https://docs.pingcap.com/tidbcloud/vector-search-get-started-using-python) [Try Out HTAP](https://docs.pingcap.com/tidbcloud/tidb-cloud-htap-quickstart) -[Try Out TiDB Cloud CLI](https://docs.pingcap.com/tidbcloud/get-started-with-cli) - [Proof of Concept](https://docs.pingcap.com/tidbcloud/tidb-cloud-poc) @@ -60,17 +58,13 @@ summary: TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings [Use API (Beta)](https://docs.pingcap.com/tidbcloud/api-overview) -[Use TiDB Cloud CLI](https://docs.pingcap.com/tidbcloud/get-started-with-cli) - [Import Sample Data](https://docs.pingcap.com/tidbcloud/import-sample-data) -[From MySQL](https://docs.pingcap.com/tidbcloud/migrate-data-into-tidb) - -[From Amazon Aurora MySQL](https://docs.pingcap.com/tidbcloud/migrate-from-aurora-bulk-import) +[From MySQL](https://docs.pingcap.com/tidbcloud/migrate-from-mysql-using-data-migration/) [From Amazon RDS for Oracle](https://docs.pingcap.com/tidbcloud/migrate-from-oracle-using-aws-dms) @@ -80,8 +74,6 @@ summary: TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings [From Apache Parquet Files](https://docs.pingcap.com/tidbcloud/import-csv-files) -[With MySQL CLI](https://docs.pingcap.com/tidbcloud/import-with-mysql-cli) - @@ -114,9 +106,9 @@ summary: TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings [Manage User 
Profiles](https://docs.pingcap.com/tidbcloud/manage-user-access#manage-user-profiles) -[Manage organization access](https://docs.pingcap.com/tidbcloud/manage-user-access#manage-organization-access) +[Manage Organization Access](https://docs.pingcap.com/tidbcloud/manage-user-access#manage-organization-access) -[Manage project access](https://docs.pingcap.com/tidbcloud/manage-user-access#manage-project-access) +[Manage Project Access](https://docs.pingcap.com/tidbcloud/manage-user-access#manage-project-access) [Configure Password Settings](https://docs.pingcap.com/tidbcloud/configure-security-settings) diff --git a/tidb-cloud/delete-tidb-cluster.md b/tidb-cloud/delete-tidb-cluster.md index f3bfb7d360bee..19b2d20f5e13d 100644 --- a/tidb-cloud/delete-tidb-cluster.md +++ b/tidb-cloud/delete-tidb-cluster.md @@ -26,13 +26,13 @@ You can delete a cluster at any time by performing the following steps: > **Note:** > - > [TiDB Cloud Serverless clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) do not support restoring data after the deletion. If you want to delete a TiDB Cloud Serverless cluster and restore its data in the future, see [Export Data from TiDB Cloud Serverless](/tidb-cloud/serverless-export.md) to export your data as a backup. + > [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) and [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) clusters do not support restoring data after the deletion. If you want to delete a {{{ .starter }}} or {{{ .essential }}} cluster and restore its data in the future, see [Export Data from {{{ .starter }}} or Essential](/tidb-cloud/serverless-export.md) to export your data as a backup. 5. Click **I understand, delete it**. Once a backed up TiDB Cloud Dedicated cluster is deleted, the existing backup files of the cluster are moved to the recycle bin. - - Automatic backups will expire and be automatically deleted once the retention period ends. 
The default retention period is 7 days if you don't modify it. + - Automatic backups will expire and be automatically deleted once the retention period ends, except for the latest one. The default retention period is 7 days if you don't modify it. The latest automatic backup will not be deleted unless you explicitly delete it. - Manual backups will be kept in the Recycle Bin until manually deleted. > **Note:** diff --git a/tidb-cloud/dev-guide-bi-looker-studio.md b/tidb-cloud/dev-guide-bi-looker-studio.md deleted file mode 100644 index 606081a7f7ce6..0000000000000 --- a/tidb-cloud/dev-guide-bi-looker-studio.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: Connect to TiDB Cloud Serverless with Looker Studio -summary: Learn how to connect to TiDB Cloud Serverless using Looker Studio. ---- - -# Connect to TiDB Cloud Serverless with Looker Studio - -TiDB is a MySQL-compatible database, TiDB Cloud Serverless is a fully managed TiDB offering, and [Looker Studio](https://lookerstudio.google.com/) is a free web-based BI tool that can visualize data from various sources. - -In this tutorial, you can learn how to connect to your TiDB Cloud Serverless cluster with Looker Studio. - -> **Note:** -> -> Most steps in this tutorial work with TiDB Cloud Dedicated as well. However, for TiDB Cloud Dedicated, you need to note the following: -> -> - Import your dataset following [Import data from files to TiDB Cloud](/tidb-cloud/tidb-cloud-migration-overview.md#import-data-from-files-to-tidb-cloud). -> - Get the connection information for your cluster following [Connect to TiDB Cloud Dedicated](/tidb-cloud/connect-via-standard-connection.md). When connecting to TiDB Cloud Dedicated, you need to allow access from `142.251.74.0/23`. For more information about connections from Looker Studio, see [Looker Studio documentation](https://support.google.com/looker-studio/answer/7088031#zippy=%2Cin-this-article). 
- -## Prerequisites - -To complete this tutorial, you need: - -- A Google account -- A TiDB Cloud Serverless cluster - -**If you don't have a TiDB Cloud Serverless cluster, you can create one as follows:** - -- [Create a TiDB Cloud Serverless cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-tidb-cloud-cluster) - -## Step 1. Import a dataset - -You can import the S&P 500 dataset provided in the interactive tutorial of TiDB Cloud Serverless. - -1. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page, and click **?** in the lower-right corner. A **Help** dialog is displayed. - -2. In the dialog, click **Interactive Tutorials**, and then click **S&P 500 Analysis**. - -3. Select your TiDB Cloud Serverless cluster, and then click **Import Dataset** to import the S&P 500 dataset to your cluster. - -4. After the import status changes to **IMPORTED**, click **Exit Tutorial** to close this dialog. - -If you encounter any issues during import, you can cancel this import task as follows: - -1. On the [**Clusters**](https://tidbcloud.com/project/clusters) page, click the name of your TiDB Cloud Serverless cluster to go to its overview page. -2. In the left navigation pane, click **Data** > **Import**. -3. Find the import task named **sp500-insight**, click **...** in the **Action** column, and then click **Cancel**. - -## Step 2. Get the connection information for your cluster - -1. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page, and then click the name of your target cluster to go to its overview page. - -2. Click **Connect** in the upper-right corner. A connection dialog is displayed. - -3. In the connection dialog, set **Connect With** to `General`, and then click **Generate Password** to create a random password. - - > **Tip:** - > - > If you have created a password before, use the original password or click **Reset Password** to generate a new one. - -4. 
Download the [CA cert](https://letsencrypt.org/certs/isrgrootx1.pem). - - > **Tip:** - > - > TiDB Cloud Serverless requires a secure TLS connection between the client and the cluster, so you need this CA cert for connection settings in Looker Studio. - -## Step 3. Connect to your TiDB cluster with Looker Studio - -1. Log into [Looker Studio](https://lookerstudio.google.com/), and then click **Create** > **Report** in the left navigation pane. - -2. On the displayed page, search and select the **MySQL** connector, and then click **AUTHORIZE**. - -3. In the **BASIC** setting pane, configure the connection parameters. - - - **Host Name or IP**: enter the `HOST` parameter from the TiDB Cloud Serverless connection dialog. - - **Port(Optional)**: enter the `PORT` parameter from the TiDB Cloud Serverless connection dialog. - - **Database**: enter the database you want to connect to. For this tutorial, enter `sp500insight`. - - **Username**: enter the `USERNAME` parameter from the TiDB Cloud Serverless connection dialog. - - **Password**: enter the `PASSWORD` parameter from the TiDB Cloud Serverless connection dialog. - - **Enable SSL**: select this option, and then click the upload icon to the right of **MySQL SSL Client Configuration Files** to upload the CA file downloaded from [Step 2](#step-2-get-the-connection-information-for-your-cluster). - - ![Looker Studio: configure connection settings for TiDB Cloud Serverless](/media/tidb-cloud/looker-studio-configure-connection.png) - -4. Click **AUTHENTICATE**. - -If the authentication succeeds, you can see tables in the database. - -## Step 4. Create a simple chart - -Now, you can use the TiDB cluster as a data source and create a simple chart with data. - -1. In the right pane, click **CUSTOM QUERY**. - - ![Looker Studio: custom query](/media/tidb-cloud/looker-studio-custom-query.png) - -2. Copy the following code to the **Enter Custom Query** area, and then click **Add** in the lower-right corner. 
- - ```sql - SELECT sector, - COUNT(*) AS companies, - ROW_NUMBER() OVER (ORDER BY COUNT(*) DESC ) AS companies_ranking, - SUM(market_cap) AS total_market_cap, - ROW_NUMBER() OVER (ORDER BY SUM(market_cap) DESC ) AS total_market_cap_ranking, - SUM(revenue_growth * weight) / SUM(weight) AS avg_revenue_growth, - ROW_NUMBER() OVER (ORDER BY SUM(revenue_growth * weight) / SUM(weight) DESC ) AS avg_revenue_growth_ranking - FROM companies - LEFT JOIN index_compositions ic ON companies.stock_symbol = ic.stock_symbol - GROUP BY sector - ORDER BY 5 ASC; - ``` - - If you see the **You are about to add data to this report** dialog, click **ADD TO REPORT**. Then, a table is displayed in the report. - -3. In the toolbar of the report, click **Add a chart**, and then select `Combo chart` in the `Line` category. - -4. In the **Chart** settings pane on the right, configure the following parameters: - - - In the **SETUP** Tab: - - **Dimension**: `sector`. - - **Metric**: `companies` and `total_market_cap`. - - In the **STYLE** Tab: - - Series #1: select the `Line` option and the `Right` axis. - - Series #2: select the `Bars` option and the `Left` axis. - - Leave other fields as defaults. - -Then, you can see a combo chart similar as follows: - -![Looker Studio: A simple Combo chart](/media/tidb-cloud/looker-studio-simple-chart.png) - -## Next steps - -- Learn more usage of Looker Studio from [Looker Studio Help](https://support.google.com/looker-studio). -- Learn the best practices for TiDB application development with the chapters in the [Developer guide](/develop/dev-guide-overview.md), such as [Insert data](/develop/dev-guide-insert-data.md), [Update data](/develop/dev-guide-update-data.md), [Delete data](/develop/dev-guide-delete-data.md), [Single table reading](/develop/dev-guide-get-data-from-single-table.md), [Transactions](/develop/dev-guide-transaction-overview.md), and [SQL performance optimization](/develop/dev-guide-optimize-sql-overview.md). 
-- Learn through the professional [TiDB developer courses](https://www.pingcap.com/education/) and earn [TiDB certifications](https://www.pingcap.com/education/certification/) after passing the exam. - -## Need help? - -Ask the community on [Discord](https://discord.gg/DQZ2dy3cuc?utm_source=doc) or [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap-docs), or [submit a support ticket](https://tidb.support.pingcap.com/). diff --git a/tidb-cloud/essential-changefeed-overview.md b/tidb-cloud/essential-changefeed-overview.md new file mode 100644 index 0000000000000..563016ca1502c --- /dev/null +++ b/tidb-cloud/essential-changefeed-overview.md @@ -0,0 +1,191 @@ +--- +title: Changefeed (Beta) +summary: TiDB Cloud changefeed helps you stream data from TiDB Cloud to other data services. +--- + +# Changefeed (Beta) + +TiDB Cloud changefeed helps you stream data from TiDB Cloud to other data services. Currently, TiDB Cloud supports streaming data to Apache Kafka and MySQL. + +> **Note:** +> +> - Currently, TiDB Cloud only allows up to 10 changefeeds per {{{ .essential }}} cluster. +> - For [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) clusters, the changefeed feature is unavailable. + +## Restrictions + +- Changefeeds do not support DDL statements that rename multiple tables in a single `RENAME TABLE` statement, for example, `RENAME TABLE t1 TO t3, t2 TO t4`. Executing this statement permanently interrupts changefeed data replication. +- The changefeed throughput is approximately 20 MiB/s. If your incremental data volume exceeds this limit, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for assistance. + +## Supported regions + +The changefeed feature is available in the following regions: + +| Cloud provider | Supported regions | +| --- | --- | +| AWS |
  • `ap-east-1`
  • `ap-northeast-1`
  • `ap-southeast-1`
  • `eu-central-1`
  • `us-east-1`
  • `us-west-2`
| +| Alibaba Cloud |
  • `ap-southeast-1`
  • `ap-southeast-5`
  • `cn-hongkong`
| + +Additional regions will be supported in the future. For immediate support in a specific region, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). + +## View the Changefeed page + +To access the changefeed feature, take the following steps: + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Changefeed** in the left navigation pane. The changefeed page is displayed. + +On the **Changefeed** page, you can create a changefeed, view a list of existing changefeeds, and operate the existing changefeeds (such as pausing, resuming, editing, and deleting a changefeed). + +## Create a changefeed + +To create a changefeed, refer to the tutorials: + +- [Sink to Apache Kafka](/tidb-cloud/essential-changefeed-sink-to-kafka.md) +- [Sink to MySQL](/tidb-cloud/essential-changefeed-sink-to-mysql.md) + +## View a changefeed + +You can view a changefeed using the TiDB Cloud console or the TiDB Cloud CLI. + + +
+ +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. +2. Locate the corresponding changefeed you want to view, and click **...** > **View** in the **Action** column. +3. You can see the details of a changefeed, including its configuration, status, and metrics. + +
+ +
Run the following command:

```bash
ticloud serverless changefeed get --cluster-id <cluster-id> --changefeed-id <changefeed-id>
```
+
+ +## Pause or resume a changefeed + +You can pause or resume a changefeed using the TiDB Cloud console or the TiDB Cloud CLI. + + +
+ +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. +2. Locate the corresponding changefeed you want to pause or resume, and click **...** > **Pause/Resume** in the **Action** column. + +
+ +
To pause a changefeed, run the following command:

```bash
ticloud serverless changefeed pause --cluster-id <cluster-id> --changefeed-id <changefeed-id>
```

To resume a changefeed, run the following command:

```bash
ticloud serverless changefeed resume --cluster-id <cluster-id> --changefeed-id <changefeed-id>
```
+
+ +## Edit a changefeed + +> **Note:** +> +> TiDB Cloud currently only allows editing changefeeds in the paused status. + +You can edit a changefeed using the TiDB Cloud console or the TiDB Cloud CLI. + + +
+ +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. +2. Locate the changefeed you want to pause, and click **...** > **Pause** in the **Action** column. +3. When the changefeed status changes to `Paused`, click **...** > **Edit** to edit the corresponding changefeed. + + TiDB Cloud populates the changefeed configuration by default. You can modify the following configurations: + + - Apache Kafka sink: all configurations except **Destination**, **Connection**, and **Start Position** + - MySQL sink: all configurations except **Destination**, **Connection** and **Start Position** + +4. After editing the configuration, click **...** > **Resume** to resume the corresponding changefeed. + +
+ +
Edit a changefeed with an Apache Kafka sink:

```bash
ticloud serverless changefeed edit --cluster-id <cluster-id> --changefeed-id <changefeed-id> --name <name> --kafka <kafka-config> --filter <filter-rules>
```

Edit a changefeed with a MySQL sink:

```bash
ticloud serverless changefeed edit --cluster-id <cluster-id> --changefeed-id <changefeed-id> --name <name> --mysql <mysql-config> --filter <filter-rules>
```
+
+ +## Duplicate a changefeed + +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. +2. Locate the changefeed that you want to duplicate. In the **Action** column, click **...** > **Duplicate**. +3. TiDB Cloud automatically populates the new changefeed configuration with the original settings. You can review and modify the configuration as needed. +4. After confirming the configuration, click **Submit** to create and start the new changefeed. + +## Delete a changefeed + +You can delete a changefeed using the TiDB Cloud console or the TiDB Cloud CLI. + + +
+ +1. Navigate to the [**Changefeed**](#view-the-changefeed-page) page of your target TiDB cluster. +2. Locate the changefeed you want to delete, and click **...** > **Delete** in the **Action** column. + +
+ +
Run the following command:

```bash
ticloud serverless changefeed delete --cluster-id <cluster-id> --changefeed-id <changefeed-id>
```
+
+ +## Changefeed billing + +Changefeeds are free of charge during the beta phase. + +## Changefeed states + +During the running process, changefeeds might fail with errors, or be manually paused or resumed. These behaviors can lead to changes of the changefeed state. + +The states are described as follows: + +- `CREATING`: the changefeed is being created. +- `CREATE_FAILED`: the changefeed creation fails. You need to delete the changefeed and create a new one. +- `RUNNING`: the changefeed runs normally and the checkpoint-ts proceeds normally. +- `PAUSED`: the changefeed is paused. +- `WARNING`: the changefeed returns a warning. The changefeed cannot continue due to some recoverable errors. The changefeed in this state keeps trying to resume until the state transfers to `RUNNING`. The changefeed in this state blocks [GC operations](https://docs.pingcap.com/tidb/stable/garbage-collection-overview). +- `RUNNING_FAILED`: the changefeed fails. Due to some errors, the changefeed cannot resume and cannot be recovered automatically. If the issues are resolved before the garbage collection (GC) of the incremental data, you can manually resume the failed changefeed. The default Time-To-Live (TTL) duration for incremental data is 24 hours, which means that the GC mechanism does not delete any data within 24 hours after the changefeed is interrupted. diff --git a/tidb-cloud/essential-changefeed-sink-to-kafka.md b/tidb-cloud/essential-changefeed-sink-to-kafka.md new file mode 100644 index 0000000000000..4326ec396012e --- /dev/null +++ b/tidb-cloud/essential-changefeed-sink-to-kafka.md @@ -0,0 +1,199 @@ +--- +title: Sink to Apache Kafka (Beta) +summary: This document explains how to create a changefeed to stream data from {{{ .essential }}} to Apache Kafka. It includes restrictions, prerequisites, and steps to configure the changefeed for Apache Kafka. 
The process involves setting up network connections, adding permissions for Kafka ACL authorization, and configuring the changefeed. +--- + +# Sink to Apache Kafka (Beta) + +This document describes how to create a changefeed to stream data from {{{ .essential }}} to Apache Kafka. + +## Restrictions + +- For each {{{ .essential }}} cluster, you can create up to 10 changefeeds. +- Currently, {{{ .essential }}} does not support uploading self-signed TLS certificates to connect to Kafka brokers. +- Because {{{ .essential }}} uses TiCDC to establish changefeeds, it has the same [restrictions as TiCDC](https://docs.pingcap.com/tidb/stable/ticdc-overview#unsupported-scenarios). +- If the table to be replicated does not have a primary key or a non-null unique index, the absence of a unique constraint during replication could result in duplicated data being inserted downstream in some retry scenarios. + +## Prerequisites + +Before creating a changefeed to stream data to Apache Kafka, you need to complete the following prerequisites: + +- Set up your network connection +- Add permissions for Kafka ACL authorization + +### Network + +Ensure that your {{{ .essential }}} cluster can connect to the Apache Kafka service. You can choose one of the following connection methods: + +- Private Link Connection: meeting security compliance and ensuring network quality. +- Public Network: suitable for a quick setup. + + +
+ +Private link connections leverage **Private Link** technologies from cloud providers to enable resources in your VPC to connect to services in other VPCs using private IP addresses, as if those services were hosted directly within your VPC. + +{{{ .essential }}} currently supports Private Link connections only for self-hosted Kafka and Confluent Cloud Dedicated clusters. It does not support direct integration with MSK or other Kafka SaaS services. + +To set up a Private Link connection based on your Kafka deployment and cloud provider, see the following guides: + +- [Connect to Confluent Cloud on AWS via a Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-aws-confluent.md) +- [Connect to AWS Self-Hosted Kafka via Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-aws.md) +- [Connect to Alibaba Cloud Self-Hosted Kafka via a Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-alicloud.md) + +
+ +
+ +If you want to provide public access to your Apache Kafka service, assign public IP addresses or domain names to all your Kafka brokers. + +It is not recommended to use public access in a production environment. + +
+
+ +### Kafka ACL authorization + +To allow {{{ .essential }}} changefeeds to stream data to Apache Kafka and create Kafka topics automatically, ensure that the following permissions are added in Kafka: + +- The `Create` and `Write` permissions are added for the topic resource type in Kafka. +- The `DescribeConfigs` permission is added for the cluster resource type in Kafka. + +For example, if your Kafka cluster is in Confluent Cloud, refer to [Resources](https://docs.confluent.io/platform/current/kafka/authorization.html#resources) and [Adding ACLs](https://docs.confluent.io/platform/current/security/authorization/acls/manage-acls.html#add-acls) in the Confluent documentation for more information. + +## Step 1. Open the Changefeed page for Apache Kafka + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com). +2. Navigate to the overview page of the target {{{ .essential }}} cluster, and then click **Data** > **Changefeed** in the left navigation pane. +3. Click **Create Changefeed**, and then select **Kafka** as **Destination**. + +## Step 2. Configure the changefeed target + +The steps vary depending on the connectivity method you select. + + +
+ +1. In **Connectivity Method**, select **Public**, and fill in your Kafka broker endpoints. You can use commas `,` to separate multiple endpoints. +2. Select an **Authentication** option according to your Kafka authentication configuration. + + - If your Kafka does not require authentication, keep the default option **Disable**. + - If your Kafka requires authentication, select the corresponding authentication type, and then fill in the **user name** and **password** of your Kafka account for authentication. + +3. For **Kafka Version**, select **Kafka v2** or **Kafka v3** based on your Kafka version. +4. Select a **Compression** type for the data in this changefeed. +5. Enable the **TLS Encryption** option if your Kafka has enabled TLS encryption and you want to use TLS encryption for the Kafka connection. +6. Click **Next** to test the network connection. If the test succeeds, you will be directed to the next page. + +
+
+ +1. In **Connectivity Method**, select **Private Link**. +2. In **Private Link Connection**, select the private link connection that you created in the [Network](#network) section. Make sure the Availability Zones of the private link connection match those of the Kafka deployment. +3. Fill in the **Bootstrap Port** that you obtained from the [Network](#network) section. +4. Select an **Authentication** option according to your Kafka authentication configuration. + + - If your Kafka does not require authentication, keep the default option **Disable**. + - If your Kafka requires authentication, select the corresponding authentication type, and then fill in the **user name** and **password** of your Kafka account for authentication. + +5. For **Kafka Version**, select **Kafka v2** or **Kafka v3** based on your Kafka version. +6. Select a **Compression** type for the data in this changefeed. +7. Enable the **TLS Encryption** option if your Kafka has enabled TLS encryption and you want to use TLS encryption for the Kafka connection. +8. If your Kafka requires TLS SNI verification, enter the **TLS Server Name**. For example, `Confluent Cloud Dedicated clusters`. +9. Click **Next** to test the network connection. If the test succeeds, you will be directed to the next page. + +
+
+ +## Step 3. Set the changefeed + +1. Customize **Table Filter** to filter the tables that you want to replicate. For the rule syntax, refer to [table filter rules](https://docs.pingcap.com/tidb/stable/table-filter/#syntax). + + - **Replication Scope**: you can choose to only replicate tables with valid keys or replicate all selected tables. + - **Filter Rules**: you can set filter rules in this column. By default, there is a rule `*.*`, which stands for replicating all tables. When you add a new rule and click **Apply**, TiDB Cloud queries all the tables in TiDB and displays only the tables that match the rules under **Filter results**. + - **Case Sensitive**: you can set whether the matching of database and table names in filter rules is case-sensitive. By default, matching is case-insensitive. + - **Filter results with valid keys**: this column displays the tables that have valid keys, including primary keys or unique indexes. + - **Filter results without valid keys**: this column shows tables that lack primary keys or unique keys. These tables present a challenge during replication because the absence of a unique identifier can result in inconsistent data when the downstream handles duplicate events. To ensure data consistency, it is recommended to add unique keys or primary keys to these tables before initiating the replication. Alternatively, you can add filter rules to exclude these tables. For example, you can exclude the table `test.tbl1` by using the rule `"!test.tbl1"`. + +2. Customize **Event Filter** to filter the events that you want to replicate. + + - **Tables matching**: you can set which tables the event filter will be applied to in this column. The rule syntax is the same as that used for the preceding **Table Filter** area. + - **Event Filter**: you can choose the events you want to ignore. + +3. Customize **Column Selector** to select columns from events and send only the data changes related to those columns to the downstream. 
+ + - **Tables matching**: specify which tables the column selector applies to. For tables that do not match any rule, all columns are sent. + - **Column Selector**: specify which columns of the matched tables will be sent to the downstream. + + For more information about the matching rules, see [Column selectors](https://docs.pingcap.com/tidb/stable/ticdc-sink-to-kafka/#column-selectors). + +4. In the **Data Format** area, select your desired format of Kafka messages. + + - Avro is a compact, fast, and binary data format with rich data structures, which is widely used in various flow systems. For more information, see [Avro data format](https://docs.pingcap.com/tidb/stable/ticdc-avro-protocol). + - Canal-JSON is a plain JSON text format, which is easy to parse. For more information, see [Canal-JSON data format](https://docs.pingcap.com/tidb/stable/ticdc-canal-json). + - Open Protocol is a row-level data change notification protocol that provides data sources for monitoring, caching, full-text indexing, analysis engines, and primary-secondary replication between different databases. For more information, see [Open Protocol data format](https://docs.pingcap.com/tidb/stable/ticdc-open-protocol). + - Debezium is a tool for capturing database changes. It converts each captured database change into a message called an "event" and sends these events to Kafka. For more information, see [Debezium data format](https://docs.pingcap.com/tidb/stable/ticdc-debezium). + +5. Enable the **TiDB Extension** option if you want to add TiDB-extension fields to the Kafka message body. + + For more information about TiDB-extension fields, see [TiDB extension fields in Avro data format](https://docs.pingcap.com/tidb/stable/ticdc-avro-protocol#tidb-extension-fields) and [TiDB extension fields in Canal-JSON data format](https://docs.pingcap.com/tidb/stable/ticdc-canal-json#tidb-extension-field). + +6. 
If you select **Avro** as your data format, you will see some Avro-specific configurations on the page. You can fill in these configurations as follows:

    - In the **Decimal** and **Unsigned BigInt** configurations, specify how TiDB Cloud handles the decimal and unsigned bigint data types in Kafka messages.
    - In the **Schema Registry** area, fill in your schema registry endpoint. If you enable **HTTP Authentication**, enter the user name and password.

7. In the **Topic Distribution** area, select a distribution mode, and then fill in the topic name configurations according to the mode.

    If you select **Avro** as your data format, you can only choose the **Distribute changelogs by table to Kafka Topics** mode in the **Distribution Mode** drop-down list.

    The distribution mode controls how the changefeed creates Kafka topics, by table, by database, or creating one topic for all changelogs.

    - **Distribute changelogs by table to Kafka Topics**

        If you want the changefeed to create a dedicated Kafka topic for each table, choose this mode. Then, all Kafka messages of a table are sent to a dedicated Kafka topic. You can customize topic names for tables by setting a topic prefix, a separator between a database name and table name, and a suffix. For example, if you set the separator as `_`, the topic names are in the format of `<Prefix><DatabaseName>_<TableName><Suffix>`.

        For changelogs of non-row events, such as Create Schema Event, you can specify a topic name in the **Default Topic Name** field. The changefeed will create a topic accordingly to collect such changelogs.

    - **Distribute changelogs by database to Kafka Topics**

        If you want the changefeed to create a dedicated Kafka topic for each database, choose this mode. Then, all Kafka messages of a database are sent to a dedicated Kafka topic. You can customize topic names of databases by setting a topic prefix and a suffix.
+ + For changelogs of non-row events, such as Resolved Ts Event, you can specify a topic name in the **Default Topic Name** field. The changefeed will create a topic accordingly to collect such changelogs. + + - **Send all changelogs to one specified Kafka Topic** + + If you want the changefeed to create one Kafka topic for all changelogs, choose this mode. Then, all Kafka messages in the changefeed will be sent to one Kafka topic. You can define the topic name in the **Topic Name** field. + +8. In the **Partition Distribution** area, you can decide which partition a Kafka message will be sent to. You can define **a single partition dispatcher for all tables**, or **different partition dispatchers for different tables**. TiDB Cloud provides four types of dispatchers: + + - **Distribute changelogs by primary key or index value to Kafka partition** + + If you want the changefeed to send Kafka messages of a table to different partitions, choose this distribution method. The primary key or index value of a row changelog will determine which partition the changelog is sent to. Keep the **Index Name** field empty if you want to use the primary key. This distribution method provides a better partition balance and ensures row-level orderliness. + + - **Distribute changelogs by table to Kafka partition** + + If you want the changefeed to send Kafka messages of a table to one Kafka partition, choose this distribution method. The table name of a row changelog will determine which partition the changelog is sent to. This distribution method ensures table orderliness but might cause unbalanced partitions. + + - **Distribute changelogs by timestamp to Kafka partition** + + If you want the changefeed to send Kafka messages to different Kafka partitions randomly, choose this distribution method. The commitTs of a row changelog will determine which partition the changelog is sent to. 
This distribution method provides a better partition balance and ensures orderliness in each partition. However, multiple changes of a data item might be sent to different partitions and the consumer progress of different consumers might be different, which might cause data inconsistency. Therefore, the consumer needs to sort the data from multiple partitions by commitTs before consuming. + + - **Distribute changelogs by column value to Kafka partition** + + If you want the changefeed to send Kafka messages of a table to different partitions, choose this distribution method. The specified column values of a row changelog will determine which partition the changelog is sent to. This distribution method ensures orderliness in each partition and guarantees that the changelog with the same column values is sent to the same partition. + +9. In the **Topic Configuration** area, configure the following numbers. The changefeed will automatically create the Kafka topics according to the numbers. + + - **Replication Factor**: controls how many Kafka servers each Kafka message is replicated to. The valid value ranges from [`min.insync.replicas`](https://kafka.apache.org/33/documentation.html#brokerconfigs_min.insync.replicas) to the number of Kafka brokers. + - **Partition Number**: controls how many partitions exist in a topic. The valid value range is `[1, 10 * the number of Kafka brokers]`. + +10. In the **Split Event** area, choose whether to split `UPDATE` events into separate `DELETE` and `INSERT` events or keep as raw `UPDATE` events. For more information, see [Split primary or unique key UPDATE events for non-MySQL sinks](https://docs.pingcap.com/tidb/stable/ticdc-split-update-behavior/#split-primary-or-unique-key-update-events-for-non-mysql-sinks). + +11. Click **Next**. + +## Step 4. Review and create your changefeed + +1. In the **Changefeed Name** area, specify a name for the changefeed. +2. Review all the changefeed configurations that you set. 
Click **Previous** to make changes if necessary. +3. If all configurations are correct, click **Submit** to create the changefeed. diff --git a/tidb-cloud/essential-changefeed-sink-to-mysql.md b/tidb-cloud/essential-changefeed-sink-to-mysql.md new file mode 100644 index 0000000000000..5cba762687d5b --- /dev/null +++ b/tidb-cloud/essential-changefeed-sink-to-mysql.md @@ -0,0 +1,127 @@ +--- +title: Sink to MySQL (Beta) +summary: This document explains how to stream data from {{{ .essential }}} to MySQL using the Sink to MySQL changefeed. It includes restrictions, prerequisites, and steps to create a MySQL sink for data replication. The process involves setting up network connections, loading existing data to MySQL, and creating target tables in MySQL. After completing the prerequisites, users can create a MySQL sink to replicate data to MySQL. +--- + +# Sink to MySQL (Beta) + +This document describes how to stream data from {{{ .essential }}} to MySQL using the **Sink to MySQL** changefeed. + +## Restrictions + +- For each {{{ .essential }}} cluster, you can create up to 10 changefeeds. +- Because {{{ .essential }}} uses TiCDC to establish changefeeds, it has the same [restrictions as TiCDC](https://docs.pingcap.com/tidb/stable/ticdc-overview#unsupported-scenarios). +- If the table to be replicated does not have a primary key or a non-null unique index, the absence of a unique constraint during replication could result in duplicated data being inserted downstream in some retry scenarios. + +## Prerequisites + +Before creating a changefeed, you need to complete the following prerequisites: + +- Set up your network connection +- Export and load the existing data to MySQL (optional) +- Create corresponding target tables in MySQL if you do not load the existing data and only want to replicate incremental data to MySQL + +### Network + +Make sure that your {{{ .essential }}} cluster can connect to the MySQL service. 
You can choose one of the following connection methods: + +- Private Link Connection: meeting security compliance and ensuring network quality. +- Public Network: suitable for a quick setup. + + +
+ +Private link connections leverage **Private Link** technologies from cloud providers, enabling resources in your VPC to connect to services in other VPCs through private IP addresses, as if those services were hosted directly within your VPC. + +You can connect your {{{ .essential }}} cluster to your MySQL service securely through a private link connection. If the private link connection is not available for your MySQL service, follow [Connect to Amazon RDS via a Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-aws-rds.md) or [Connect to Alibaba Cloud ApsaraDB RDS for MySQL via a Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-alicloud-rds.md) to create one. + +
+ +
+ +If your MySQL service can be accessed over the public network, you can choose to connect to MySQL through a public IP or domain name. + +
+ +
+ +### Load existing data (optional) + +The **Sink to MySQL** connector can only sink incremental data from your {{{ .essential }}} cluster to MySQL after a certain timestamp. If you already have data in your {{{ .essential }}} cluster, you can export and load the existing data of your {{{ .essential }}} cluster into MySQL before enabling **Sink to MySQL**. + +To load the existing data: + +1. Extend the [tidb_gc_life_time](https://docs.pingcap.com/tidb/stable/system-variables#tidb_gc_life_time-new-in-v50) to be longer than the total time of the following two operations, so that historical data during this period is not garbage collected by TiDB. + + - The time to export and import the existing data + - The time to create **Sink to MySQL** + + For example: + + ```sql + SET GLOBAL tidb_gc_life_time = '72h'; + ``` + +2. Use the [Export](/tidb-cloud/serverless-export.md) feature to export data from your {{{ .essential }}} cluster, then use community tools such as [mydumper/myloader](https://centminmod.com/mydumper.html) to load the data into the MySQL service. + +3. Record the snapshot time returned by [Export](/tidb-cloud/serverless-export.md). Use this timestamp as the starting position when you configure the MySQL sink. + +### Create target tables in MySQL + +If you do not load the existing data, you need to create corresponding target tables in MySQL manually to store the incremental data from TiDB. Otherwise, the data will not be replicated. + +## Create a MySQL sink + +After completing the prerequisites, you can sink your data to MySQL. + +1. Navigate to the overview page of the target {{{ .essential }}} cluster, and then click **Data** > **Changefeed** in the left navigation pane. + +2. Click **Create Changefeed**, and select **MySQL** as **Destination**. + +3. In **Connectivity Method**, choose the method to connect to your MySQL service. + + - If you choose **Public**, fill in your MySQL endpoint. 
+ - If you choose **Private Link**, select the private link connection that you created in the [Network](#network) section, and then fill in the MySQL port for your MySQL service. + +4. In **Authentication**, fill in the MySQL user name and password, and configure TLS encryption for your MySQL service. Currently, TiDB Cloud does not support self-signed certificates for MySQL TLS connections. + +5. Click **Next** to test whether TiDB can connect to MySQL successfully: + + - If yes, you are directed to the next step of configuration. + - If not, a connectivity error is displayed, and you need to handle the error. After the error is resolved, click **Next** again. + +6. Customize **Table Filter** to filter the tables that you want to replicate. For the rule syntax, refer to [table filter rules](https://docs.pingcap.com/tidb/stable/table-filter/#syntax). + + - **Replication Scope**: you can choose to only replicate tables with valid keys or replicate all selected tables. + - **Filter Rules**: you can set filter rules in this column. By default, there is a rule `*.*`, which stands for replicating all tables. When you add a new rule and click **Apply**, TiDB Cloud queries all the tables in TiDB and displays only the tables that match the rules under **Filter results**. + - **Case Sensitive**: you can set whether the matching of database and table names in filter rules is case-sensitive. By default, matching is case-insensitive. + - **Filter results with valid keys**: this column displays the tables that have valid keys, including primary keys or unique indexes. + - **Filter results without valid keys**: this column shows tables that lack primary keys or unique keys. These tables present a challenge during replication because the absence of a unique identifier can result in inconsistent data when the downstream handles duplicate events. To ensure data consistency, it is recommended to add unique keys or primary keys to these tables before initiating the replication. 
Alternatively, you can add filter rules to exclude these tables. For example, you can exclude the table `test.tbl1` by using the rule `"!test.tbl1"`. + +7. Customize **Event Filter** to filter the events that you want to replicate. + + - **Tables matching**: you can set which tables the event filter will be applied to in this column. The rule syntax is the same as that used for the preceding **Table Filter** area. + - **Event Filter**: you can choose the events you want to ignore. + +8. In **Start Replication Position**, configure the starting position for your MySQL sink. + + - If you have [loaded the existing data](#load-existing-data-optional) using Export, select **From Time** and fill in the snapshot time returned by Export. Ensure that the time zone is correct. + - If you do not have any data in the upstream TiDB cluster, select **Start replication from now on**. + +9. Click **Next** to configure your changefeed. + + In the **Changefeed Name** area, specify a name for the changefeed. + +10. Review the configuration. If all settings are correct, click **Submit**. + + If you want to modify some configurations, click **Previous** to go back to the previous configuration page. + +11. After creation, the sink status changes from **Creating** to **Running**. + + Click the changefeed name, and you can see more details about the changefeed, such as the checkpoint, replication latency, and other metrics. + +12. 
If you have [loaded the existing data](#load-existing-data-optional) and increased the GC time, restore it to its original value (the default value is `10m`) after the sink is created: + + ```sql + SET GLOBAL tidb_gc_life_time = '10m'; + ``` diff --git a/tidb-cloud/essential-database-audit-logging.md b/tidb-cloud/essential-database-audit-logging.md new file mode 100644 index 0000000000000..6b511807b58bb --- /dev/null +++ b/tidb-cloud/essential-database-audit-logging.md @@ -0,0 +1,470 @@ +--- +title: Database Audit Logging (Beta) for {{{ .essential }}} +summary: Learn about how to audit a {{{ .essential }}} cluster in TiDB Cloud. +aliases: ['/tidbcloud/serverless-audit-logging'] +--- + +# Database Audit Logging (Beta) for {{{ .essential }}} + +{{{ .essential }}} provides an audit logging feature that records user access activities of your database, such as executed SQL statements. + +> **Note:** +> +> Currently, the database audit logging feature is only available upon request. To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for {{{ .essential }}} database audit logging" in the **Description** field, and then click **Submit**. + +To evaluate the effectiveness of user access policies and other information security measures of your organization, it is a security best practice to periodically analyze database audit logs. + +The audit logging feature is **disabled by default**. To audit a TiDB cluster, you need to enable audit logging for it. + +## Audit logging configurations + +### Data redaction + +By default, {{{ .essential }}} redacts sensitive data in audit logs. 
Take the following SQL statement as an example: + +```sql +INSERT INTO `test`.`users` (`id`, `name`, `password`) VALUES (1, 'Alice', '123456'); +``` + +It is redacted as follows: + +```sql +INSERT INTO `test`.`users` (`id`, `name`, `password`) VALUES ( ... ); +``` + +### Log file rotation + +{{{ .essential }}} generates a new audit log file when either of the following conditions is met: + +- The current log file reaches the rotation size (100 MiB by default). +- The rotation interval (one hour by default) has passed since the previous log generation. Depending on the internal scheduling mechanism, log generation might be delayed by a few minutes. + +## Audit logging locations + +You can store audit logs in the following locations: + +- TiDB Cloud +- [Amazon S3](https://aws.amazon.com/s3/) +- [Google Cloud Storage](https://cloud.google.com/storage) +- [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) +- [Alibaba Cloud Object Storage Service (OSS)](https://www.alibabacloud.com/product/oss) + +### TiDB Cloud + +You can store audit logs in TiDB Cloud and download them to your local machine. Audit logs expire and are deleted after 365 days. To request a longer retention period, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). + +### Amazon S3 + +To store audit logs in Amazon S3, you need to provide the following information: + +- URI: `s3:////` +- Access credentials: choose one of the following: + - An [access key](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) with the `s3:PutObject` permission. + - A [role ARN](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html) with the `s3:PutObject` permission. Only clusters hosted on AWS support using a role ARN. + +For more information, see [Configure Amazon S3 access](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access). 
+ +### Google Cloud Storage + +To store audit logs in Google Cloud Storage, you need to provide the following information: + +- URI: `gs:////` +- Access credential: a [service account key](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) with the `storage.objects.create` and `storage.objects.delete` permissions. + +For more information, see [Configure GCS access](/tidb-cloud/configure-external-storage-access.md#configure-gcs-access). + +### Azure Blob Storage + +To store audit logs in Azure Blob Storage, you need to provide the following information: + +- URI: `azure://.blob.core.windows.net///` or `https://.blob.core.windows.net///` +- Access credential: a [shared access signature (SAS) token](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview) with `Read` and `Write` permissions on the `Container` and `Object` resources. + +For more information, see [Configure Azure Blob Storage access](/tidb-cloud/configure-external-storage-access.md#configure-azure-blob-storage-access). + +### Alibaba Cloud OSS + +To store audit logs in Alibaba Cloud OSS, you need to provide the following information: + +- URI: `oss:////` +- Access credential: an [AccessKey pair](https://www.alibabacloud.com/help/en/ram/user-guide/create-an-accesskey-pair) with the `oss:PutObject` and `oss:GetBucketInfo` permissions to allow data export to the OSS bucket. + +For more information, see [Configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/configure-external-storage-access.md#configure-alibaba-cloud-object-storage-service-oss-access). + +## Audit logging filter rules + +To filter audit logs, you need to create a filter rule to specify which events to log. + +The filter rule contains the following fields: + +- `users`: A list of user names to filter audit events. You can use the wildcard `%` to match any user name. +- `filters`: A list of filter objects. 
Each filter object contains the following fields: + + - `classes`: A list of event classes to filter audit events. For example, `["QUERY", "EXECUTE"]`. + - `tables`: A list of table filters. For more information, see [Table Filter](https://docs.pingcap.com/tidb/stable/table-filter/). + - `statusCodes`: A list of status codes to filter audit events. `1` means success, and `0` means failure. + +The following table shows all event classes in database audit logging: + +| Event class | Description | Parent-class | +|---------------|--------------------------------------------------------------------------------------------------|---------------| +| `CONNECTION` | Records all operations related to connections, such as handshaking, connections, disconnections, connection reset, and changing users | - | +| `CONNECT` | Records all operations of the handshaking in connections | `CONNECTION` | +| `DISCONNECT` | Records all operations of the disconnections | `CONNECTION` | +| `CHANGE_USER` | Records all operations of changing users | `CONNECTION` | +| `QUERY` | Records all operations of SQL statements, including all errors about querying and modifying data | - | +| `TRANSACTION` | Records all operations related to transactions, such as `BEGIN`, `COMMIT`, and `ROLLBACK` | `QUERY` | +| `EXECUTE` | Records all operations of the `EXECUTE` statements | `QUERY` | +| `QUERY_DML` | Records all operations of the DML statements, including `INSERT`, `REPLACE`, `UPDATE`, `DELETE`, and `LOAD DATA` | `QUERY` | +| `INSERT` | Records all operations of the `INSERT` statements | `QUERY_DML` | +| `REPLACE` | Records all operations of the `REPLACE` statements | `QUERY_DML` | +| `UPDATE` | Records all operations of the `UPDATE` statements | `QUERY_DML` | +| `DELETE` | Records all operations of the `DELETE` statements | `QUERY_DML` | +| `LOAD DATA` | Records all operations of the `LOAD DATA` statements | `QUERY_DML` | +| `SELECT` | Records all operations of the `SELECT` statements | `QUERY` | +| 
`QUERY_DDL` | Records all operations of the DDL statements | `QUERY` | +| `AUDIT` | Records all operations related to setting TiDB database auditing, including setting system variables and calling system functions | - | +| `AUDIT_FUNC_CALL` | Records all operations of calling system functions related to TiDB Cloud database auditing | `AUDIT` | +| `AUDIT_SET_SYS_VAR` | Records all operations of setting system variables | `AUDIT` | + +> **Note:** +> +> The `AUDIT` event class and its subclasses are always recorded in audit logs and cannot be filtered out. + +## Configure audit logging + +You can enable, edit, and disable audit logging. + +### Enable audit logging + +You can enable audit logging for a {{{ .essential }}} cluster using the TiDB Cloud console or the TiDB Cloud CLI. + +> **Note:** +> +> Enabling audit logging alone does not generate audit logs. You must also configure filters to specify which events to log. For more information, see [Manage audit logging filter rules](#manage-audit-logging-filter-rules). + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane. + +3. On the **DB Audit Logging** page, click **Enable**. + +4. Select a storage location for the audit logs and fill in the required information. Then click **Test Connection and Next** or **Next**. For more information about available storage locations, see [Audit logging locations](#audit-logging-locations). + +5. In the **Database Audit Logging Settings** dialog, fill in the log file rotation and log redaction settings, and then click **Save**. + +
+ +
+ +Take Amazon S3 storage as an example. To enable audit logging and store audit logs in Amazon S3, run the following command: + +```shell +ticloud serverless audit-log config update -c --enabled --cloud-storage S3 --s3.uri --s3.access-key-id --s3.secret-access-key --rotation-size-mib --rotation-interval-minutes --unredacted= +``` + +The `--rotation-size-mib`, `--rotation-interval-minutes`, and `--unredacted` parameters are optional. If you do not specify them, the default values are used. + +
+
+ +### Edit audit logging + +You can edit the audit logging for a {{{ .essential }}} cluster after enabling it. + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane. + +3. On the **DB Audit Logging** page, click **Settings**. + +4. In the **Database Audit Logging Settings** dialog, update the log file rotation or log redaction settings, and then click **Save**. + +
+ +
+ +To update the audit logging settings using the TiDB Cloud CLI, run the following command: + +```shell +ticloud serverless audit-log config update -c --rotation-size-mib --rotation-interval-minutes --unredacted= +``` + +
+
+ +### Disable audit logging + +You can disable audit logging for a {{{ .essential }}} cluster. + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane. + +3. On the **DB Audit Logging** page, click **...** in the upper-right corner, and then click **Disable**. + +4. In the **Disable DB Audit Logging** dialog, click **Disable**. + +
+ +
+ +To disable audit logging using the TiDB Cloud CLI, run the following command: + +```shell +ticloud serverless audit-log config update -c --disabled=true +``` + +
+
+ +## Manage audit logging filter rules + +You can create, edit, disable, and delete an audit logging filter rule. + +### Create a filter rule + +To create a filter rule, define which users and events you want to capture in the audit logs. You can specify users, event classes, tables, and status codes to tailor the logging to your needs. + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane. + +3. On the **DB Audit Logging** page, click **Add Filter Rule**. + +4. In the **Add Filter Rule** dialog, fill in the **Filter Name**, **SQL Users**, and **Filter Rule** fields, and then click **Confirm**. For more information about these fields, see [Audit logging filter rules](#audit-logging-filter-rules). + +
+ +
+ +To create a filter rule using the TiDB Cloud CLI, run the following command: + +```shell +ticloud serverless audit-log filter create --cluster-id --display-name --rule '{"users":["%@%"],"filters":[{}]}' +``` + +
+
+ +### Edit a filter rule + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane. + +3. On the **DB Audit Logging** page, locate the filter rule you want to edit, click **...** in its row, and then click **Edit**. + +4. In the **Edit Filter Rule** dialog, update the **Filter Name** or **Filter Rule** field, and then click **Confirm**. + +
+ +
+ +To edit a filter rule using the TiDB Cloud CLI, run the following command: + +```shell +ticloud serverless audit-log filter update --cluster-id --filter-rule-id --rule '{"users":["%@%"],"filters":[{"classes":["QUERY"],"tables":["test.t"]}]}' +``` + +
+
+ +### Disable a filter rule + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane. + +3. On the **DB Audit Logging** page, locate the filter rule you want to disable, and turn off the toggle to disable the filter rule. + +
+ +
+ +To disable a filter rule using the TiDB Cloud CLI, run the following command: + +```shell +ticloud serverless audit-log filter update --cluster-id --filter-rule-id --enabled=false +``` + +
+
+ +### Delete a filter rule + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane. + +3. On the **DB Audit Logging** page, locate the filter rule you want to delete and click **...**. + +4. Click **Delete**, and then click **I understand. Delete it** to confirm. + +
+ +
+ +```shell +ticloud serverless audit-log filter delete --cluster-id --filter-rule-id +``` + +
+
+ +## Access audit logging with TiDB Cloud Storage + +When you store audit logs in TiDB Cloud, {{{ .essential }}} saves them as readable text files named `YYYY-MM-DD-.log`. You can view and download these files from the TiDB Cloud console or using the TiDB Cloud CLI. + +> **Note:** +> +> - {{{ .essential }}} does not guarantee that audit logs are stored in sequential order. A log file named `YYYY-MM-DD-.log` might contain entries from earlier dates. +> - To retrieve all logs for a specific date (for example, January 1, 2025), set `--start-date 2025-01-01` and `--end-date 2025-01-02`. In some cases, you might need to download all log files and sort them by the `TIME` field. + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane. + +3. On the **DB Audit Logging** page, you can view the list of audit logs under **TiDB Cloud Storage**. + +4. To download audit logs, select one or more logs from the list and then click **Download**. + +
+ +
+ +To download audit logs using the TiDB Cloud CLI, run the following command: + +```shell +ticloud serverless audit-log download --cluster-id --output-path --start-date --end-date +``` + +- `start-date`: the start date of the audit logs to download, in the format of `YYYY-MM-DD`, for example, `2025-01-01`. +- `end-date`: the end date of the audit logs to download, in the format of `YYYY-MM-DD`, for example, `2025-01-01`. + +
+
+ +## Audit logging fields + +For each database event record in audit logs, TiDB Cloud provides the following fields: + +### General information + +All classes of audit logs contain the following information: + +| Field | Description | +|---------------|-----------------------------------------------------------------------------------------------| +| `ID` | The unique identifier that identifies the audit record of an operation. | +| `TIME` | The timestamp of the audit record. | +| `EVENT` | The event classes of the audit record. Multiple event types are separated by commas (`,`). | +| `USER` | The username of the audit record. | +| `ROLES` | The roles of the user at the time of the operation. | +| `CONNECTION_ID` | The identifier of the user's connection. | +| `TABLES` | The accessed tables related to this audit record. | +| `STATUS_CODE` | The status code of the audit record. `1` means success, and `0` means failure. | +| `KEYSPACE_NAME` | The keyspace name of the audit record. | +| `SERVERLESS_TENANT_ID` | The ID of the serverless tenant that the cluster belongs to. | +| `SERVERLESS_PROJECT_ID` | The ID of the serverless project that the cluster belongs to. | +| `SERVERLESS_CLUSTER_ID` | The ID of the serverless cluster that the audit record belongs to. | +| `REASON` | The error message of the audit record. Only recorded when an error occurs during the operation.| + +### SQL statement information + +When the event class is `QUERY` or a subclass of `QUERY`, the audit logs contain the following information: + +| Field | Description | +|----------------|---------------------------------------------------------------------------------------------------------------| +| `CURRENT_DB` | The name of the current database. | +| `SQL_TEXT` | The executed SQL statements. If audit log redaction is enabled, the redacted SQL statements are recorded. | +| `EXECUTE_PARAMS` | The parameters for the `EXECUTE` statements. 
Recorded only when the event classes include `EXECUTE` and redaction is disabled. |
+| `AFFECTED_ROWS` | The number of affected rows of the SQL statements. Recorded only when the event classes include `QUERY_DML`. |
+
+### Connection information
+
+When the event class is `CONNECTION` or a subclass of `CONNECTION`, the audit logs contain the following information:
+
+| Field | Description |
+|-----------------|-----------------------------------------------------------------------------------------------|
+| `CURRENT_DB` | The name of the current database. When the event classes include `DISCONNECT`, this information is not recorded. |
+| `CONNECTION_TYPE` | The type of connection, including Socket, UnixSocket, and SSL/TLS. |
+| `PID` | The process ID of the current connection. |
+| `SERVER_VERSION` | The current version of the connected TiDB server. |
+| `SSL_VERSION` | The current version of SSL in use. |
+| `HOST_IP` | The current IP address of the connected TiDB server. |
+| `HOST_PORT` | The current port of the connected TiDB server. |
+| `CLIENT_IP` | The current IP address of the client. |
+| `CLIENT_PORT` | The current port of the client. |
+
+> **Note:**
+>
+> To improve traffic visibility, `CLIENT_IP` now displays the real client IP address for connections via AWS PrivateLink, instead of the Load Balancer (LB) IP. Currently, this feature is in beta and is available only in the AWS region `Frankfurt (eu-central-1)`.
+
+### Audit operation information
+
+When the event class is `AUDIT` or a subclass of `AUDIT`, the audit logs contain the following information:
+
+| Field | Description |
+|----------------|---------------------------------------------------------------------------------------------------------------|
+| `AUDIT_OP_TARGET`| The objects of the setting related to TiDB Cloud database auditing. |
+| `AUDIT_OP_ARGS` | The arguments of the setting related to TiDB Cloud database auditing. 
| + +## Audit logging limitations + +{{{ .essential }}} does not guarantee the sequential order of audit logs, which means that you might have to review all log files find the most recent events. To sort the logs chronologically, you can use the `TIME` field in the audit logs. diff --git a/tidb-cloud/essential/_index.md b/tidb-cloud/essential/_index.md new file mode 100644 index 0000000000000..397829626b606 --- /dev/null +++ b/tidb-cloud/essential/_index.md @@ -0,0 +1,140 @@ +--- +title: TiDB Cloud Documentation +hide_sidebar: true +hide_commit: true +summary: TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings everything great about TiDB to your cloud. It offers guides, samples, and references for learning, trying, developing, maintaining, migrating, monitoring, tuning, securing, billing, integrating, and referencing. +--- + + + + + +[Why TiDB Cloud](https://docs.pingcap.com/tidbcloud/tidb-cloud-intro/?plan=essential) + +[Key Concepts](https://docs.pingcap.com/tidbcloud/key-concepts/?plan=essential) + +[FAQ](https://docs.pingcap.com/tidbcloud/tidb-cloud-faq/?plan=essential) + + + + + +[Try Out TiDB Cloud](https://docs.pingcap.com/tidbcloud/tidb-cloud-quickstart/?plan=essential) + +[Try Out TiDB + AI](https://docs.pingcap.com/tidbcloud/vector-search-get-started-using-python/?plan=essential) + +[Try Out HTAP](https://docs.pingcap.com/tidbcloud/tidb-cloud-htap-quickstart/?plan=essential) + +[Try Out TiDB Cloud CLI](https://docs.pingcap.com/tidbcloud/get-started-with-cli/?plan=essential) + + + + + +[Developer Guide Overview](https://docs.pingcap.com/tidbcloud/dev-guide-overview/?plan=essential) + +[Quick Start](https://docs.pingcap.com/tidbcloud/dev-guide-build-cluster-in-cloud/?plan=essential) + +[Example Application](https://docs.pingcap.com/tidbcloud/dev-guide-sample-application-spring-boot/?plan=essential) + + + + + +[Create a Cluster](https://docs.pingcap.com/tidbcloud/create-tidb-cluster-serverless/?plan=essential) + +[Connect to a 
Cluster](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster-serverless/?plan=essential) + +[Use an HTAP Cluster](https://docs.pingcap.com/tidbcloud/tiflash-overview/?plan=essential) + +[Back Up and Restore Data](https://docs.pingcap.com/tidbcloud/backup-and-restore-serverless/?plan=essential) + +[Use API (Beta)](https://docs.pingcap.com/tidbcloud/api-overview/?plan=essential) + +[Use TiDB Cloud CLI](https://docs.pingcap.com/tidbcloud/get-started-with-cli/?plan=essential) + + + + + +[From Amazon RDS for Oracle](https://docs.pingcap.com/tidbcloud/migrate-from-oracle-using-aws-dms/?plan=essential) + +[Import Sample Data](https://docs.pingcap.com/tidbcloud/import-sample-data-serverless/?plan=essential) + +[Import CSV Files](https://docs.pingcap.com/tidbcloud/import-csv-files-serverless/?plan=essential) + +[Import Parquet Files](https://docs.pingcap.com/tidbcloud/import-parquet-files-serverless/?plan=essential) + +[With MySQL CLI](https://docs.pingcap.com/tidbcloud/import-with-mysql-cli-serverless/?plan=essential) + + + + + +[Status and Metrics](https://docs.pingcap.com/tidbcloud/monitor-tidb-cluster/?plan=essential) + +[Built-in Monitoring](https://docs.pingcap.com/tidbcloud/built-in-monitoring/?plan=essential) + + + + + +[Tuning Overview](https://docs.pingcap.com/tidbcloud/tidb-cloud-tune-performance-overview/?plan=essential) + +[Analyze Performance](https://docs.pingcap.com/tidbcloud/tune-performance/?plan=essential) + +[Tune SQL Performance](https://docs.pingcap.com/tidbcloud/tidb-cloud-sql-tuning-overview/?plan=essential) + +[Tune TiFlash Performance](https://docs.pingcap.com/tidbcloud/tune-tiflash-performance/?plan=essential) + + + + + +[Password Authentication](https://docs.pingcap.com/tidbcloud/tidb-cloud-password-authentication/?plan=essential) + +[User Roles](https://docs.pingcap.com/tidbcloud/manage-user-access#user-roles/?plan=essential) + +[Manage User 
Profiles](https://docs.pingcap.com/tidbcloud/manage-user-access#manage-user-profiles/?plan=essential) + +[Manage Organization Access](https://docs.pingcap.com/tidbcloud/manage-user-access/?plan=essential#manage-organization-access) + +[Manage Project Access](https://docs.pingcap.com/tidbcloud/manage-user-access/?plan=essential#manage-project-access) + +[Configure Firewall Rules for Public Endpoints](https://docs.pingcap.com/tidbcloud/configure-serverless-firewall-rules-for-public-endpoints/?plan=essential) + + + + + +[Pricing](https://www.pingcap.com/tidb-cloud-essential-pricing-details/) + +[Invoices](https://docs.pingcap.com/tidbcloud/tidb-cloud-billing#invoices/?plan=essential) + +[Credits](https://docs.pingcap.com/tidbcloud/tidb-cloud-billing#credits/?plan=essential) + + + + + +[Airbyte](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-airbyte/?plan=essential) + +[Zapier](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-zapier/?plan=essential) + +[Vercel](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-vercel/?plan=essential) + +[Terraform](https://docs.pingcap.com/tidbcloud/terraform-tidbcloud-provider-overview/?plan=essential) + +[Amazon AppFlow](https://docs.pingcap.com/tidbcloud/dev-guide-aws-appflow-integration/?plan=essential) + + + + + +[SQL Reference](https://docs.pingcap.com/tidbcloud/basic-sql-operations/?plan=essential) + +[System Variables](https://docs.pingcap.com/tidbcloud/system-variables/?plan=essential) + + + + diff --git a/tidb-cloud/explore-data-with-chat2query.md b/tidb-cloud/explore-data-with-chat2query.md index 61ce1ed8e8656..883496c645846 100644 --- a/tidb-cloud/explore-data-with-chat2query.md +++ b/tidb-cloud/explore-data-with-chat2query.md @@ -21,7 +21,7 @@ The recommended use cases of SQL Editor are as follows: - SQL queries generated by the AI might not be 100% accurate, and you might need to refine them. - SQL Editor is only supported for TiDB clusters that are v6.5.0 or later and hosted on AWS. 
-- SQL Editor is available by default for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. To use SQL Editor and Chat2Query on [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, contact [TiDB Cloud support](/tidb-cloud/tidb-cloud-support.md). +- SQL Editor is available only for [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) clusters hosted on AWS. To use SQL Editor and Chat2Query on [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, contact [TiDB Cloud support](/tidb-cloud/tidb-cloud-support.md). ## Access SQL Editor diff --git a/tidb-cloud/features.md b/tidb-cloud/features.md new file mode 100644 index 0000000000000..6f252433bbdcb --- /dev/null +++ b/tidb-cloud/features.md @@ -0,0 +1,336 @@ +--- +title: Features +summary: Learn about feature support status for different TiDB Cloud plans. +--- + +# Features + +This document lists the feature support status for different TiDB Cloud plans, including {{{ .starter }}}, Essential, and Dedicated. + +> **Tip:** +> +> [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) is the best way to get started with TiDB Cloud. Additionally, you can try out TiDB Cloud features on [TiDB Playground](https://play.tidbcloud.com/?utm_source=docs&utm_medium=tidb_cloud_quick_start). + +- ✅: The feature is **generally available** or in **public preview**. +- 🔒: The feature is in **private preview**. +- 🚧: The feature is **under development**. +- ❌: The feature is **currently not available**. + +
Customer ResourcesDocumentationsDocumentationsDocumentationsDocumentationsDocumentationDocumentationDocumentationDocumentation
FAQFAQFAQFAQFAQsFAQsFAQsFAQs
Community WorkspaceCommunity WorkspaceCommunity WorkspaceCommunity WorkspaceCommunity (Slack or Discord)Community (Slack or Discord)Community (Slack or Discord)Community (Slack or Discord)
TiDB.AI
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CategoryFeatureStarterEssentialDedicated
BasicsScalable transactional processing
Analytical processing
Vector storage & vector search
(Public preview)

(Public preview)

(Public preview)
API
(Public preview)

(Public preview)

(Public preview)
Developer experienceData branch
SQL editor
Cluster managementPay as you use
Automatic scaling based on workload
Manual cluster modification
Password setting
Pause & resume
System maintenance window
Backup file recycle bin
Data processingData import from CSV, Parquet, and SQL files to TiDB Cloud
Data migration from MySQL-compatible databases into TiDB Cloud
(Public preview)
Data export via CSV, Parquet, and SQL files to local or object storages
(Public preview)

(Public preview)
Change data replication to Kafka or other MySQL-compatible databases using changefeeds
(Public preview)
Backup & restoreAutomatic backup
Manual backup
Dual region backup
Point-in-time recovery (PITR)
Restore
ObservabilityBuilt-in metrics
Alerting🚧
SQL statement analysis
Slow query log
Top SQL
Events
Third-party integrations, such as Datadog, Prometheus, and New Relic
High availabilityCross-AZ failover
Resource allocationNode groups
Resource control
Network connectionPrivate endpoint
Public endpoint
VPC peering
SecurityDatabase audit logging🔒
Console audit logging
Log redaction
CMEK
Dual-layer encryption
IAM (including email and password login, standard SSO, and organization SSO)
Cloud and regionsAWS
Alibaba Cloud
Azure
Google Cloud
Cloud and regionsAWS
Azure
Google Cloud
+ +> **Tip:** +> +> To request a feature in private preview, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for ``" in the **Description** field, and then click **Submit**. \ No newline at end of file diff --git a/tidb-cloud/get-started-with-cli.md b/tidb-cloud/get-started-with-cli.md index db21975b117e2..6e79c55b29e48 100644 --- a/tidb-cloud/get-started-with-cli.md +++ b/tidb-cloud/get-started-with-cli.md @@ -87,7 +87,7 @@ Install the MySQL command-line client if you do not have it. You can refer to th ## Quick start -[TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) is the best way to get started with TiDB Cloud. In this section, you will learn how to create a TiDB Cloud Serverless cluster with TiDB Cloud CLI. +[{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) is the best way to get started with TiDB Cloud. In this section, you will learn how to create a {{{ .starter }}} cluster with TiDB Cloud CLI. ### Create a user profile or log into TiDB Cloud @@ -115,9 +115,9 @@ Before creating a cluster with TiDB Cloud CLI, you need to either create a user > > In the preceding two methods, the TiDB Cloud API key takes precedence over the OAuth token. If both are available, the API key will be used. 
-### Create a TiDB Cloud Serverless cluster +### Create a {{{ .starter }}} cluster -To create a TiDB Cloud Serverless cluster, enter the following command, and then follow the CLI prompts to provide the required information: +To create a {{{ .starter }}} cluster, enter the following command, and then follow the CLI prompts to provide the required information: ```shell ticloud serverless create diff --git a/tidb-cloud/import-csv-files-serverless.md b/tidb-cloud/import-csv-files-serverless.md index a38102b427262..80df511c07642 100644 --- a/tidb-cloud/import-csv-files-serverless.md +++ b/tidb-cloud/import-csv-files-serverless.md @@ -1,39 +1,43 @@ --- -title: Import CSV Files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud Serverless -summary: Learn how to import CSV files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service (OSS) into TiDB Cloud Serverless. +title: Import CSV Files from Cloud Storage into {{{ .starter }}} or Essential +summary: Learn how to import CSV files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service (OSS) into {{{ .starter }}} or {{{ .essential }}}. --- -# Import CSV Files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud Serverless +# Import CSV Files from Cloud Storage into {{{ .starter }}} or Essential -This document describes how to import CSV files from Amazon Simple Storage Service (Amazon S3), Google Cloud Storage (GCS), Azure Blob Storage, or Alibaba Cloud Object Storage Service (OSS) into TiDB Cloud Serverless. +This document describes how to import CSV files from Amazon Simple Storage Service (Amazon S3), Google Cloud Storage (GCS), Azure Blob Storage, or Alibaba Cloud Object Storage Service (OSS) into {{{ .starter }}} or {{{ .essential }}}. + +> **Note:** +> +> For TiDB Cloud Dedicated, see [Import CSV Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-csv-files.md). 
## Limitations -- To ensure data consistency, TiDB Cloud Serverless allows to import CSV files into empty tables only. To import data into an existing table that already contains data, you can use TiDB Cloud Serverless to import the data into a temporary empty table by following this document, and then use the `INSERT SELECT` statement to copy the data to the target existing table. +- To ensure data consistency, TiDB Cloud allows importing CSV files into empty tables only. To import data into an existing table that already contains data, you can import the data into a temporary empty table by following this document, and then use the `INSERT SELECT` statement to copy the data to the target existing table. ## Step 1. Prepare the CSV files -1. If a CSV file is larger than 256 MB, consider splitting it into smaller files, each with a size around 256 MB. +1. If a CSV file is larger than 256 MiB, consider splitting it into smaller files, each with a size around 256 MiB. - TiDB Cloud Serverless supports importing very large CSV files but performs best with multiple input files around 256 MB in size. This is because TiDB Cloud Serverless can process multiple files in parallel, which can greatly improve the import speed. + TiDB Cloud supports importing very large CSV files but performs best with multiple input files around 256 MiB in size. This is because TiDB Cloud can process multiple files in parallel, which can greatly improve the import speed. 2. Name the CSV files as follows: - If a CSV file contains all data of an entire table, name the file in the `${db_name}.${table_name}.csv` format, which maps to the `${db_name}.${table_name}` table when you import the data. - If the data of one table is separated into multiple CSV files, append a numeric suffix to these CSV files. For example, `${db_name}.${table_name}.000001.csv` and `${db_name}.${table_name}.000002.csv`. The numeric suffixes can be inconsecutive but must be in ascending order. 
You also need to add extra zeros before the number to ensure all the suffixes are in the same length. - - TiDB Cloud Serverless supports importing compressed files in the following formats: `.gzip`, `.gz`, `.zstd`, `.zst` and `.snappy`. If you want to import compressed CSV files, name the files in the `${db_name}.${table_name}.${suffix}.csv.${compress}` format, in which `${suffix}` is optional and can be any integer such as '000001'. For example, if you want to import the `trips.000001.csv.gz` file to the `bikeshare.trips` table, you need to rename the file as `bikeshare.trips.000001.csv.gz`. + - TiDB Cloud supports importing compressed files in the following formats: `.gzip`, `.gz`, `.zstd`, `.zst` and `.snappy`. If you want to import compressed CSV files, name the files in the `${db_name}.${table_name}.${suffix}.csv.${compress}` format, in which `${suffix}` is optional and can be any integer such as '000001'. For example, if you want to import the `trips.000001.csv.gz` file to the `bikeshare.trips` table, you need to rename the file as `bikeshare.trips.000001.csv.gz`. > **Note:** > > - To achieve better performance, it is recommended to limit the size of each compressed file to 100 MiB. > - The Snappy compressed file must be in the [official Snappy format](https://github.com/google/snappy). Other variants of Snappy compression are not supported. - > - For uncompressed files, if you cannot update the CSV filenames according to the preceding rules in some cases (for example, the CSV file links are also used by your other programs), you can keep the filenames unchanged and use the **Mapping Settings** in [Step 4](#step-4-import-csv-files-to-tidb-cloud-serverless) to import your source data to a single target table. 
+ > - For uncompressed files, if you cannot update the CSV filenames according to the preceding rules in some cases (for example, the CSV file links are also used by your other programs), you can keep the filenames unchanged and use the **Mapping Settings** in [Step 4](#step-4-import-csv-files) to import your source data to a single target table. ## Step 2. Create the target table schemas -Because CSV files do not contain schema information, before importing data from CSV files into TiDB Cloud Serverless, you need to create the table schemas using either of the following methods: +Because CSV files do not contain schema information, before importing data from CSV files into TiDB Cloud, you need to create the table schemas using either of the following methods: -- Method 1: In TiDB Cloud Serverless, create the target databases and tables for your source data. +- Method 1: In TiDB Cloud, create the target databases and tables for your source data. - Method 2: In the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service directory where the CSV files are located, create the target table schema files for your source data as follows: @@ -41,9 +45,9 @@ Because CSV files do not contain schema information, before importing data from If your CSV files follow the naming rules in [Step 1](#step-1-prepare-the-csv-files), the database schema files are optional for the data import. Otherwise, the database schema files are mandatory. - Each database schema file must be in the `${db_name}-schema-create.sql` format and contain a `CREATE DATABASE` DDL statement. With this file, TiDB Cloud Serverless will create the `${db_name}` database to store your data when you import the data. + Each database schema file must be in the `${db_name}-schema-create.sql` format and contain a `CREATE DATABASE` DDL statement. With this file, TiDB Cloud will create the `${db_name}` database to store your data when you import the data. 
- For example, if you create a `mydb-scehma-create.sql` file that contains the following statement, TiDB Cloud Serverless will create the `mydb` database when you import the data. + For example, if you create a `mydb-scehma-create.sql` file that contains the following statement, TiDB Cloud will create the `mydb` database when you import the data. ```sql CREATE DATABASE mydb; @@ -51,11 +55,11 @@ Because CSV files do not contain schema information, before importing data from 2. Create table schema files for your source data. - If you do not include the table schema files in the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service directory where the CSV files are located, TiDB Cloud Serverless will not create the corresponding tables for you when you import the data. + If you do not include the table schema files in the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service directory where the CSV files are located, TiDB Cloud will not create the corresponding tables for you when you import the data. - Each table schema file must be in the `${db_name}.${table_name}-schema.sql` format and contain a `CREATE TABLE` DDL statement. With this file, TiDB Cloud Serverless will create the `${db_table}` table in the `${db_name}` database when you import the data. + Each table schema file must be in the `${db_name}.${table_name}-schema.sql` format and contain a `CREATE TABLE` DDL statement. With this file, TiDB Cloud will create the `${db_table}` table in the `${db_name}` database when you import the data. - For example, if you create a `mydb.mytable-schema.sql` file that contains the following statement, TiDB Cloud Serverless will create the `mytable` table in the `mydb` database when you import the data. + For example, if you create a `mydb.mytable-schema.sql` file that contains the following statement, TiDB Cloud will create the `mytable` table in the `mydb` database when you import the data. 
```sql CREATE TABLE mytable ( @@ -70,21 +74,21 @@ Because CSV files do not contain schema information, before importing data from ## Step 3. Configure cross-account access -To allow TiDB Cloud Serverless to access the CSV files in the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service bucket, do one of the following: +To allow TiDB Cloud to access the CSV files in the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service bucket, do one of the following: -- If your CSV files are located in Amazon S3, [configure external storage access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-amazon-s3-access). +- If your CSV files are located in Amazon S3, [configure Amazon S3 access](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access) for your cluster. - You can use either an AWS access key or a Role ARN to access your bucket. Once finished, make a note of the access key (including the access key ID and secret access key) or the Role ARN value as you will need it in [Step 4](#step-4-import-csv-files-to-tidb-cloud-serverless). + You can use either an AWS access key or a Role ARN to access your bucket. Once finished, make a note of the access key (including the access key ID and secret access key) or the Role ARN value as you will need it in [Step 4](#step-4-import-csv-files). -- If your CSV files are located in GCS, [configure external storage access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-gcs-access). +- If your CSV files are located in GCS, [configure GCS access](/tidb-cloud/configure-external-storage-access.md#configure-gcs-access) for your cluster. -- If your CSV files are located in Azure Blob Storage, [configure external storage access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-azure-blob-storage-access). 
+- If your CSV files are located in Azure Blob Storage, [configure Azure Blob Storage access](/tidb-cloud/configure-external-storage-access.md#configure-azure-blob-storage-access) for your cluster. -- If your CSV files are located in Alibaba Cloud Object Storage Service (OSS), [configure external storage access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-alibaba-cloud-object-storage-service-oss-access). +- If your CSV files are located in Alibaba Cloud Object Storage Service (OSS), [configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/configure-external-storage-access.md#configure-alibaba-cloud-object-storage-service-oss-access) for your cluster. -## Step 4. Import CSV files to TiDB Cloud Serverless +## Step 4. Import CSV files -To import the CSV files to TiDB Cloud Serverless, take the following steps: +To import the CSV files to {{{ .starter }}} or {{{ .essential }}}, take the following steps:
@@ -99,43 +103,44 @@ To import the CSV files to TiDB Cloud Serverless, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. -2. Select **Import data from Cloud Storage**, and then click **Amazon S3**. +2. Click **Import data from Cloud Storage**. -3. On the **Import Data from Amazon S3** page, provide the following information for the source CSV files: +3. On the **Import Data from Cloud Storage** page, provide the following information: - - **Import File Count**: select **One file** or **Multiple files** as needed. - - **Included Schema Files**: this field is only visible when importing multiple files. If the source folder contains the target table schemas, select **Yes**. Otherwise, select **No**. - - **Data Format**: select **CSV**. - - **File URI** or **Folder URI**: - - When importing one file, enter the source file URI and name in the following format `s3://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `s3://sampledata/ingest/TableName.01.csv`. - - When importing multiple files, enter the source file URI and name in the following format `s3://[bucket_name]/[data_source_folder]/`. For example, `s3://sampledata/ingest/`. - - **Bucket Access**: you can use either an AWS Role ARN or an AWS access key to access your bucket. For more information, see [Configure Amazon S3 access](/tidb-cloud/serverless-external-storage.md#configure-amazon-s3-access). + - **Storage Provider**: select **Amazon S3**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `s3://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `s3://sampledata/ingest/TableName.01.csv`. + - When importing multiple files, enter the source folder URI in the following format `s3://[bucket_name]/[data_source_folder]/`. For example, `s3://sampledata/ingest/`. 
+ - **Credential**: you can use either an AWS Role ARN or an AWS access key to access your bucket. For more information, see [Configure Amazon S3 access](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access). - **AWS Role ARN**: enter the AWS Role ARN value. - **AWS Access Key**: enter the AWS access key ID and AWS secret access key. -4. Click **Connect**. +4. Click **Next**. + +5. In the **Destination Mapping** section, specify how source files are mapped to target tables. -5. In the **Destination** section, select the target database and table. + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. - When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to define a custom mapping rule for each target table and its corresponding CSV file. After that, the data source files will be re-scanned using the provided custom mapping rule. + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and TiDB Cloud automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. - When you enter the source file URI and name in **Source File URIs and Names**, make sure it is in the following format `s3://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `s3://sampledata/ingest/TableName.01.csv`. + - To let TiDB Cloud automatically map all source files that follow the [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to their corresponding tables, keep this option selected and select **CSV** as the data format. - You can also use wildcards to match the source files. 
For example: + - To manually configure the mapping rules to associate your source CSV files with the target database and table, unselect this option, and then fill in the following fields: - - `s3://[bucket_name]/[data_source_folder]/my-data?.csv`: all CSV files starting with `my-data` followed by one character (such as `my-data1.csv` and `my-data2.csv`) in that folder will be imported into the same target table. + - **Source**: enter the file name pattern in the `[file_name].csv` format. For example: `TableName.01.csv`. You can also use wildcards to match multiple files. Only `*` and `?` wildcards are supported. - - `s3://[bucket_name]/[data_source_folder]/my-data*.csv`: all CSV files in the folder starting with `my-data` will be imported into the same target table. + - `my-data?.csv`: matches all CSV files that start with `my-data` followed by a single character, such as `my-data1.csv` and `my-data2.csv`. + - `my-data*.csv`: matches all CSV files that start with `my-data`, such as `my-data-2023.csv` and `my-data-final.csv`. - Note that only `?` and `*` are supported. + - **Target Database** and **Target Table**: select the target database and table to import the data to. - > **Note:** - > - > The URI must contain the data source folder. +6. Click **Next**. TiDB Cloud scans the source files accordingly. -6. Click **Start Import**. +7. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. -7. When the import progress shows **Completed**, check the imported tables. +8. When the import progress shows **Completed**, check the imported tables.
@@ -151,41 +156,42 @@ To import the CSV files to TiDB Cloud Serverless, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. -2. Select **Import data from Cloud Storage**, and then click **Google Cloud Storage**. +2. Click **Import data from Cloud Storage**. -3. On the **Import Data from Google Cloud Storage** page, provide the following information for the source CSV files: +3. On the **Import Data from Cloud Storage** page, provide the following information: - - **Import File Count**: select **One file** or **Multiple files** as needed. - - **Included Schema Files**: this field is only visible when importing multiple files. If the source folder contains the target table schemas, select **Yes**. Otherwise, select **No**. - - **Data Format**: select **CSV**. - - **File URI** or **Folder URI**: - - When importing one file, enter the source file URI and name in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `[gcs|gs]://sampledata/ingest/TableName.01.csv`. - - When importing multiple files, enter the source file URI and name in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/`. For example, `[gcs|gs]://sampledata/ingest/`. - - **Bucket Access**: you can use a service account key to access your bucket. For more information, see [Configure GCS access](/tidb-cloud/serverless-external-storage.md#configure-gcs-access). + - **Storage Provider**: select **Google Cloud Storage**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `[gcs|gs]://sampledata/ingest/TableName.01.csv`. + - When importing multiple files, enter the source folder URI in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/`. For example, `[gcs|gs]://sampledata/ingest/`. 
+ - **Credential**: you can use a GCS IAM Role Service Account key to access your bucket. For more information, see [Configure GCS access](/tidb-cloud/configure-external-storage-access.md#configure-gcs-access). -4. Click **Connect**. +4. Click **Next**. -5. In the **Destination** section, select the target database and table. +5. In the **Destination Mapping** section, specify how source files are mapped to target tables. - When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to define a custom mapping rule for each target table and its corresponding CSV file. After that, the data source files will be re-scanned using the provided custom mapping rule. + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. - When you enter the source file URI and name in **Source File URIs and Names**, make sure it is in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `[gcs|gs]://sampledata/ingest/TableName.01.csv`. + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and TiDB Cloud automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. - You can also use wildcards to match the source files. For example: + - To let TiDB Cloud automatically map all source files that follow the [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to their corresponding tables, keep this option selected and select **CSV** as the data format. 
- - `[gcs|gs]://[bucket_name]/[data_source_folder]/my-data?.csv`: all CSV files starting with `my-data` followed by one character (such as `my-data1.csv` and `my-data2.csv`) in that folder will be imported into the same target table. + - To manually configure the mapping rules to associate your source CSV files with the target database and table, unselect this option, and then fill in the following fields: - - `[gcs|gs]://[bucket_name]/[data_source_folder]/my-data*.csv`: all CSV files in the folder starting with `my-data` will be imported into the same target table. + - **Source**: enter the file name pattern in the `[file_name].csv` format. For example: `TableName.01.csv`. You can also use wildcards to match multiple files. Only `*` and `?` wildcards are supported. - Note that only `?` and `*` are supported. + - `my-data?.csv`: matches all CSV files that start with `my-data` followed by a single character, such as `my-data1.csv` and `my-data2.csv`. + - `my-data*.csv`: matches all CSV files that start with `my-data`, such as `my-data-2023.csv` and `my-data-final.csv`. - > **Note:** - > - > The URI must contain the data source folder. + - **Target Database** and **Target Table**: select the target database and table to import the data to. + +6. Click **Next**. TiDB Cloud scans the source files accordingly. -6. Click **Start Import**. +7. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. -7. When the import progress shows **Completed**, check the imported tables. +8. When the import progress shows **Completed**, check the imported tables.
@@ -201,41 +207,42 @@ To import the CSV files to TiDB Cloud Serverless, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. -2. Select **Import data from Cloud Storage**, and then click **Azure Blob Storage**. +2. Click **Import data from Cloud Storage**. -3. On the **Import Data from Azure Blob Storage** page, provide the following information for the source CSV files: +3. On the **Import Data from Cloud Storage** page, provide the following information: - - **Import File Count**: select **One file** or **Multiple files** as needed. - - **Included Schema Files**: this field is only visible when importing multiple files. If the source folder contains the target table schemas, select **Yes**. Otherwise, select **No**. - - **Data Format**: select **CSV**. - - **File URI** or **Folder URI**: - - When importing one file, enter the source file URI and name in the following format `[azure|https]://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `[azure|https]://sampledata/ingest/TableName.01.csv`. - - When importing multiple files, enter the source file URI and name in the following format `[azure|https]://[bucket_name]/[data_source_folder]/`. For example, `[azure|https]://sampledata/ingest/`. - - **Bucket Access**: you can use a shared access signature (SAS) token to access your bucket. For more information, see [Configure Azure Blob Storage access](/tidb-cloud/serverless-external-storage.md#configure-azure-blob-storage-access). + - **Storage Provider**: select **Azure Blob Storage**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `[azure|https]://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `[azure|https]://sampledata/ingest/TableName.01.csv`. 
+ - When importing multiple files, enter the source folder URI in the following format `[azure|https]://[bucket_name]/[data_source_folder]/`. For example, `[azure|https]://sampledata/ingest/`. + - **Credential**: you can use a shared access signature (SAS) token to access your bucket. For more information, see [Configure Azure Blob Storage access](/tidb-cloud/configure-external-storage-access.md#configure-azure-blob-storage-access). -4. Click **Connect**. +4. Click **Next**. -5. In the **Destination** section, select the target database and table. +5. In the **Destination Mapping** section, specify how source files are mapped to target tables. - When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to define a custom mapping rule for each target table and its corresponding CSV file. After that, the data source files will be re-scanned using the provided custom mapping rule. + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. - When you enter the source file URI and name in **Source File URIs and Names**, make sure it is in the following format `[azure|https]://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `[azure|https]://sampledata/ingest/TableName.01.csv`. + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and TiDB Cloud automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. - You can also use wildcards to match the source files. 
For example: + - To let TiDB Cloud automatically map all source files that follow the [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to their corresponding tables, keep this option selected and select **CSV** as the data format. - - `[azure|https]://[bucket_name]/[data_source_folder]/my-data?.csv`: all CSV files starting with `my-data` followed by one character (such as `my-data1.csv` and `my-data2.csv`) in that folder will be imported into the same target table. + - To manually configure the mapping rules to associate your source CSV files with the target database and table, unselect this option, and then fill in the following fields: - - `[azure|https]://[bucket_name]/[data_source_folder]/my-data*.csv`: all CSV files in the folder starting with `my-data` will be imported into the same target table. + - **Source**: enter the file name pattern in the `[file_name].csv` format. For example: `TableName.01.csv`. You can also use wildcards to match multiple files. Only `*` and `?` wildcards are supported. - Note that only `?` and `*` are supported. + - `my-data?.csv`: matches all CSV files that start with `my-data` followed by a single character, such as `my-data1.csv` and `my-data2.csv`. + - `my-data*.csv`: matches all CSV files that start with `my-data`, such as `my-data-2023.csv` and `my-data-final.csv`. - > **Note:** - > - > The URI must contain the data source folder. + - **Target Database** and **Target Table**: select the target database and table to import the data to. + +6. Click **Next**. TiDB Cloud scans the source files accordingly. -6. Click **Start Import**. +7. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. -7. When the import progress shows **Completed**, check the imported tables. +8. When the import progress shows **Completed**, check the imported tables. 
@@ -251,47 +258,48 @@ To import the CSV files to TiDB Cloud Serverless, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. -2. Select **Import data from Cloud Storage**, and then click **Alibaba Cloud OSS**. +2. Click **Import data from Cloud Storage**. -3. On the **Import Data from Alibaba Cloud OSS** page, provide the following information for the source CSV files: +3. On the **Import Data from Cloud Storage** page, provide the following information: - - **Import File Count**: select **One file** or **Multiple files** as needed. - - **Included Schema Files**: this field is only visible when importing multiple files. If the source folder contains the target table schemas, select **Yes**. Otherwise, select **No**. - - **Data Format**: select **CSV**. - - **File URI** or **Folder URI**: - - When importing one file, enter the source file URI and name in the following format `oss://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `oss://sampledata/ingest/TableName.01.csv`. - - When importing multiple files, enter the source file URI and name in the following format `oss://[bucket_name]/[data_source_folder]/`. For example, `oss://sampledata/ingest/`. - - **Bucket Access**: you can use an AccessKey pair to access your bucket. For more information, see [Configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/serverless-external-storage.md#configure-alibaba-cloud-object-storage-service-oss-access). + - **Storage Provider**: select **Alibaba Cloud OSS**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `oss://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `oss://sampledata/ingest/TableName.01.csv`. + - When importing multiple files, enter the source folder URI in the following format `oss://[bucket_name]/[data_source_folder]/`. 
For example, `oss://sampledata/ingest/`. + - **Credential**: you can use an AccessKey pair to access your bucket. For more information, see [Configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/configure-external-storage-access.md#configure-alibaba-cloud-object-storage-service-oss-access). -4. Click **Connect**. +4. Click **Next**. -5. In the **Destination** section, select the target database and table. +5. In the **Destination Mapping** section, specify how source files are mapped to target tables. - When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to define a custom mapping rule for each target table and its corresponding CSV file. After that, the data source files will be re-scanned using the provided custom mapping rule. + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. - When you enter the source file URI and name in **Source File URIs and Names**, make sure it is in the following format `oss://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `oss://sampledata/ingest/TableName.01.csv`. + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and TiDB Cloud automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. - You can also use wildcards to match the source files. For example: + - To let TiDB Cloud automatically map all source files that follow the [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to their corresponding tables, keep this option selected and select **CSV** as the data format. 
- - `oss://[bucket_name]/[data_source_folder]/my-data?.csv`: all CSV files starting with `my-data` followed by one character (such as `my-data1.csv` and `my-data2.csv`) in that folder will be imported into the same target table. + - To manually configure the mapping rules to associate your source CSV files with the target database and table, unselect this option, and then fill in the following fields: - - `oss://[bucket_name]/[data_source_folder]/my-data*.csv`: all CSV files in the folder starting with `my-data` will be imported into the same target table. + - **Source**: enter the file name pattern in the `[file_name].csv` format. For example: `TableName.01.csv`. You can also use wildcards to match multiple files. Only `*` and `?` wildcards are supported. - Note that only `?` and `*` are supported. + - `my-data?.csv`: matches all CSV files that start with `my-data` followed by a single character, such as `my-data1.csv` and `my-data2.csv`. + - `my-data*.csv`: matches all CSV files that start with `my-data`, such as `my-data-2023.csv` and `my-data-final.csv`. - > **Note:** - > - > The URI must contain the data source folder. + - **Target Database** and **Target Table**: select the target database and table to import the data to. + +6. Click **Next**. TiDB Cloud scans the source files accordingly. -6. Click **Start Import**. +7. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. -7. When the import progress shows **Completed**, check the imported tables. +8. When the import progress shows **Completed**, check the imported tables.
-When you run an import task, if any unsupported or invalid conversions are detected, TiDB Cloud Serverless terminates the import job automatically and reports an importing error. +When you run an import task, if any unsupported or invalid conversions are detected, TiDB Cloud terminates the import job automatically and reports an importing error. If you get an importing error, do the following: diff --git a/tidb-cloud/import-csv-files.md b/tidb-cloud/import-csv-files.md index bd3b41367175b..3a1d35c2bf47c 100644 --- a/tidb-cloud/import-csv-files.md +++ b/tidb-cloud/import-csv-files.md @@ -10,15 +10,15 @@ This document describes how to import CSV files from Amazon Simple Storage Servi ## Limitations -- To ensure data consistency, TiDB Cloud allows to import CSV files into empty tables only. To import data into an existing table that already contains data, you can use TiDB Cloud to import the data into a temporary empty table by following this document, and then use the `INSERT SELECT` statement to copy the data to the target existing table. +- To ensure data consistency, TiDB Cloud allows importing CSV files into empty tables only. To import data into an existing table that already contains data, you can use TiDB Cloud to import the data into a temporary empty table by following this document, and then use the `INSERT SELECT` statement to copy the data to the target existing table. - If a TiDB Cloud Dedicated cluster has a [changefeed](/tidb-cloud/changefeed-overview.md) or has [Point-in-time Restore](/tidb-cloud/backup-and-restore.md#turn-on-point-in-time-restore) enabled, you cannot import data to the cluster (the **Import Data** button will be disabled) because the current data import feature uses the [physical import mode](https://docs.pingcap.com/tidb/stable/tidb-lightning-physical-import-mode). In this mode, the imported data does not generate change logs, so the changefeed and Point-in-time Restore cannot detect the imported data. ## Step 1. 
Prepare the CSV files -1. If a CSV file is larger than 256 MB, consider splitting it into smaller files, each with a size of around 256 MB. +1. If a CSV file is larger than 256 MiB, consider splitting it into smaller files, each with a size of around 256 MiB. - TiDB Cloud supports importing very large CSV files but performs best with multiple input files around 256 MB in size. This is because TiDB Cloud can process multiple files in parallel, which can greatly improve the import speed. + TiDB Cloud supports importing very large CSV files but performs best with multiple input files around 256 MiB in size. This is because TiDB Cloud can process multiple files in parallel, which can greatly improve the import speed. 2. Name the CSV files as follows: @@ -151,7 +151,7 @@ To import the CSV files to TiDB Cloud, take the following steps: 2. Select **Import data from Cloud Storage**. -3. On the **Import Data from Google Cloud Storage** page, provide the following information for the source CSV files: +3. On the **Import Data from Cloud Storage** page, provide the following information for the source CSV files: - **Included Schema Files**: if the source folder contains the target table schema files (such as `${db_name}-schema-create.sql`), select **Yes**. Otherwise, select **No**. - **Data Format**: select **CSV**. @@ -196,12 +196,32 @@ To import the CSV files to TiDB Cloud, take the following steps: - **Included Schema Files**: if the source folder contains the target table schema files (such as `${db_name}-schema-create.sql`), select **Yes**. Otherwise, select **No**. - **Data Format**: select **CSV**. + - **Connectivity Method**: select how TiDB Cloud connects to your Azure Blob Storage: + + - **Public** (default): connects over the public internet. Use this option when the storage account allows public network access. + - **Private Link**: connects through an Azure private endpoint for network-isolated access. 
Use this option when the storage account blocks public access or when your security policy requires private connectivity. If you select **Private Link**, you also need to fill in the additional field **Azure Blob Storage Resource ID**. To find the resource ID: + + 1. Go to the [Azure portal](https://portal.azure.com/). + 2. Navigate to your storage account and click **Overview** > **JSON View**. + 3. Copy the value of the `id` property. The resource ID is in the format `/subscriptions/[subscription_id]/resourceGroups/[resource_group]/providers/Microsoft.Storage/storageAccounts/[account_name]`. + - **Edit CSV Configuration**: if necessary, configure the options according to your CSV files. You can set the separator and delimiter characters, specify whether to use backslashes for escaped characters, and specify whether your files contain a header row. - **Folder URI**: enter the Azure Blob Storage URI where your source files are located using the format `https://[account_name].blob.core.windows.net/[container_name]/[data_source_folder]/`. The path must end with a `/`. For example, `https://myaccount.blob.core.windows.net/mycontainer/myfolder/`. - **SAS Token**: enter an account SAS token to allow TiDB Cloud to access the source files in your Azure Blob Storage container. If you don't have one yet, you can create it using the provided Azure ARM template by clicking **Click here to create a new one with Azure ARM template** and following the instructions on the screen. Alternatively, you can manually create an account SAS token. For more information, see [Configure Azure Blob Storage access](/tidb-cloud/dedicated-external-storage.md#configure-azure-blob-storage-access). 4. Click **Connect**. + If you selected **Private Link** as the connectivity method, TiDB Cloud creates a private endpoint for your storage account. You need to approve this endpoint request in the Azure portal before the connection can proceed: + + 1. Go to the [Azure portal](https://portal.azure.com/) and navigate to your storage account. + 2. 
Click **Networking** > **Private endpoint connections**. + 3. Find the pending connection request from TiDB Cloud and click **Approve**. + 4. Return to the [TiDB Cloud console](https://tidbcloud.com/). The import wizard proceeds automatically once the endpoint is approved. + + > **Note:** + > + > If the endpoint is not yet approved, TiDB Cloud displays a message indicating that the connection is pending approval. Approve the request in the [Azure portal](https://portal.azure.com/) and retry the connection. + 5. In the **Destination** section, select the target database and table. When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to customize the mapping of individual target tables to their corresponding CSV files. For each target database and table: diff --git a/tidb-cloud/import-parquet-files-serverless.md b/tidb-cloud/import-parquet-files-serverless.md index c7ba5cf642fba..3068b4e50e9bc 100644 --- a/tidb-cloud/import-parquet-files-serverless.md +++ b/tidb-cloud/import-parquet-files-serverless.md @@ -1,22 +1,23 @@ --- -title: Import Apache Parquet Files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud Serverless -summary: Learn how to import Apache Parquet files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service (OSS) into TiDB Cloud Serverless. +title: Import Apache Parquet Files from Cloud Storage into {{{ .starter }}} or Essential +summary: Learn how to import Apache Parquet files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service (OSS) into {{{ .starter }}} or {{{ .essential }}}. --- -# Import Apache Parquet Files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud Serverless +# Import Apache Parquet Files from Cloud Storage into {{{ .starter }}} or Essential -You can import both uncompressed and Snappy compressed [Apache Parquet](https://parquet.apache.org/) format data files to TiDB Cloud Serverless. 
This document describes how to import Parquet files from Amazon Simple Storage Service (Amazon S3), Google Cloud Storage (GCS), Azure Blob Storage, or Alibaba Cloud Object Storage Service (OSS) into TiDB Cloud Serverless. +You can import both uncompressed and Snappy compressed [Apache Parquet](https://parquet.apache.org/) format data files to {{{ .starter }}} or {{{ .essential }}}. This document describes how to import Parquet files from Amazon Simple Storage Service (Amazon S3), Google Cloud Storage (GCS), Azure Blob Storage, or Alibaba Cloud Object Storage Service (OSS) into {{{ .starter }}} or {{{ .essential }}}. > **Note:** > -> - TiDB Cloud Serverless only supports importing Parquet files into empty tables. To import data into an existing table that already contains data, you can use TiDB Cloud Serverless to import the data into a temporary empty table by following this document, and then use the `INSERT SELECT` statement to copy the data to the target existing table. +> - For TiDB Cloud Dedicated, see [Import Parquet Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-parquet-files.md). +> - TiDB Cloud only supports importing Parquet files into empty tables. To import data into an existing table that already contains data, you can import the data into a temporary empty table by following this document, and then use the `INSERT SELECT` statement to copy the data to the target existing table. > - The Snappy compressed file must be in the [official Snappy format](https://github.com/google/snappy). Other variants of Snappy compression are not supported. ## Step 1. Prepare the Parquet files > **Note:** > -> Currently, TiDB Cloud Serverless does not support importing Parquet files that contain any of the following data types. If Parquet files to be imported contain such data types, you need to first regenerate the Parquet files using the [supported data types](#supported-data-types) (for example, `STRING`). 
Alternatively, you could use a service such as AWS Glue to transform data types easily. +> Currently, TiDB Cloud does not support importing Parquet files that contain any of the following data types. If Parquet files to be imported contain such data types, you need to first regenerate the Parquet files using the [supported data types](#supported-data-types) (for example, `STRING`). Alternatively, you could use a service such as AWS Glue to transform data types easily. > > - `LIST` > - `NEST STRUCT` @@ -26,7 +27,7 @@ You can import both uncompressed and Snappy compressed [Apache Parquet](https:// 1. If a Parquet file is larger than 256 MB, consider splitting it into smaller files, each with a size around 256 MB. - TiDB Cloud Serverless supports importing very large Parquet files but performs best with multiple input files around 256 MB in size. This is because TiDB Cloud Serverless can process multiple files in parallel, which can greatly improve the import speed. + TiDB Cloud supports importing very large Parquet files but performs best with multiple input files around 256 MB in size. This is because TiDB Cloud can process multiple files in parallel, which can greatly improve the import speed. 2. Name the Parquet files as follows: @@ -35,13 +36,13 @@ You can import both uncompressed and Snappy compressed [Apache Parquet](https:// > **Note:** > - > If you cannot update the Parquet filenames according to the preceding rules in some cases (for example, the Parquet file links are also used by your other programs), you can keep the filenames unchanged and use the **Mapping Settings** in [Step 4](#step-4-import-parquet-files-to-tidb-cloud-serverless) to import your source data to a single target table. 
+ > If you cannot update the Parquet filenames according to the preceding rules in some cases (for example, the Parquet file links are also used by your other programs), you can keep the filenames unchanged and use the **Mapping Settings** in [Step 4](#step-4-import-parquet-files) to import your source data to a single target table. ## Step 2. Create the target table schemas -Because Parquet files do not contain schema information, before importing data from Parquet files into TiDB Cloud Serverless, you need to create the table schemas using either of the following methods: +Because Parquet files do not contain schema information, before importing data from Parquet files into TiDB Cloud, you need to create the table schemas using either of the following methods: -- Method 1: In TiDB Cloud Serverless, create the target databases and tables for your source data. +- Method 1: In TiDB Cloud, create the target databases and tables for your source data. - Method 2: In the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service directory where the Parquet files are located, create the target table schema files for your source data as follows: @@ -49,9 +50,9 @@ Because Parquet files do not contain schema information, before importing data f If your Parquet files follow the naming rules in [Step 1](#step-1-prepare-the-parquet-files), the database schema files are optional for the data import. Otherwise, the database schema files are mandatory. - Each database schema file must be in the `${db_name}-schema-create.sql` format and contain a `CREATE DATABASE` DDL statement. With this file, TiDB Cloud Serverless will create the `${db_name}` database to store your data when you import the data. + Each database schema file must be in the `${db_name}-schema-create.sql` format and contain a `CREATE DATABASE` DDL statement. With this file, TiDB Cloud will create the `${db_name}` database to store your data when you import the data. 
- For example, if you create a `mydb-scehma-create.sql` file that contains the following statement, TiDB Cloud Serverless will create the `mydb` database when you import the data. + For example, if you create a `mydb-schema-create.sql` file that contains the following statement, TiDB Cloud will create the `mydb` database when you import the data. ```sql CREATE DATABASE mydb; @@ -59,11 +60,11 @@ Because Parquet files do not contain schema information, before importing data f 2. Create table schema files for your source data. - If you do not include the table schema files in the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service directory where the Parquet files are located, TiDB Cloud Serverless will not create the corresponding tables for you when you import the data. + If you do not include the table schema files in the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service directory where the Parquet files are located, TiDB Cloud will not create the corresponding tables for you when you import the data. - Each table schema file must be in the `${db_name}.${table_name}-schema.sql` format and contain a `CREATE TABLE` DDL statement. With this file, TiDB Cloud Serverless will create the `${db_table}` table in the `${db_name}` database when you import the data. + Each table schema file must be in the `${db_name}.${table_name}-schema.sql` format and contain a `CREATE TABLE` DDL statement. With this file, TiDB Cloud will create the `${table_name}` table in the `${db_name}` database when you import the data. - For example, if you create a `mydb.mytable-schema.sql` file that contains the following statement, TiDB Cloud Serverless will create the `mytable` table in the `mydb` database when you import the data. + For example, if you create a `mydb.mytable-schema.sql` file that contains the following statement, TiDB Cloud will create the `mytable` table in the `mydb` database when you import the data. 
```sql CREATE TABLE mytable ( @@ -78,21 +79,21 @@ Because Parquet files do not contain schema information, before importing data f ## Step 3. Configure cross-account access -To allow TiDB Cloud Serverless to access the Parquet files in the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service bucket, do one of the following: +To allow TiDB Cloud to access the Parquet files in the Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud Object Storage Service bucket, do one of the following: -- If your Parquet files are located in Amazon S3, [configure external storage access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-amazon-s3-access). +- If your Parquet files are located in Amazon S3, [configure Amazon S3 access](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access) for your cluster. - You can use either an AWS access key or a Role ARN to access your bucket. Once finished, make a note of the access key (including the access key ID and secret access key) or the Role ARN value as you will need it in [Step 4](#step-4-import-parquet-files-to-tidb-cloud-serverless). + You can use either an AWS access key or a Role ARN to access your bucket. Once finished, make a note of the access key (including the access key ID and secret access key) or the Role ARN value as you will need it in [Step 4](#step-4-import-parquet-files). -- If your Parquet files are located in GCS, [configure external storage access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-gcs-access). +- If your Parquet files are located in GCS, [configure GCS access](/tidb-cloud/configure-external-storage-access.md#configure-gcs-access) for your cluster. -- If your Parquet files are located in Azure Blob Storage, [configure external storage access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-azure-blob-storage-access). 
+- If your Parquet files are located in Azure Blob Storage, [configure Azure Blob Storage access](/tidb-cloud/configure-external-storage-access.md#configure-azure-blob-storage-access) for your cluster. -- If your Parquet files are located in Alibaba Cloud Object Storage Service (OSS), [configure external storage access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-alibaba-cloud-object-storage-service-oss-access). +- If your Parquet files are located in Alibaba Cloud Object Storage Service (OSS), [configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/configure-external-storage-access.md#configure-alibaba-cloud-object-storage-service-oss-access) for your cluster. -## Step 4. Import Parquet files to TiDB Cloud Serverless +## Step 4. Import Parquet files -To import the Parquet files to TiDB Cloud Serverless, take the following steps: +To import the Parquet files to {{{ .starter }}} or {{{ .essential }}}, take the following steps:
@@ -107,43 +108,44 @@ To import the Parquet files to TiDB Cloud Serverless, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. -2. Select **Import data from Cloud Storage**, and then click **Amazon S3**. +2. Click **Import data from Cloud Storage**. -3. On the **Import Data from Amazon S3** page, provide the following information for the source Parquet files: +3. On the **Import Data from Cloud Storage** page, provide the following information: - - **Import File Count**: select **One file** or **Multiple files** as needed. - - **Included Schema Files**: this field is only visible when importing multiple files. If the source folder contains the target table schemas, select **Yes**. Otherwise, select **No**. - - **Data Format**: select **Parquet**. - - **File URI** or **Folder URI**: - - When importing one file, enter the source file URI and name in the following format `s3://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `s3://sampledata/ingest/TableName.01.parquet`. - - When importing multiple files, enter the source file URI and name in the following format `s3://[bucket_name]/[data_source_folder]/`. For example, `s3://sampledata/ingest/`. - - **Bucket Access**: you can use either an AWS Role ARN or an AWS access key to access your bucket. For more information, see [Configure Amazon S3 access](/tidb-cloud/serverless-external-storage.md#configure-amazon-s3-access). + - **Storage Provider**: select **Amazon S3**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `s3://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `s3://sampledata/ingest/TableName.01.parquet`. + - When importing multiple files, enter the source folder URI in the following format `s3://[bucket_name]/[data_source_folder]/`. For example, `s3://sampledata/ingest/`. 
+ - **Credential**: you can use either an AWS Role ARN or an AWS access key to access your bucket. For more information, see [Configure Amazon S3 access](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access). - **AWS Role ARN**: enter the AWS Role ARN value. - **AWS Access Key**: enter the AWS access key ID and AWS secret access key. -4. Click **Connect**. +4. Click **Next**. -5. In the **Destination** section, select the target database and table. +5. In the **Destination Mapping** section, specify how source files are mapped to target tables. - When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to define a custom mapping rule for each target table and its corresponding Parquet file. After that, the data source files will be re-scanned using the provided custom mapping rule. + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. - When you enter the source file URI and name in **Source File URIs and Names**, make sure it is in the following format `s3://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `s3://sampledata/ingest/TableName.01.parquet`. + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and TiDB Cloud automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. - You can also use wildcards to match the source files. For example: + - To let TiDB Cloud automatically map all source files that follow the [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to their corresponding tables, keep this option selected and select **Parquet** as the data format. 
- - `s3://[bucket_name]/[data_source_folder]/my-data?.parquet`: all Parquet files starting with `my-data` followed by one character (such as `my-data1.parquet` and `my-data2.parquet`) in that folder will be imported into the same target table. + - To manually configure the mapping rules to associate your source Parquet files with the target database and table, unselect this option, and then fill in the following fields: - - `s3://[bucket_name]/[data_source_folder]/my-data*.parquet`: all Parquet files in the folder starting with `my-data` will be imported into the same target table. + - **Source**: enter the file name pattern in the `[file_name].parquet` format. For example: `TableName.01.parquet`. You can also use wildcards to match multiple files. Only `*` and `?` wildcards are supported. - Note that only `?` and `*` are supported. + - `my-data?.parquet`: matches all Parquet files that start with `my-data` followed by a single character, such as `my-data1.parquet` and `my-data2.parquet`. + - `my-data*.parquet`: matches all Parquet files that start with `my-data`, such as `my-data-2023.parquet` and `my-data-final.parquet`. - > **Note:** - > - > The URI must contain the data source folder. + - **Target Database** and **Target Table**: select the target database and table to import the data to. + +6. Click **Next**. TiDB Cloud scans the source files accordingly. -6. Click **Start Import**. +7. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. -7. When the import progress shows **Completed**, check the imported tables. +8. When the import progress shows **Completed**, check the imported tables.
@@ -159,41 +161,42 @@ To import the Parquet files to TiDB Cloud Serverless, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. -2. Select **Import data from Cloud Storage**, and then click **Google Cloud Storage**. +2. Click **Import data from Cloud Storage**. -3. On the **Import Data from Google Cloud Storage** page, provide the following information for the source Parquet files: +3. On the **Import Data from Cloud Storage** page, provide the following information: - - **Import File Count**: select **One file** or **Multiple files** as needed. - - **Included Schema Files**: this field is only visible when importing multiple files. If the source folder contains the target table schemas, select **Yes**. Otherwise, select **No**. - - **Data Format**: select **Parquet**. - - **File URI** or **Folder URI**: - - When importing one file, enter the source file URI and name in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `[gcs|gs]://sampledata/ingest/TableName.01.parquet`. - - When importing multiple files, enter the source file URI and name in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/`. For example, `[gcs|gs]://sampledata/ingest/`. - - **Bucket Access**: you can use a GCS IAM Role to access your bucket. For more information, see [Configure GCS access](/tidb-cloud/serverless-external-storage.md#configure-gcs-access). + - **Storage Provider**: select **Google Cloud Storage**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `[gcs|gs]://sampledata/ingest/TableName.01.parquet`. + - When importing multiple files, enter the source folder URI in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/`. 
For example, `[gcs|gs]://sampledata/ingest/`. + - **Credential**: you can use a GCS IAM Role Service Account key to access your bucket. For more information, see [Configure GCS access](/tidb-cloud/configure-external-storage-access.md#configure-gcs-access). -4. Click **Connect**. +4. Click **Next**. -5. In the **Destination** section, select the target database and table. +5. In the **Destination Mapping** section, specify how source files are mapped to target tables. - When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to define a custom mapping rule for each target table and its corresponding Parquet file. After that, the data source files will be re-scanned using the provided custom mapping rule. + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. - When you enter the source file URI and name in **Source File URIs and Names**, make sure it is in the following format `[gcs|gs]://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `[gcs|gs]://sampledata/ingest/TableName.01.parquet`. + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and TiDB Cloud automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. - You can also use wildcards to match the source files. For example: + - To let TiDB Cloud automatically map all source files that follow the [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to their corresponding tables, keep this option selected and select **Parquet** as the data format. 
- - `[gcs|gs]://[bucket_name]/[data_source_folder]/my-data?.parquet`: all Parquet files starting with `my-data` followed by one character (such as `my-data1.parquet` and `my-data2.parquet`) in that folder will be imported into the same target table. + - To manually configure the mapping rules to associate your source Parquet files with the target database and table, unselect this option, and then fill in the following fields: - - `[gcs|gs]://[bucket_name]/[data_source_folder]/my-data*.parquet`: all Parquet files in the folder starting with `my-data` will be imported into the same target table. + - **Source**: enter the file name pattern in the `[file_name].parquet` format. For example: `TableName.01.parquet`. You can also use wildcards to match multiple files. Only `*` and `?` wildcards are supported. - Note that only `?` and `*` are supported. + - `my-data?.parquet`: matches all Parquet files that start with `my-data` followed by a single character, such as `my-data1.parquet` and `my-data2.parquet`. + - `my-data*.parquet`: matches all Parquet files that start with `my-data`, such as `my-data-2023.parquet` and `my-data-final.parquet`. - > **Note:** - > - > The URI must contain the data source folder. + - **Target Database** and **Target Table**: select the target database and table to import the data to. + +6. Click **Next**. TiDB Cloud scans the source files accordingly. -6. Click **Start Import**. +7. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. -7. When the import progress shows **Completed**, check the imported tables. +8. When the import progress shows **Completed**, check the imported tables. @@ -209,41 +212,42 @@ To import the Parquet files to TiDB Cloud Serverless, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. -2. 
Select **Import data from Cloud Storage**, and then click **Azure Blob Storage**. +2. Click **Import data from Cloud Storage**. -3. On the **Import Data from Azure Blob Storage** page, provide the following information for the source Parquet files: +3. On the **Import Data from Cloud Storage** page, provide the following information: - - **Import File Count**: select **One file** or **Multiple files** as needed. - - **Included Schema Files**: this field is only visible when importing multiple files. If the source folder contains the target table schemas, select **Yes**. Otherwise, select **No**. - - **Data Format**: select **Parquet**. - - **File URI** or **Folder URI**: - - When importing one file, enter the source file URI and name in the following format `[azure|https]://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `[azure|https]://sampledata/ingest/TableName.01.parquet`. - - When importing multiple files, enter the source file URI and name in the following format `[azure|https]://[bucket_name]/[data_source_folder]/`. For example, `[azure|https]://sampledata/ingest/`. - - **Bucket Access**: you can use a shared access signature (SAS) token to access your bucket. For more information, see [Configure Azure Blob Storage access](/tidb-cloud/serverless-external-storage.md#configure-azure-blob-storage-access). + - **Storage Provider**: select **Azure Blob Storage**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `[azure|https]://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `[azure|https]://sampledata/ingest/TableName.01.parquet`. + - When importing multiple files, enter the source folder URI in the following format `[azure|https]://[bucket_name]/[data_source_folder]/`. For example, `[azure|https]://sampledata/ingest/`. + - **Credential**: you can use a shared access signature (SAS) token to access your bucket. 
For more information, see [Configure Azure Blob Storage access](/tidb-cloud/configure-external-storage-access.md#configure-azure-blob-storage-access). -4. Click **Connect**. +4. Click **Next**. -5. In the **Destination** section, select the target database and table. +5. In the **Destination Mapping** section, specify how source files are mapped to target tables. - When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to define a custom mapping rule for each target table and its corresponding Parquet file. After that, the data source files will be re-scanned using the provided custom mapping rule. + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. - When you enter the source file URI and name in **Source File URIs and Names**, make sure it is in the following format `[azure|https]://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `[azure|https]://sampledata/ingest/TableName.01.parquet`. + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and TiDB Cloud automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. - You can also use wildcards to match the source files. For example: + - To let TiDB Cloud automatically map all source files that follow the [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to their corresponding tables, keep this option selected and select **Parquet** as the data format. 
- - `[azure|https]://[bucket_name]/[data_source_folder]/my-data?.parquet`: all Parquet files starting with `my-data` followed by one character (such as `my-data1.parquet` and `my-data2.parquet`) in that folder will be imported into the same target table. + - To manually configure the mapping rules to associate your source Parquet files with the target database and table, unselect this option, and then fill in the following fields: - - `[azure|https]://[bucket_name]/[data_source_folder]/my-data*.parquet`: all Parquet files in the folder starting with `my-data` will be imported into the same target table. + - **Source**: enter the file name pattern in the `[file_name].parquet` format. For example: `TableName.01.parquet`. You can also use wildcards to match multiple files. Only `*` and `?` wildcards are supported. - Note that only `?` and `*` are supported. + - `my-data?.parquet`: matches all Parquet files that start with `my-data` followed by a single character, such as `my-data1.parquet` and `my-data2.parquet`. + - `my-data*.parquet`: matches all Parquet files that start with `my-data`, such as `my-data-2023.parquet` and `my-data-final.parquet`. - > **Note:** - > - > The URI must contain the data source folder. + - **Target Database** and **Target Table**: select the target database and table to import the data to. + +6. Click **Next**. TiDB Cloud scans the source files accordingly. -6. Click **Start Import**. +7. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. -7. When the import progress shows **Completed**, check the imported tables. +8. When the import progress shows **Completed**, check the imported tables. @@ -259,47 +263,48 @@ To import the Parquet files to TiDB Cloud Serverless, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. -2. 
Select **Import data from Cloud Storage**, and then click **Alibaba Cloud OSS**. +2. Click **Import data from Cloud Storage**. -3. On the **Import Data from Alibaba Cloud OSS** page, provide the following information for the source Parquet files: +3. On the **Import Data from Cloud Storage** page, provide the following information: - - **Import File Count**: select **One file** or **Multiple files** as needed. - - **Included Schema Files**: this field is only visible when importing multiple files. If the source folder contains the target table schemas, select **Yes**. Otherwise, select **No**. - - **Data Format**: select **Parquet**. - - **File URI** or **Folder URI**: - - When importing one file, enter the source file URI and name in the following format `oss://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `oss://sampledata/ingest/TableName.01.parquet`. - - When importing multiple files, enter the source file URI and name in the following format `oss://[bucket_name]/[data_source_folder]/`. For example, `oss://sampledata/ingest/`. - - **Bucket Access**: you can use an AccessKey pair to access your bucket. For more information, see [Configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/serverless-external-storage.md#configure-alibaba-cloud-object-storage-service-oss-access). + - **Storage Provider**: select **Alibaba Cloud OSS**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `oss://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `oss://sampledata/ingest/TableName.01.parquet`. + - When importing multiple files, enter the source folder URI in the following format `oss://[bucket_name]/[data_source_folder]/`. For example, `oss://sampledata/ingest/`. + - **Credential**: you can use an AccessKey pair to access your bucket. 
For more information, see [Configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/configure-external-storage-access.md#configure-alibaba-cloud-object-storage-service-oss-access). -4. Click **Connect**. +4. Click **Next**. -5. In the **Destination** section, select the target database and table. +5. In the **Destination Mapping** section, specify how source files are mapped to target tables. - When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to define a custom mapping rule for each target table and its corresponding Parquet file. After that, the data source files will be re-scanned using the provided custom mapping rule. + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. - When you enter the source file URI and name in **Source File URIs and Names**, make sure it is in the following format `oss://[bucket_name]/[data_source_folder]/[file_name].parquet`. For example, `oss://sampledata/ingest/TableName.01.parquet`. + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and TiDB Cloud automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. - You can also use wildcards to match the source files. For example: + - To let TiDB Cloud automatically map all source files that follow the [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to their corresponding tables, keep this option selected and select **Parquet** as the data format. 
- - `oss://[bucket_name]/[data_source_folder]/my-data?.parquet`: all Parquet files starting with `my-data` followed by one character (such as `my-data1.parquet` and `my-data2.parquet`) in that folder will be imported into the same target table. + - To manually configure the mapping rules to associate your source Parquet files with the target database and table, unselect this option, and then fill in the following fields: - - `oss://[bucket_name]/[data_source_folder]/my-data*.parquet`: all Parquet files in the folder starting with `my-data` will be imported into the same target table. + - **Source**: enter the file name pattern in the `[file_name].parquet` format. For example: `TableName.01.parquet`. You can also use wildcards to match multiple files. Only `*` and `?` wildcards are supported. - Note that only `?` and `*` are supported. + - `my-data?.parquet`: matches all Parquet files that start with `my-data` followed by a single character, such as `my-data1.parquet` and `my-data2.parquet`. + - `my-data*.parquet`: matches all Parquet files that start with `my-data`, such as `my-data-2023.parquet` and `my-data-final.parquet`. - > **Note:** - > - > The URI must contain the data source folder. + - **Target Database** and **Target Table**: select the target database and table to import the data to. + +6. Click **Next**. TiDB Cloud scans the source files accordingly. -6. Click **Start Import**. +7. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. -7. When the import progress shows **Completed**, check the imported tables. +8. When the import progress shows **Completed**, check the imported tables.
-When you run an import task, if any unsupported or invalid conversions are detected, TiDB Cloud Serverless terminates the import job automatically and reports an importing error. +When you run an import task, if any unsupported or invalid conversions are detected, TiDB Cloud terminates the import job automatically and reports an importing error. If you get an importing error, do the following: @@ -313,7 +318,7 @@ If you get an importing error, do the following: ## Supported data types -The following table lists the supported Parquet data types that can be imported to TiDB Cloud Serverless. +The following table lists the supported Parquet data types that can be imported to {{{ .starter }}} and {{{ .essential }}}. | Parquet Primitive Type | Parquet Logical Type | Types in TiDB or MySQL | |---|---|---| diff --git a/tidb-cloud/import-parquet-files.md b/tidb-cloud/import-parquet-files.md index a629428e6b860..0a64ad746f9e3 100644 --- a/tidb-cloud/import-parquet-files.md +++ b/tidb-cloud/import-parquet-files.md @@ -156,7 +156,7 @@ To import the Parquet files to TiDB Cloud, take the following steps: 2. Select **Import data from Cloud Storage**. -3. On the **Import Data from Google Cloud Storage** page, provide the following information for the source Parquet files: +3. On the **Import Data from Cloud Storage** page, provide the following information for the source Parquet files: - **Included Schema Files**: if the source folder contains the target table schema files (such as `${db_name}-schema-create.sql`), select **Yes**. Otherwise, select **No**. - **Data Format**: select **Parquet**. @@ -200,11 +200,31 @@ To import the Parquet files to TiDB Cloud, take the following steps: - **Included Schema Files**: if the source folder contains the target table schema files (such as `${db_name}-schema-create.sql`), select **Yes**. Otherwise, select **No**. - **Data Format**: select **Parquet**. 
+ - **Connectivity Method**: select how TiDB Cloud connects to your Azure Blob Storage: + + - **Public** (default): connects over the public internet. Use this option when the storage account allows public network access. + - **Private Link**: connects through an Azure private endpoint for network-isolated access. Use this option when the storage account blocks public access or when your security policy requires private connectivity. If you select **Private Link**, you also need to fill in the additional field **Azure Blob Storage Resource ID**. To find the resource ID: + + 1. Go to the [Azure portal](https://portal.azure.com/). + 2. Navigate to your storage account and click **Overview** > **JSON View**. + 3. Copy the value of the `id` property. The resource ID is in the format `/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts/`. + - **Folder URI**: enter the Azure Blob Storage URI where your source files are located using the format `https://[account_name].blob.core.windows.net/[container_name]/[data_source_folder]/`. The path must end with a `/`. For example, `https://myaccount.blob.core.windows.net/mycontainer/data-ingestion/`. - **SAS Token**: enter an account SAS token to allow TiDB Cloud to access the source files in your Azure Blob Storage container. If you don't have one yet, you can create it using the provided Azure ARM template by clicking **Click here to create a new one with Azure ARM template** and following the instructions on the screen. Alternatively, you can manually create an account SAS token. For more information, see [Configure Azure Blob Storage access](/tidb-cloud/dedicated-external-storage.md#configure-azure-blob-storage-access). 4. Click **Connect**. + If you selected **Private Link** as the connectivity method, TiDB Cloud creates a private endpoint for your storage account. You need to approve this endpoint request in the Azure portal before the connection can proceed: + + 1. 
Go to the [Azure portal](https://portal.azure.com/) and navigate to your storage account. + 2. Click **Networking** > **Private endpoint connections**. + 3. Find the pending connection request from TiDB Cloud and click **Approve**. + 4. Return to the [TiDB Cloud console](https://tidbcloud.com/). The import wizard proceeds automatically once the endpoint is approved. + + > **Note:** + > + > If the endpoint is not yet approved, TiDB Cloud displays a message indicating that the connection is pending approval. Approve the request in the [Azure portal](https://portal.azure.com/) and retry the connection. + 5. In the **Destination** section, select the target database and table. When importing multiple files, you can use **Advanced Settings** > **Mapping Settings** to customize the mapping of individual target tables to their corresponding Parquet files. For each target database and table: diff --git a/tidb-cloud/import-sample-data-serverless.md b/tidb-cloud/import-sample-data-serverless.md index ebd2cc08c2b1a..fdfc108621791 100644 --- a/tidb-cloud/import-sample-data-serverless.md +++ b/tidb-cloud/import-sample-data-serverless.md @@ -1,15 +1,15 @@ --- -title: Import Sample Data into TiDB Cloud Serverless -summary: Learn how to import sample data into TiDB Cloud Serverless via the UI. +title: Import Sample Data (SQL Files) into {{{ .starter }}} or Essential from Cloud Storage +summary: Learn how to import sample data into {{{ .starter }}} or {{{ .essential }}} via the UI. --- -# Import Sample Data into TiDB Cloud Serverless +# Import Sample Data (SQL Files) into {{{ .starter }}} or Essential from Cloud Storage -This document describes how to import the sample data into TiDB Cloud Serverless via the UI. The sample data used is the system data from Capital Bikeshare, released under the Capital Bikeshare Data License Agreement. Before importing the sample data, you need to have one TiDB cluster. 
+This document describes how to import the sample data (SQL files) into {{{ .starter }}} or {{{ .essential }}} via the UI. The sample data used is the system data from Capital Bikeshare, released under the Capital Bikeshare Data License Agreement. Before importing the sample data, you need to have one TiDB cluster. > **Note:** > -> TiDB Cloud Serverless currently only supports importing sample data from Amazon S3. +> The sample data used in this document is from Amazon S3. 1. Open the **Import** page for your target cluster. @@ -21,21 +21,25 @@ This document describes how to import the sample data into TiDB Cloud Serverless 2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Import** in the left navigation pane. -2. Select **Import data from Cloud Storage**, and then click **Amazon S3**. +2. Click **Import data from Cloud Storage**. -3. On the **Import Data from Amazon S3** page, configure the following source data information: +3. On the **Import Data from Cloud Storage** page, provide the following information: - - **Import File Count**: for the sample data, select **Multiple files**. - - **Included Schema Files**: for the sample data, select **Yes**. - - **Data Format**: select **SQL**. - - **Folder URI** or **File URI**: enter the sample data URI `s3://tidbcloud-sample-data/data-ingestion/`. - - **Bucket Access**: for the sample data, you can only use a Role ARN to access its bucket. For your own data, you can use either an AWS access key or a Role ARN to access your bucket. - - **AWS Role ARN**: enter `arn:aws:iam::801626783489:role/import-sample-access`. - - **AWS Access Key**: skip this option for the sample data. + - **Storage Provider**: select **Amazon S3**. + - **Source Files URI**: enter the sample data URI `s3://tidbcloud-sample-data/data-ingestion/`. + - **Credential**: + - **AWS Role ARN**: enter `arn:aws:iam::801626783489:role/import-sample-access`. 
+ - **AWS Access Key**: skip this option for the sample data. -4. Click **Connect** > **Start Import**. +4. Click **Next**. -When the data import progress shows **Completed**, you have successfully imported the sample data and the database schema to your database in TiDB Cloud Serverless. +5. In the **Destination Mapping** section, keep the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option selected and select **SQL** as the data format. + +6. Click **Next**. + +7. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. + +8. When the import progress shows **Completed**, check the imported tables. After connecting to the cluster, you can run some queries in your terminal to check the result, for example: diff --git a/tidb-cloud/import-sample-data.md b/tidb-cloud/import-sample-data.md index 2c57cbef5627d..d19a24b1774cc 100644 --- a/tidb-cloud/import-sample-data.md +++ b/tidb-cloud/import-sample-data.md @@ -1,11 +1,11 @@ --- -title: Import Sample Data into TiDB Cloud Dedicated +title: Import Sample Data (SQL Files) from Cloud Storage into TiDB Cloud Dedicated summary: Learn how to import sample data into TiDB Cloud Dedicated via the UI. --- -# Import Sample Data into TiDB Cloud Dedicated +# Import Sample Data (SQL Files) from Cloud Storage into TiDB Cloud Dedicated -This document describes how to import the sample data into TiDB Cloud Dedicated via the UI. The sample data used is the system data from Capital Bikeshare, released under the Capital Bikeshare Data License Agreement. Before importing the sample data, you need to have one TiDB cluster. +This document describes how to import the sample data (SQL files) into TiDB Cloud Dedicated via the UI. The sample data used is the system data from Capital Bikeshare, released under the Capital Bikeshare Data License Agreement. Before importing the sample data, you need to have one TiDB cluster.
@@ -79,6 +79,15 @@ This document describes how to import the sample data into TiDB Cloud Dedicated - **Included Schema Files**: for the sample data, select **Yes**. - **Data Format**: select **SQL**. + - **Connectivity Method**: select how TiDB Cloud connects to your Azure Blob Storage. To import the sample data, you can use the default connectivity method. + + - **Public** (default): connects over the public internet. Use this option when the storage account allows public network access. + - **Private Link**: connects through an Azure private endpoint for network-isolated access. Use this option when the storage account blocks public access or when your security policy requires private connectivity. If you select **Private Link**, you also need to fill in the additional field **Azure Blob Storage Resource ID**. To find the resource ID: + + 1. Go to the [Azure portal](https://portal.azure.com/). + 2. Navigate to your storage account and click **Overview** > **JSON View**. + 3. Copy the value of the `id` property. The resource ID is in the format `/subscriptions/<subscription-id>/resourceGroups/<resource-group-name>/providers/Microsoft.Storage/storageAccounts/<storage-account-name>`. + - **Folder URI**: enter the sample data URI `https://tcidmsampledata.blob.core.windows.net/sql/`. + - **SAS Token**: - For the sample data, use the following **SAS Token**: `sv=2015-04-05&ss=b&srt=co&sp=rl&se=2099-03-01T00%3A00%3A01.0000000Z&sig=cQHvaofmVsUJEbgyf4JFkAwTJGsFOmbQHx03GvVMrNc%3D`. @@ -86,7 +95,20 @@ This document describes how to import the sample data into TiDB Cloud Dedicated If the region of the storage account is different from your cluster, confirm the compliance of cross region. -4. Click **Connect** > **Start Import**. +4. Click **Connect**. + + If you selected **Private Link** as the connectivity method, TiDB Cloud creates a private endpoint for your storage account. You need to approve this endpoint request in the Azure portal before the connection can proceed: + + 1. 
Go to the [Azure portal](https://portal.azure.com/) and navigate to your storage account. + 2. Click **Networking** > **Private endpoint connections**. + 3. Find the pending connection request from TiDB Cloud and click **Approve**. + 4. Return to the [TiDB Cloud console](https://tidbcloud.com/). The import wizard proceeds automatically once the endpoint is approved. + + > **Note:** + > + > If the endpoint is not yet approved, TiDB Cloud displays a message indicating that the connection is pending approval. Approve the request in the [Azure portal](https://portal.azure.com/) and retry the connection. + +5. Click **Start Import**.
diff --git a/tidb-cloud/import-snapshot-files-serverless.md b/tidb-cloud/import-snapshot-files-serverless.md new file mode 100644 index 0000000000000..28fbbc8e234bf --- /dev/null +++ b/tidb-cloud/import-snapshot-files-serverless.md @@ -0,0 +1,10 @@ +--- +title: Import Snapshot Files into {{{ .starter }}} or Essential +summary: Learn how to import Amazon Aurora or RDS for MySQL snapshot files into {{{ .starter }}} or Essential. +--- + +# Import Snapshot Files into {{{ .starter }}} or Essential + +You can import snapshot files from Amazon Aurora or RDS for MySQL into {{{ .starter }}} or Essential. You need to export these snapshot files from Amazon Aurora or RDS for MySQL as Parquet files first. To ensure a successful import, your data files must follow specific naming conventions. For example, each source data file must have a `.parquet` suffix and be located in a folder named `{db_name}.{table_name}/`. For complete details, see [Naming Conventions for Data Import](/tidb-cloud/naming-conventions-for-data-import.md). + +The import process for snapshot files is the same as for other Parquet files. For step-by-step instructions, see [Import Apache Parquet Files from Cloud Storage into {{{ .starter }}} or Essential](/tidb-cloud/import-parquet-files-serverless.md). diff --git a/tidb-cloud/import-snapshot-files.md b/tidb-cloud/import-snapshot-files.md new file mode 100644 index 0000000000000..acd0ee2530e6f --- /dev/null +++ b/tidb-cloud/import-snapshot-files.md @@ -0,0 +1,10 @@ +--- +title: Import Snapshot Files into TiDB Cloud Dedicated +summary: Learn how to import Amazon Aurora or RDS for MySQL snapshot files into TiDB Cloud Dedicated. +--- + +# Import Snapshot Files into TiDB Cloud Dedicated + +You can import snapshot files from Amazon Aurora or RDS for MySQL into TiDB Cloud Dedicated. These snapshots are exported as Parquet files. To ensure a successful import, your data files must follow specific naming conventions. 
For example, each source data file must have a `.parquet` suffix and be located in a folder named `{db_name}.{table_name}/`. For complete details, see [Naming Conventions for Data Import](/tidb-cloud/naming-conventions-for-data-import.md). + +The import process for snapshot files is the same as for other Parquet files. For step-by-step instructions, see [Import Apache Parquet Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-parquet-files.md). diff --git a/tidb-cloud/import-with-mysql-cli-serverless.md b/tidb-cloud/import-with-mysql-cli-serverless.md index 50feb0bc7da3c..9d25b8416f6c3 100644 --- a/tidb-cloud/import-with-mysql-cli-serverless.md +++ b/tidb-cloud/import-with-mysql-cli-serverless.md @@ -1,20 +1,20 @@ --- -title: Import Data into TiDB Cloud Serverless via MySQL CLI -summary: Learn how to import Data into TiDB Cloud Serverless via MySQL CLI. +title: Import Data into {{{ .starter }}} or Essential via MySQL CLI +summary: Learn how to import Data into {{{ .starter }}} or {{{ .essential }}} via MySQL CLI. --- -# Import Data into TiDB Cloud Serverless via MySQL CLI +# Import Data into {{{ .starter }}} or Essential via MySQL CLI -This document describes how to import data into TiDB Cloud Serverless via the [MySQL Command-Line Client](https://dev.mysql.com/doc/refman/8.0/en/mysql.html). You can import data from an SQL file or a CSV file. The following sections provide step-by-step instructions for importing data from each type of file. +This document describes how to import data into {{{ .starter }}} or {{{ .essential }}} via the [MySQL Command-Line Client](https://dev.mysql.com/doc/refman/8.0/en/mysql.html). You can import data from an SQL file or a CSV file. The following sections provide step-by-step instructions for importing data from each type of file. 
## Prerequisites -Before you can import data via MySQL CLI to TiDB Cloud Serverless, you need the following prerequisites: +Before you can import data via MySQL CLI to {{{ .starter }}} or {{{ .essential }}}, you need the following prerequisites: -- You have access to your TiDB Cloud Serverless cluster. If you do not have, create one following the instructions in [Build a TiDB Cloud Serverless Cluster](/develop/dev-guide-build-cluster-in-cloud.md). +- You have access to your {{{ .starter }}} or {{{ .essential }}} cluster. If you do not have one, create one following the instructions in [Build a TiDB Cloud Cluster](/develop/dev-guide-build-cluster-in-cloud.md). - Install MySQL CLI on your local computer. -## Step 1. Connect to your TiDB Cloud Serverless cluster +## Step 1. Connect to your {{{ .starter }}} or {{{ .essential }}} cluster Connect to your TiDB cluster. @@ -53,7 +53,7 @@ INSERT INTO products (product_id, product_name, price) VALUES (3, 'Tablet', 299.99); ``` -## Step 3. Import data from a SQL or CSV file +## Step 3. Import data from an SQL or CSV file You can import data from an SQL file or a CSV file. The following sections provide step-by-step instructions for importing data from each type. diff --git a/tidb-cloud/import-with-mysql-cli.md b/tidb-cloud/import-with-mysql-cli.md index a7e0057918f2a..22e3e800069e8 100644 --- a/tidb-cloud/import-with-mysql-cli.md +++ b/tidb-cloud/import-with-mysql-cli.md @@ -49,7 +49,7 @@ INSERT INTO products (product_id, product_name, price) VALUES (3, 'Tablet', 299.99); ``` -## Step 3. Import data from a SQL or CSV file +## Step 3. Import data from an SQL or CSV file You can import data from an SQL file or a CSV file. The following sections provide step-by-step instructions for importing data from each type. 
diff --git a/tidb-cloud/integrate-tidbcloud-with-airbyte.md b/tidb-cloud/integrate-tidbcloud-with-airbyte.md index 94558925f6602..21bc0e5e8c20e 100644 --- a/tidb-cloud/integrate-tidbcloud-with-airbyte.md +++ b/tidb-cloud/integrate-tidbcloud-with-airbyte.md @@ -58,11 +58,11 @@ Conveniently, the steps are the same for setting TiDB as the source and the dest 3. Enable **SSL Connection**, and set TLS protocols to **TLSv1.2** or **TLSv1.3** in **JDBC URL Params**. - > Note: + > **Note:** > > - TiDB Cloud supports TLS connection. You can choose your TLS protocols in **TLSv1.2** and **TLSv1.3**, for example, `enabledTLSProtocols=TLSv1.2`. > - If you want to disable TLS connection to TiDB Cloud via JDBC, you need to set useSSL to `false` in JDBC URL Params specifically and close SSL connection, for example, `useSSL=false`. - > - TiDB Cloud Serverless only supports TLS connections. + > - {{{ .starter }}} and {{{ .essential }}} only support TLS connections. 4. Click **Set up source** or **destination** to complete creating the connector. The following screenshot shows the configuration of TiDB as the source. diff --git a/tidb-cloud/integrate-tidbcloud-with-aws-lambda.md b/tidb-cloud/integrate-tidbcloud-with-aws-lambda.md index db4a2c62bcf2d..d122b7a470b37 100644 --- a/tidb-cloud/integrate-tidbcloud-with-aws-lambda.md +++ b/tidb-cloud/integrate-tidbcloud-with-aws-lambda.md @@ -1,20 +1,24 @@ --- -title: Integrate TiDB Cloud Serverless with Amazon Lambda Using AWS CloudFormation -summary: Introduce how to integrate TiDB Cloud Serverless with Amazon Lambda and CloudFormation step by step. +title: Integrate {{{ .starter }}} with Amazon Lambda Using AWS CloudFormation +summary: Introduce how to integrate {{{ .starter }}} with Amazon Lambda and CloudFormation step by step. 
--- -# Integrate TiDB Cloud Serverless with Amazon Lambda Using AWS CloudFormation +# Integrate {{{ .starter }}} with Amazon Lambda Using AWS CloudFormation -This document provides a step-by-step guide on how to use [AWS CloudFormation](https://aws.amazon.com/cloudformation/) to integrate [TiDB Cloud Serverless](https://www.pingcap.com/tidb-cloud/), a cloud-native distributed SQL database, with [AWS Lambda](https://aws.amazon.com/lambda/), a serverless and event-driven compute service. By integrating TiDB Cloud Serverless with Amazon Lambda, you can leverage the scalability and cost-efficiency of microservices through TiDB Cloud Serverless and AWS Lambda. AWS CloudFormation automates the creation and management of AWS resources, including Lambda functions, API Gateway, and Secrets Manager. +This document provides a step-by-step guide on how to use [AWS CloudFormation](https://aws.amazon.com/cloudformation/) to integrate [{{{ .starter }}}](https://www.pingcap.com/tidb-cloud-starter/), a cloud-native distributed SQL database, with [AWS Lambda](https://aws.amazon.com/lambda/), a serverless and event-driven compute service. By integrating {{{ .starter }}} with Amazon Lambda, you can leverage the scalability and cost-efficiency of microservices through {{{ .starter }}} and AWS Lambda. AWS CloudFormation automates the creation and management of AWS resources, including Lambda functions, API Gateway, and Secrets Manager. + +> **Note:** +> +> In addition to {{{ .starter }}} clusters, the steps in this document also work with {{{ .essential }}} clusters. ## Solution overview In this guide, you will create a fully functional online bookshop with the following components: -- AWS Lambda Function: handles requests and queries data from a TiDB Cloud Serverless cluster using Sequelize ORM and Fastify API framework. -- AWS Secrets Manager SDK: retrieves and manages connection configurations for the TiDB Cloud Serverless cluster. 
+- AWS Lambda Function: handles requests and queries data from a {{{ .starter }}} cluster using Sequelize ORM and Fastify API framework. +- AWS Secrets Manager SDK: retrieves and manages connection configurations for the {{{ .starter }}} cluster. - AWS API Gateway: handles HTTP request routes. -- TiDB Cloud Serverless: a cloud-native distributed SQL database. +- {{{ .starter }}}: a cloud-native distributed SQL database. AWS CloudFormation is used to create the necessary resources for the project, including the Secrets Manager, API Gateway, and Lambda Functions. @@ -33,7 +37,7 @@ Before getting started, ensure that you have the following: - [Lambda services](https://aws.amazon.com/lambda/) - [S3](https://aws.amazon.com/s3/) - [IAM Roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) -- A [TiDB Cloud](https://tidbcloud.com) account and a TiDB Cloud Serverless cluster. Get the connection information for your TiDB Cloud Serverless cluster: +- A [TiDB Cloud](https://tidbcloud.com) account and a {{{ .starter }}} cluster. Get the connection information for your {{{ .starter }}} cluster: ![TiDB Cloud connection information](/media/develop/aws-lambda-tidbcloud-connection-info.png) diff --git a/tidb-cloud/integrate-tidbcloud-with-cloudflare.md b/tidb-cloud/integrate-tidbcloud-with-cloudflare.md index c9d78aaea21c9..61e0f65650f9d 100644 --- a/tidb-cloud/integrate-tidbcloud-with-cloudflare.md +++ b/tidb-cloud/integrate-tidbcloud-with-cloudflare.md @@ -7,19 +7,20 @@ summary: Learn how to deploy Cloudflare Workers with TiDB Cloud. [Cloudflare Workers](https://workers.cloudflare.com/) is a platform that allows you to run code in response to specific events, such as HTTP requests or changes to a database. Cloudflare Workers is easy to use and can be used to build a variety of applications, including custom APIs, serverless functions, and microservices. It is particularly useful for applications that require low-latency performance or need to scale quickly. 
-You may find it hard to connect to TiDB Cloud from Cloudflare Workers because Cloudflare Workers runs on the V8 engine which cannot make direct TCP connections. You can use [TiDB Cloud serverless driver](/tidb-cloud/serverless-driver.md) to help you connect to Cloudflare Workers over HTTP connection. +You may find it hard to connect to TiDB Cloud from Cloudflare Workers because Cloudflare Workers runs on the V8 engine which cannot make direct TCP connections. You can use [TiDB Cloud serverless driver](/develop/serverless-driver.md) to help you connect to Cloudflare Workers over HTTP connection. This document shows how to connect to Cloudflare Workers with TiDB Cloud serverless driver step by step. > **Note:** > -> TiDB Cloud serverless driver can only be used in TiDB Cloud Serverless. +> TiDB Cloud serverless driver can only be used in {{{ .starter }}} and {{{ .essential }}}. ## Before you begin Before you try the steps in this article, you need to prepare the following things: -- A TiDB Cloud account and a TiDB Cloud Serverless cluster on TiDB Cloud. For more details, see [TiDB Cloud Quick Start](/tidb-cloud/tidb-cloud-quickstart.md#step-1-create-a-tidb-cluster). +- A [TiDB Cloud account](https://tidbcloud.com/signup). +- A {{{ .starter }}} or {{{ .essential }}} cluster. If you do not have one, see [Create a {{{ .starter }}} or Essential Cluster](/tidb-cloud/create-tidb-cluster-serverless.md). - A [Cloudflare Workers account](https://dash.cloudflare.com/login). - [npm](https://docs.npmjs.com/about-npm) is installed. diff --git a/tidb-cloud/integrate-tidbcloud-with-n8n.md b/tidb-cloud/integrate-tidbcloud-with-n8n.md index a45fcd98386d0..c852784ed8c22 100644 --- a/tidb-cloud/integrate-tidbcloud-with-n8n.md +++ b/tidb-cloud/integrate-tidbcloud-with-n8n.md @@ -7,7 +7,11 @@ summary: Learn the use of TiDB Cloud node in n8n. [n8n](https://n8n.io/) is an extendable workflow automation tool. 
With a [fair-code](https://faircode.io/) distribution model, n8n will always have visible source code, be available to self-host, and allow you to add your custom functions, logic, and apps. -This document introduces how to build an auto-workflow: create a TiDB Cloud Serverless cluster, gather Hacker News RSS, store it to TiDB and send a briefing email. +This document introduces how to build an auto-workflow: create a {{{ .starter }}} cluster, gather Hacker News RSS, store it to TiDB and send a briefing email. + +> **Note:** +> +> In addition to {{{ .starter }}} clusters, the steps in this document also work with {{{ .essential }}} clusters. ## Prerequisites: Get TiDB Cloud API key @@ -17,7 +21,7 @@ This document introduces how to build an auto-workflow: create a TiDB Cloud Serv 4. Enter a description for the API key, and then click **Next**. 5. Copy the created API key for later use in n8n, and then click **Done**. -For more information, see [TiDB Cloud API Overview](/tidb-cloud/api-overview.md). +For more information, see [TiDB Cloud API Overview](https://docs.pingcap.com/api/tidb-cloud-api-overview). ## Step 1: Install n8n @@ -76,9 +80,9 @@ The final workflow should look like the following image. ![img](/media/tidb-cloud/integration-n8n-workflow-rss.jpg) -### (Optional) Create a TiDB Cloud Serverless cluster +### (Optional) Create a {{{ .starter }}} cluster -If you don't have a TiDB Cloud Serverless cluster, you can use this node to create one. Otherwise, feel free to skip this operation. +If you don't have a {{{ .starter }}} cluster, you can use this node to create one. Otherwise, feel free to skip this operation. 1. Navigate to **Workflows** panel, and click **Add workflow**. 2. In new workflow workspace, click **+** in the top right corner and choose **All** field. @@ -93,7 +97,7 @@ If you don't have a TiDB Cloud Serverless cluster, you can use this node to crea > **Note:** > -> It takes several seconds to create a new TiDB Cloud Serverless cluster. 
+> It takes several seconds to create a new {{{ .starter }}} cluster. ### Create a workflow @@ -203,7 +207,7 @@ This trigger will execute your workflow every morning at 8 AM. After building up the workflow, you can click **Execute Workflow** to test run it. -If the workflow runs as expected, you'll get Hacker News briefing emails. These news contents will be logged to your TiDB Cloud Serverless cluster, so you don't have to worry about losing them. +If the workflow runs as expected, you'll get Hacker News briefing emails. These news contents will be logged to your {{{ .starter }}} cluster, so you don't have to worry about losing them. Now you can activate this workflow in the **Workflows** panel. This workflow will help you get the front-page articles on Hacker News every day. @@ -213,7 +217,7 @@ Now you can activate this workflow in the **Workflows** panel. This workflow wil TiDB Cloud node acts as a [regular node](https://docs.n8n.io/workflows/nodes/#regular-nodes) and only supports the following five operations: -- **Create Serverless Cluster**: creates a TiDB Cloud Serverless cluster. +- **Create Serverless Cluster**: creates a {{{ .starter }}} cluster. - **Execute SQL**: executes an SQL statement in TiDB. - **Delete**: deletes rows in TiDB. - **Insert**: inserts rows in TiDB. diff --git a/tidb-cloud/integrate-tidbcloud-with-netlify.md b/tidb-cloud/integrate-tidbcloud-with-netlify.md index 87e3a1e752904..ebd8ccfc1b553 100644 --- a/tidb-cloud/integrate-tidbcloud-with-netlify.md +++ b/tidb-cloud/integrate-tidbcloud-with-netlify.md @@ -24,7 +24,7 @@ You are expected to have a Netlify account and CLI. If you do not have any, refe You are expected to have an account and a cluster in TiDB Cloud. 
If you do not have any, refer to the following to create one: -- [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md) +- [Create a {{{ .starter }}} or {{{ .essential }}} cluster](/tidb-cloud/create-tidb-cluster-serverless.md) - [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md) One TiDB Cloud cluster can connect to multiple Netlify sites. @@ -33,7 +33,7 @@ One TiDB Cloud cluster can connect to multiple Netlify sites. For TiDB Cloud Dedicated clusters, make sure that the traffic filter of the cluster allows all IP addresses (set to `0.0.0.0/0`) for connection. This is because Netlify deployments use dynamic IP addresses. -TiDB Cloud Serverless clusters allow all IP addresses for connection by default, so you do not need to configure any traffic filter. +{{{ .starter }}} and {{{ .essential }}} clusters allow all IP addresses for connection by default, so you do not need to configure any traffic filter. ## Step 1. Get the example project and the connection string @@ -52,7 +52,7 @@ To help you get started quickly, TiDB Cloud provides a fullstack example app in ### Get the TiDB Cloud connection string -For a TiDB Cloud Serverless cluster, you can get the connection string either from [TiDB Cloud CLI](/tidb-cloud/cli-reference.md) or from [TiDB Cloud console](https://tidbcloud.com/). +For a {{{ .starter }}} or {{{ .essential }}} cluster, you can get the connection string either from [TiDB Cloud CLI](/tidb-cloud/cli-reference.md) or from [TiDB Cloud console](https://tidbcloud.com/). For a TiDB Cloud Dedicated cluster, you can get the connection string only from the TiDB Cloud console. @@ -226,23 +226,23 @@ For a TiDB Cloud Dedicated cluster, you can get the connection string only from ## Use the edge function -The example app mentioned in the section above runs on the Netlify serverless function. 
This section shows you how to use the edge function with [TiDB Cloud serverless driver](/tidb-cloud/serverless-driver.md). The edge function is a feature provided by Netlify, which allows you to run serverless functions on the edge of the Netlify CDN. +The example app mentioned in the section above runs on the Netlify serverless function. This section shows you how to use the edge function with [TiDB Cloud serverless driver](/develop/serverless-driver.md). The edge function is a feature provided by Netlify, which allows you to run serverless functions on the edge of the Netlify CDN. To use the edge function, take the following steps: -1. Create a directory named `netlify/edge-functions` in the root directory of your project. +1. Create a directory named `netlify/edge-functions` in the root directory of your project. 2. Create a file named `hello.ts` in the directory and add the following code: ```typescript import { connect } from 'https://esm.sh/@tidbcloud/serverless' - + export default async () => { const conn = connect({url: Netlify.env.get('DATABASE_URL')}) const result = await conn.execute('show databases') return new Response(JSON.stringify(result)); } - + export const config = { path: "/api/hello" }; ``` diff --git a/tidb-cloud/integrate-tidbcloud-with-vercel.md b/tidb-cloud/integrate-tidbcloud-with-vercel.md index 71ca03bd86903..e9500016a5ef5 100644 --- a/tidb-cloud/integrate-tidbcloud-with-vercel.md +++ b/tidb-cloud/integrate-tidbcloud-with-vercel.md @@ -18,7 +18,7 @@ This guide describes how to connect your TiDB Cloud clusters to Vercel projects For both of the preceding methods, TiDB Cloud provides the following options for programmatically connecting to your database: -- Cluster: connect your TiDB Cloud cluster to your Vercel project with direct connections or [serverless driver](/tidb-cloud/serverless-driver.md). 
+- Cluster: connect your TiDB Cloud cluster to your Vercel project with direct connections or [serverless driver](/develop/serverless-driver.md). - [Data App](/tidb-cloud/data-service-manage-data-app.md): access data of your TiDB Cloud cluster through a collection of HTTP endpoints. ## Prerequisites @@ -38,11 +38,11 @@ One Vercel project can only connect to one TiDB Cloud cluster. To change the int You are expected to have an account and a cluster in TiDB Cloud. If you do not have any, refer to the following to create one: -- [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md) +- [Create a {{{ .starter }}} or {{{ .essential }}} cluster](/tidb-cloud/create-tidb-cluster-serverless.md) > **Note:** > - > The TiDB Cloud Vercel integration supports creating TiDB Cloud Serverless clusters. You can also create one later during the integration process. + > The TiDB Cloud Vercel integration supports creating {{{ .starter }}} and {{{ .essential }}} clusters. You can also create one later during the integration process. - [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md) @@ -71,7 +71,7 @@ To connect via the TiDB Cloud Vercel integration, go to the [TiDB Cloud integrat > **Note:** > -> This method is only available for TiDB Cloud Serverless clusters. If you want to connect to a TiDB Cloud Dedicated cluster, use the [manual method](#connect-via-manually-setting-environment-variables). +> This method is only available for {{{ .starter }}} and {{{ .essential }}} clusters. If you want to connect to a TiDB Cloud Dedicated cluster, use the [manual method](#connect-via-manually-setting-environment-variables). ### Integration workflow @@ -89,7 +89,7 @@ The detailed steps are as follows: 1. Select your target Vercel projects and click **Next**. 2. Select your target TiDB Cloud organization and project. 3. Select **Cluster** as your connection type. - 4. Select your target TiDB Cloud cluster. 
If the **Cluster** drop-down list is empty or you want to select a new TiDB Cloud Serverless cluster, click **+ Create Cluster** in the list to create one. + 4. Select your target TiDB Cloud cluster. If the **Cluster** drop-down list is empty or you want to select a new {{{ .starter }}} or {{{ .essential }}} cluster, click **+ Create Cluster** in the list to create one. 5. Select the database that you want to connect to. If the **Database** drop-down list is empty or you want to select a new Database, click **+ Create Database** in the list to create one. 6. Select the framework that your Vercel projects are using. If the target framework is not listed, select **General**. Different frameworks determine different environment variables. 7. Choose whether to enable **Branching** to create new branches for preview environments. @@ -165,22 +165,22 @@ If you have installed [TiDB Cloud Vercel integration](https://vercel.com/integra ![Vercel Integration Configuration Page](/media/tidb-cloud/vercel/integration-vercel-configuration-page.png) - When you remove a connection, the environment variables set by the integration workflow are removed from the Vercel project, too. However, this action does not affect the data of the TiDB Cloud Serverless cluster. + When you remove a connection, the environment variables set by the integration workflow are removed from the Vercel project, too. However, this action does not affect the data of your TiDB Cloud cluster. -### Connect with TiDB Cloud Serverless branching +### Connect with TiDB Cloud branching {#connect-with-branching} -Vercel's [Preview Deployments](https://vercel.com/docs/deployments/preview-deployments) feature allows you to preview changes to your app in a live deployment without merging those changes to your Git project's production branch. With [TiDB Cloud Serverless Branching](/tidb-cloud/branch-overview.md), you can create a new instance for each branch of your Vercel project. 
This allows you to preview app changes in a live deployment without affecting your production data. +Vercel's [Preview Deployments](https://vercel.com/docs/deployments/preview-deployments) feature allows you to preview changes to your app in a live deployment without merging those changes to your Git project's production branch. With [TiDB Cloud Branching](/tidb-cloud/branch-overview.md), you can create a new instance for each branch of your Vercel project. This allows you to preview app changes in a live deployment without affecting your production data. > **Note:** > -> Currently, TiDB Cloud Serverless branching only supports [Vercel projects associated with GitHub repositories](https://vercel.com/docs/deployments/git/vercel-for-github). +> Currently, TiDB Cloud Branching only supports [Vercel projects associated with GitHub repositories](https://vercel.com/docs/deployments/git/vercel-for-github). -To enable TiDB Cloud Serverless Branching, you need to ensure the following in the [TiDB Cloud Vercel integration workflow](#integration-workflow): +To enable TiDB Cloud Branching, you need to ensure the following in the [TiDB Cloud Vercel integration workflow](#integration-workflow): 1. Select **Cluster** as your connection type. 2. Enable **Branching** to create new branches for preview environments. -After you push changes to the Git repository, Vercel will trigger a preview deployment. TiDB Cloud integration will automatically create a TiDB Cloud Serverless branch for the Git branch and set environment variables. The detailed steps are as follows: +After you push changes to the Git repository, Vercel will trigger a preview deployment. TiDB Cloud integration will automatically create a branch of your TiDB Cloud cluster for the Git branch and set environment variables. The detailed steps are as follows: 1. Create a new branch in your Git repository. 
@@ -194,15 +194,15 @@ After you push changes to the Git repository, Vercel will trigger a preview depl ![Vercel Preview_Deployment](/media/tidb-cloud/vercel/vercel-preview-deployment.png) - 1. During the deployment, TiDB Cloud integration will automatically create a TiDB Cloud Serverless branch with the same name as the Git branch. If the TiDB Cloud Serverless branch already exists, TiDB Cloud integration will skip this step. + 1. During the deployment, TiDB Cloud integration will automatically create a branch for your cluster with the same name as the Git branch. If the branch already exists, TiDB Cloud integration will skip this step. ![TiDB_Cloud_Branch_Check](/media/tidb-cloud/vercel/tidbcloud-branch-check.png) - 2. After the TiDB Cloud Serverless branch is ready, TiDB Cloud integration will set environment variables in the preview deployment for the Vercel project. + 2. After the branch is ready, TiDB Cloud integration will set environment variables in the preview deployment for the Vercel project. ![Preview_Envs](/media/tidb-cloud/vercel/preview-envs.png) - 3. TiDB Cloud integration will also register a blocking check to wait for the TiDB Cloud Serverless branch to be ready. You can rerun the check manually. + 3. TiDB Cloud integration will also register a blocking check to wait for the branch to be ready. You can rerun the check manually. 4. After the check is passed, you can visit the preview deployment to see the changes. > **Note:** @@ -211,7 +211,7 @@ After you push changes to the Git repository, Vercel will trigger a preview depl > **Note:** > -> For each organization in TiDB Cloud, you can create a maximum of five TiDB Cloud Serverless branches by default. To avoid exceeding the limit, you can delete the TiDB Cloud Serverless branches that are no longer needed. For more information, see [Manage TiDB Cloud Serverless branches](/tidb-cloud/branch-manage.md). 
+> For each organization in TiDB Cloud, you can create a maximum of five branches for {{{ .starter }}} clusters by default. To avoid exceeding the limit, you can delete the branches of your cluster that are no longer needed. For more information, see [Manage TiDB Cloud branches](/tidb-cloud/branch-manage.md). ## Connect via manually setting environment variables @@ -226,7 +226,7 @@ After you push changes to the Git repository, Vercel will trigger a preview depl ![Vercel Environment Variables](/media/tidb-cloud/vercel/integration-vercel-environment-variables.png) -Here we use a Prisma application as an example. The following is a datasource setting in the Prisma schema file for a TiDB Cloud Serverless cluster: +Here we use a Prisma application and a {{{ .starter }}} cluster as an example. The following is a datasource setting in the Prisma schema file for a {{{ .starter }}} cluster: ``` datasource db { diff --git a/tidb-cloud/integrate-tidbcloud-with-zapier.md b/tidb-cloud/integrate-tidbcloud-with-zapier.md index b31a1b17d0e94..14c1a48c52032 100644 --- a/tidb-cloud/integrate-tidbcloud-with-zapier.md +++ b/tidb-cloud/integrate-tidbcloud-with-zapier.md @@ -27,7 +27,7 @@ Before you start, you need: - A [Zapier account](https://zapier.com/app/login). - A [GitHub account](https://github.com/login). -- A [TiDB Cloud account](https://tidbcloud.com/signup) and a TiDB Cloud Serverless cluster on TiDB Cloud. For more details, see [TiDB Cloud Quick Start](https://docs.pingcap.com/tidbcloud/tidb-cloud-quickstart#step-1-create-a-tidb-cluster). +- A [TiDB Cloud account](https://tidbcloud.com/signup) and a {{{ .starter }}} cluster on TiDB Cloud. For more details, see [TiDB Cloud Quick Start](https://docs.pingcap.com/tidbcloud/tidb-cloud-quickstart#step-1-create-a-tidb-cluster). ### Step 1: Get the template @@ -160,16 +160,16 @@ The following table lists the actions supported by TiDB Cloud App. 
Note that som
 
 | Action | Description | Resource |
 |---|---|---|
-| Find Cluster | Finds an existing TiDB Cloud Serverless or TiDB Cloud Dedicated cluster. | None |
-| Create Cluster | Creates a new cluster. Only supports creating a TiDB Cloud Serverless cluster. | None |
-| Find Database | Finds an existing database. | A TiDB Cloud Serverless cluster |
-| Create Database | Creates a new database. | A TiDB Cloud Serverless cluster |
-| Find Table | Finds an existing Table. | A TiDB Cloud Serverless cluster and a database |
-| Create Table | Creates a new table. | A TiDB Cloud Serverless cluster and a database |
-| Create Row | Creates a new row. | A TiDB Cloud Serverless cluster, a database, and a table |
-| Update Row | Updates an existing row. | A TiDB Cloud Serverless cluster, a database, and a table |
-| Find Row | Finds a row in a table via a lookup column. | A TiDB Cloud Serverless cluster, a database, and a table |
-| Find Row (Custom Query) | Finds a row in a table via a custom query the you provide. | A TiDB Cloud Serverless cluster, a database, and a table |
+| Find Cluster | Finds an existing {{{ .starter }}} or TiDB Cloud Dedicated cluster. | None |
+| Create Cluster | Creates a new cluster. Only supports creating a {{{ .starter }}} cluster. | None |
+| Find Database | Finds an existing database. | A {{{ .starter }}} cluster |
+| Create Database | Creates a new database. | A {{{ .starter }}} cluster |
+| Find Table | Finds an existing Table. | A {{{ .starter }}} cluster and a database |
+| Create Table | Creates a new table. | A {{{ .starter }}} cluster and a database |
+| Create Row | Creates a new row. | A {{{ .starter }}} cluster, a database, and a table |
+| Update Row | Updates an existing row. | A {{{ .starter }}} cluster, a database, and a table |
+| Find Row | Finds a row in a table via a lookup column. | A {{{ .starter }}} cluster, a database, and a table |
+| Find Row (Custom Query) | Finds a row in a table via a custom query that you provide. 
| A {{{ .starter }}} cluster, a database, and a table | ## TiDB Cloud App templates diff --git a/tidb-cloud/key-concepts.md b/tidb-cloud/key-concepts.md index 436cb5b068e6f..d4f59f32ab76d 100644 --- a/tidb-cloud/key-concepts.md +++ b/tidb-cloud/key-concepts.md @@ -37,10 +37,10 @@ TiDB Cloud Dedicated lets you adjust its compute and storage resources separatel ## High availability -TiDB Cloud ensures high availability in both TiDB Cloud Serverless and TiDB Cloud Dedicated clusters: +TiDB Cloud ensures high availability in all supported plans: -- [High Availability in TiDB Cloud Serverless](/tidb-cloud/serverless-high-availability.md) -- [High Availability in TiDB Cloud Dedicated](/tidb-cloud/high-availability-with-multi-az.md) +- For {{{ .starter }}} and {{{ .essential }}} {{{ .starter }}}, {{{ .essential }}}, and {{{ .premium }}}, see [High Availability in TiDB Cloud](/tidb-cloud/serverless-high-availability.md). +- For TiDB Cloud Dedicated, see [High Availability in TiDB Cloud Dedicated](/tidb-cloud/high-availability-with-multi-az.md). 
## Monitoring diff --git a/tidb-cloud/limitations-and-quotas.md b/tidb-cloud/limitations-and-quotas.md index 04b52ccaff92b..65ed630dc61bb 100644 --- a/tidb-cloud/limitations-and-quotas.md +++ b/tidb-cloud/limitations-and-quotas.md @@ -29,6 +29,7 @@ TiDB Cloud limits how many of each kind of component you can create in a [TiDB C | Maximum number of total TiDB nodes for all clusters in your organization | 10 | | Maximum number of total TiKV nodes for all clusters in your organization | 15 | | Maximum number of total TiFlash nodes for all clusters in your organization | 5 | +| Maximum number of total TiProxy nodes for all clusters in your organization | 10 | > **Note:** > diff --git a/tidb-cloud/limited-sql-features.md b/tidb-cloud/limited-sql-features.md index 2db025050dcdb..efa8101580cb6 100644 --- a/tidb-cloud/limited-sql-features.md +++ b/tidb-cloud/limited-sql-features.md @@ -5,13 +5,13 @@ summary: Learn about the limited SQL features on TiDB Cloud. # Limited SQL features on TiDB Cloud -TiDB Cloud works with almost all workloads that TiDB supports, but there are some feature differences between TiDB Self-Managed and TiDB Cloud Dedicated/Serverless. This document describes the limitations of SQL features on TiDB Cloud. We are constantly filling in the feature gaps between TiDB Self-Managed and TiDB Cloud Dedicated/Serverless. If you require these features or capabilities in the gap, [contact us](/tidb-cloud/tidb-cloud-support.md) for a feature request. +TiDB Cloud works with almost all workloads that TiDB supports, but there are some feature differences between TiDB Self-Managed and TiDB Cloud. This document describes the limitations of SQL features on TiDB Cloud. We are constantly filling in the feature gaps between TiDB Self-Managed and TiDB Cloud. If you require these features or capabilities in the gap, [contact us](/tidb-cloud/tidb-cloud-support.md) for a feature request. 
## Statements ### Placement and range management -| Statement | TiDB Cloud Dedicated | TiDB Cloud Serverless | +| Statement | TiDB Cloud Dedicated | {{{ .starter }}} and {{{ .essential }}} | |:-|:-|:-| | `ALTER PLACEMENT POLICY` | Supported | Not supported [^1] | | `CREATE PLACEMENT POLICY` | Supported | Not supported [^1] | @@ -25,7 +25,7 @@ TiDB Cloud works with almost all workloads that TiDB supports, but there are som ### Resource groups -| Statement | TiDB Cloud Dedicated | TiDB Cloud Serverless | +| Statement | TiDB Cloud Dedicated | {{{ .starter }}} and {{{ .essential }}} | |:-|:-|:-| | `ALTER RESOURCE GROUP` | Supported | Not supported [^2] | | `CALIBRATE RESOURCE` | Not supported | Not supported [^2] | @@ -36,18 +36,18 @@ TiDB Cloud works with almost all workloads that TiDB supports, but there are som ### Others -| Statement | TiDB Cloud Dedicated | TiDB Cloud Serverless | +| Statement | TiDB Cloud Dedicated | {{{ .starter }}} and {{{ .essential }}} | |:-|:-|:-| | `BACKUP` | Supported | Not supported [^3] | | `SHOW BACKUPS` | Supported | Not supported [^3] | | `RESTORE` | Supported | Not supported [^3] | | `SHOW RESTORES` | Supported | Not supported [^3] | -| `ADMIN RESET TELEMETRY_ID` | Supported | Telemetry is not supported on TiDB Cloud Serverless. | +| `ADMIN RESET TELEMETRY_ID` | Supported | Telemetry is not supported on {{{ .starter }}} or {{{ .essential }}}. | | `ADMIN SHOW TELEMETRY` | Not supported [^4] | Not supported [^4] | | `ADMIN SHOW SLOW` | Supported | Not supported [^5] | | `ADMIN PLUGINS ENABLE` | Supported | Not supported [^8] | | `ADMIN PLUGINS DISABLE` | Supported | Not supported [^8] | -| `ALTER INSTANCE RELOAD TLS` | Supported | TiDB Cloud Serverless automatically refreshes the TLS certificate. | +| `ALTER INSTANCE RELOAD TLS` | Supported | {{{ .starter }}} and {{{ .essential }}} automatically refresh the TLS certificate. 
| | `LOAD DATA INFILE` | Supports `LOAD DATA LOCAL INFILE`, and `LOAD DATA INFILE` from Amazon S3 or Google Cloud Storage | Only supports `LOAD DATA LOCAL INFILE` | | `CHANGE DRAINER` | Not supported [^7] | Not supported [^7] | | `CHANGE PUMP` | Not supported [^7] | Not supported [^7] | @@ -64,13 +64,13 @@ TiDB Cloud works with almost all workloads that TiDB supports, but there are som ## Functions and operators -| Function and operator | TiDB Cloud Dedicated | TiDB Cloud Serverless | +| Function and operator | TiDB Cloud Dedicated | {{{ .starter }}} and {{{ .essential }}} | |:-|:-|:-| | `SLEEP` | No Limitation | The [`SLEEP()` function](https://docs.pingcap.com/tidbcloud/miscellaneous-functions) has a limitation wherein it can only support a maximum sleep time of 300 seconds.| ## System tables -| Database | Table | TiDB Cloud Dedicated | TiDB Cloud Serverless | +| Database | Table | TiDB Cloud Dedicated | {{{ .starter }}} and {{{ .essential }}} | |:-|:-|:-|:-| | `information_schema` | `ATTRIBUTES` | Supported | Not supported [^1] | | `information_schema` | `CLUSTER_CONFIG` | Not supported [^4] | Not supported [^4] | @@ -94,7 +94,6 @@ TiDB Cloud works with almost all workloads that TiDB supports, but there are som | `information_schema` | `SLOW_QUERY` | Supported | Not supported [^5] | | `information_schema` | `STATEMENTS_SUMMARY` | Supported | Not supported [^6] | | `information_schema` | `STATEMENTS_SUMMARY_EVICTED` | Supported | Not supported [^6] | -| `information_schema` | `STATEMENTS_SUMMARY_HISTORY` | Supported | Not supported [^6] | | `information_schema` | `TIDB_HOT_REGIONS` | Not supported [^4] | Not supported [^4] | | `information_schema` | `TIDB_HOT_REGIONS_HISTORY` | Supported | Not supported [^1] | | `information_schema` | `TIDB_SERVERS_INFO` | Supported | Not supported [^1] | @@ -122,7 +121,7 @@ TiDB Cloud works with almost all workloads that TiDB supports, but there are som ## System variables -| Variable | TiDB Cloud Dedicated | TiDB Cloud 
Serverless | +| Variable | TiDB Cloud Dedicated | {{{ .starter }}} and {{{ .essential }}} | |:-|:-|:-| | `datadir` | No limitation | Not supported [^1] | | `interactive_timeout` | No limitation | Read-only [^10] | @@ -132,6 +131,7 @@ TiDB Cloud works with almost all workloads that TiDB supports, but there are som | `require_secure_transport` | Not supported [^12] | Read-only [^10] | | `skip_name_resolve` | No limitation | Read-only [^10] | | `sql_log_bin` | No limitation | Read-only [^10] | +| `tidb_analyze_skip_column_types` | No limitation | Read-only [^10] | | `tidb_cdc_write_source` | No limitation | Read-only [^10] | | `tidb_check_mb4_value_in_utf8` | Not supported [^4] | Not supported [^4] | | `tidb_config` | Not supported [^4] | Not supported [^4] | @@ -142,6 +142,7 @@ TiDB Cloud works with almost all workloads that TiDB supports, but there are som | `tidb_ddl_reorg_batch_size` | No limitation | Read-only [^10] | | `tidb_ddl_reorg_priority` | No limitation | Read-only [^10] | | `tidb_ddl_reorg_worker_cnt` | No limitation | Read-only [^10] | +| `tidb_dml_type` | No limitation | Read-only [^10] | | `tidb_enable_1pc` | No limitation | Read-only [^10] | | `tidb_enable_async_commit` | No limitation | Read-only [^10] | | `tidb_enable_auto_analyze` | No limitation | Read-only [^10] | @@ -209,6 +210,7 @@ TiDB Cloud works with almost all workloads that TiDB supports, but there are som | `tidb_txn_mode` | No limitation | Read-only [^10] | | `tidb_wait_split_region_finish` | No limitation | Read-only [^10] | | `tidb_wait_split_region_timeout` | No limitation | Read-only [^10] | +| `txn_scope` | No limitation | Read-only [^10] | | `validate_password.enable` | No limitation | Always enabled [^9] | | `validate_password.length` | No limitation | At least `8` [^9] | | `validate_password.mixed_case_count` | No limitation | At least `1` [^9] | @@ -217,26 +219,26 @@ TiDB Cloud works with almost all workloads that TiDB supports, but there are som | 
`validate_password.special_char_count` | No limitation | At least `1` [^9] | | `wait_timeout` | No limitation | Read-only [^10] | -[^1]: Configuring data placement is not supported on TiDB Cloud Serverless. +[^1]: Configuring data placement is not supported on {{{ .starter }}} or {{{ .essential }}}. -[^2]: Configuring resource groups is not supported on TiDB Cloud Serverless. +[^2]: Configuring resource groups is not supported on {{{ .starter }}} or {{{ .essential }}}. -[^3]: To perform [Back up and Restore](/tidb-cloud/backup-and-restore-serverless.md) operations on TiDB Cloud Serverless, you can use the TiDB Cloud console instead. +[^3]: To perform [Back up and Restore](/tidb-cloud/backup-and-restore-serverless.md) operations on {{{ .starter }}} or {{{ .essential }}}, you can use the TiDB Cloud console instead. [^4]: The feature is unavailable in [Security Enhanced Mode (SEM)](/system-variables.md#tidb_enable_enhanced_security). -[^5]: To track [Slow Query](/tidb-cloud/tune-performance.md#slow-query) on TiDB Cloud Serverless, you can use the TiDB Cloud console instead. +[^5]: To track [Slow Query](/tidb-cloud/tune-performance.md#slow-query) on {{{ .starter }}} or {{{ .essential }}}, you can use the TiDB Cloud console instead. -[^6]: To perform [Statement Analysis](/tidb-cloud/tune-performance.md#statement-analysis) on TiDB Cloud Serverless, you can use the TiDB Cloud console instead. +[^6]: To perform [Statement Analysis](/tidb-cloud/tune-performance.md#statement-analysis) on {{{ .starter }}} or {{{ .essential }}}, you can use the TiDB Cloud console instead. [^7]: Drainer and Pump are not supported on TiDB Cloud. -[^8]: Plugin is not supported on TiDB Cloud Serverless. +[^8]: Plugin is not supported on {{{ .starter }}} or {{{ .essential }}}. -[^9]: TiDB Cloud Serverless enforces strong password policy. +[^9]: {{{ .starter }}} and {{{ .essential }}} enforce strong password policy. -[^10]: The variable is read-only on TiDB Cloud Serverless. 
+[^10]: The variable is read-only on {{{ .starter }}} and {{{ .essential }}}. -[^11]: TiDB Cloud Serverless does not support downloading the file exported by `PLAN REPLAYER` through `${tidb-server-status-port}` as in the [example](https://docs.pingcap.com/tidb/stable/sql-plan-replayer#examples-of-exporting-cluster-information). Instead, TiDB Cloud Serverless generates a [presigned URL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) for you to download the file. Note that this URL remains valid for 10 hours after generation. +[^11]: {{{ .starter }}} and {{{ .essential }}} do not support downloading the file exported by `PLAN REPLAYER` through `${tidb-server-status-port}` as in the [example](https://docs.pingcap.com/tidb/stable/sql-plan-replayer#examples-of-exporting-cluster-information). Instead, {{{ .starter }}} and {{{ .essential }}} generate a [presigned URL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) for you to download the file. Note that this URL remains valid for 10 hours after generation. [^12]: Not supported. Enabling `require_secure_transport` for TiDB Cloud Dedicated clusters will result in SQL client connection failures. diff --git a/tidb-cloud/manage-serverless-spend-limit.md b/tidb-cloud/manage-serverless-spend-limit.md index 29bf41b859c48..c32478969cf48 100644 --- a/tidb-cloud/manage-serverless-spend-limit.md +++ b/tidb-cloud/manage-serverless-spend-limit.md @@ -1,37 +1,37 @@ --- -title: Manage Spending Limit for TiDB Cloud Serverless Scalable Clusters -summary: Learn how to manage spending limit for your TiDB Cloud Serverless scalable clusters. +title: Manage Spending Limit for {{{ .starter }}} Clusters +summary: Learn how to manage spending limit for your {{{ .starter }}} clusters. 
--- -# Manage Spending Limit for TiDB Cloud Serverless Scalable Clusters +# Manage Spending Limit for {{{ .starter }}} Clusters > **Note:** > -> The spending limit is only applicable to TiDB Cloud Serverless [scalable clusters](/tidb-cloud/select-cluster-tier.md#scalable-cluster-plan). +> The spending limit is only applicable to {{{ .starter }}} clusters. -Spending limit refers to the maximum amount of money that you are willing to spend on a particular workload in a month. It is a cost-control mechanism that allows you to set a budget for your TiDB Cloud Serverless scalable clusters. +Spending limit refers to the maximum amount of money that you are willing to spend on a particular workload in a month. It is a cost-control mechanism that allows you to set a budget for your {{{ .starter }}} clusters. -For each organization in TiDB Cloud, you can create a maximum of five [free clusters](/tidb-cloud/select-cluster-tier.md#free-cluster-plan) by default. To create more TiDB Cloud Serverless clusters, you need to add a credit card and create scalable clusters for the usage. But if you delete some of your previous clusters before creating more, the new cluster can still be created without a credit card. +For each organization in TiDB Cloud, you can create a maximum of five [free {{{ .starter }}} clusters](/tidb-cloud/select-cluster-tier.md#starter) by default. To create more {{{ .starter }}} clusters, you need to add a credit card and set a monthly spending limit for the usage. But if you delete some of your previous clusters before creating more, the new cluster can still be created without a credit card. 
## Usage quota -For the first five TiDB Cloud Serverless clusters in your organization, whether they are free or scalable, TiDB Cloud provides a free usage quota for each of them as follows: +For the first five {{{ .starter }}} clusters in your organization, whether they are free or scalable, TiDB Cloud provides a free usage quota for each of them as follows: - Row-based storage: 5 GiB - Columnar storage: 5 GiB -- [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit): 50 million RUs per month +- [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru): 50 million RUs per month Once a cluster reaches its usage quota, it immediately denies any new connection attempts until you [increase the quota](#update-spending-limit) or the usage is reset upon the start of a new month. Existing connections established before reaching the quota will remain active but will experience throttling. For example, when the row-based storage of a cluster exceeds 5 GiB for a free cluster, the cluster automatically restricts any new connection attempts. -To learn more about the RU consumption of different resources (including read, write, SQL CPU, and network egress), the pricing details, and the throttled information, see [TiDB Cloud Serverless Pricing Details](https://www.pingcap.com/tidb-cloud-serverless-pricing-details). +To learn more about the RU consumption of different resources (including read, write, SQL CPU, and network egress), the pricing details, and the throttled information, see [{{{ .starter }}} Pricing Details](https://www.pingcap.com/tidb-cloud-starter-pricing-details/). -If you want to create a TiDB Cloud Serverless cluster with an additional quota, you can edit the spending limit on the cluster creation page. For more information, see [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md). 
+If you want to create a {{{ .starter }}} cluster with an additional quota, you can edit the spending limit on the cluster creation page. For more information, see [Create a {{{ .starter }}} cluster](/tidb-cloud/create-tidb-cluster-serverless.md). ## Update spending limit -For a TiDB Cloud Serverless free cluster, you can increase the usage quota by upgrading it to a scalable cluster. For an existing scalable cluster, you can adjust the monthly spending limit directly. +For a {{{ .starter }}} free cluster, you can increase the usage quota by setting a monthly spending limit when creating the cluster. For an existing cluster, you can adjust the monthly spending limit directly. -To update the spending limit for a TiDB Cloud Serverless cluster, perform the following steps: +To update the spending limit for a {{{ .starter }}} cluster, perform the following steps: 1. On the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, click the name of your target cluster to go to its overview page. @@ -39,9 +39,9 @@ To update the spending limit for a TiDB Cloud Serverless cluster, perform the fo > > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. -2. In the **Usage This Month** area, click **Upgrade to Scalable Cluster**. +2. In the **Capacity used this month** area, click **Set Spending Limit**. - To adjust the spending limit for an existing scalable cluster, click **Edit**. + If you have set the spending limit previously and want to update it, click **Edit**. 3. Edit the monthly spending limit as needed. If you have not added a payment method, you will need to add a credit card after editing the limit. -4. Click **Update Cluster Plan**. +4. Click **Update Spending Limit**. 
diff --git a/tidb-cloud/manage-user-access.md b/tidb-cloud/manage-user-access.md index 4a821ac015987..3ca4d6026e33c 100644 --- a/tidb-cloud/manage-user-access.md +++ b/tidb-cloud/manage-user-access.md @@ -103,10 +103,9 @@ At the project level, TiDB Cloud defines three roles, in which `Project Owner` c | Manage project settings | ✅ | ❌ | ❌ | ❌ | | Invite users to or remove users from a project, and edit project roles of users. | ✅ | ❌ | ❌ | ❌ | | Manage [database audit logging](/tidb-cloud/tidb-cloud-auditing.md) of the project. | ✅ | ❌ | ❌ | ❌ | -| Manage [spending limit](/tidb-cloud/manage-serverless-spend-limit.md) for all TiDB Cloud Serverless clusters in the project. | ✅ | ❌ | ❌ | ❌ | +| Manage [spending limit](/tidb-cloud/manage-serverless-spend-limit.md) for all {{{ .starter }}} clusters in the project. | ✅ | ❌ | ❌ | ❌ | | Manage cluster operations in the project, such as cluster creation, modification, and deletion. | ✅ | ❌ | ❌ | ❌ | -| Manage branches for TiDB Cloud Serverless clusters in the project, such as branch creation, connection, and deletion. | ✅ | ❌ | ❌ | ❌ | -| Manage [recovery groups](/tidb-cloud/recovery-group-overview.md) for TiDB Cloud Dedicated clusters in the project, such as recovery group creation and deletion. | ✅ | ❌ | ❌ | ❌ | +| Manage branches for {{{ .starter }}} and {{{ .essential }}} clusters in the project, such as branch creation, connection, and deletion. | ✅ | ❌ | ❌ | ❌ | | Manage cluster data such as data import, data backup and restore, and data migration. | ✅ | ✅ | ❌ | ❌ | | Manage [Data Service](/tidb-cloud/data-service-overview.md) for data read-only operations such as using or creating endpoints to read data. | ✅ | ✅ | ✅ | ❌ | | Manage [Data Service](/tidb-cloud/data-service-overview.md) for data read and write operations. 
| ✅ | ✅ | ❌ | ❌ | diff --git a/tidb-cloud/migrate-from-mysql-using-aws-dms.md b/tidb-cloud/migrate-from-mysql-using-aws-dms.md index 16a555e70a1a3..8d2a859210e7a 100644 --- a/tidb-cloud/migrate-from-mysql-using-aws-dms.md +++ b/tidb-cloud/migrate-from-mysql-using-aws-dms.md @@ -181,8 +181,8 @@ If you encounter any issues or failures during the migration, you can check the ## See also -- If you want to learn more about how to connect AWS DMS to TiDB Cloud Serverless or TiDB Cloud Dedicated, see [Connect AWS DMS to TiDB Cloud clusters](/tidb-cloud/tidb-cloud-connect-aws-dms.md). +- If you want to learn more about how to connect AWS DMS to your TiDB Cloud cluster, see [Connect AWS DMS to TiDB Cloud clusters](/tidb-cloud/tidb-cloud-connect-aws-dms.md). - If you want to migrate from MySQL-compatible databases, such as Aurora MySQL and Amazon Relational Database Service (RDS), to TiDB Cloud, it is recommended to use [Data Migration on TiDB Cloud](/tidb-cloud/migrate-from-mysql-using-data-migration.md). -- If you want to migrate from Amazon RDS for Oracle to TiDB Cloud Serverless Using AWS DMS, see [Migrate from Amazon RDS for Oracle to TiDB Cloud Serverless Using AWS DMS](/tidb-cloud/migrate-from-oracle-using-aws-dms.md). +- If you want to migrate from Amazon RDS for Oracle to TiDB Cloud using AWS DMS, see [Migrate from Amazon RDS for Oracle to TiDB Cloud using AWS DMS](/tidb-cloud/migrate-from-oracle-using-aws-dms.md). 
diff --git a/tidb-cloud/migrate-from-mysql-using-data-migration.md b/tidb-cloud/migrate-from-mysql-using-data-migration.md index 7f119768cbd85..d708402cb6502 100644 --- a/tidb-cloud/migrate-from-mysql-using-data-migration.md +++ b/tidb-cloud/migrate-from-mysql-using-data-migration.md @@ -6,7 +6,15 @@ aliases: ['/tidbcloud/migrate-data-into-tidb','/tidbcloud/migrate-incremental-da # Migrate MySQL-Compatible Databases to TiDB Cloud Using Data Migration -This document guides you through migrating your MySQL databases from Amazon Aurora MySQL, Amazon RDS, Azure Database for MySQL - Flexible Server, Google Cloud SQL for MySQL, or self-managed MySQL instances to TiDB Cloud using the Data Migration feature in the [TiDB Cloud console](https://tidbcloud.com/). +This document guides you through migrating your MySQL databases from Amazon Aurora MySQL, Amazon RDS, Azure Database for MySQL - Flexible Server, Google Cloud SQL for MySQL, or self-managed MySQL instances to {{{ .dedicated }}}{{{ .essential }}} using the Data Migration feature in the [TiDB Cloud console](https://tidbcloud.com/). + + + +> **Note:** +> +> Currently, the Data Migration feature is in beta for {{{ .essential }}}. + + This feature enables you to migrate your existing MySQL data and continuously replicate ongoing changes (binlog) from your MySQL-compatible source databases directly to TiDB Cloud, maintaining data consistency whether in the same region or across different regions. The streamlined process eliminates the need for separate dump and load operations, reducing downtime and simplifying your migration from MySQL to a more scalable platform. @@ -16,38 +24,82 @@ If you only want to replicate ongoing binlog changes from your MySQL-compatible ### Availability -- The Data Migration feature is available only for **TiDB Cloud Dedicated** clusters. +- Currently, the Data Migration feature is not available for {{{ .starter }}}. 
+ + + +- If you don't see the [Data Migration](/tidb-cloud/migrate-from-mysql-using-data-migration.md#step-1-go-to-the-data-migration-page) entry for your {{{ .dedicated }}} cluster in the [TiDB Cloud console](https://tidbcloud.com/), the feature might not be available in your region. To request support for your region, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). -- If you don't see the [Data Migration](/tidb-cloud/migrate-from-mysql-using-data-migration.md#step-1-go-to-the-data-migration-page) entry for your TiDB Cloud Dedicated cluster in the [TiDB Cloud console](https://tidbcloud.com/), the feature might not be available in your region. To request support for your region, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). + - Amazon Aurora MySQL writer instances support both existing data and incremental data migration. Amazon Aurora MySQL reader instances only support existing data migration and do not support incremental data migration. ### Maximum number of migration jobs -You can create up to 200 migration jobs for each organization. To create more migration jobs, you need to [file a support ticket](/tidb-cloud/tidb-cloud-support.md). + + +You can create up to 200 migration jobs on {{{ .dedicated }}} clusters for each organization. To create more migration jobs, you need to [file a support ticket](/tidb-cloud/tidb-cloud-support.md). + + + + +You can create up to 100 migration jobs on {{{ .essential }}} clusters for each organization. To create more migration jobs, you need to [file a support ticket](/tidb-cloud/tidb-cloud-support.md). + + ### Filtered out and deleted databases - The system databases will be filtered out and not migrated to TiDB Cloud even if you select all of the databases to migrate. That is, `mysql`, `information_schema`, `performance_schema`, and `sys` will not be migrated using this feature. 
+ + - When you delete a cluster in TiDB Cloud, all migration jobs in that cluster are automatically deleted and not recoverable. + + + + +### Limitations of Alibaba Cloud RDS + +When using Alibaba Cloud RDS as a data source, every table must have an explicit primary key. For tables without one, RDS appends a hidden primary key to the binlog, which leads to a schema mismatch with the source table and causes the migration to fail. + +### Limitations of Alibaba Cloud PolarDB-X + +During full data migration, PolarDB-X schemas might contain keywords that are incompatible with the downstream database, causing the import to fail. + +To prevent this, create the target tables in the downstream database before starting the migration process. + + + ### Limitations of existing data migration - During existing data migration, if the target database already contains the table to be migrated and there are duplicate keys, the rows with duplicate keys will be replaced. -- If your dataset size is smaller than 1 TiB, it is recommended that you use logical mode (the default mode). If your dataset size is larger than 1 TiB, or you want to migrate existing data faster, you can use physical mode. For more information, see [Migrate existing data and incremental data](#migrate-existing-data-and-incremental-data). + + +- For {{{ .dedicated }}}, if your dataset size is smaller than 1 TiB, it is recommended that you use logical mode (the default mode). If your dataset size is larger than 1 TiB, or you want to migrate existing data faster, you can use physical mode. For more information, see [Migrate existing data and incremental data](#migrate-existing-data-and-incremental-data). + + + + +- For {{{ .essential }}}, only logical mode is supported for data migration currently. This mode exports data from MySQL source databases as SQL statements and then executes them on TiDB. In this mode, the target tables before migration can be either empty or non-empty. 
+ + ### Limitations of incremental data migration -- During incremental data migration, if the table to be migrated already exists in the target database with duplicate keys, an error is reported and the migration is interrupted. In this situation, you need to make sure whether the MySQL source data is accurate. If yes, click the "Restart" button of the migration job, and the migration job will replace the target TiDB Cloud cluster's conflicting records with the MySQL source records. +- During incremental data migration, if the table to be migrated already exists in the target database with duplicate keys, an error is reported and the migration is interrupted. In this situation, you need to verify that the MySQL source data is accurate. If it is accurate, click the **Restart** button of the migration job, and the migration job will replace the conflicting records in the target cluster with the MySQL source records. + +- During incremental data migration (migrating ongoing changes to your cluster), if the migration job recovers from an abrupt error, it might open the safe mode for 60 seconds. During the safe mode, `INSERT` statements are migrated as `REPLACE`, `UPDATE` statements as `DELETE` and `REPLACE`, and then these transactions are migrated to the target TiDB Cloud cluster to ensure that all the data during the abrupt error has been migrated smoothly to the target TiDB Cloud cluster. In this scenario, for MySQL source tables without primary keys or non-null unique indexes, some data might be duplicated in the target TiDB Cloud cluster because the data might be inserted repeatedly into the target TiDB Cloud cluster. -- During incremental replication (migrating ongoing changes to your cluster), if the migration job recovers from an abrupt error, it might open the safe mode for 60 seconds. 
During the safe mode, `INSERT` statements are migrated as `REPLACE`, `UPDATE` statements as `DELETE` and `REPLACE`, and then these transactions are migrated to the target TiDB Cloud cluster to make sure that all the data during the abrupt error has been migrated smoothly to the target TiDB Cloud cluster. In this scenario, for MySQL source tables without primary keys or non-null unique indexes, some data might be duplicated in the target TiDB Cloud cluster because the data might be inserted repeatedly into the target TiDB Cloud cluster. + -- In the following scenarios, if the migration job takes longer than 24 hours, do not purge binary logs in the source database to ensure that Data Migration can get consecutive binary logs for incremental replication: +- In the following scenarios, if the migration job takes longer than 24 hours, do not purge binary logs in the source database. This allows Data Migration to get consecutive binary logs for incremental data migration: - During the existing data migration. - - After the existing data migration is completed and when incremental data migration is started for the first time, the latency is not 0ms. + - After the existing data migration is completed and when incremental data migration is started for the first time, the latency is not 0 ms. 
+ + ## Prerequisites @@ -55,7 +107,9 @@ Before migrating, check whether your data source is supported, enable binary log ### Make sure your data source and version are supported -Data Migration supports the following data sources and versions: + + +For {{{ .dedicated }}}, the Data Migration feature supports the following data sources and versions: | Data source | Supported versions | |:------------|:-------------------| @@ -64,6 +118,23 @@ Data Migration supports the following data sources and versions: | Amazon RDS MySQL | 8.0, 5.7 | | Azure Database for MySQL - Flexible Server | 8.0, 5.7 | | Google Cloud SQL for MySQL | 8.0, 5.7, 5.6 | +| Alibaba Cloud RDS MySQL | 8.0, 5.7 | + + + + +For {{{ .essential }}}, the Data Migration feature supports the following data sources and versions: + +| Data source | Supported versions | +|:-------------------------------------------------|:-------------------| +| Self-managed MySQL (on-premises or public cloud) | 8.0, 5.7 | +| Amazon Aurora MySQL | 8.0, 5.7 | +| Amazon RDS MySQL | 8.0, 5.7 | +| Alibaba Cloud RDS MySQL | 8.0, 5.7 | +| Azure Database for MySQL - Flexible Server | 8.0, 5.7 | +| Google Cloud SQL for MySQL | 8.0, 5.7 | + + ### Enable binary logs in the source MySQL-compatible database for replication @@ -89,7 +160,7 @@ SHOW VARIABLES WHERE Variable_name IN If necessary, change the source MySQL instance configurations to match the required values.
- Configure a self‑managed MySQL instance + Configure a self-managed MySQL instance 1. Open `/etc/my.cnf` and add the following: @@ -152,23 +223,60 @@ For detailed instructions, see [Configure database flags](https://cloud.google.c
+
+ Configure Alibaba Cloud RDS MySQL + +1. In the [ApsaraDB RDS console](https://rds.console.aliyun.com/), select the region of your instance, and then click the ID of your RDS for MySQL instance. + +2. In the left navigation pane, click **Parameters**, search for the following parameter, and then set its value: + + - `binlog_row_image`: `FULL` + +3. In the left navigation pane, click **Backup and Restoration**, and then select **Backup Strategy**. To ensure DM can access consecutive binlog files during migration, configure the backup strategy with the following constraints: + + - Retention Period: Set to at least 3 days (7 days recommended). + + - Retained Files: Ensure the "Max number of files" is sufficient to prevent older logs from being overwritten prematurely. + + - Storage Safeguard: Monitor Storage Usage closely. Note that RDS will automatically purge the earliest binlogs if the disk space usage reaches the system threshold, regardless of the retention period setting. + +4. After applying the changes (and restarting if needed), connect to the instance and run the `SHOW VARIABLES` statement in this section to verify the configuration. + +For more information, see [Set instance parameters](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/modify-the-parameters-of-an-apsaradb-rds-for-mysql-instance). + +
+ ### Ensure network connectivity Before creating a migration job, you need to plan and set up proper network connectivity between your source MySQL instance, the TiDB Cloud Data Migration (DM) service, and your target TiDB Cloud cluster. -The available connection methods are as follows: + + +For {{{ .dedicated }}}, the available connection methods are as follows: | Connection method | Availability | Recommended for | |:---------------------|:-------------|:----------------| | Public endpoints or IP addresses | All cloud providers supported by TiDB Cloud | Quick proof-of-concept migrations, testing, or when private connectivity is unavailable | -| Private links or private endpoints | AWS and Azure only | Production workloads without exposing data to the public internet | +| Private links or private endpoints | AWS and Azure only | Production workloads without exposing data to the public internet | | VPC peering | AWS and Google Cloud only | Production workloads that need low-latency, intra-region connections and have non-overlapping VPC/VNet CIDRs | + + + +For {{{ .essential }}}, the available connection methods are as follows: + +| Connection method | Availability | Recommended for | +|:---------------------|:-------------|:----------------| +| Public endpoints or IP addresses | All cloud providers supported by TiDB Cloud | Quick proof-of-concept migrations, testing, or when private connectivity is unavailable | +| Private links or private endpoints | AWS and Alibaba Cloud only | Production workloads without exposing data to the public internet | + + + Choose a connection method that best fits your cloud provider, network topology, and security requirements, and then follow the setup instructions for that method. #### End-to-end encryption over TLS/SSL -Regardless of the connection method, it is strongly recommended to use TLS/SSL for end-to-end encryption. 
While private endpoints and VPC peering secure the network path, TLS/SSL secures the data itself and helps meet compliance requirements. +Regardless of the connection method, it is strongly recommended to use TLS/SSL for end-to-end encryption. While private endpoints and VPC peering secure the network path, TLS/SSL secures the data itself and helps meet compliance requirements.
Download and store the cloud provider's certificates for TLS/SSL encrypted connections @@ -176,6 +284,7 @@ Regardless of the connection method, it is strongly recommended to use TLS/SSL f - Amazon Aurora MySQL or Amazon RDS MySQL: [Using SSL/TLS to encrypt a connection to a DB instance or cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) - Azure Database for MySQL - Flexible Server: [Connect with encrypted connections](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/how-to-connect-tls-ssl) - Google Cloud SQL for MySQL: [Manage SSL/TLS certificates](https://cloud.google.com/sql/docs/mysql/manage-ssl-instance) +- Alibaba Cloud RDS MySQL: [Configure the SSL encryption feature](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/configure-a-cloud-certificate-to-enable-ssl-encryption)
@@ -183,12 +292,24 @@ Regardless of the connection method, it is strongly recommended to use TLS/SSL f When using public endpoints, you can verify network connectivity and access both now and later during the DM job creation process. TiDB Cloud will provide specific egress IP addresses and prompt instructions at that time. + + +> **Note**: +> +> The egress IP range for your firewall is available only during Data Migration task creation. You cannot obtain this IP range in advance. Before you begin, ensure that you: +> +> - Have permissions to modify firewall rules. +> - Can access your cloud provider's console during the setup process. +> - Can pause the task creation workflow to configure your firewall. + + + 1. Identify and record the source MySQL instance's endpoint hostname (FQDN) or public IP address. -2. Ensure you have the required permissions to modify the firewall or security group rules for your database. Refer to your cloud provider's documentation for guidance as follows: +2. Ensure you have the required permissions to modify the firewall or security group rules for your database. Refer to your cloud provider's documentation for guidance. - Amazon Aurora MySQL or Amazon RDS MySQL: [Controlling access with security groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Overview.RDSSecurityGroups.html). - Azure Database for MySQL - Flexible Server: [Public Network Access](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/concepts-networking-public) - - Google Cloud SQL for MySQL: [Authorized Networks](https://cloud.google.com/sql/docs/mysql/configure-ip#authorized-networks). 3. 
Optional: Verify connectivity to your source database from a machine with public internet access using the appropriate certificate for in-transit encryption: @@ -196,9 +317,11 @@ When using public endpoints, you can verify network connectivity and access both mysql -h -P -u -p --ssl-ca= -e "SELECT version();" ``` -4. Later, during the Data Migration job setup, TiDB Cloud will provide an egress IP range. At that time, you need to add this IP range to your database's firewall or security‑group rules following the same procedure above. +4. Later, during the Data Migration job setup, TiDB Cloud will provide an egress IP range. At that time, you need to add this IP range to your database's firewall or security-group rules following the same procedure above. + +#### Private link or private endpoint -#### Private link or private endpoint + If you use a provider-native private link or private endpoint, create a private endpoint for your source MySQL instance (RDS, Aurora, or Azure Database for MySQL). @@ -207,11 +330,11 @@ If you use a provider-native private link or private endpoint, create a private AWS does not support direct PrivateLink access to RDS or Aurora. Therefore, you need to create a Network Load Balancer (NLB) and publish it as an endpoint service associated with your source MySQL instance. -1. In the [Amazon EC2 console](https://console.aws.amazon.com/ec2/), create an NLB in the same subnet(s) as your RDS or Aurora writer. Configure the NLB with a TCP listener on port `3306` that forwards traffic to the database endpoint. +1. In the [Amazon EC2 console](https://console.aws.amazon.com/ec2/), create an NLB in the same subnet(s) as your RDS or Aurora writer. Configure the NLB with a TCP listener on port `3306` that forwards traffic to the database endpoint. For detailed instructions, see [Create a Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-network-load-balancer.html) in AWS documentation. -2. 
In the [Amazon VPC console](https://console.aws.amazon.com/vpc/), click **Endpoint Services** in the left navigation pane, and then create an endpoint service. During the setup, select the NLB created in the previous step as the backing load balancer, and enable the **Require acceptance for endpoint** option. After the endpoint service is created, copy the service name (in the `com.amazonaws.vpce-svc-xxxxxxxxxxxxxxxxx` format) for later use. +2. In the [Amazon VPC console](https://console.aws.amazon.com/vpc/), click **Endpoint Services** in the left navigation pane, and then create an endpoint service. During the setup, select the NLB created in the previous step as the backing load balancer, and enable the **Require acceptance for endpoint** option. After the endpoint service is created, copy the service name (in the `com.amazonaws.vpce-svc-xxxxxxxxxxxxxxxxx` format) for later use. For detailed instructions, see [Create an endpoint service](https://docs.aws.amazon.com/vpc/latest/privatelink/create-endpoint-service.html) in AWS documentation. @@ -245,12 +368,21 @@ To add a new private endpoint, take the following steps: mysql -h -P 3306 -u -p --ssl-ca= -e "SELECT version();" ``` -4. In the [Azure portal](https://portal.azure.com/), return to the overview page of your MySQL Flexible Server instance (not the private endpoint object), click **JSON View** for the **Essentials** section, and then copy the resource ID for later use. The resource ID is in the `/subscriptions//resourceGroups//providers/Microsoft.DBforMySQL/flexibleServers/` format. You will use this resource ID (not the private endpoint ID) to configure TiDB Cloud DM. +4. In the [Azure portal](https://portal.azure.com/), return to the overview page of your MySQL Flexible Server instance (not the private endpoint object), click **JSON View** for the **Essentials** section, and then copy the resource ID for later use. 
The resource ID is in the `/subscriptions//resourceGroups//providers/Microsoft.DBforMySQL/flexibleServers/` format. You will use this resource ID (not the private endpoint ID) to configure TiDB Cloud DM. 5. Later, when configuring TiDB Cloud DM to connect via PrivateLink, you will need to return to the Azure portal and approve the pending connection request from TiDB Cloud to this private endpoint. + + + +If you use a provider-native private link or private endpoint, create a [Private Link Connection](/tidb-cloud/serverless-private-link-connection.md) for your source MySQL instance. + + + + + #### VPC peering If you use AWS VPC peering or Google Cloud VPC network peering, see the following instructions to configure the network. @@ -288,6 +420,8 @@ If your MySQL service is in a Google Cloud VPC, take the following steps: + + ### Grant required privileges for migration Before starting migration, you need to set up appropriate database users with the required privileges on both the source and target databases. These privileges enable TiDB Cloud DM to read data from MySQL, replicate changes, and write to your TiDB Cloud cluster securely. Because the migration involves both full data dumps for existing data and binlog replication for incremental changes, your migration user requires specific permissions beyond basic read access. 
@@ -302,7 +436,7 @@ For production workloads, it is recommended to have a dedicated user for data du |:----------|:------|:--------| | `SELECT` | Tables | Allows reading data from all tables | | `RELOAD` | Global | Ensures consistent snapshots during full dump | -| `REPLICATION SLAVE` | Global | Enables binlog streaming for incremental replication | +| `REPLICATION SLAVE` | Global | Enables binlog streaming for incremental data migration | | `REPLICATION CLIENT` | Global | Provides access to binlog position and server status | For example, you can use the following `GRANT` statement in your source MySQL instance to grant corresponding privileges: @@ -322,12 +456,12 @@ For production workloads, it is recommended to have a dedicated user for replica | `CREATE` | Databases, Tables | Creates schema objects in the target | | `SELECT` | Tables | Verifies data during migration | | `INSERT` | Tables | Writes migrated data | -| `UPDATE` | Tables | Modifies existing rows during incremental replication | +| `UPDATE` | Tables | Modifies existing rows during incremental data migration | | `DELETE` | Tables | Removes rows during replication or updates | | `ALTER` | Tables | Modifies table definitions when schema changes | | `DROP` | Databases, Tables | Removes objects during schema sync | | `INDEX` | Tables | Creates and modifies indexes | -| `CREATE VIEW` | View | Create views used by migration | +| `CREATE VIEW` | Views | Creates views used by migration | For example, you can execute the following `GRANT` statement in your target TiDB Cloud cluster to grant corresponding privileges: @@ -343,7 +477,7 @@ GRANT CREATE, SELECT, INSERT, UPDATE, DELETE, ALTER, DROP, INDEX ON *.* TO 'dm_t > > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. -2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Migration** in the left navigation pane. +2. 
Click the name of your target cluster to go to its overview page, and then click **Data** > **Data Migration** in the left navigation pane. 3. On the **Data Migration** page, click **Create Migration Job** in the upper-right corner. The **Create Migration Job** page is displayed. @@ -356,15 +490,44 @@ On the **Create Migration Job** page, configure the source and target connection 2. Fill in the source connection profile. - **Data source**: the data source type. + + + - **Connectivity method**: select a connection method for your data source based on your security requirements and cloud provider: + - **Public IP**: available for all cloud providers (recommended for testing and proof-of-concept migrations). - **Private Link**: available for AWS and Azure only (recommended for production workloads requiring private connectivity). - **VPC Peering**: available for AWS and Google Cloud only (recommended for production workloads needing low-latency, intra-region connections with non-overlapping VPC/VNet CIDRs). + + + + + - **Connectivity method**: select a connection method for your data source based on your security requirements and cloud provider: + + - **Public**: available for all cloud providers (recommended for testing and proof-of-concept migrations). + - **Private Link**: available for AWS and Alibaba Cloud only (recommended for production workloads requiring private connectivity). + + + + + - Based on the selected **Connectivity method**, do the following: + - If **Public IP** or **VPC Peering** is selected, fill in the **Hostname or IP address** field with the hostname or IP address of the data source. - If **Private Link** is selected, fill in the following information: - **Endpoint Service Name** (available if **Data source** is from AWS): enter the VPC endpoint service name (format: `com.amazonaws.vpce-svc-xxxxxxxxxxxxxxxxx`) that you created for your RDS or Aurora instance. 
- **Private Endpoint Resource ID** (available if **Data source** is from Azure): enter the resource ID of your MySQL Flexible Server instance (format: `/subscriptions//resourceGroups//providers/Microsoft.DBforMySQL/flexibleServers/`). + + + + + - Based on the selected **Connectivity method**, do the following: + + - If **Public** is selected, fill in the **Hostname or IP address** field with the hostname or IP address of the data source. + - If **Private Link** is selected, select the private link connection that you created in the [Private link or private endpoint](#private-link-or-private-endpoint) section. + + + - **Port**: the port of the data source. - **User Name**: the username of the data source. - **Password**: the password of the username. @@ -380,7 +543,7 @@ On the **Create Migration Job** page, configure the source and target connection - Option 2: Client certificate authentication - - If your MySQL server is configured for client certificate authentication, upload **Client Certificate** and **Client private key**. + - If your MySQL server is configured for client certificate authentication, upload **Client Certificate** and **Client private key**. - In this option, TiDB Cloud presents its certificate to the MySQL server for authentication, but TiDB Cloud does not verify the MySQL server's certificate. - This option is typically used when the MySQL server is configured with options such as `REQUIRE SUBJECT '...'` or `REQUIRE ISSUER '...'` without `REQUIRE X509`, allowing it to check specific attributes of the client certificate without full CA validation of that client certificate. - This option is often used when the MySQL server accepts client certificates in self-signed or custom PKI environments. Note that this configuration is vulnerable to man-in-the-middle attacks and is not recommended for production environments unless other network-level controls guarantee server authenticity. 
@@ -405,17 +568,38 @@ On the **Create Migration Job** page, configure the source and target connection 5. Take action according to the message you see: + + - If you use **Public IP** or **VPC Peering** as the connectivity method, you need to add the Data Migration service's IP addresses to the IP Access List of your source database and firewall (if any). - If you use **Private Link** as the connectivity method, you are prompted to accept the endpoint request: - For AWS: go to the [AWS VPC console](https://us-west-2.console.aws.amazon.com/vpc/home), click **Endpoint services**, and accept the endpoint request from TiDB Cloud. - For Azure: go to the [Azure portal](https://portal.azure.com), search for your MySQL Flexible Server by name, click **Setting** > **Networking** in the left navigation pane, locate the **Private endpoint** section on the right side, and then approve the pending connection request from TiDB Cloud. + + + + If you use Public IP, you need to add the Data Migration service's IP addresses to the IP Access List of your source database and firewall (if any). + + + ## Step 3: Choose migration job type -In the **Choose the objects to be migrated** step, you can choose existing data migration, incremental data migration, or both. + + +In the **Choose migration job type** step, you can choose to migrate both existing data and incremental data, migrate only existing data, or migrate only incremental data. + + + + + +In the **Choose migration job type** step, you can choose to migrate both existing data and incremental data, or migrate only incremental data. + + ### Migrate existing data and incremental data + + To migrate data to TiDB Cloud once and for all, choose both **Existing data migration** and **Incremental data migration**, which ensures data consistency between the source and target databases. You can use **physical mode** or **logical mode** to migrate **existing data** and **incremental data**. 
@@ -438,11 +622,24 @@ Physical mode exports the MySQL source data as fast as possible, so [different s | 8 RCUs | 365.5 MiB/s | 28.9% | | 16 RCUs | 424.6 MiB/s | 46.7% | + + + +To migrate data to TiDB Cloud once and for all, choose **Full + Incremental**, which ensures data consistency between the source and target databases. + +Currently, you can only use **logical mode** to migrate **existing data**. This mode exports data from MySQL source databases as SQL statements and then executes them on TiDB. In this mode, the target tables before migration can be either empty or non-empty. + + + + + ### Migrate only existing data To migrate only existing data of the source database to TiDB Cloud, choose **Existing data migration**. -You can only use logical mode to migrate existing data. For more information, see [Migrate existing data and incremental data](#migrate-existing-data-and-incremental-data). +You can use physical mode or logical mode to migrate existing data. For more information, see [Migrate existing data and incremental data](#migrate-existing-data-and-incremental-data). + + ### Migrate only incremental data @@ -456,15 +653,15 @@ For detailed instructions about incremental data migration, see [Migrate Only In - If you click **All**, the migration job will migrate the existing data from the whole source database instance to TiDB Cloud and migrate ongoing changes after the full migration. Note that it happens only if you have selected the **Existing data migration** and **Incremental data migration** checkboxes in the previous step. - If you click **Customize** and select some databases, the migration job will migrate the existing data and migrate ongoing changes of the selected databases to TiDB Cloud. Note that it happens only if you have selected the **Existing data migration** and **Incremental data migration** checkboxes in the previous step. 
- - If you click **Customize** and select some tables under a dataset name, the migration job will only migrate the existing data and migrate ongoing changes of the selected tables. Tables created afterwards in the same database will not be migrated. + - If you click **Customize** and select some tables under a database name, the migration job will only migrate the existing data and migrate ongoing changes of the selected tables. Tables created afterwards in the same database will not be migrated. 2. Click **Next**. ## Step 5: Precheck -On the **Precheck** page, you can view the precheck results. If the precheck fails, you need to operate according to **Failed** or **Warning** details, and then click **Check again** to recheck. +On the **Precheck** page, you can view the precheck results. If the precheck fails, you need to resolve the issues according to the **Failed** or **Warning** details, and then click **Check again** to recheck. -If there are only warnings on some check items, you can evaluate the risk and consider whether to ignore the warnings. If all warnings are ignored, the migration job will automatically go on to the next step. +If there are only warnings on some check items, you can evaluate the risk and consider whether to ignore the warnings. If all warnings are ignored, the migration job will automatically proceed to the next step. For more information about errors and solutions, see [Precheck errors and solutions](/tidb-cloud/tidb-cloud-dm-precheck-and-troubleshooting.md#precheck-errors-and-solutions). @@ -472,6 +669,24 @@ For more information about precheck items, see [Migration Task Precheck](https:/ If all check items show **Pass**, click **Next**. + + +## Step 6: View the migration progress + +After the migration job is created, you can view the migration progress on the **Migration Job Details** page. The migration progress is displayed in the **Stage and Status** area. + +You can pause or delete a migration job when it is running. 
+ +If a migration job has failed, you can resume it after solving the problem. + +You can delete a migration job in any status. + +If you encounter any problems during the migration, see [Migration errors and solutions](/tidb-cloud/tidb-cloud-dm-precheck-and-troubleshooting.md#migration-errors-and-solutions). + + + + + ## Step 6: Choose a spec and start migration On the **Choose a Spec and Start Migration** page, select an appropriate migration specification according to your performance requirements. For more information about the specifications, see [Specifications for Data Migration](/tidb-cloud/tidb-cloud-billing-dm.md#specifications-for-data-migration). @@ -492,7 +707,7 @@ If you encounter any problems during the migration, see [Migration errors and so ## Scale a migration job specification -TiDB Cloud supports scaling up or down a migration job specification to meet your performance and cost requirements in different scenarios. +TiDB Cloud Dedicated supports scaling up or down a migration job specification to meet your performance and cost requirements in different scenarios. Different migration specifications have different performances. Your performance requirements might vary at different stages as well. For example, during the existing data migration, you want the performance to be as fast as possible, so you choose a migration job with a large specification, such as 8 RCU. Once the existing data migration is completed, the incremental migration does not require such a high performance, so you can scale down the job specification, for example, from 8 RCU to 2 RCU, to save cost. @@ -512,8 +727,10 @@ When scaling a migration job specification, note the following: 1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. -2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Migration** in the left navigation pane. 
+2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Data Migration** in the left navigation pane. 3. On the **Data Migration** page, locate the migration job you want to scale. In the **Action** column, click **...** > **Scale Up/Down**. 4. In the **Scale Up/Down** window, select the new specification you want to use, and then click **Submit**. You can view the new price of the specification at the bottom of the window. + + diff --git a/tidb-cloud/migrate-from-op-tidb.md b/tidb-cloud/migrate-from-op-tidb.md index 475c428af75e8..d5452af1f15dd 100644 --- a/tidb-cloud/migrate-from-op-tidb.md +++ b/tidb-cloud/migrate-from-op-tidb.md @@ -5,7 +5,7 @@ summary: Learn how to migrate data from TiDB Self-Managed to TiDB Cloud. # Migrate from TiDB Self-Managed to TiDB Cloud -This document describes how to migrate data from your TiDB Self-Managed clusters to TiDB Cloud (AWS) through Dumpling and TiCDC. +This document describes how to migrate data from your TiDB Self-Managed clusters to TiDB Cloud (on AWS) through Dumpling and TiCDC. The overall procedure is as follows: @@ -13,7 +13,7 @@ The overall procedure is as follows: 2. Migrate full data. The process is as follows: 1. Export data from TiDB Self-Managed to Amazon S3 using Dumpling. 2. Import data from Amazon S3 to TiDB Cloud. -3. Replicate incremental data by using TiCDC. +3. Replicate incremental data using TiCDC. 4. Verify the migrated data. ## Prerequisites @@ -201,11 +201,10 @@ Do the following to export data from the upstream TiDB cluster to Amazon S3 usin After you export data from the TiDB Self-Managed cluster to Amazon S3, you need to migrate the data to TiDB Cloud. -1. Get the Account ID and External ID of the cluster in the TiDB Cloud console. For more information, see [Step 2. Configure Amazon S3 access](/tidb-cloud/tidb-cloud-auditing.md#step-2-configure-amazon-s3-access). +1. 
In the [TiDB Cloud console](https://tidbcloud.com/), get the Account ID and External ID of your target cluster according to the following documentation: - The following screenshot shows how to get the Account ID and External ID: - - ![Get the Account ID and External ID](/media/tidb-cloud/op-to-cloud-get-role-arn.png) + - For TiDB Cloud Dedicated clusters, see [Configure Amazon S3 access using a Role ARN](/tidb-cloud/dedicated-external-storage.md#configure-amazon-s3-access-using-a-role-arn). + - For {{{ .starter }}} or {{{ .essential }}} clusters, see [Configure Amazon S3 access using a Role ARN](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access-using-a-role-arn). 2. Configure access permissions for Amazon S3. Usually you need the following read-only permissions: @@ -269,7 +268,10 @@ After you export data from the TiDB Self-Managed cluster to Amazon S3, you need 5. Get the Role-ARN. Go to [AWS Console > IAM > Access Management > Roles](https://console.aws.amazon.com/iamv2/home#/roles). Switch to your region. Click the role you have created, and note down the ARN. You will use it when importing data into TiDB Cloud. -6. Import data to TiDB Cloud. See [Import CSV Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-csv-files.md). +6. Import data to TiDB Cloud. + + - For TiDB Cloud Dedicated clusters, see [Import CSV Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-csv-files.md). + - For {{{ .starter }}} or {{{ .essential }}} clusters, see [Import CSV Files from Cloud Storage into {{{ .starter }}} or Essential](/tidb-cloud/import-csv-files-serverless.md). 
## Replicate incremental data diff --git a/tidb-cloud/migrate-from-oracle-using-aws-dms.md b/tidb-cloud/migrate-from-oracle-using-aws-dms.md index 0832b357e7b1f..6339fee66fe04 100644 --- a/tidb-cloud/migrate-from-oracle-using-aws-dms.md +++ b/tidb-cloud/migrate-from-oracle-using-aws-dms.md @@ -1,11 +1,15 @@ --- title: Migrate from Amazon RDS for Oracle to TiDB Cloud Using AWS DMS -summary: Learn how to migrate data from Amazon RDS for Oracle into TiDB Cloud Serverless using AWS Database Migration Service (AWS DMS). +summary: Learn how to migrate data from Amazon RDS for Oracle into {{{ .starter }}} using AWS Database Migration Service (AWS DMS). --- # Migrate from Amazon RDS for Oracle to TiDB Cloud Using AWS DMS -This document describes a step-by-step example of how to migrate data from Amazon RDS for Oracle to [TiDB Cloud Serverless](https://tidbcloud.com/clusters/create-cluster) using AWS Database Migration Service (AWS DMS). +This document describes a step-by-step example of how to migrate data from Amazon RDS for Oracle to [{{{ .starter }}}](https://tidbcloud.com/clusters/create-cluster) using AWS Database Migration Service (AWS DMS). + +> **Tip:** +> +> In addition to {{{ .starter }}} clusters, the steps in this document also work with {{{ .essential }}} clusters. If you are interested in learning more about TiDB Cloud and AWS DMS, see the following: @@ -24,7 +28,7 @@ If you want to migrate data from heterogeneous databases, such as PostgreSQL, Or At a high level, follow the following steps: 1. Set up the source Amazon RDS for Oracle. -2. Set up the target [TiDB Cloud Serverless](https://tidbcloud.com/project/clusters/create-cluster). +2. Set up the target [{{{ .starter }}}](https://tidbcloud.com/project/clusters/create-cluster). 3. Set up data migration (full load) using AWS DMS. The following diagram illustrates the high-level architecture. @@ -69,11 +73,11 @@ After you finish executing the SQL script, check the data in Oracle. 
The followi ![Oracle RDS Data](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-3.png) -## Step 4. Create a TiDB Cloud Serverless cluster +## Step 4. Create a {{{ .starter }}} cluster 1. Log in to the [TiDB Cloud console](https://tidbcloud.com/project/clusters). -2. [Create a TiDB Cloud Serverless cluster](/tidb-cloud/tidb-cloud-quickstart.md). +2. [Create a {{{ .starter }}} cluster](/tidb-cloud/tidb-cloud-quickstart.md). 3. In the [**Clusters**](https://tidbcloud.com/project/clusters) page, click the target cluster name to go to its overview page. @@ -91,7 +95,7 @@ After you finish executing the SQL script, check the data in Oracle. The followi > **Note:** > -> For detailed steps on creating an AWS DMS replication instance to work with TiDB Cloud Serverless, see [Connect AWS DMS to TiDB Cloud clusters](/tidb-cloud/tidb-cloud-connect-aws-dms.md). +> For detailed steps on creating an AWS DMS replication instance to work with {{{ .starter }}}, see [Connect AWS DMS to TiDB Cloud clusters](/tidb-cloud/tidb-cloud-connect-aws-dms.md). ## Step 6. Create DMS endpoints @@ -109,7 +113,7 @@ After you finish executing the SQL script, check the data in Oracle. The followi > **Note:** > -> For detailed steps on creating a TiDB Cloud Serverless DMS endpoint, see [Connect AWS DMS to TiDB Cloud clusters](/tidb-cloud/tidb-cloud-connect-aws-dms.md). +> For detailed steps on creating a {{{ .starter }}} DMS endpoint, see [Connect AWS DMS to TiDB Cloud clusters](/tidb-cloud/tidb-cloud-connect-aws-dms.md). ## Step 7. Migrate the schema @@ -139,7 +143,7 @@ For more information, see [Migrating your source schema to your target database ## Step 9. Check data in the downstream TiDB cluster -Connect to the [TiDB Cloud Serverless cluster](https://tidbcloud.com/clusters/create-cluster) and check the `admin.github_event` table data. As shown in the following screenshot, DMS successfully migrated table `github_events` and 10000 rows of data. 
+Connect to the [{{{ .starter }}} cluster](https://tidbcloud.com/clusters/create-cluster) and check the `admin.github_event` table data. As shown in the following screenshot, DMS successfully migrated table `github_events` and 10000 rows of data. ![Check Data In TiDB](/media/tidb-cloud/aws-dms-from-oracle-to-tidb-14.png) diff --git a/tidb-cloud/migrate-incremental-data-from-mysql-using-data-migration.md b/tidb-cloud/migrate-incremental-data-from-mysql-using-data-migration.md index b911077512e88..50e8d07d0cfd8 100644 --- a/tidb-cloud/migrate-incremental-data-from-mysql-using-data-migration.md +++ b/tidb-cloud/migrate-incremental-data-from-mysql-using-data-migration.md @@ -1,11 +1,19 @@ --- title: Migrate Only Incremental Data from MySQL-Compatible Databases to TiDB Cloud Using Data Migration -summary: Learn how to migrate incremental data from MySQL-compatible databases hosted in Amazon Aurora MySQL, Amazon Relational Database Service (RDS), Google Cloud SQL for MySQL, Azure Database for MySQL, or a local MySQL instance to TiDB Cloud using Data Migration. +summary: Learn how to migrate incremental data from MySQL-compatible databases hosted in Amazon Aurora MySQL, Amazon Relational Database Service (RDS), Google Cloud SQL for MySQL, Azure Database for MySQL, or Alibaba Cloud RDS, or a local MySQL instance to TiDB Cloud using Data Migration. --- # Migrate Only Incremental Data from MySQL-Compatible Databases to TiDB Cloud Using Data Migration -This document describes how to migrate incremental data from a MySQL-compatible database on a cloud provider (Amazon Aurora MySQL, Amazon Relational Database Service (RDS), Google Cloud SQL for MySQL, or Azure Database for MySQL) or self-hosted source database to TiDB Cloud using the Data Migration feature of the TiDB Cloud console. 
+This document describes how to migrate incremental data from a MySQL-compatible database on a cloud provider (Amazon Aurora MySQL, Amazon Relational Database Service (RDS), Google Cloud SQL for MySQL, Azure Database for MySQL, or Alibaba Cloud RDS) or self-hosted source database to {{{ .dedicated }}}{{{ .essential }}} using the Data Migration feature of the TiDB Cloud console. + + + +> **Note:** +> +> Currently, the Data Migration feature is in beta for {{{ .essential }}}. + + For instructions about how to migrate existing data or both existing data and incremental data, see [Migrate MySQL-Compatible Databases to TiDB Cloud Using Data Migration](/tidb-cloud/migrate-from-mysql-using-data-migration.md). @@ -22,7 +30,7 @@ For instructions about how to migrate existing data or both existing data and in 00000000-0000-0000-0000-00000000000000000], endLocation: [position: (mysql_bin.000016, 5162), gtid-set: 0000000-0000-0000 0000-0000000000000:0]: cannot fetch downstream table schema of - zm`.'table1' to initialize upstream schema 'zm'.'table1' in sschema + zm`.'table1' to initialize upstream schema 'zm'.'table1' in schema tracker Raw Cause: Error 1146: Table 'zm.table1' doesn't exist ``` @@ -44,7 +52,7 @@ If you want to use GTID to specify the start position, make sure that the GTID i ### For Amazon RDS and Amazon Aurora MySQL -For Amazon RDS and Amazon Aurora MySQL, you need to create a new modifiable parameter group (that is, not the default parameter group) and then modify the following parameters in the parameter group and restart the instance application. +For Amazon RDS and Amazon Aurora MySQL, you need to create a new modifiable parameter group (that is, not the default parameter group), modify the following parameters in the parameter group, and then restart the instance to apply the changes. - `gtid_mode` - `enforce_gtid_consistency` @@ -71,7 +79,19 @@ If the result is `ON` or `ON_PERMISSIVE`, the GTID mode is successfully enabled. 
### For Azure Database for MySQL -The GTID mode is enabled by default for Azure Database for MySQL (versions 5.7 and later). You can check if the GTID mode has been successfully enabled by executing the following SQL statement: +The GTID mode is enabled by default for Azure Database for MySQL (versions 5.7 and later) and does not support disabling GTID mode. + +In addition, ensure that the `binlog_row_image` server parameter is set to `FULL`. You can check this by executing the following SQL statement: + +```sql +SHOW VARIABLES LIKE 'binlog_row_image'; +``` + +If the result is not `FULL`, you need to configure this parameter for your Azure Database for MySQL instance using the [Azure portal](https://portal.azure.com/) or [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/). + +### For Alibaba Cloud RDS MySQL + +The GTID mode is enabled by default for Alibaba Cloud RDS MySQL. You can check if the GTID mode has been successfully enabled by executing the following SQL statement: ```sql SHOW VARIABLES LIKE 'gtid_mode'; @@ -85,7 +105,7 @@ In addition, ensure that the `binlog_row_image` server parameter is set to `FULL SHOW VARIABLES LIKE 'binlog_row_image'; ``` -If the result is not `FULL`, you need to configure this parameter for your Azure Database for MySQL instance using the [Azure portal](https://portal.azure.com/) or [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/). +If the result is not `FULL`, you need to configure this parameter for your Alibaba Cloud RDS MySQL instance using the [RDS console](https://rds.console.aliyun.com/). ### For a self-hosted MySQL instance @@ -128,7 +148,7 @@ To enable the GTID mode for a self-hosted MySQL instance, follow these steps: > > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. -2. Click the name of your target cluster to go to its overview page, and then click **Data** > **Migration** in the left navigation pane. +2. 
Click the name of your target cluster to go to its overview page, and then click **Data** > **Data Migration** in the left navigation pane. 3. On the **Data Migration** page, click **Create Migration Job** in the upper-right corner. The **Create Migration Job** page is displayed. @@ -140,18 +160,30 @@ On the **Create Migration Job** page, configure the source and target connection 2. Fill in the source connection profile. - - **Data source**: the data source type. - - **Region**: the region of the data source, which is required for cloud databases only. - - **Connectivity method**: the connection method for the data source. Currently, you can choose public IP, VPC Peering, or Private Link according to your connection method. - - **Hostname or IP address** (for public IP and VPC Peering): the hostname or IP address of the data source. - - **Service Name** (for Private Link): the endpoint service name. - - **Port**: the port of the data source. - - **Username**: the username of the data source. - - **Password**: the password of the username. - - **SSL/TLS**: if you enable SSL/TLS, you need to upload the certificates of the data source, including any of the following: + - **Data source**: the data source type. + - **Region**: the region of the data source, which is required for cloud databases only. + - **Connectivity method**: the connection method for the data source. Currently, you can choose public IP, VPC Peering, or Private Link according to your connection method. You can choose public IP or Private Link according to your connection method. + + + + - **Hostname or IP address** (for public IP and VPC Peering): the hostname or IP address of the data source. + - **Service Name** (for Private Link): the endpoint service name. + + + + + - **Hostname or IP address** (for public IP): the hostname or IP address of the data source. 
+ - **Private Link Connection** (for Private Link): the private link connection that you created in the [Private Link Connections](/tidb-cloud/serverless-private-link-connection.md) section. + + + + - **Port**: the port of the data source. + - **Username**: the username of the data source. + - **Password**: the password of the username. + - **SSL/TLS**: if you enable SSL/TLS, you need to upload the certificates of the data source, including any of the following: - only the CA certificate - the client certificate and client key - - the CA certificate, client certificate and client key + - the CA certificate, client certificate, and client key 3. Fill in the target connection profile. @@ -162,9 +194,18 @@ On the **Create Migration Job** page, configure the source and target connection 5. Take action according to the message you see: + + - If you use Public IP or VPC Peering, you need to add the Data Migration service's IP addresses to the IP Access List of your source database and firewall (if any). - If you use AWS Private Link, you are prompted to accept the endpoint request. Go to the [AWS VPC console](https://us-west-2.console.aws.amazon.com/vpc/home), and click **Endpoint services** to accept the endpoint request. + + + + If you use Public IP, you need to add the Data Migration service's IP addresses to the IP Access List of your source database and firewall (if any). + + + ## Step 3: Choose migration job type To migrate only the incremental data of the source database to TiDB Cloud, select **Incremental data migration** and do not select **Existing data migration**. In this way, the migration job only migrates ongoing changes of the source database to TiDB Cloud. @@ -215,7 +256,7 @@ If there is data in the target database, make sure the binlog position is correc On the **Precheck** page, you can view the precheck results. If the precheck fails, you need to operate according to **Failed** or **Warning** details, and then click **Check again** to recheck. 
-If there are only warnings on some check items, you can evaluate the risk and consider whether to ignore the warnings. If all warnings are ignored, the migration job will automatically go on to the next step. +If there are only warnings on some check items, you can evaluate the risk and consider whether to ignore the warnings. If all warnings are ignored, the migration job will automatically proceed to the next step. For more information about errors and solutions, see [Precheck errors and solutions](/tidb-cloud/tidb-cloud-dm-precheck-and-troubleshooting.md#precheck-errors-and-solutions). @@ -223,6 +264,24 @@ For more information about precheck items, see [Migration Task Precheck](https:/ If all check items show **Pass**, click **Next**. + + +## Step 6: View the migration progress + +After the migration job is created, you can view the migration progress on the **Migration Job Details** page. The migration progress is displayed in the **Stage and Status** area. + +You can pause or delete a migration job when it is running. + +If a migration job has failed, you can resume it after solving the problem. + +You can delete a migration job in any status. + +If you encounter any problems during the migration, see [Migration errors and solutions](/tidb-cloud/tidb-cloud-dm-precheck-and-troubleshooting.md#migration-errors-and-solutions). + + + + + ## Step 6: Choose a spec and start migration On the **Choose a Spec and Start Migration** page, select an appropriate migration specification according to your performance requirements. For more information about the specifications, see [Specifications for Data Migration](/tidb-cloud/tidb-cloud-billing-dm.md#specifications-for-data-migration). @@ -240,3 +299,5 @@ If a migration job has failed, you can resume it after solving the problem. You can delete a migration job in any status. 
If you encounter any problems during the migration, see [Migration errors and solutions](/tidb-cloud/tidb-cloud-dm-precheck-and-troubleshooting.md#migration-errors-and-solutions). + + \ No newline at end of file diff --git a/tidb-cloud/migrate-metrics-integrations.md b/tidb-cloud/migrate-metrics-integrations.md new file mode 100644 index 0000000000000..48bced6a312c7 --- /dev/null +++ b/tidb-cloud/migrate-metrics-integrations.md @@ -0,0 +1,42 @@ +--- +title: Migrate Datadog and New Relic Integrations +summary: Learn how to migrate from the legacy project-level metrics integration to the new cluster-level integration for Datadog and New Relic. +--- + +# Migrate Datadog and New Relic Integrations + +TiDB Cloud now manages Datadog and New Relic integrations at the cluster level, offering more granular control and configuration. The legacy project-level Datadog and New Relic integrations will be deprecated on October 31, 2025. If your organization is still using these legacy integrations, follow this guide to migrate to the new cluster-level integrations and minimize disruptions to your metrics-related services. + +## Prerequisites + +- To set up third-party metrics integration for TiDB Cloud, you must have the `Organization Owner` or `Project Owner` access in TiDB Cloud. + +## Migration steps + +### Step 1. Delete the legacy project-level Datadog and New Relic integrations + +1. In the [TiDB Cloud console](https://tidbcloud.com/), switch to the target project using the combo box in the upper-left corner. + +2. In the left navigation panel, click **Project Settings** > **Integrations**. + +3. On the **Integrations** page, click **Delete** next to **Integration to Datadog** or **Integration to New Relic**. + +4. In the displayed dialog, type `Delete` to confirm the removal of the legacy integration. + +### Step 2. 
Create the new Datadog or New Relic integration for each cluster + +Repeat the following steps for each [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster in the project. + +1. In the [TiDB Cloud console](https://tidbcloud.com/), switch to the target cluster using the combo box in the upper-left corner. + +2. In the left navigation panel, click **Settings** > **Integrations**. + +3. On the **Integrations** page, create new integrations as needed. For more information, see [Integrate TiDB Cloud with Datadog](/tidb-cloud/monitor-datadog-integration.md) and [Integrate TiDB Cloud with New Relic](/tidb-cloud/monitor-new-relic-integration.md). + +## Impact statement + +Deleting the project-level integration immediately stops all clusters in the project from sending metrics. This results in a temporary loss of downstream data and interrupts integration-related services (such as monitoring and alerts) until you create new cluster-level integrations. + +## Contact support + +For assistance, contact TiDB Cloud support at support@pingcap.com or reach out to your Technical Account Manager (TAM). diff --git a/tidb-cloud/migrate-prometheus-metrics-integrations.md b/tidb-cloud/migrate-prometheus-metrics-integrations.md new file mode 100644 index 0000000000000..70e3f6d2ad1b6 --- /dev/null +++ b/tidb-cloud/migrate-prometheus-metrics-integrations.md @@ -0,0 +1,44 @@ +--- +title: Migrate Prometheus Integrations +summary: Learn how to migrate from the legacy project-level Prometheus integration to the new cluster-level Prometheus integrations. +--- + +# Migrate Prometheus Integrations + +TiDB Cloud now manages [Prometheus integrations](/tidb-cloud/monitor-prometheus-and-grafana-integration.md) at the cluster level, offering more granular control and configuration. The legacy project-level Prometheus integrations (Beta) will be deprecated on January 9, 2026. 
If your organization is still using these legacy integrations, follow this guide to migrate them to the new cluster-level Prometheus integrations to minimize disruptions to your metrics-related services. + +## Prerequisites + +- To set up third-party metrics integration for TiDB Cloud, you must have the `Organization Owner` or `Project Owner` access in TiDB Cloud. + +## Migration steps + +Do the following to migrate the Prometheus integration. + +### Step 1. Delete the legacy project-level Prometheus integrations (Beta) + +1. In the [TiDB Cloud console](https://tidbcloud.com/), switch to the target project using the combo box in the upper-left corner. + +2. In the left navigation panel, click **Project Settings** > **Integrations**. + +3. On the **Integrations** > **Integration to Prometheus (BETA)** module, select **Scrape_config Files** and click **Delete**. + +4. In the displayed dialog, type `Delete` to confirm the removal of the legacy integration. + +### Step 2. Create a new cluster-level Prometheus integration for each cluster + +Repeat the following steps for each [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster in the project. + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target cluster to go to its overview page. + +2. In the left navigation panel, click **Settings** > **Integrations**. + +3. On the **Integrations** page, create a new Prometheus integration. For more information, see [Integrate TiDB Cloud with Prometheus and Grafana](/tidb-cloud/monitor-prometheus-and-grafana-integration.md). + +## Impact of deleting the project-level Prometheus integration (Beta) + +Deleting the project-level Prometheus integration (Beta) immediately stops all clusters in the project from exposing metrics to the Prometheus endpoint. 
This results in a temporary loss of downstream data and interrupts integration-related services (such as monitoring and alerts) until you configure new cluster-level Prometheus integrations. + +## Contact support + +For assistance, contact TiDB Cloud support at support@pingcap.com or reach out to your Technical Account Manager (TAM). diff --git a/tidb-cloud/migrate-sql-shards.md b/tidb-cloud/migrate-sql-shards.md index dfe94cff880d7..5eec10e3e00f7 100644 --- a/tidb-cloud/migrate-sql-shards.md +++ b/tidb-cloud/migrate-sql-shards.md @@ -21,7 +21,7 @@ The environment information of the upstream cluster is as follows: - MySQL instance1: - schema `store_01` and table `[sale_01, sale_02]` - schema `store_02` and table `[sale_01, sale_02]` -- MySQL instance 2: +- MySQL instance2: - schema `store_01`and table `[sale_01, sale_02]` - schema `store_02`and table `[sale_01, sale_02]` - Table structure: @@ -189,7 +189,7 @@ After configuring the Amazon S3 access, you can perform the data import task in 3. On the **Import Data from Amazon S3** page, fill in the following information: - - **Import File Count**: for TiDB Cloud Serverless, select **Multiple files**. This field is not available in TiDB Cloud Dedicated. + - **Import File Count**: for {{{ .starter }}} or {{{ .essential }}}, select **Multiple files**. This field is not available in TiDB Cloud Dedicated. - **Included Schema Files**: select **No**. - **Data Format**: select **CSV**. - **Folder URI**: fill in the bucket URI of your source data. You can use the second-level directory corresponding to tables, `s3://dumpling-s3/store/sales/` in this example, so that TiDB Cloud can import and merge the data in all MySQL instances into `store.sales` in one go. 
diff --git a/tidb-cloud/monitor-alert-email.md b/tidb-cloud/monitor-alert-email.md index 1d51d53a4e741..c12cd1257408a 100644 --- a/tidb-cloud/monitor-alert-email.md +++ b/tidb-cloud/monitor-alert-email.md @@ -5,11 +5,11 @@ summary: Learn how to monitor your TiDB cluster by getting alert notifications v # Subscribe via Email -TiDB Cloud provides you with an easy way to subscribe to alert notifications via email, [Slack](/tidb-cloud/monitor-alert-slack.md), and [Zoom](/tidb-cloud/monitor-alert-zoom.md). This document describes how to subscribe to alert notifications via email. +TiDB Cloud provides you with an easy way to subscribe to alert notifications via email, [Slack](/tidb-cloud/monitor-alert-slack.md), [Zoom](/tidb-cloud/monitor-alert-zoom.md), [Flashduty](/tidb-cloud/monitor-alert-flashduty.md), and [PagerDuty](/tidb-cloud/monitor-alert-pagerduty.md). This document describes how to subscribe to alert notifications via email. > **Note:** > -> Currently, alert subscription is only available for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. +> Currently, alert subscription is available for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. ## Prerequisites @@ -17,12 +17,14 @@ TiDB Cloud provides you with an easy way to subscribe to alert notifications via ## Subscribe to alert notifications +To receive alert notifications, take the following steps. The steps vary by cluster plan. + + + > **Tip:** > > The alert subscription is for all alerts in the current project. If you have multiple clusters in the project, you just need to subscribe once. -To get alert notifications of clusters in your project, take the following steps: - 1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. 2. 
In the left navigation pane, click **Project Settings** > **Alert Subscription**. 3. On the **Alert Subscription** page, click **Add Subscriber** in the upper-right corner. @@ -34,16 +36,51 @@ To get alert notifications of clusters in your project, take the following steps - If the test fails, an error message is displayed. Follow the message to troubleshoot the issue and then retry the connection. 7. Click **Save** to complete the subscription. + + + + + +> **Tip:** +> +> The alert subscription is for all alerts in the current cluster. If you have multiple clusters, you need to subscribe to each cluster individually. + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, click **Add Subscriber** in the upper-right corner. +4. Select **Email** from the **Subscriber Type** drop-down list. +5. Enter your email address. +6. Click **Test Connection**. + + - If the test succeeds, the **Save** button is displayed. + - If the test fails, an error message is displayed. Follow the message to troubleshoot the issue and then retry the connection. + +7. Click **Save** to complete the subscription. + + -Alternatively, you can also click **Subscribe** in the upper-right corner of the [**Alert**](/tidb-cloud/monitor-built-in-alerting.md#view-alerts) page of the cluster. You will be directed to the **Alert Subscriber** page. +Alternatively, you can also click **Subscribe** in the upper-right corner of the [**Alert**](/tidb-cloud/monitor-built-in-alerting.md#view-alerts) page of the cluster. You will be directed to the **Alert Subscription** page. If an alert condition remains unchanged, the alert sends email notifications every three hours. 
## Unsubscribe from alert notifications -If you no longer want to receive alert notifications of clusters in your project, take the following steps: +If you no longer want to receive alert notifications, please take the following steps. The steps vary by cluster plan. + + 1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. 2. In the left navigation pane, click **Project Settings** > **Alert Subscription**. 3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. -4. Click **Unsubscribe** to confirm the unsubscription. \ No newline at end of file +4. Click **Unsubscribe** to confirm the unsubscription. + + + + + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. +4. Click **Unsubscribe** to confirm the unsubscription. + + diff --git a/tidb-cloud/monitor-alert-flashduty.md b/tidb-cloud/monitor-alert-flashduty.md new file mode 100644 index 0000000000000..7792dcb6e39dc --- /dev/null +++ b/tidb-cloud/monitor-alert-flashduty.md @@ -0,0 +1,97 @@ +--- +title: Subscribe via Flashduty +summary: Learn how to monitor your TiDB cluster by getting alert notifications via Flashduty. +--- + +# Subscribe via Flashduty + +TiDB Cloud provides you with an easy way to subscribe to alert notifications via Flashduty, [Slack](/tidb-cloud/monitor-alert-slack.md), [email](/tidb-cloud/monitor-alert-email.md), [Zoom](/tidb-cloud/monitor-alert-zoom.md), and [PagerDuty](/tidb-cloud/monitor-alert-pagerduty.md). This document describes how to subscribe to alert notifications via Flashduty. 
+ +> **Note:** +> +> Currently, alert subscription is available for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. + +## Prerequisites + +- The subscribing via Flashduty feature is only available for organizations that subscribe to the **Enterprise** or **Premium** [support plan](/tidb-cloud/connected-care-overview.md). + +- To subscribe to alert notifications of TiDB Cloud, you must have the `Organization Owner` access to your organization or `Project Owner` access to the target project in TiDB Cloud. + +## Subscribe to alert notifications + +To receive alert notifications of clusters, take the following steps: + +### Step 1. Generate a Flashduty webhook URL + +1. Generate a webhook URL by following the instructions in [Flashduty Prometheus Integration](https://docs.flashcat.cloud/en/flashduty/prometheus-integration-guide). +2. Save the generated webhook URL to use in the next step. + +### Step 2. Subscribe from TiDB Cloud + +Alert notification subscriptions vary by cluster plan. + + + +> **Tip:** +> +> The alert subscription is for all alerts in the current project. If you have multiple clusters in the project, you just need to subscribe once. + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. +2. In the left navigation pane, click **Project Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, click **Add Subscriber** in the upper-right corner. +4. Select **Flashduty** from the **Subscriber Type** drop-down list. +5. Enter a name in the **Name** field and your Flashduty webhook URL in the **Webhook URL** field. +6. Click **Test Connection**. + + - If the test succeeds, the **Save** button is displayed. + - If the test fails, an error message is displayed. Follow the message to troubleshoot the issue and retry the connection. + +7. 
Click **Save** to complete the subscription. + + + + + +> **Tip:** +> +> The alert subscription is for all alerts in the current cluster. If you have multiple clusters, you need to subscribe to each cluster individually. + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, click **Add Subscriber** in the upper-right corner. +4. Select **Flashduty** from the **Subscriber Type** drop-down list. +5. Enter a name in the **Name** field and your Flashduty webhook URL in the **Webhook URL** field. +6. Click **Test Connection**. + + - If the test succeeds, the **Save** button is displayed. + - If the test fails, an error message is displayed. Follow the message to troubleshoot the issue and retry the connection. + +7. Click **Save** to complete the subscription. + + + +Alternatively, you can also click **Subscribe** in the upper-right corner of the **Alert** page of the cluster. You will be directed to the **Alert Subscription** page. + +If an alert condition remains unchanged, the alert sends notifications every three hours. + +## Unsubscribe from alert notifications + +If you no longer want to receive alert notifications, take the following steps. The steps vary by cluster plan. + + + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. +2. In the left navigation pane, click **Project Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. +4. Click **Unsubscribe** to confirm the unsubscription. + + + + + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. 
In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. +4. Click **Unsubscribe** to confirm the unsubscription. + + diff --git a/tidb-cloud/monitor-alert-pagerduty.md b/tidb-cloud/monitor-alert-pagerduty.md new file mode 100644 index 0000000000000..4cdb3628c25eb --- /dev/null +++ b/tidb-cloud/monitor-alert-pagerduty.md @@ -0,0 +1,97 @@ +--- +title: Subscribe via PagerDuty +summary: Learn how to monitor your TiDB cluster by getting alert notifications via PagerDuty. +--- + +# Subscribe via PagerDuty + +TiDB Cloud provides you with an easy way to subscribe to alert notifications via PagerDuty, [Slack](/tidb-cloud/monitor-alert-slack.md), [email](/tidb-cloud/monitor-alert-email.md), [Zoom](/tidb-cloud/monitor-alert-zoom.md), and [Flashduty](/tidb-cloud/monitor-alert-flashduty.md). This document describes how to subscribe to alert notifications via PagerDuty. + +> **Note:** +> +> Currently, alert subscription is available for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. + +## Prerequisites + +- The subscribing via PagerDuty feature is only available for organizations that subscribe to the **Enterprise** or **Premium** [support plan](/tidb-cloud/connected-care-overview.md). + +- To subscribe to alert notifications of TiDB Cloud, you must have the `Organization Owner` access to your organization or `Project Owner` access to the target project in TiDB Cloud. + +## Subscribe to alert notifications + +To receive alert notifications, take the following steps: + +### Step 1. Generate a PagerDuty integration key + +1. 
Generate an integration key of type **Events API v2** by following the instructions in the [PagerDuty Events API v2 Overview](https://developer.pagerduty.com/docs/events-api-v2-overview#getting-started). +2. Save the generated integration key to use in the next step. + +### Step 2. Subscribe from TiDB Cloud + +Alert notification subscriptions vary by cluster plan. + + + +> **Tip:** +> +> The alert subscription is for all alerts in the current project. If you have multiple clusters in the project, you just need to subscribe once. + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. +2. In the left navigation pane, click **Project Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, click **Add Subscriber** in the upper-right corner. +4. Select **PagerDuty** from the **Subscriber Type** drop-down list. +5. Enter a name in the **Name** field and your PagerDuty integration key in the **Integration Key** field. +6. Click **Test Connection**. + + - If the test succeeds, the **Save** button is displayed. + - If the test fails, an error message is displayed. Follow the message to troubleshoot the issue and retry the connection. + +7. Click **Save** to complete the subscription. + + + + + +> **Tip:** +> +> The alert subscription is for all alerts in the current cluster. If you have multiple clusters, you need to subscribe to each cluster individually. + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, click **Add Subscriber** in the upper-right corner. +4. Select **PagerDuty** from the **Subscriber Type** drop-down list. +5. Enter a name in the **Name** field and your PagerDuty integration key in the **Integration Key** field. +6. Click **Test Connection**. 
+ + - If the test succeeds, the **Save** button is displayed. + - If the test fails, an error message is displayed. Follow the message to troubleshoot the issue and retry the connection. + +7. Click **Save** to complete the subscription. + + + +Alternatively, you can also click **Subscribe** in the upper-right corner of the **Alert** page of the cluster. You will be directed to the **Alert Subscription** page. + +If an alert condition remains unchanged, the alert sends notifications every three hours. + +## Unsubscribe from alert notifications + +If you no longer want to receive alert notifications, take the following steps. The steps vary by cluster plan. + + + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. +2. In the left navigation pane, click **Project Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. +4. Click **Unsubscribe** to confirm the unsubscription. + + + + + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. +4. Click **Unsubscribe** to confirm the unsubscription. 
+ + diff --git a/tidb-cloud/monitor-alert-slack.md b/tidb-cloud/monitor-alert-slack.md index a9c109688465a..b2c748f26d0a0 100644 --- a/tidb-cloud/monitor-alert-slack.md +++ b/tidb-cloud/monitor-alert-slack.md @@ -5,15 +5,11 @@ summary: Learn how to monitor your TiDB cluster by getting alert notifications v # Subscribe via Slack -TiDB Cloud provides you with an easy way to subscribe to alert notifications via [Slack](https://slack.com/), [email](/tidb-cloud/monitor-alert-email.md), and [Zoom](/tidb-cloud/monitor-alert-zoom.md). This document describes how to subscribe to alert notifications via Slack. - -The following screenshot shows two example alerts. - -![TiDB Cloud Alerts in Slack](/media/tidb-cloud/tidb-cloud-alert-subscription.png) +TiDB Cloud provides you with an easy way to subscribe to alert notifications via Slack, [email](/tidb-cloud/monitor-alert-email.md), [Zoom](/tidb-cloud/monitor-alert-zoom.md), [Flashduty](/tidb-cloud/monitor-alert-flashduty.md), and [PagerDuty](/tidb-cloud/monitor-alert-pagerduty.md). This document describes how to subscribe to alert notifications via Slack. > **Note:** > -> Currently, alert subscription is only available for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. +> Currently, alert subscription is available for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. ## Prerequisites @@ -35,6 +31,10 @@ You can see a new entry under the **Webhook URLs for Your Workspace** section in ### Step 2. Subscribe from TiDB Cloud +Alert notification subscriptions vary by cluster plan. + + + > **Tip:** > > The alert subscription is for all alerts in the current project. If you have multiple clusters in the project, you just need to subscribe once. @@ -51,15 +51,50 @@ You can see a new entry under the **Webhook URLs for Your Workspace** section in 7. 
Click **Save** to complete the subscription. -Alternatively, you can also click **Subscribe** in the upper-right corner of the **Alert** page of the cluster. You will be directed to the **Alert Subscriber** page. + + + + +> **Tip:** +> +> The alert subscription is for all alerts in the current cluster. If you have multiple clusters, you need to subscribe to each cluster individually. + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, click **Add Subscriber** in the upper-right corner. +4. Select **Slack** from the **Subscriber Type** drop-down list. +5. Enter a name in the **Name** field and your Slack webhook URL in the **URL** field. +6. Click **Test Connection**. + + - If the test succeeds, the **Save** button is displayed. + - If the test fails, an error message is displayed. Follow the message to troubleshoot the issue and retry the connection. + +7. Click **Save** to complete the subscription. + + + +Alternatively, you can also click **Subscribe** in the upper-right corner of the **Alert** page of the cluster. You will be directed to the **Alert Subscription** page. If an alert condition remains unchanged, the alert sends notifications every three hours. ## Unsubscribe from alert notifications -If you no longer want to receive alert notifications of clusters in your project, take the following steps: +If you no longer want to receive alert notifications, take the following steps. The steps vary by cluster plan. + + 1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. 2. In the left navigation pane, click **Project Settings** > **Alert Subscription**. 3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. 
-4. Click **Unsubscribe** to confirm the unsubscription. \ No newline at end of file +4. Click **Unsubscribe** to confirm the unsubscription. + + + + + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. +4. Click **Unsubscribe** to confirm the unsubscription. + + diff --git a/tidb-cloud/monitor-alert-zoom.md b/tidb-cloud/monitor-alert-zoom.md index b01ce188bad8d..46966a1274c46 100644 --- a/tidb-cloud/monitor-alert-zoom.md +++ b/tidb-cloud/monitor-alert-zoom.md @@ -5,11 +5,11 @@ summary: Learn how to monitor your TiDB cluster by getting alert notifications v # Subscribe via Zoom -TiDB Cloud provides you with an easy way to subscribe to alert notifications via [Zoom](https://www.zoom.com/), [Slack](/tidb-cloud/monitor-alert-slack.md), and [email](/tidb-cloud/monitor-alert-email.md). This document describes how to subscribe to alert notifications via Zoom. +TiDB Cloud provides you with an easy way to subscribe to alert notifications via Zoom, [Slack](/tidb-cloud/monitor-alert-slack.md), [email](/tidb-cloud/monitor-alert-email.md), [Flashduty](/tidb-cloud/monitor-alert-flashduty.md), and [PagerDuty](/tidb-cloud/monitor-alert-pagerduty.md). This document describes how to subscribe to alert notifications via Zoom. > **Note:** > -> Currently, alert subscription is only available for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. +> Currently, alert subscription is available for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. 
## Prerequisites @@ -45,6 +45,10 @@ TiDB Cloud provides you with an easy way to subscribe to alert notifications via ### Step 3. Subscribe from TiDB Cloud +Alert notification subscriptions vary by cluster plan. + + + > **Tip:** > > The alert subscription is for all alerts in the current project. If you have multiple clusters in the project, you just need to subscribe once. @@ -61,15 +65,50 @@ TiDB Cloud provides you with an easy way to subscribe to alert notifications via 7. Click **Save** to complete the subscription. -Alternatively, you can also click **Subscribe** in the upper-right corner of the **Alert** page of the cluster. You will be directed to the **Alert Subscriber** page. + + + + +> **Tip:** +> +> The alert subscription is for all alerts in the current cluster. If you have multiple clusters, you need to subscribe to each cluster individually. + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, click **Add Subscriber** in the upper-right corner. +4. Select **Zoom** from the **Subscriber Type** drop-down list. +5. Enter a name in the **Name** field, your Zoom webhook URL in the **URL** field, and the verification token in the **Token** field. +6. Click **Test Connection**. + + - If the test succeeds, the **Save** button is displayed. + - If the test fails, an error message is displayed. Follow the message to troubleshoot the issue and retry the connection. + +7. Click **Save** to complete the subscription. + + + +Alternatively, you can also click **Subscribe** in the upper-right corner of the **Alert** page of the cluster. You will be directed to the **Alert Subscription** page. If an alert condition remains unchanged, the alert sends notifications every three hours. 
## Unsubscribe from alert notifications -If you no longer want to receive alert notifications of clusters in your project, take the following steps: +If you no longer want to receive alert notifications, take the following steps. The steps vary by cluster plan. + + 1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. 2. In the left navigation pane, click **Project Settings** > **Alert Subscription**. 3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. 4. Click **Unsubscribe** to confirm the unsubscription. + + + + + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target cluster using the combo box in the upper-left corner. +2. In the left navigation pane, click **Settings** > **Alert Subscription**. +3. On the **Alert Subscription** page, locate the row of your target subscriber to be deleted, and then click **...** > **Unsubscribe**. +4. Click **Unsubscribe** to confirm the unsubscription. + + diff --git a/tidb-cloud/monitor-built-in-alerting.md b/tidb-cloud/monitor-built-in-alerting.md index 8892caffc11c8..29c04d1eb3192 100644 --- a/tidb-cloud/monitor-built-in-alerting.md +++ b/tidb-cloud/monitor-built-in-alerting.md @@ -11,7 +11,7 @@ This document describes how to do these operations and provides the TiDB Cloud b > **Note:** > -> Currently, the alert feature is only available for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. +> Currently, alert subscription is available for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. 
## View alerts @@ -47,6 +47,8 @@ In TiDB Cloud, you can subscribe to alert notifications via one of the following - [Email](/tidb-cloud/monitor-alert-email.md) - [Slack](/tidb-cloud/monitor-alert-slack.md) - [Zoom](/tidb-cloud/monitor-alert-zoom.md) +- [Flashduty](/tidb-cloud/monitor-alert-flashduty.md) +- [PagerDuty](/tidb-cloud/monitor-alert-pagerduty.md) ## TiDB Cloud built-in alert conditions @@ -56,7 +58,11 @@ The following table provides the TiDB Cloud built-in alert conditions and the co > > - While these alert conditions do not necessarily mean there is a problem, they are often early warning indicators of emerging issues. Therefore, taking the recommended action is advised. > - You can edit the thresholds of the alerts on the TiDB Cloud console. -> - Some alert rules are disabled by default. You can enable them as needed. +> - Some alert rules are disabled by default. You can enable them as needed. + +TiDB Cloud provides different alert rules for each cluster plan, based on the features available in that plan. + + ### Resource usage alerts @@ -85,10 +91,34 @@ The following table provides the TiDB Cloud built-in alert conditions and the co | Data migration job has been paused for more than 6 hours during incremental migration | Data migration job has been paused for more than 6 hours during data incremental migration. The binlog in the upstream database might be purged (depending on your database binlog purge strategy) and might cause incremental migration to fail. See [Troubleshoot data migration](/tidb-cloud/tidb-cloud-dm-precheck-and-troubleshooting.md#migration-errors-and-solutions) for help. | | Replication lag is larger than 10 minutes and still increasing for more than 20 minutes | See [Troubleshoot data migration](/tidb-cloud/tidb-cloud-dm-precheck-and-troubleshooting.md#migration-errors-and-solutions) for help. 
| -### Changefeed alerts +### Changefeed alerts for {{{ .dedicated }}} | Condition | Recommended Action | |:--------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| The changefeed latency exceeds 600 seconds. | Check the changefeed status on the **Changefeed** page and **Changefeed Detail** page of the TiDB Cloud console, where you can find some error messages to help diagnose this issue.
Possible reasons that can trigger this alert include:
  • The overall traffic in the upstream has increased, causing the existing changefeed specification to be insufficient to handle it. If the traffic increase is temporary, the changefeed latency will automatically recover after the traffic returns to normal. If the traffic increase is continuous, you need to scale up the changefeed.
  • The downstream or network is abnormal. In this case, resolve this abnormality first.
  • Tables lack indexes if the downstream is RDS, which might cause low write performance and high latency. In this case, you need to add the necessary indexes to the upstream or downstream.
If the problem cannot be fixed from your side, you can contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for further assistance. | -| The changefeed status is `FAILED`. | Check the changefeed status on the **Changefeed** page and **Changefeed Detail** page of the TiDB Cloud console, where you can find some error messages to help diagnose this issue.
If the problem cannot be fixed from your side, you can contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for further assistance. | -| The changefeed status is `WARNING`. | Check the changefeed status on the **Changefeed** page and **Changefeed Detail** page of the TiDB Cloud console, where you can find some error messages to help diagnose this issue.
If the problem cannot be fixed from your side, you can contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for further assistance. | +| The changefeed latency exceeds 600 seconds. | Check the changefeed status on the **Changefeed** page and **Changefeed Detail** page of the TiDB Cloud console, where you can find some error messages to help diagnose this issue.
Possible reasons that can trigger this alert include:
  • The overall traffic in the upstream has increased, causing the existing changefeed specification to be insufficient to handle it. If the traffic increase is temporary, the changefeed latency will automatically recover after the traffic returns to normal. If the traffic increase is continuous, you need to scale up the changefeed.
  • The downstream or network is abnormal. In this case, resolve this abnormality first.
  • Tables lack indexes if the downstream is RDS, which might cause low write performance and high latency. In this case, you need to add the necessary indexes to the upstream or downstream.
If the problem cannot be fixed from your side, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for further assistance. | +| The changefeed status is `FAILED`. | Check the changefeed status on the **Changefeed** page and **Changefeed Detail** page of the TiDB Cloud console, where you can find some error messages to help diagnose this issue.
If the problem cannot be fixed from your side, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for further assistance. | +| The changefeed status is `WARNING`. | Check the changefeed status on the **Changefeed** page and **Changefeed Detail** page of the TiDB Cloud console, where you can find some error messages to help diagnose this issue.<br/>
If the problem cannot be fixed from your side, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for further assistance.| + +
+ + + +### Performance overview alerts + +| Condition | Recommended Action | +|:--- |:--- | +| Request units per second (RU/s) exceed 80% of the maximum RCU |
  1. Review RU metrics to determine whether the increase is gradual or a sudden spike.
  2. If the increase is gradual, check whether query duration has increased. If so, the current maximum RCU might be insufficient.
  3. Scale capacity by manually increasing the maximum RCU in the TiDB Cloud console.

If you cannot resolve the issue, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md).| +| QPS drops by 80% |
  1. Check whether the drop is caused by increasing query latency.
  2. Verify that your application is operating normally. If the drop is intentional, ignore this alert. If the drop is unintentional and you cannot identify the root cause, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) immediately.
| +| Query P99 latency exceeds 200 ms |
  1. Investigate slow queries: go to the **Slow Query** page and filter by a recent time range to identify newly introduced or slower-running queries.<br/>
  2. Review recent changes, such as application deployments, schema changes, or data import jobs, that might have affected traffic patterns.

If you cannot identify the root cause, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) immediately.| +| Query P95 latency exceeds 200 ms |
  1. Investigate slow queries: go to the Slow Query page and filter by a recent time range to identify newly introduced or slower-running queries.
  2. Review recent changes, such as application deployments, schema changes, or data import jobs, that might have affected traffic patterns.

If you cannot identify the root cause, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) immediately.| +| Request error rate exceeds 10% | Review recent errors and the overall statement execution status for the cluster.| + +### Changefeed alerts for {{{ .essential }}} + +| Condition | Recommended Action | +|:--------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| The changefeed latency exceeds 600 seconds. | Check the changefeed status on the **Changefeed** page and **Changefeed Detail** page of the TiDB Cloud console, where you can find some error messages to help diagnose this issue.
Possible reasons that can trigger this alert include:
  • The overall traffic in the upstream has increased, causing the existing changefeed specification to be insufficient to handle it. If the traffic increase is temporary, the changefeed latency will automatically recover after the traffic returns to normal. If the traffic increase is continuous, you need to scale up the changefeed.
  • The downstream or network is abnormal. In this case, resolve this abnormality first.
  • Tables lack indexes if the downstream is RDS, which might cause low write performance and high latency. In this case, you need to add the necessary indexes to the upstream or downstream.
If the problem cannot be fixed from your side, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for further assistance. | +| The changefeed status is `FAILED`. | Check the changefeed status on the **Changefeed** page and **Changefeed Detail** page of the TiDB Cloud console, where you can find some error messages to help diagnose this issue.
If the problem cannot be fixed from your side, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for further assistance.| +| The changefeed status is `WARNING`. | Check the changefeed status on the **Changefeed** page and **Changefeed Detail** page of the TiDB Cloud console, where you can find some error messages to help diagnose this issue.
If the problem cannot be fixed from your side, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) for further assistance.| + +
diff --git a/tidb-cloud/monitor-datadog-integration.md b/tidb-cloud/monitor-datadog-integration.md index 31175f813527d..47ecec88888af 100644 --- a/tidb-cloud/monitor-datadog-integration.md +++ b/tidb-cloud/monitor-datadog-integration.md @@ -1,11 +1,18 @@ --- -title: Integrate TiDB Cloud with Datadog (Beta) +title: Integrate TiDB Cloud with Datadog summary: Learn how to monitor your TiDB cluster with the Datadog integration. --- -# Integrate TiDB Cloud with Datadog (Beta) +# Integrate TiDB Cloud with Datadog -TiDB Cloud supports Datadog integration (beta). You can configure TiDB Cloud to send metric data about your TiDB clusters to [Datadog](https://www.datadoghq.com/). After that, you can view these metrics in your Datadog dashboards directly. +TiDB Cloud supports Datadog integration. You can configure TiDB Cloud to send metrics about your TiDB clusters to [Datadog](https://www.datadoghq.com/). After that, you can view these metrics in your Datadog dashboards directly. + +## Datadog integration version + +TiDB Cloud has supported the project-level Datadog integration (Beta) since March 4, 2022. Starting from July 31, 2025, TiDB Cloud introduces the cluster-level Datadog integration (Preview). Starting from September 30, 2025, the cluster-level Datadog integration becomes generally available (GA). + +- **Cluster-level Datadog integration**: if no legacy project-level Datadog or New Relic integration remains undeleted within your organization by July 31, 2025, TiDB Cloud provides the cluster-level Datadog integration for your organization to experience the latest enhancements. +- **Legacy project-level Datadog integration (Beta)**: if at least one legacy project-level Datadog or New Relic integration remains undeleted within your organization by July 31, 2025, TiDB Cloud retains both existing and new integrations at the project level for your organization to avoid affecting current dashboards. 
Note that the legacy project-level Datadog integrations will be deprecated on October 31, 2025. If your organization is still using these legacy integrations, follow [Migrate Datadog and New Relic Integrations](/tidb-cloud/migrate-metrics-integrations.md) to migrate to the new cluster-level integrations and minimize disruptions to your metrics-related services. ## Prerequisites @@ -13,47 +20,82 @@ TiDB Cloud supports Datadog integration (beta). You can configure TiDB Cloud to If you do not have a Datadog account, sign up at [https://app.datadoghq.com/signup](https://app.datadoghq.com/signup). -- To edit third-party integration settings of TiDB Cloud, you must have the `Organization Owner` access to your organization or `Project Member` access to the target project in TiDB Cloud. +- To set up third-party metrics integration for TiDB Cloud, you must have the `Organization Owner` or `Project Owner` access in TiDB Cloud. To view the integration page or access configured dashboards via the provided links, you need at least the `Project Viewer` role to access the target clusters under your project in TiDB Cloud. ## Limitation -- You cannot use the Datadog integration in [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Datadog integrations are now only available for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. - Datadog integrations are not available when the cluster status is **CREATING**, **RESTORING**, **PAUSED**, or **RESUMING**. +- When a cluster with Datadog integration is deleted, its associated integration services are also removed. + ## Steps ### Step 1. Integrate with your Datadog API Key +Depending on your [Datadog integration version](#datadog-integration-version), the steps to access the integration page are different. + + +
+ +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target cluster to go to its overview page. +2. In the left navigation pane, click **Settings** > **Integrations**. +3. On the **Integrations** page, click **Integration to Datadog**. +4. Enter your Datadog API key and choose your Datadog site. +5. Click **Test Integration**. + + - If the test succeeds, the **Confirm** button is displayed. + - If the test fails, an error message is displayed. Follow the message for troubleshooting and retry the integration. + +6. Click **Confirm** to complete the integration. + +
+
+ 1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. 2. In the left navigation pane, click **Project Settings** > **Integrations**. 3. On the **Integrations** page, click **Integration to Datadog (BETA)**. -4. Enter your API key of Datadog and choose the site of Datadog. +4. Enter your Datadog API key and choose your Datadog site. 5. Click **Test Integration**. - - If the test successes, the **Confirm** button is displayed. + - If the test succeeds, the **Confirm** button is displayed. - If the test fails, an error message is displayed. Follow the message for troubleshooting and retry the integration. 6. Click **Confirm** to complete the integration. +
+
+ ### Step 2. Install TiDB Cloud Integration in Datadog +> **Note:** +> +> If you have already installed the TiDB Cloud integration in Datadog, you can skip the following steps in this section. The [**TiDB Cloud Dynamic Tracker**](https://app.datadoghq.com/dash/integration/32021/tidb-cloud-dynamic-tracker) or [**TiDB Cloud Cluster Overview**](https://app.datadoghq.com/dash/integration/30586/tidbcloud-cluster-overview) dashboard is automatically available in your Datadog [**Dashboard List**](https://app.datadoghq.com/dashboard/lists). + 1. Log in to [Datadog](https://app.datadoghq.com). -2. Go to the **TiDB Cloud Integration** page ([https://app.datadoghq.com/account/settings#integrations/tidb-cloud](https://app.datadoghq.com/account/settings#integrations/tidb-cloud)) in Datadog. -3. In the **Configuration** tab, click **Install Integration**. The [**TiDBCloud Cluster Overview**](https://app.datadoghq.com/dash/integration/30586/tidbcloud-cluster-overview) dashboard is displayed in your [**Dashboard List**](https://app.datadoghq.com/dashboard/lists). +2. Go to the [**TiDB Cloud Integration** page](https://app.datadoghq.com/account/settings#integrations/tidb-cloud) in Datadog. +3. On the **Configuration** tab, click **Install Integration**. + + - For cluster-level Datadog integration, the [**TiDB Cloud Dynamic Tracker**](https://app.datadoghq.com/dash/integration/32021/tidb-cloud-dynamic-tracker) dashboard appears in your [**Dashboard List**](https://app.datadoghq.com/dashboard/lists). + - For legacy project-level Datadog integration (Beta), the [**TiDB Cloud Cluster Overview**](https://app.datadoghq.com/dash/integration/30586/tidbcloud-cluster-overview) dashboard appears in your [**Dashboard List**](https://app.datadoghq.com/dashboard/lists). -## Pre-built dashboard +## View the pre-built dashboard -Click the **Dashboard** link in the **Datadog** card of the integrations. You can see the pre-built dashboard of your TiDB clusters. +1. 
In the [TiDB Cloud console](https://tidbcloud.com), navigate to the **Integrations** page. +2. Click the **Dashboard** link in the **Datadog** section. + + - For cluster-level Datadog integration, the **Dashboard** link opens the new dashboard, which includes the latest metrics introduced in the enhanced version. + - For legacy project-level Datadog integration (Beta), the **Dashboard** link opens the legacy dashboard, which does not include the latest metrics introduced in the cluster-level Datadog integration. ## Metrics available to Datadog -Datadog tracks the following metric data for your TiDB clusters. +Datadog tracks the following metrics for your TiDB clusters. | Metric name | Metric type | Labels | Description | | :------------| :---------- | :------| :----------------------------------------------------- | | tidb_cloud.db_database_time| gauge | sql_type: Select\|Insert\|...
cluster_name: ``
instance: tidb-0\|tidb-1…
component: `tidb` | The total time consumed by all SQL statements running in TiDB per second, including the CPU time of all processes and the non-idle waiting time. | -| tidb_cloud.db_query_per_second| gauge | type: Select\|Insert\|...
cluster_name: ``
instance: tidb-0\|tidb-1…
component: `tidb` | The number of SQL statements executed per second on all TiDB instances, which is counted according to SELECT, INSERT, UPDATE, and other types of statements. | +| tidb_cloud.db_query_per_second| gauge | type: Select\|Insert\|...
cluster_name: ``
instance: tidb-0\|tidb-1…
component: `tidb` | The number of SQL statements executed per second on all TiDB instances, counted by the statement type (`SELECT`, `INSERT`, or `UPDATE`). | | tidb_cloud.db_average_query_duration| gauge | sql_type: Select\|Insert\|...
cluster_name: ``
instance: tidb-0\|tidb-1…
component: `tidb` | The duration between the time that the client's network request is sent to TiDB and the time that the request is returned to the client after TiDB has executed it. | | tidb_cloud.db_failed_queries| gauge | type: executor:xxxx\|parser:xxxx\|...
cluster_name: ``
instance: tidb-0\|tidb-1…
component: `tidb` | The statistics of error types (such as syntax errors and primary key conflicts) according to the SQL execution errors that occur per second on each TiDB instance. | | tidb_cloud.db_total_connection| gauge | cluster_name: ``
instance: tidb-0\|tidb-1…
component: `tidb` | The number of current connections in your TiDB server. | @@ -68,3 +110,24 @@ Datadog tracks the following metric data for your TiDB clusters. | tidb_cloud.node_cpu_capacity_cores | gauge | cluster_name: ``
instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…
component: tidb\|tikv\|tiflash | The limit on CPU cores of TiDB/TiKV/TiFlash nodes. | | tidb_cloud.node_memory_used_bytes | gauge | cluster_name: ``
instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…
component: tidb\|tikv\|tiflash | The used memory of TiDB/TiKV/TiFlash nodes, in bytes. | | tidb_cloud.node_memory_capacity_bytes | gauge | cluster_name: ``
instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…
component: tidb\|tikv\|tiflash | The memory capacity of TiDB/TiKV/TiFlash nodes, in bytes. | + +For cluster-level Datadog integration, the following additional metrics are also available: + +| Metric name | Metric type | Labels | Description | +| :------------| :---------- | :------| :----------------------------------------------------- | +| tidb_cloud.node_storage_available_bytes | gauge | instance: `tidb-0\|tidb-1\|...`
component: `tikv\|tiflash`
cluster_name: `` | The available disk space in bytes for TiKV/TiFlash nodes. | +| tidb_cloud.node_disk_read_latency | gauge | instance: `tidb-0\|tidb-1\|...`
component: `tikv\|tiflash`
cluster_name: ``
`device`: `nvme.*\|dm.*` | The read latency in seconds per storage device. | +| tidb_cloud.node_disk_write_latency | gauge | instance: `tidb-0\|tidb-1\|...`
component: `tikv\|tiflash`
cluster_name: ``
`device`: `nvme.*\|dm.*` | The write latency in seconds per storage device. | +| tidb_cloud.db_kv_request_duration | gauge | instance: `tidb-0\|tidb-1\|...`
component: `tikv`
cluster_name: ``
`type`: `BatchGet\|Commit\|Prewrite\|...` | The duration in seconds of TiKV requests by type. | +| tidb_cloud.db_component_uptime | gauge | instance: `tidb-0\|tidb-1\|...`
component: `tidb\|tikv\|tiflash`
cluster_name: `` | The uptime (in seconds) of TiDB components. | +| tidb_cloud.cdc_changefeed_latency (AKA cdc_changefeed_checkpoint_ts_lag) | gauge | changefeed_id: ``
cluster_name: `` | The checkpoint timestamp lag (in seconds) for the changefeed owner. | +| tidb_cloud.cdc_changefeed_resolved_ts_lag | gauge | changefeed_id: ``
cluster_name: `` | The resolved timestamp lag (in seconds) for the changefeed owner. | +| tidb_cloud.cdc_changefeed_status | gauge | changefeed_id: ``
cluster_name: `` | Changefeed status:
`-1`: Unknown
`0`: Normal
`1`: Warning
`2`: Failed
`3`: Stopped
`4`: Finished
`6`: Warning
`7`: Other | +| tidb_cloud.resource_manager_resource_unit_read_request_unit | gauge | cluster_name: ``
resource_group: `` | The read request units (RUs) consumed by Resource Manager. | +| tidb_cloud.resource_manager_resource_unit_write_request_unit | gauge | cluster_name: ``
resource_group: `` | The write request units (RUs) consumed by Resource Manager. | +| tidb_cloud.dm_task_state | gauge | instance: `instance`
task: `task`
cluster_name: `` | Task State of Data Migration:
`0`: Invalid
`1`: New
`2`: Running
`3`: Paused
`4`: Stopped
`5`: Finished
`15`: Error | +| tidb_cloud.dm_syncer_replication_lag_bucket | gauge | instance: `instance`
cluster_name: `` | Replicate lag (bucket) of Data Migration. | +| tidb_cloud.dm_syncer_replication_lag_gauge | gauge | instance: `instance`
task: `task`
cluster_name: `` | Replicate lag (gauge) of Data Migration. | +| tidb_cloud.dm_relay_read_error_count | gauge | instance: `instance`
cluster_name: `` | The number of failures to read binlog from the master. | +| tidb_cloud.node_memory_available_bytes | gauge | cluster_name: ``
instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…
component: tidb\|tikv\|tiflash | The available memory of TiDB/TiKV/TiFlash nodes, in bytes. | +| tidb_cloud.cdc_changefeed_replica_rows | gauge | changefeed_id: ``
cluster_name: `` | The number of events that TiCDC nodes write to the downstream per second. | diff --git a/tidb-cloud/monitor-new-relic-integration.md b/tidb-cloud/monitor-new-relic-integration.md index 333097dca05d7..0c01d49cca382 100644 --- a/tidb-cloud/monitor-new-relic-integration.md +++ b/tidb-cloud/monitor-new-relic-integration.md @@ -1,28 +1,58 @@ --- -title: Integrate TiDB Cloud with New Relic (Beta) +title: Integrate TiDB Cloud with New Relic summary: Learn how to monitor your TiDB cluster with the New Relic integration. --- -# Integrate TiDB Cloud with New Relic (Beta) +# Integrate TiDB Cloud with New Relic -TiDB Cloud supports New Relic integration (beta). You can configure TiDB Cloud to send metric data of your TiDB clusters to [New Relic](https://newrelic.com/). After that, you can directly view these metrics in your New Relic dashboards. +TiDB Cloud supports New Relic integration. You can configure TiDB Cloud to send metrics of your TiDB clusters to [New Relic](https://newrelic.com/). After that, you can directly view these metrics in your New Relic dashboards. + +## New Relic integration version + +TiDB Cloud has supported the project-level New Relic integration (Beta) since April 11, 2023. Starting from July 31, 2025, TiDB Cloud introduces the cluster-level New Relic integration (Preview). Starting from September 30, 2025, the cluster-level New Relic integration becomes generally available (GA). + +- **Cluster-level New Relic integration**: if no legacy project-level Datadog or New Relic integration remains undeleted within your organization by July 31, 2025, TiDB Cloud provides the cluster-level New Relic integration for your organization to experience the latest enhancements. 
+- **Legacy project-level New Relic integration (Beta)**: if at least one legacy project-level Datadog or New Relic integration remains undeleted within your organization by July 31, 2025, TiDB Cloud retains both existing and new integrations at the project level for your organization to avoid affecting current dashboards. Note that the legacy project-level New Relic integrations will be deprecated on October 31, 2025. If your organization is still using these legacy integrations, follow [Migrate Datadog and New Relic Integrations](/tidb-cloud/migrate-metrics-integrations.md) to migrate to the new cluster-level integrations and minimize disruptions to your metrics-related services. ## Prerequisites -- To integrate TiDB Cloud with New Relic, you must have a New Relic account and a [New Relic API key](https://one.newrelic.com/admin-portal/api-keys/home?). New Relic grants you an API key when you first create a New Relic account. +- To integrate TiDB Cloud with New Relic, you must have a [New Relic](https://newrelic.com/) account and [create a New Relic API key](https://one.newrelic.com/admin-portal/api-keys/home?) of the `Ingest - License` type. If you do not have a New Relic account, sign up [here](https://newrelic.com/signup). -- To edit third-party integration settings for TiDB Cloud, you must have the **Organization Owner** access to your organization or **Project Member** access to the target project in TiDB Cloud. +- To set up third-party metrics integration for TiDB Cloud, you must have the `Organization Owner` or `Project Owner` access in TiDB Cloud. To view the integration page or access configured dashboards via the provided links, you need at least the `Project Viewer` role to access the target clusters under your project in TiDB Cloud. ## Limitation -You cannot use the New Relic integration in [TiDB Cloud Serverless clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless). 
+- New Relic integrations are now only available for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. + +- New Relic integrations are not available when the cluster status is **CREATING**, **RESTORING**, **PAUSED**, or **RESUMING**. + +- When a cluster with New Relic integration is deleted, its associated integration services are also removed. ## Steps ### Step 1. Integrate with your New Relic API Key +Depending on your [New Relic integration version](#new-relic-integration-version), the steps to access the integration page are different. + + +
+ +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target cluster to go to its overview page. +2. In the left navigation pane, click **Settings** > **Integrations**. +3. On the **Integrations** page, click **Integration to New Relic**. +4. Enter your API key of New Relic and choose the site of New Relic. +5. Click **Test Integration**. + + - If the test succeeds, the **Confirm** button is displayed. + - If the test fails, an error message is displayed. Follow the message for troubleshooting and retry the integration. + +6. Click **Confirm** to complete the integration. + +
+
+ 1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. 2. In the left navigation pane, click **Project Settings** > **Integrations**. 3. On the **Integrations** page, click **Integration to New Relic (BETA)**. @@ -34,19 +64,73 @@ You cannot use the New Relic integration in [TiDB Cloud Serverless clusters](/ti 6. Click **Confirm** to complete the integration. -### Step 2. Add TiDB Cloud Dashboard in New Relic +
+
+ +### Step 2. Add TiDB Cloud dashboard in New Relic + +Depending on your [New Relic integration version](#new-relic-integration-version), the steps are different. + + +
+ +A new TiDB Cloud dashboard will be available in New Relic after the pending [PR](https://github.com/newrelic/newrelic-quickstarts/pull/2681) is merged by New Relic. Before that, you can manually import the dashboard to New Relic by taking the following steps: + +1. Prepare the JSON file for the new dashboard. + + 1. Download the template JSON file [here](https://github.com/pingcap/diag/blob/integration/integration/dashboards/newrelic-dashboard.json). + 2. In the JSON file, add `"permissions": "PUBLIC_READ_WRITE"` to line 4 as follows: + + ```json + { + "name": "TiDB Cloud Dynamic Tracker", + "description": null, + "permissions": "PUBLIC_READ_WRITE", + ... + } + ``` + + 3. Add your New Relic account ID to all `"accountIds": []` fields in the JSON file. + + For example: + + ```json + "accountIds": [ + 1234567 + ], + ``` + + > **Note**: + > + > To avoid integration errors, make sure your account ID is added in all `"accountIds"` fields in the JSON file. + +2. Log in to [New Relic](https://one.newrelic.com/), click **Dashboards** in the left navigation bar, and then click **Import dashboard** in the upper-right corner. +3. In the displayed dialog, paste all the content in the prepared JSON file to the text area, and then click **Import dashboard**. + +
+
1. Log in to [New Relic](https://one.newrelic.com/). 2. Click **Add Data**, search for `TiDB Cloud`, and then go to the **TiDB Cloud Monitoring** page. Alternatively, you can click the [link](https://one.newrelic.com/marketplace?state=79bf274b-0c01-7960-c85c-3046ca96568e) to directly access the page. 3. Choose your account ID and create the dashboard in New Relic. -## Pre-built dashboard +
+
+ +## View the pre-built dashboard + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the **Integrations** page. + +2. Click the **Dashboard** link in the **New Relic** section to view the pre-built dashboard of your TiDB clusters. + +3. Depending on your [New Relic integration version](#new-relic-integration-version), do one of the following: -Click the **Dashboard** link in the **New Relic** card of the integrations. You can see the pre-built dashboard of your TiDB clusters. + - For cluster-level New Relic integration, click **TiDB Cloud Dynamic Tracker** to view the new dashboard. + - For legacy project-level New Relic integration (Beta), click **TiDB Cloud Monitoring** to view the legacy dashboard. ## Metrics available to New Relic -New Relic tracks the following metric data for your TiDB clusters. +New Relic tracks the following metrics for your TiDB clusters. | Metric name | Metric type | Labels | Description | | :------------| :---------- | :------| :----------------------------------------------------- | @@ -62,7 +146,29 @@ New Relic tracks the following metric data for your TiDB clusters. | tidb_cloud.db_transaction_per_second| gauge | txn_mode: pessimistic\|optimistic

type: abort\|commit\|...

cluster_name: ``

instance: tidb-0\|tidb-1…

component: `tidb` | The number of transactions executed per second. | | tidb_cloud.node_storage_used_bytes | gauge | cluster_name: ``

instance: tikv-0\|tikv-1…\|tiflash-0\|tiflash-1…

component: tikv\|tiflash | The disk usage of TiKV/TiFlash nodes, in bytes. | | tidb_cloud.node_storage_capacity_bytes | gauge | cluster_name: ``

instance: tikv-0\|tikv-1…\|tiflash-0\|tiflash-1…

component: tikv\|tiflash | The disk capacity of TiKV/TiFlash nodes, in bytes. | -| tidb_cloud.node_cpu_seconds_total | count | cluster_name: ``

instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…

component: tidb\|tikv\|tiflash | The CPU usage of TiDB/TiKV/TiFlash nodes. | +| tidb_cloud.node_cpu_seconds_total (Beta only) | count | cluster_name: ``

instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…

component: tidb\|tikv\|tiflash | The CPU usage of TiDB/TiKV/TiFlash nodes. | | tidb_cloud.node_cpu_capacity_cores | gauge | cluster_name: ``

instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…

component: tidb\|tikv\|tiflash | The limit on CPU cores of TiDB/TiKV/TiFlash nodes. | | tidb_cloud.node_memory_used_bytes | gauge | cluster_name: ``

instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…

component: tidb\|tikv\|tiflash | The used memory of TiDB/TiKV/TiFlash nodes, in bytes. | | tidb_cloud.node_memory_capacity_bytes | gauge | cluster_name: ``

instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…

component: tidb\|tikv\|tiflash | The memory capacity of TiDB/TiKV/TiFlash nodes, in bytes. | + +For cluster-level New Relic integrations, the following additional metrics are also available: + +| Metric name | Metric type | Labels | Description | +| :------------| :---------- | :------| :----------------------------------------------------- | +| tidb_cloud.node_storage_available_bytes | gauge | instance: `tidb-0\|tidb-1\|...`

component: `tikv\|tiflash`

cluster_name: `` | The available disk space in bytes for TiKV or TiFlash nodes. | +| tidb_cloud.node_disk_read_latency | gauge | instance: `tidb-0\|tidb-1\|...`

component: `tikv\|tiflash`

cluster_name: ``

`device`: `nvme.*\|dm.*` | The read latency (in seconds) per storage device. | +| tidb_cloud.node_disk_write_latency | gauge | instance: `tidb-0\|tidb-1\|...`

component: `tikv\|tiflash`

cluster_name: ``

`device`: `nvme.*\|dm.*` | The write latency (in seconds) per storage device. | +| tidb_cloud.db_kv_request_duration | gauge | instance: `tidb-0\|tidb-1\|...`

component: `tikv`

cluster_name: ``

`type`: `BatchGet\|Commit\|Prewrite\|...` | The duration (in seconds) of TiKV requests by type. | +| tidb_cloud.db_component_uptime | gauge | instance: `tidb-0\|tidb-1\|...`

component: `tidb\|tikv\|tiflash`

cluster_name: `` | The uptime (in seconds) of TiDB components. | +| tidb_cloud.cdc_changefeed_latency (AKA cdc_changefeed_checkpoint_ts_lag) | gauge | changefeed_id: ``

cluster_name: `` | The checkpoint timestamp lag (in seconds) for the changefeed owner. | +| tidb_cloud.cdc_changefeed_resolved_ts_lag | gauge | changefeed_id: ``

cluster_name: `` | The resolved timestamp lag (in seconds) for the changefeed owner. | +| tidb_cloud.cdc_changefeed_status | gauge | changefeed_id: ``

cluster_name: `` | Changefeed status:

`-1`: Unknown

`0`: Normal

`1`: Warning

`2`: Failed

`3`: Stopped

`4`: Finished

`6`: Warning

`7`: Other | +| tidb_cloud.resource_manager_resource_unit_read_request_unit | gauge | cluster_name: ``

resource_group: `` | The read request units (RUs) consumed by Resource Manager. | +| tidb_cloud.resource_manager_resource_unit_write_request_unit | gauge | cluster_name: ``

resource_group: `` | The write request units (RUs) consumed by Resource Manager. | +| tidb_cloud.dm_task_state | gauge | instance: `instance`

task: `task`

cluster_name: `` | Task State of Data Migration:

`0`: Invalid

`1`: New

`2`: Running

`3`: Paused

`4`: Stopped

`5`: Finished

`15`: Error | +| tidb_cloud.dm_syncer_replication_lag_bucket | gauge | instance: `instance`

cluster_name: `` | Replicate lag (bucket) of Data Migration. | +| tidb_cloud.dm_syncer_replication_lag_gauge | gauge | instance: `instance`

task: `task`

cluster_name: `` | Replicate lag (gauge) of Data Migration. | +| tidb_cloud.dm_relay_read_error_count | gauge | instance: `instance`

cluster_name: `` | The number of failures to read binlog from the master. | +| tidb_cloud.node_memory_available_bytes | gauge | cluster_name: ``

instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…

component: tidb\|tikv\|tiflash | The available memory of TiDB/TiKV/TiFlash nodes, in bytes. | +| tidb_cloud.cdc_changefeed_replica_rows | gauge | changefeed_id: ``

cluster_name: `` | The number of events that TiCDC nodes write to the downstream per second. | +| tidb_cloud.node_cpu_seconds_total_rate | gauge | cluster_name: ``

instance: tidb-0\|tidb-1…\|tikv-0…\|tiflash-0…

component: tidb\|tikv\|tiflash | The CPU usage of TiDB/TiKV/TiFlash nodes. | diff --git a/tidb-cloud/monitor-prometheus-and-grafana-integration-tidb-cloud-dynamic-tracker.json b/tidb-cloud/monitor-prometheus-and-grafana-integration-tidb-cloud-dynamic-tracker.json deleted file mode 100644 index 58f8939c79c4a..0000000000000 --- a/tidb-cloud/monitor-prometheus-and-grafana-integration-tidb-cloud-dynamic-tracker.json +++ /dev/null @@ -1,3358 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "9.3.2" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "description": "This dashboard provides a high-level overview of your TiDB clusters.", - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": 1, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 46, - "panels": [], - "title": "Uptime", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - 
"barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 1 - }, - "id": 39, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_component_uptime{cluster_name=\"$Cluster_name\"}", - "instant": false, - "interval": "30s", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - } - ], - "title": "Uptime", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 9 - }, - "id": 18, - "panels": [], - "title": "Query Performance", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "database time consumed by SQL statements per second, which is collected by SQL types, such as SELECT, INSERT, and UPDATE.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "bars", - 
"fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "normal" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "database time" - }, - "properties": [ - { - "id": "custom.drawStyle", - "value": "line" - }, - { - "id": "custom.lineWidth", - "value": 3 - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 10 - }, - "id": 12, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "sum by(cluster_name) (rate(tidbcloud_db_query_duration_seconds_sum{cluster_name=\"$Cluster_name\"}[2m]))", - "legendFormat": "database time", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "sum by(sql_type) (rate(tidbcloud_db_query_duration_seconds_sum{cluster_name=\"$Cluster_name\"}[2m]))", - "hide": false, - "legendFormat": "{{sql_type}}", - "range": true, - "refId": "B" - } - ], - "title": "Database Time", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The number of SQL statements executed per second in all TiDB instances, which is collected by SQL types, such as SELECT, INSERT, 
and UPDATE.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 10 - }, - "id": 6, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "timezone": [ - "" - ], - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "sum by(cluster_name) (rate(tidbcloud_db_queries_total{cluster_name=\"$Cluster_name\"}[2m]))", - "instant": false, - "interval": "", - "legendFormat": "total - QPS", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "exemplar": false, - "expr": "sum by(sql_type) (rate(tidbcloud_db_queries_total{cluster_name=\"$Cluster_name\"}[2m]))", - "hide": false, - "instant": false, - "interval": "", - "legendFormat": "{{sql_type}}", - "range": true, - "refId": "B" - } - ], - "title": "QPS", - "type": "timeseries" - }, - { - 
"datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The duration from receiving a request from the client to TiDB till TiDB executing the request and returning the result to the client.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 18 - }, - "id": 10, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "sum by(cluster_name) (rate(tidbcloud_db_query_duration_seconds_sum{cluster_name=\"$Cluster_name\"}[2m])) / sum by(cluster_name) (rate(tidbcloud_db_query_duration_seconds_count{cluster_name=\"$Cluster_name\"}[2m]))", - "legendFormat": "__auto", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "sum by(sql_type) 
(rate(tidbcloud_db_query_duration_seconds_sum{cluster_name=\"$Cluster_name\"}[2m])) / sum by(sql_type) (rate(tidbcloud_db_query_duration_seconds_count{cluster_name=\"$Cluster_name\"}[2m]))", - "hide": false, - "legendFormat": "{{sql_type}}", - "range": true, - "refId": "B" - } - ], - "title": "Average Query Duration", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The duration from receiving a request from the client to TiDB till TiDB executing the request and returning the result to the client.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 18 - }, - "id": 8, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "histogram_quantile(0.99, sum by(le, cluster_name) 
(rate(tidbcloud_db_query_duration_seconds_bucket{cluster_name=\"$Cluster_name\"}[2m])))", - "interval": "", - "legendFormat": "All Query P99", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "histogram_quantile(0.99, sum by(le, sql_type) (rate(tidbcloud_db_query_duration_seconds_bucket{cluster_name=\"$Cluster_name\"}[2m])))", - "hide": false, - "interval": "", - "legendFormat": "{{sql_type}}", - "range": true, - "refId": "B" - } - ], - "title": "P99 Query Duration", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The statistics of error types (such as syntax errors and primary key conflicts) according to the SQL statement execution errors per minute on each TiDB instance. It contains the module in which an error occurs and the error code.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 26 - }, - "id": 4, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true 
- }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "sum by(type) (rate(tidbcloud_db_failed_queries_total{cluster_name=\"$Cluster_name\"}[2m]))", - "interval": "", - "legendFormat": "{{type}}", - "range": true, - "refId": "A" - } - ], - "title": "Failed Queries", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Current number of connections in your TiDB server\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 26 - }, - "id": 2, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": 
"tidbcloud_db_connections{cluster_name=\"$Cluster_name\"}", - "legendFormat": "{{cluster_name}}", - "range": true, - "refId": "A" - } - ], - "title": "Connections", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 34 - }, - "id": 41, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_disk_read_latency{cluster_name=\"$Cluster_name\"}", - "instant": false, - "interval": "30s", - "legendFormat": "{{exported_instance}} - {{device}}", - "range": true, - "refId": "A" - } - ], - "title": "Disk Read Latency", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": 
"palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "tikv-2 - nvme1n1" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 34 - }, - "id": 40, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_disk_write_latency{cluster_name=\"$Cluster_name\"}", - "instant": false, - "interval": "30s", - "legendFormat": "{{exported_instance}} - {{device}}", - "range": true, - "refId": "A" - } - ], - "title": "Disk Write Latency", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - 
"mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 42 - }, - "id": 42, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_kv_request_duration{cluster_name=\"$Cluster_name\"}", - "instant": false, - "interval": "30s", - "legendFormat": "{{type}}", - "range": true, - "refId": "A" - } - ], - "title": "Average TiDB KV Request Duration", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 50 - }, - "id": 32, - "panels": [], - "title": "ChangeFeed", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - 
"barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 51 - }, - "id": 34, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_changefeed_latency", - "instant": false, - "interval": "30s", - "legendFormat": "{{changefeed_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Latency", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": 
{ - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "rows/s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 51 - }, - "id": 35, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_changefeed_Replica_rows", - "interval": "30s", - "legendFormat": "{{changefeed_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Replica rows Per Sec", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 59 - }, - "id": 44, - "options": { - "legend": { - 
"calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_ticdc_owner_resolved_ts_lag", - "instant": false, - "interval": "30s", - "legendFormat": "{{changefeed_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Changefeed Resolved Ts Lag", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [ - { - "options": { - "0": { - "index": 1, - "text": "normal" - }, - "1": { - "index": 2, - "text": "warning" - }, - "2": { - "index": 3, - "text": "failed" - }, - "3": { - "index": 4, - "text": "stopped" - }, - "4": { - "index": 5, - "text": "finished" - }, - "6": { - "index": 6, - "text": "warning" - }, - "7": { - "index": 7, - "text": "other" - }, - "-1": { - "index": 0, - "text": "unknown" - } - }, - "type": "value" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 
12, - "x": 12, - "y": 59 - }, - "id": 45, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "max(tidbcloud_changefeed_status{cluster_name=\"$Cluster_name\"}) by (changefeed_id)", - "instant": false, - "interval": "30s", - "legendFormat": "{{changefeed_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Changefeed Status", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 67 - }, - "id": 20, - "panels": [], - "title": "Server - TiDB", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The statistics of CPU usage of each TiDB instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percentunit" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 68 - }, - "id": 14, - "options": { - "legend": { - 
"calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "sum by(exported_instance) (rate(tidbcloud_node_cpu_seconds_total{cluster_name=\"$Cluster_name\", component=\"tidb\"}[2m]))", - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_cpu_capacity_cores{cluster_name=\"$Cluster_name\", component=\"tidb\"}", - "hide": false, - "interval": "", - "legendFormat": "limit-{{exported_instance}}", - "range": true, - "refId": "B" - } - ], - "title": "TiDB CPU", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The memory usage statistics of each TiDB instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bits" - }, - "overrides": [] - }, - "gridPos": 
{ - "h": 8, - "w": 12, - "x": 12, - "y": 68 - }, - "id": 22, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_memory_used_bytes{cluster_name=\"$Cluster_name\", component=\"tidb\"}", - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_memory_capacity_bytes{cluster_name=\"$Cluster_name\", component=\"tidb\"}", - "hide": false, - "interval": "", - "legendFormat": "limit-{{exported_instance}}", - "range": true, - "refId": "B" - } - ], - "title": "TiDB Memory", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 76 - }, - "id": 27, - "panels": [], - "title": "Server - TiKV", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The statistics of CPU usage of each TiKV instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - 
"mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percentunit" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 77 - }, - "id": 15, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "sum by(exported_instance) (rate(tidbcloud_node_cpu_seconds_total{cluster_name=\"$Cluster_name\", component=\"tikv\"}[2m]))", - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_cpu_capacity_cores{cluster_name=\"$Cluster_name\", component=\"tikv\"}", - "hide": false, - "interval": "", - "legendFormat": "limit-{{exported_instance}}", - "range": true, - "refId": "B" - } - ], - "title": "TiKV CPU", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The memory usage statistics of each TiKV instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - 
"spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bits" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 77 - }, - "id": 23, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_memory_used_bytes{cluster_name=\"$Cluster_name\", component=\"tikv\"}", - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_memory_capacity_bytes{cluster_name=\"$Cluster_name\", component=\"tikv\"}", - "hide": false, - "interval": "", - "legendFormat": "limit-{{exported_instance}}", - "range": true, - "refId": "B" - } - ], - "title": "TiKV Memory", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The storage size per TiKV instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - 
"type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bits" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 85 - }, - "id": 25, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_storage_used_bytes{cluster_name=\"$Cluster_name\", component=\"tikv\"}", - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_storage_capacity_bytes{cluster_name=\"$Cluster_name\", component=\"tikv\"}", - "hide": false, - "interval": "", - "legendFormat": "limit-{{exported_instance}}", - "range": true, - "refId": "B" - } - ], - "title": "TiKV Storage", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The available storage size per TiKV instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - 
"lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bits" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 85 - }, - "id": 38, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "tidbcloud_node_storage_available_bytes{cluster_name=\"$Cluster_name\", component=\"tikv\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "TiKV Available Storage", - "type": "timeseries" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 93 - }, - "id": 29, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The statistics of CPU usage of each TiFlash instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - 
"pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percentunit" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 198 - }, - "id": 16, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "sum by(exported_instance) (rate(tidbcloud_node_cpu_seconds_total{cluster_name=\"$Cluster_name\", component=\"tiflash\"}[2m]))", - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_cpu_capacity_cores{cluster_name=\"$Cluster_name\", component=\"tiflash\"}", - "hide": false, - "interval": "", - "legendFormat": "limit-{{exported_instance}}", - "range": true, - "refId": "B" - } - ], - "title": "TiFlash CPU", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The memory usage statistics of each TiKV instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - 
"tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bits" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 198 - }, - "id": 24, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_memory_used_bytes{cluster_name=\"$Cluster_name\", component=\"tiflash\"}", - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_memory_capacity_bytes{cluster_name=\"$Cluster_name\", component=\"tiflash\"}", - "hide": false, - "interval": "", - "legendFormat": "limit-{{exported_instance}}", - "range": true, - "refId": "B" - } - ], - "title": "TiFlash Memory", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The storage size per TiFlash instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - 
"gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bits" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 214 - }, - "id": 30, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_storage_used_bytes{cluster_name=\"$Cluster_name\", component=\"tiflash\"}", - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "tidbcloud_node_storage_capacity_bytes{cluster_name=\"$Cluster_name\", component=\"tiflash\"}", - "hide": false, - "interval": "", - "legendFormat": "limit-{{exported_instance}}", - "range": true, - "refId": "B" - } - ], - "title": "TiFlash Storage", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The storage size per TiFlash instance.\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - 
"barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bits" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 214 - }, - "id": 43, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_node_storage_available_bytes{cluster_name=\"$Cluster_name\", component=\"tiflash\"}", - "interval": "", - "legendFormat": "{{exported_instance}}", - "range": true, - "refId": "A" - } - ], - "title": "TiFlash Available Storage", - "type": "timeseries" - } - ], - "title": "Server - TiFlash", - "type": "row" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 94 - }, - "id": 36, - "panels": [], - "title": "Resource Unit", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "The metrics about the request unit cost for all resource groups.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - 
"barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 95 - }, - "id": 37, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "sum (tidbcloud_resource_manager_resource_unit_read_request_unit{cluster_name=\"$Cluster_name\"}) by (resource_group) + sum (tidbcloud_resource_manager_resource_unit_write_request_unit{cluster_name=\"$Cluster_name\"}) by (resource_group)", - "interval": "", - "legendFormat": "{{resource_group}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "sum (tidbcloud_resource_manager_resource_unit_read_request_unit{cluster_name=\"$Cluster_name\"}) + sum (tidbcloud_resource_manager_resource_unit_write_request_unit{cluster_name=\"$Cluster_name\"}) ", - "hide": false, - "instant": false, - "legendFormat": "total", - "range": true, - "refId": "B" - } - ], - "title": "Resource Group RU", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 103 - }, - "id": 47, - "panels": [], - 
"title": "DM", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "DM Relay Read Error Count", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 104 - }, - "id": 48, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_dm_relay_read_error_count{cluster_name=\"$Cluster_name\"}", - "interval": "", - "legendFormat": "{{cluster_name}} ---> {{job}}", - "range": true, - "refId": "A" - } - ], - "title": "DM Relay Read Error Count", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "DM Syncer Replication Lag Bucket", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": 
false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 104 - }, - "id": 49, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_dm_syncer_replication_lag_bucket{cluster_name=\"$Cluster_name\"}", - "interval": "", - "legendFormat": "{{cluster_name}} ---> {{job}} ---> {{task}}", - "range": true, - "refId": "A" - } - ], - "title": "DM Syncer Replication Lag Bucket", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "DM Syncer Replication Lag Gauge", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": 
false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 112 - }, - "id": 50, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_dm_syncer_replication_lag_gauge{cluster_name=\"$Cluster_name\"}", - "interval": "", - "legendFormat": "{{cluster_name}} ---> {{job}} ---> {{task}}", - "range": true, - "refId": "A" - } - ], - "title": "DM Syncer Replication Lag Gauge", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "DM Task State", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - 
"mode": "off" - } - }, - "mappings": [ - { - "options": { - "0": { - "index": 0, - "text": "Invalid" - }, - "1": { - "index": 1, - "text": "New" - }, - "2": { - "index": 2, - "text": "Running" - }, - "3": { - "index": 3, - "text": "Paused" - }, - "4": { - "index": 4, - "text": "Stopped" - }, - "5": { - "index": 5, - "text": "Finished" - }, - "15": { - "index": 6, - "text": "Error" - } - }, - "type": "value" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 112 - }, - "id": 51, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "12.0.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "tidbcloud_dm_task_status{cluster_name=\"$Cluster_name\"}", - "interval": "", - "legendFormat": "{{cluster_name}} ---> {{job}} ---> {{task}}", - "range": true, - "refId": "A" - } - ], - "title": "DM Task State", - "type": "timeseries" - } - ], - "preload": false, - "refresh": "30s", - "schemaVersion": 41, - "tags": [], - "templating": { - "list": [ - { - - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "definition": "tidbcloud_db_connections", - "includeAll": false, - "name": "Cluster_name", - "options": [], - "query": { - "query": "tidbcloud_db_connections", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "/.*cluster_name=\"([^\"]*).*/", - "type": "query" - } - ] - }, - "time": { - "from": "now-30m", - "to": "now" - }, - "timepicker": { - "nowDelay": "" - }, - "timezone": "", - "title": "TiDB Cloud Overview - Cluster", - "uid": "lnHrQHp4u", - "version": 7 -} diff --git 
a/tidb-cloud/monitor-prometheus-and-grafana-integration.md b/tidb-cloud/monitor-prometheus-and-grafana-integration.md index fbb92288f1499..3edb7b28543d7 100644 --- a/tidb-cloud/monitor-prometheus-and-grafana-integration.md +++ b/tidb-cloud/monitor-prometheus-and-grafana-integration.md @@ -1,44 +1,69 @@ --- -title: Integrate TiDB Cloud with Prometheus and Grafana (Beta) +title: Integrate TiDB Cloud with Prometheus and Grafana summary: Learn how to monitor your TiDB cluster with the Prometheus and Grafana integration. --- -# Integrate TiDB Cloud with Prometheus and Grafana (Beta) +# Integrate TiDB Cloud with Prometheus and Grafana -TiDB Cloud provides a [Prometheus](https://prometheus.io/) API endpoint (beta). If you have a Prometheus service, you can monitor key metrics of TiDB Cloud from the endpoint easily. +TiDB Cloud provides a [Prometheus](https://prometheus.io/) API endpoint. If you have a Prometheus service, you can monitor key metrics of TiDB Cloud from the endpoint easily. This document describes how to configure your Prometheus service to read key metrics from the TiDB Cloud endpoint and how to view the metrics using [Grafana](https://grafana.com/). +## Prometheus integration versions + +TiDB Cloud has supported the project-level Prometheus integration (Beta) since March 15, 2022. Starting from October 21, 2025, TiDB Cloud introduces the cluster-level Prometheus integration (Preview). Starting from December 2, 2025, the cluster-level Prometheus integration becomes generally available (GA). + +- **Cluster-level Prometheus integration**: if no legacy project-level Prometheus integration remains undeleted within your organization by October 21, 2025, TiDB Cloud provides the cluster-level Prometheus integration for your organization to experience the latest enhancements. 
+ +- **Legacy project-level Prometheus integration (Beta)**: if at least one legacy project-level Prometheus integration remains undeleted within your organization by October 21, 2025, TiDB Cloud retains both existing and new integrations at the project level for your organization to avoid affecting current dashboards. + + > **Note** + > + > The legacy project-level Prometheus integrations will be deprecated on January 9, 2026. If your organization is still using these legacy integrations, follow [Migrate Prometheus Integrations](/tidb-cloud/migrate-prometheus-metrics-integrations.md) to migrate to the new cluster-level integrations and minimize disruptions to your metrics-related services. + ## Prerequisites - To integrate TiDB Cloud with Prometheus, you must have a self-hosted or managed Prometheus service. -- To edit third-party integration settings of TiDB Cloud, you must have the `Organization Owner` access to your organization or `Project Member` access to the target project in TiDB Cloud. +- To set up third-party metrics integration for TiDB Cloud, you must have the `Organization Owner` or `Project Owner` access in TiDB Cloud. To view the integration page, you need at least the `Project Viewer` role to access the target clusters under your project in TiDB Cloud. ## Limitation -- You cannot use the Prometheus and Grafana integration in [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. - +- Prometheus and Grafana integrations now are only available for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. - Prometheus and Grafana integrations are not available when the cluster status is **CREATING**, **RESTORING**, **PAUSED**, or **RESUMING**. ## Steps ### Step 1. Get a scrape_config file for Prometheus -Before configuring your Prometheus service to read metrics of TiDB Cloud, you need to generate a `scrape_config` YAML file in TiDB Cloud first. 
The `scrape_config` file contains a unique bearer token that allows the Prometheus service to monitor any database clusters in the current project. +Before configuring your Prometheus service to read metrics of TiDB Cloud, you need to generate a `scrape_config` YAML file in TiDB Cloud first. The `scrape_config` file contains a unique bearer token that allows the Prometheus service to monitor your target clusters. + +Depending on your [Prometheus integration version](#prometheus-integration-versions), the steps to get the `scrape_config` file for Prometheus and access the integration page are different. + + +
+ +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target cluster to go to its overview page. +2. In the left navigation pane, click **Settings** > **Integrations**. +3. On the **Integrations** page, click **Integration to Prometheus**. +4. Click **Add File** to generate and show the `scrape_config` file for the current cluster. +5. Make a copy of the `scrape_config` file content for later use. -To get the `scrape_config` file for Prometheus, do the following: +
+
1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. 2. In the left navigation pane, click **Project Settings** > **Integrations**. 3. On the **Integrations** page, click **Integration to Prometheus (BETA)**. 4. Click **Add File** to generate and show the scrape_config file for the current project. - 5. Make a copy of the `scrape_config` file content for later use. - > **Note:** - > - > For security reasons, TiDB Cloud only shows a newly generated `scrape_config` file once. Ensure that you copy the content before closing the file window. If you forget to do so, you need to delete the `scrape_config` file in TiDB Cloud and generate a new one. To delete a `scrape_config` file, select the file, click **...**, and then click **Delete**. +
+
+ +> **Note:** +> +> For security reasons, TiDB Cloud only shows a newly generated `scrape_config` file once. Ensure that you copy the content before closing the file window. If you forget to do so, you need to delete the `scrape_config` file in TiDB Cloud and generate a new one. To delete a `scrape_config` file, select the file, click **...**, and then click **Delete**. ### Step 2. Integrate with Prometheus @@ -54,10 +79,13 @@ To get the `scrape_config` file for Prometheus, do the following: After your Prometheus service is reading metrics from TiDB Cloud, you can use Grafana GUI dashboards to visualize the metrics as follows: -1. Download the Grafana dashboard JSON of TiDB Cloud [here](https://github.com/pingcap/docs/blob/master/tidb-cloud/monitor-prometheus-and-grafana-integration-grafana-dashboard-UI.json). +1. Depending on your [Prometheus integration version](#prometheus-integration-versions), the link to download the Grafana dashboard JSON of TiDB Cloud for Prometheus is different. + + - For cluster-level Prometheus integration, download the Grafana dashboard JSON file [here](https://github.com/pingcap/docs/blob/master/tidb-cloud/monitor-prometheus-and-grafana-integration-tidb-cloud-dynamic-tracker.json). + - For legacy project-level Prometheus integration (Beta), download the Grafana dashboard JSON file [here](https://github.com/pingcap/docs/blob/master/tidb-cloud/monitor-prometheus-and-grafana-integration-grafana-dashboard-UI.json). + +2. [Import this JSON to your own Grafana GUI](https://grafana.com/docs/grafana/v8.5/dashboards/export-import/#import-dashboard) to visualize the metrics. -2. [Import this JSON to your own Grafana GUI](https://grafana.com/docs/grafana/v8.5/dashboards/export-import/#import-dashboard) to visualize the metrics. 
- > **Note:** > > If you are already using Prometheus and Grafana to monitor TiDB Cloud and want to incorporate the newly available metrics, it is recommended that you create a new dashboard instead of directly updating the JSON of the existing one. @@ -73,7 +101,7 @@ To improve data security, it is a general best practice to periodically rotate ` 1. Follow [Step 1](#step-1-get-a-scrape_config-file-for-prometheus) to create a new `scrape_config` file for Prometheus. 2. Add the content of the new file to your Prometheus configuration file. 3. Once you have confirmed that your Prometheus service is still able to read from TiDB Cloud, remove the content of the old `scrape_config` file from your Prometheus configuration file. -4. On the **Integrations** page of your project, delete the corresponding old `scrape_config` file to block anyone else from using it to read from the TiDB Cloud Prometheus endpoint. +4. On the **Integrations** page of your project or cluster, delete the corresponding old `scrape_config` file to block anyone else from using it to read from the TiDB Cloud Prometheus endpoint. ## Metrics available to Prometheus @@ -98,12 +126,21 @@ Prometheus tracks the following metric data for your TiDB clusters. | tidbcloud_disk_read_latency | histogram | instance: `tidb-0\|tidb-1\|...`
component: `tikv\|tiflash`
cluster_name: ``
`device`: `nvme.*\|dm.*` | The read latency in seconds per storage device | | tidbcloud_disk_write_latency | histogram | instance: `tidb-0\|tidb-1\|...`
component: `tikv\|tiflash`
cluster_name: ``
`device`: `nvme.*\|dm.*` | The write latency in seconds per storage device | | tidbcloud_kv_request_duration | histogram | instance: `tidb-0\|tidb-1\|...`
component: `tikv`
cluster_name: ``
`type`: `BatchGet\|Commit\|Prewrite\|...` | The duration in seconds of TiKV requests by type | -| tidbcloud_component_uptime | histogram | instance: `tidb-0\|tidb-1\|...`
component: `tidb\|tikv\|pd\|...`
cluster_name: `` | The uptime in seconds of TiDB components | +| tidbcloud_component_uptime | histogram | instance: `tidb-0\|tidb-1\|...`
component: `tidb\|tikv\|tiflash`
cluster_name: `` | The uptime in seconds of TiDB components | | tidbcloud_ticdc_owner_resolved_ts_lag | gauge | changefeed_id: ``
cluster_name: `` | The resolved timestamp lag in seconds for changefeed owner | | tidbcloud_changefeed_status | gauge | changefeed_id: ``
cluster_name: `` | Changefeed status:
`-1`: Unknown
`0`: Normal
`1`: Warning
`2`: Failed
`3`: Stopped
`4`: Finished
`6`: Warning
`7`: Other | | tidbcloud_resource_manager_resource_unit_read_request_unit | gauge | cluster_name: ``
resource_group: `` | The read request units consumed by Resource Manager | | tidbcloud_resource_manager_resource_unit_write_request_unit | gauge | cluster_name: ``
resource_group: `` | The write request units consumed by Resource Manager | +For cluster-level Prometheus integration, the following additional metrics are also available: + +| Metric name | Metric type | Labels | Description | +|:--- |:--- |:--- |:--- | +| tidbcloud_dm_task_status | gauge | instance: `instance`
task: `task`
cluster_name: `` | Task state of Data Migration:
0: Invalid
1: New
2: Running
3: Paused
4: Stopped
5: Finished
15: Error | +| tidbcloud_dm_syncer_replication_lag_bucket | gauge | instance: `instance`
cluster_name: `` | Replicate lag (bucket) of Data Migration. | +| tidbcloud_dm_syncer_replication_lag_gauge | gauge | instance: `instance`
task: `task`
cluster_name: `` | Replicate lag (gauge) of Data Migration. | +| tidbcloud_dm_relay_read_error_count | counter | instance: `instance`
cluster_name: `` | The number of failed attempts to read binlog from the master. | + ## FAQ - Why does the same metric have different values on Grafana and the TiDB Cloud console at the same time? diff --git a/tidb-cloud/monitor-tidb-cluster.md b/tidb-cloud/monitor-tidb-cluster.md index aa941f6165acf..0dc5499ab4a82 100644 --- a/tidb-cloud/monitor-tidb-cluster.md +++ b/tidb-cloud/monitor-tidb-cluster.md @@ -32,6 +32,8 @@ You can see the current status of each running cluster on the cluster page. > > The TiDB node status is only available for TiDB Cloud Dedicated clusters. +The node names starting with `tidb` are TiDB nodes, and those starting with `tiproxy` are TiProxy nodes. + | TiDB node status | Description | |:--|:--| | **Available** | The TiDB node is healthy and available. | diff --git a/tidb-cloud/monitoring-concepts.md b/tidb-cloud/monitoring-concepts.md index 89a95e0da6e5f..a456d55fd94cb 100644 --- a/tidb-cloud/monitoring-concepts.md +++ b/tidb-cloud/monitoring-concepts.md @@ -33,16 +33,12 @@ In TiDB Cloud, an event indicates a change in your TiDB Cloud cluster. TiDB Clou For more information, see [TiDB Cloud Cluster Event](/tidb-cloud/tidb-cloud-events.md). -## Third-party metrics integrations (Beta) +## Third-party metrics integrations TiDB Cloud lets you integrate any of the following third-party metrics services to receive TiDB Cloud alerts and view the performance metrics of your TiDB cluster. -- Datadog integration +- [Datadog integration](/tidb-cloud/monitor-datadog-integration.md) -- Prometheus and Grafana integration +- [Prometheus and Grafana integration](/tidb-cloud/monitor-prometheus-and-grafana-integration.md) -- New Relic integration - -Currently, these third-party metrics integrations are in beta. - -For more information, see [Third-Party Metrics Integration (Beta)](/tidb-cloud/third-party-monitoring-integrations.md). 
\ No newline at end of file +- [New Relic integration](/tidb-cloud/monitor-new-relic-integration.md) diff --git a/tidb-cloud/notifications.md b/tidb-cloud/notifications.md index 6e9328744f359..79f86e757b393 100644 --- a/tidb-cloud/notifications.md +++ b/tidb-cloud/notifications.md @@ -37,13 +37,15 @@ The following table lists the notifications available in TiDB Cloud, along with | Notification | Trigger event | Notification recipient | | --- | --- | --- | -| TiDB Cloud Serverless cluster creation | A [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster is created. | All project members | -| TiDB Cloud Serverless cluster deletion | A TiDB Cloud Serverless cluster is deleted. | All project members | +| {{{ .starter }}} cluster creation | A [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) cluster is created. | All project members | +| {{{ .starter }}} cluster deletion | A {{{ .starter }}} cluster is deleted. | All project members | +| {{{ .essential }}} cluster creation | A [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) cluster is created. | All project members | +| {{{ .essential }}} cluster deletion | A [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) cluster is deleted. | All project members | | TiDB Cloud Dedicated cluster creation | A [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster is created. | All project members | | TiDB Cloud Dedicated cluster deletion | A TiDB Cloud Dedicated cluster is deleted. | All project members | | Organization Budget threshold alert | The organization [budget threshold](/tidb-cloud/tidb-cloud-budget.md) is reached. | `Organization Owner`, `Organization Billing Manager`, and `Organization Billing Viewer` | | Project Budget threshold alert | The project [budget threshold](/tidb-cloud/tidb-cloud-budget.md) is reached. 
| `Organization Owner`, `Organization Billing Manager`, `Organization Billing Viewer`, and `Project Owner` | -| Serverless cluster spending limit threshold alert | The [spending limit threshold](/tidb-cloud/manage-serverless-spend-limit.md) for TiDB Cloud Serverless clusters in the organization is reached. | `Organization Owner`, `Organization Billing Manager`, `Organization Billing Viewer`, and `Project Owner` | +| Starter cluster spending limit threshold alert | The [spending limit threshold](/tidb-cloud/manage-serverless-spend-limit.md) for {{{ .starter }}} clusters in the organization is reached. | `Organization Owner`, `Organization Billing Manager`, `Organization Billing Viewer`, and `Project Owner` | | Credits update | [Credits](/tidb-cloud/tidb-cloud-billing.md#credits) for the organization are applied, fully used, reclaimed, or expired. | `Organization Owner`, `Organization Billing Manager`, and `Organization Billing Viewer` | | Discount update | [Discounts](/tidb-cloud/tidb-cloud-billing.md#discounts) for the organization are applied, reclaimed, or expired. | `Organization Owner`, `Organization Billing Manager`, and `Organization Billing Viewer` | | Marketplace update | The organization has a subscription or unsubscription through a cloud provider marketplace. | All organization members | diff --git a/tidb-cloud/optimize-resource-allocation.md b/tidb-cloud/optimize-resource-allocation.md new file mode 100644 index 0000000000000..f61503e2c64ff --- /dev/null +++ b/tidb-cloud/optimize-resource-allocation.md @@ -0,0 +1,38 @@ +--- +title: Optimize Resource Allocation for TiDB Cloud Dedicated +summary: Learn about how to optimize your resource allocation for TiDB Cloud Dedicated clusters. 
+--- + +# Optimize Resource Allocation for TiDB Cloud Dedicated + +As a Hybrid Transactional and Analytical Processing (HTAP) database, [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters can support multiple business applications, each with different quality of service (QoS) requirements. In some cases, you might need to allocate more resources to high-priority applications to maintain acceptable latency levels. + +TiDB Cloud Dedicated offers resource optimization features, including [Resource Control](/tidb-resource-control-ru-groups.md) and the [TiDB Node Group](/tidb-cloud/tidb-node-group-overview.md) feature. These features help you allocate resources efficiently in multi-business scenarios. + +## Use Resource Control + +[Resource Control](/tidb-resource-control-ru-groups.md) lets you divide the storage nodes (TiKV or TiFlash) of a TiDB Cloud Dedicated cluster into multiple logical groups. In systems with mixed workloads, you can assign workloads to separate resource groups to ensure resource isolation and meet QoS requirements. + +If the cluster experiences unexpected SQL performance issues, you can use [SQL bindings](/sql-statements/sql-statement-create-binding.md) or [manage runaway queries](/tidb-resource-control-runaway-queries.md) alongside resource groups to temporarily limit the resource consumption of specific SQL statements. + +By using Resource Control effectively, you can reduce the number of clusters, simplify operations and maintenance, and lower management costs. + +## Use TiDB Node Group + +The [TiDB Node Group](/tidb-cloud/tidb-node-group-overview.md) feature physically groups the computing nodes (TiDB layer) of a TiDB Cloud Dedicated cluster. Each group is configured with a specific number of TiDB nodes, ensuring the physical separation of computing resources between groups. 
+ +You can divide computing nodes into multiple TiDB node groups based on business requirements and assign unique connection endpoints to each group. Your applications connect to the cluster through their respective endpoints, and requests route to the corresponding node group for processing. This ensures that resource overuse in one group does not affect other groups. + +## Choose between Resource Control and TiDB Node Group + +You can use Resource Control, the TiDB Node Group feature, or a combination of both based on your application needs and budget to achieve resource isolation. + +The following table compares the features of Resource Control and TiDB Node Group: + +| Comparison item | Resource Control | TiDB Node Group | +|--------------------------|---------------------------|------------------------| +| Isolation level | TiKV or TiFlash logical layer | TiDB node physical layer | +| Flow control | Controls the flow of user read and write requests based on quotas set for resource groups. | Not supported. | +| Configuration method | Configured using SQL statements | Configured through the TiDB Cloud console | +| Distinguishing workloads | Supports binding resources at the following levels:
  • User level.
  • Session level (set the resource group per session).
  • Statement level (set the resource group per statement).
| Provides different connection endpoints for different workloads. | +| Cost | No extra cost | Cost associated with adding TiDB nodes, but no extra cost for creating TiDB node groups. | diff --git a/tidb-cloud/pause-or-resume-tidb-cluster.md b/tidb-cloud/pause-or-resume-tidb-cluster.md index 9d964d35fede7..75848db442ac4 100644 --- a/tidb-cloud/pause-or-resume-tidb-cluster.md +++ b/tidb-cloud/pause-or-resume-tidb-cluster.md @@ -13,7 +13,7 @@ Comparing with backup and restore, pausing and resuming a cluster takes less tim > **Note:** > -> You cannot pause a [TiDB Cloud Serverless cluster](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless). +> You cannot pause a [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) or [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) cluster. ## Limitations diff --git a/tidb-cloud/premium/_index.md b/tidb-cloud/premium/_index.md new file mode 100644 index 0000000000000..c9a80a48a4cfd --- /dev/null +++ b/tidb-cloud/premium/_index.md @@ -0,0 +1,138 @@ +--- +title: TiDB Cloud Documentation +hide_sidebar: true +hide_commit: true +summary: TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings everything great about TiDB to your cloud. It offers guides, samples, and references for learning, trying, developing, maintaining, migrating, monitoring, tuning, securing, billing, integrating, and referencing. 
+--- + + + + + +[Why TiDB Cloud](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-intro/?plan=premium) + +[Key Concepts](https://docs-preview.pingcap.com/tidbcloud/key-concepts/?plan=premium) + +[FAQ](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-faq/?plan=premium) + + + + + +[Try Out TiDB Cloud](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-quickstart/?plan=premium) + +[Try Out TiDB + AI](https://docs-preview.pingcap.com/tidbcloud/vector-search-get-started-using-python/?plan=premium) + +[Try Out HTAP](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-htap-quickstart/?plan=premium) + +[Try Out TiDB Cloud CLI](https://docs-preview.pingcap.com/tidbcloud/get-started-with-cli/?plan=premium) + + + + + +[Developer Guide Overview](https://docs-preview.pingcap.com/tidbcloud/dev-guide-overview/?plan=premium) + +[Quick Start](https://docs-preview.pingcap.com/tidbcloud/dev-guide-build-cluster-in-cloud/?plan=premium) + +[Example Application](https://docs-preview.pingcap.com/tidbcloud/dev-guide-sample-application-spring-boot/?plan=premium) + + + + + +[Create a TiDB Instance](https://docs-preview.pingcap.com/tidbcloud/create-tidb-instance-premium/?plan=premium) + +[Connect to a TiDB Instance](https://docs-preview.pingcap.com/tidbcloud/connect-to-tidb-instance/?plan=premium) + +[Use an HTAP Cluster](https://docs-preview.pingcap.com/tidbcloud/tiflash-overview/?plan=premium) + +[Back Up and Restore Data](https://docs-preview.pingcap.com/tidbcloud/backup-and-restore-premium/?plan=premium) + +[Use API (Beta)](https://docs-preview.pingcap.com/tidbcloud/api-overview/?plan=premium) + +[Use TiDB Cloud CLI](https://docs-preview.pingcap.com/tidbcloud/get-started-with-cli/?plan=premium) + + + + + +[From Amazon RDS for Oracle](https://docs-preview.pingcap.com/tidbcloud/migrate-from-oracle-using-aws-dms/?plan=premium) + +[Import Sample Data](https://docs-preview.pingcap.com/tidbcloud/import-sample-data-serverless/?plan=premium) + +[Import CSV 
Files](https://docs-preview.pingcap.com/tidbcloud/import-csv-files-premium/?plan=premium) + +[Import Parquet Files](https://docs-preview.pingcap.com/tidbcloud/import-parquet-files-serverless/?plan=premium) + +[With MySQL CLI](https://docs-preview.pingcap.com/tidbcloud/import-with-mysql-cli-premium/?plan=premium) + + + + + +[Status and Metrics](https://docs-preview.pingcap.com/tidbcloud/monitor-tidb-cluster/?plan=premium) + +[Built-in Monitoring](https://docs-preview.pingcap.com/tidbcloud/built-in-monitoring-premium/?plan=premium) + + + + + +[Tuning Overview](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-tune-performance-overview/?plan=premium) + +[Analyze Performance](https://docs-preview.pingcap.com/tidbcloud/tune-performance/?plan=premium) + +[Tune SQL Performance](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-sql-tuning-overview/?plan=premium) + +[Tune TiFlash Performance](https://docs-preview.pingcap.com/tidbcloud/tune-tiflash-performance/?plan=premium) + + + + + +[Password Authentication](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-password-authentication/?plan=premium) + +[User Roles](https://docs-preview.pingcap.com/tidbcloud/manage-user-access-premium/?plan=premium#user-roles) + +[Manage User Profiles](https://docs-preview.pingcap.com/tidbcloud/manage-user-access-premium/?plan=premium#manage-user-profiles) + +[Manage Organization Access](https://docs-preview.pingcap.com/tidbcloud/manage-user-access-premium/?plan=premium#manage-organization-access) + +[Configure an IP Access List](https://docs-preview.pingcap.com/tidbcloud/configure-ip-access-list-premium/?plan=premium) + + + + + +[Pricing](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-billing/?plan=premium#pricing-for-premium) + +[Invoices](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-billing/?plan=premium#invoices) + +[Credits](https://docs-preview.pingcap.com/tidbcloud/tidb-cloud-billing/?plan=premium#credits) + + + + 
+[Airbyte](https://docs-preview.pingcap.com/tidbcloud/integrate-tidbcloud-with-airbyte/?plan=premium) + +[Zapier](https://docs-preview.pingcap.com/tidbcloud/integrate-tidbcloud-with-zapier/?plan=premium) + +[Vercel](https://docs-preview.pingcap.com/tidbcloud/integrate-tidbcloud-with-vercel/?plan=premium) + +[Terraform](https://docs-preview.pingcap.com/tidbcloud/terraform-tidbcloud-provider-overview/?plan=premium) + +[Amazon AppFlow](https://docs-preview.pingcap.com/tidbcloud/dev-guide-aws-appflow-integration/?plan=premium) + + + + + +[SQL Reference](https://docs-preview.pingcap.com/tidbcloud/basic-sql-operations/?plan=premium) + +[System Variables](https://docs-preview.pingcap.com/tidbcloud/system-variables/?plan=premium) + + + + diff --git a/tidb-cloud/premium/backup-and-restore-premium.md b/tidb-cloud/premium/backup-and-restore-premium.md new file mode 100644 index 0000000000000..392acfacbd4c0 --- /dev/null +++ b/tidb-cloud/premium/backup-and-restore-premium.md @@ -0,0 +1,309 @@ +--- +title: Back Up and Restore {{{ .premium }}} Data +summary: Learn how to back up and restore your {{{ .premium }}} instances. +aliases: ['/tidbcloud/restore-deleted-tidb-cluster'] +--- + +# Back Up and Restore {{{ .premium }}} Data + +This document describes how to back up and restore your data on {{{ .premium }}} instances. {{{ .premium }}} supports automatic backup and lets you restore backup data to a new instance as needed. + +Backup files can originate from the following sources: + +- Active {{{ .premium }}} instances +- The Recycle Bin for backups from deleted Premium instances + +> **Tip:** +> +> - To learn how to back up and restore data on {{{ .dedicated }}} clusters, see [Back Up and Restore {{{ .dedicated }}} Data](/tidb-cloud/backup-and-restore.md). +> - To learn how to back up and restore data on {{{ .starter }}} or {{{ .essential }}} clusters, see [Back Up and Restore {{{ .starter }}} or Essential Data](/tidb-cloud/backup-and-restore-serverless.md). 
+ +## View the Backup page + +1. On the [**TiDB Instances**](https://tidbcloud.com/tidbs) page, click the name of your target instance to go to its overview page. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations and instances. + +2. In the left navigation pane, click **Data** > **Backup**. + +## Automatic backups + +{{{ .premium }}} provides enhanced automatic backup capabilities for production environments. It combines high-frequency snapshots with log backups to ensure data reliability. + +### Automatic backup policies + +{{{ .premium }}} instances use a multi-layer backup architecture to protect your data, as described in the following table: + +| Backup type | Retention period | Restore granularity | +| --- | --- | --- | +| **Point-in-time recovery (PITR)** | 7 days | Restore to any specific point in time within the 7-day window. | +| **Hourly snapshot** | 7 days | Restore from any hourly snapshot generated within the last 7 days. | +| **Daily snapshot** | 33 days | Restore from any daily snapshot generated within the last 33 days. By default, daily snapshots are captured at 00:00 UTC. | + +### Backup execution rules + +- **Backup cycle**: {{{ .premium }}} instances perform both hourly and daily automatic backups. + +- **Backup schedule**: + + - Hourly backups run at the start of every hour. + - Daily backups run at 00:00 UTC each day. + - Currently, you cannot customize or manage backup schedules. + +- **Retention behavior**: backups expire automatically when they exceed their retention period (7 days or 33 days) and cannot be restored. + +> **Note:** +> +> - Automatic backup storage costs depend on the backup data volume and the retention period. +> - To extend the backup retention period beyond the default limits, contact [TiDB Cloud Support](https://docs.pingcap.com/tidbcloud/tidb-cloud-support). 
+ +### Delete backup files + +To delete an existing backup file for your {{{ .premium }}} instance, perform the following steps: + +1. Navigate to the [**Backup**](#view-the-backup-page) page of your instance. + +2. Locate the corresponding backup file you want to delete, and click **...** > **Delete** in the **Action** column. + +## Restore + +TiDB Cloud provides restore functionality to help recover data in case of accidental loss or corruption. You can restore from backups of active instances or from deleted instances in the Recycle Bin. + +### Restore mode + +TiDB Cloud supports snapshot restore and point-in-time restore for your instance. + +- **Snapshot Restore**: restores your instance from a specific backup snapshot. + +- **Point-in-Time Restore**: restores your instance to a specific point in time. + + - Premium instances: can be restored to any time within the last 33 days, but not earlier than the instance creation time or later than one minute before the current time. + +### Restore destination + +TiDB Cloud supports restoring data to a new instance. + +### Restore to a new instance + +To restore your data to a new instance, take the following steps: + +1. Navigate to the [**Backup**](#view-the-backup-page) page of your instance. + +2. Click **Restore**. + +3. On the **Select Backup** page, choose the **Restore Mode** you want to use. You can restore from a specific backup snapshot or restore to a specific point in time. + + +
+ + To restore from a selected backup snapshot, take the following steps: + + 1. Click **Snapshot Restore**. + 2. Select the backup snapshot you want to restore from. + +
+
+ + To restore to a specific point in time for a Premium instance, take the following steps: + + 1. Click **Point-in-Time Restore**. + 2. Select the date and time you want to restore to. + +
+
+ +4. Click **Next** to proceed to the **Restore to a New Instance** page. + +5. Configure your new TiDB instance for restoration. The steps are the same as [creating a new TiDB instance](/tidb-cloud/premium/create-tidb-instance-premium.md). + + > **Note:** + > + > The new instance uses the same cloud provider and region as the backup by default. + +6. Click **Restore** to start the restore process. + + When the restore process starts, the instance status first changes to **Creating**. After the creation is complete, it changes to **Restoring**. The instance remains unavailable until the restore finishes and the status changes to **Available**. + +### Restore from Recycle Bin + +To restore a deleted instance from the Recycle Bin, take the following steps: + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com), and then navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page. In the top-right corner, click **Recycle Bin**. + +2. On the **Recycle Bin** page, locate the TiDB instance you want to restore: + + - Click the **>** button to expand instance details. + - Find the desired backup, click **...** in the **Action** column, and then select **Restore**. + +3. On the **Restore** page, follow the same steps as [Restore to a new instance](#restore-to-a-new-instance) to restore the backup to a new instance. + +### Restore backups from a different plan type + +Currently, you can only restore backups from a {{{ .dedicated }}} cluster hosted on AWS to a new {{{ .premium }}} instance. + +To restore a backup generated by a {{{ .dedicated }}} cluster, follow these steps: + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com), and then navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page. In the upper-right corner, click **...**, and then click **Restore from Another Plan**. + +2. On the **Select Backup** page, select the project that contains the target {{{ .dedicated }}} cluster. 
Select the cluster, select the backup snapshot that you want to restore, and then click **Next**. + + > **Note:** + > + > - Ensure that the cluster that contains the backup snapshot is in either the **Active** or **Deleted** status within the selected project. + > - The snapshot must be located in a region that {{{ .premium }}} supports. If the region is not supported, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) to open a new region for {{{ .premium }}}, or select another backup snapshot. + +3. On the **Restore** page, follow the same steps as [Restore to a new instance](#restore-to-a-new-instance) to restore the backup to a new instance. + +### Restore backups from cloud storage + +{{{ .premium }}} supports restoring backups from cloud storage (such as Amazon S3 and Alibaba Cloud Object Storage Service (OSS)) to a new instance. This feature is compatible with backups generated from {{{ .dedicated }}} clusters or TiDB Self-Managed clusters. + +>**Note:** +> +> - Currently, only backups located in **Amazon S3** and **Alibaba Cloud OSS** are supported for restore. +> - You can restore backups only to a new instance hosted by the same cloud provider as your storage bucket. +> - If the instance and the storage bucket are located in different regions, additional cross-region data transfer fees might apply. + +#### Steps + +Before you begin, ensure that you have an access key and secret key with sufficient permissions to access the backup files. + +To restore backups from cloud storage, do the following: + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com), and then navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page. In the upper-right corner, click **...** , and then click **Restore from Cloud Storage**. + +2. On the **Select Backup Storage Location** page, provide the following information: + + - **Cloud Provider**: select the cloud provider where your backup files are stored. 
+ - **Region**: if your cloud provider is Alibaba Cloud OSS, select a region. + - **Backup Files URI**: enter the URI of the top-level folder that contains your backup files. + - **Access Key ID**: enter your access key ID. + - **Access Key Secret**: enter your access key secret. + + > **Tip:** + > + > To create an access key for your storage bucket, see [Configure Amazon S3 access using an AWS access key](#configure-amazon-s3-access-using-an-aws-access-key) and [Configure Alibaba Cloud OSS access](#configure-alibaba-cloud-oss-access). + +3. Click **Verify Backup and Next**. + +4. If the verification is successful, the **Restore to a New Instance** page appears. Review the backup information displayed at the top of the page, and then follow the steps in [Create a {{{ .premium }}} Instance](/tidb-cloud/premium/create-tidb-instance-premium.md) to restore the backup to a new instance. + + If the backup information is incorrect, click **Previous** to return to the previous page, and then enter the correct information. + +5. Click **Restore** to restore the backup. + +## Limitations + +Currently, manual backups are not supported for {{{ .premium }}} instances. + +## References + +This section describes how to configure access for Amazon S3 and Alibaba Cloud OSS. + +### Configure Amazon S3 access using an AWS access key + +It is recommended that you use an IAM user, rather than the AWS account root user, to create an access key. + +Take the following steps to configure an access key: + +1. Create an IAM user and access key. + + 1. Create an IAM user. For more information, see [Create an IAM user in your AWS account](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html#id_users_create_console). + 2. Sign in to the [IAM console](https://console.aws.amazon.com/iam) using your AWS account ID or account alias, and your IAM user name and password. + 3. Create an access key. 
For more information, see [Manage access keys for IAM users](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). + +2. Grant permissions to the IAM user. + + Create a policy with only the permissions required for your task and attach it to the IAM user. To restore data to a {{{ .premium }}} instance, grant the `s3:GetObject`, `s3:GetBucketLocation`, and `s3:ListBucket` permissions. + + The following is an example policy that allows TiDB Cloud to restore data from a specific folder in your Amazon S3 bucket. + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowGetBucketLocation", + "Effect": "Allow", + "Action": "s3:GetBucketLocation", + "Resource": "arn:aws:s3:::" + }, + { + "Sid": "AllowListPrefix", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::", + "Condition": { + "StringLike": { + "s3:prefix": "/*" + } + } + }, + { + "Sid": "AllowReadObjectsInPrefix", + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3::://*" + } + ] + } + ``` + + In the preceding policy, replace `` and `` with your actual bucket name and backup directory. This configuration follows the principle of least privilege by limiting access to only the necessary backup files. + +> **Note:** +> +> TiDB Cloud does not store your access keys. To maintain security, [delete the access key](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) after the import or export task is complete. + +### Configure Alibaba Cloud OSS access + +To grant TiDB Cloud access to your Alibaba Cloud OSS bucket, you need to create an AccessKey pair for the bucket. + +Take the following steps to configure an AccessKey pair: + +1. Create a RAM user and obtain the AccessKey pair. For more information, see [Create a RAM user](https://www.alibabacloud.com/help/en/ram/user-guide/create-a-ram-user). 
+ + In the **Access Mode** section, select **Using permanent AccessKey to access**. + +2. Create a custom policy with the required permissions. For more information, see [Create custom policies](https://www.alibabacloud.com/help/en/ram/user-guide/create-a-custom-policy). + + - In the **Effect** section, select **Allow**. + - In the **Service** section, select **Object Storage Service**. + - In the **Action** section, select the required permissions. To restore a backup to a TiDB Cloud instance, grant the `oss:ListObjects` and `oss:GetObject` permissions. + + > **Tip:** + > + > To enhance security for restore operations, you can restrict access to the specific folder (`oss:Prefix`) where your backup files are stored rather than granting access to the entire bucket. + + The following JSON example shows a policy for a restore task. This policy restricts access to a specific bucket and backup folder. + + ```json + { + "Version": "1", + "Statement": [ + { + "Effect": "Allow", + "Action": "oss:ListObjects", + "Resource": "acs:oss:*:*:", + "Condition": { + "StringLike": { + "oss:Prefix": "/*" + } + } + }, + { + "Effect": "Allow", + "Action": "oss:GetObject", + "Resource": "acs:oss:*:*://*" + } + ] + } + ``` + + - In the **Resource** section, select the bucket and the specific objects in the bucket. + +3. Attach the custom policies to the RAM user. + + For more information, see [Grant permissions to a RAM user](https://www.alibabacloud.com/help/en/ram/user-guide/grant-permissions-to-the-ram-user). diff --git a/tidb-cloud/premium/built-in-monitoring-premium.md b/tidb-cloud/premium/built-in-monitoring-premium.md new file mode 100644 index 0000000000000..b3b9b3269588a --- /dev/null +++ b/tidb-cloud/premium/built-in-monitoring-premium.md @@ -0,0 +1,82 @@ +--- +title: "{{{ .premium }}} Built-in Metrics" +summary: Learn how to view {{{ .premium }}} built-in metrics and understand the meanings of these metrics. 
+--- + +# {{{ .premium }}} Built-in Metrics + +TiDB Cloud collects and displays a full set of standard metrics of your cluster on the Metrics page. By viewing these metrics, you can easily identify performance issues and determine whether your current database deployment meets your requirements. + +## View the Metrics page + +To view the metrics on the **Metrics** page, take the following steps: + +1. On the [**TiDB Instances**](https://tidbcloud.com/tidbs) page, click the name of your target instance to go to its overview page. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations and TiDB instances. + +2. In the left navigation pane, click **Monitoring** > **Metrics**. + +## Metrics retention policy + +For TiDB Cloud instances, the metrics data is kept for 7 days. + +## Metrics for {{{ .premium }}} Instances + +The following sections illustrate the metrics on the **Metrics** page for {{{ .premium }}} instances. + +### Overview + +| Metric name | Labels | Description | +| :------------| :------| :-------------------------------------------- | +| Request Units per Second | Total RU per second | The Request Unit (RU) is a unit of measurement used to track the resource consumption of a query or transaction. In addition to queries that you run, request units can be consumed by background activities, so when the QPS is 0, the request units per second might not be zero. | +| Used Storage Size | {type} | The size of the row store and the size of the column store. | +| Query Per Second | All, {SQL type} | The number of SQL statements executed per second in all TiDB instances, which are collected by SQL types, such as `SELECT`, `INSERT`, and `UPDATE`. | +| Query Duration | avg, avg-{SQL type}, 99, 99-{SQL type} | The duration from receiving a request from the client to TiDB until TiDB executes the request and returns the result to the client. 
| +| Database Time by SQL Types | All, {SQL type} | All: total database time per second.
{SQL type}: database time consumed by SQL statements per second, which are collected by SQL types, such as `SELECT`, `INSERT`, and `UPDATE`. | +| Failed Queries | All | The statistics of error types (such as syntax errors and primary key conflicts) according to the SQL statement execution errors per minute. | +| Command Per Second | {type} | The number of commands processed by all TiDB instances per second based on command types. | +| Queries Using Plan Cache OPS | hit, miss | hit: the number of queries using plan cache per second in all TiDB instances.
miss: the number of queries missing plan cache per second in all TiDB instances. | +| Transaction Per Second | {types}-{transaction model} | The number of transactions executed per second. | +| Transaction Duration | avg-{transaction model}, 99-{transaction model} | The average or the 99th percentile duration of transactions. | +| Connection Count | All, active connection | All: the number of connections to all TiDB instances.
Active connections: the number of active connections to all TiDB instances. | +| Disconnection Count | {result} | The number of clients disconnected from all TiDB instances. | + +### Database + +| Metric name | Labels | Description | +| :------------| :------| :-------------------------------------------- | +| QPS Per DB | All, {database} | The number of SQL statements executed per second on every database, which are collected by SQL types, such as `SELECT`, `INSERT`, and `UPDATE`. | +| Query Duration Per DB | avg, avg-{database}, 99, 99-{database} | The duration from receiving a request from the client to a database until the database executes the request and returns the result to the client. | +| Failed Query Per DB | All, {database} | The statistics of error types according to the SQL statement execution errors per second on every database. | + +### Advanced + +| Metric name | Labels | Description | +| :------------| :------| :-------------------------------------------- | +| Average Idle Connection Duration | avg-in-txn, avg-not-in-txn | The connection idle duration indicates the duration of a connection being idle.
avg-in-txn: The average connection idle duration when a connection is within a transaction.
avg-not-in-txn: The average connection idle duration when a connection is not within a transaction. | +| Get Token Duration | avg, 99 | The average or the 99th percentile duration consumed in getting tokens of SQL statements. | +| Parse Duration | avg, 99 | The average or the 99th percentile duration consumed in parsing SQL statements. | +| Compile Duration | avg, 99 | The average or the 99th percentile duration consumed in compiling the parsed SQL AST to execution plans. | +| Execute Duration | avg, 99 | The average or the 99th percentile duration consumed in executing execution plans of SQL statements. | +| Average TiDB KV Request Duration | {Request Type} | The average time consumed in executing KV requests in all TiDB instances based on request types, such as `Get`, `Prewrite`, and `Commit`. | +| Average / P99 PD TSO Wait/RPC Duration | wait-avg/99, rpc-avg/99 | Wait: the average or the 99th percentile duration in waiting for PD to return TSO in all TiDB instances.
RPC: the average time or the 99th percentile of duration from sending TSO requests to PD to receiving TSO in all TiDB instances. | + +## FAQ + +**1. Why are some panes empty on this page?** + +If a pane does not provide any metrics, the possible reasons are as follows: + +- The workload of the corresponding cluster does not trigger this metric. For example, the failed query metric is always empty in the case of no failed queries. +- The cluster version is low. You need to upgrade it to the latest version of TiDB to see these metrics. + +If all these reasons are excluded, you can contact the [PingCAP support team](/tidb-cloud/tidb-cloud-support.md) for troubleshooting. + +**2. Why might metrics be discontinuous in rare cases?** + +In some rare cases, metrics might be lost, such as when the metrics system experiences high pressure. + +If you encounter this problem, you can contact [PingCAP Support](/tidb-cloud/tidb-cloud-support.md) for troubleshooting. diff --git a/tidb-cloud/premium/configure-ip-access-list-premium.md b/tidb-cloud/premium/configure-ip-access-list-premium.md new file mode 100644 index 0000000000000..0c5e7621b321d --- /dev/null +++ b/tidb-cloud/premium/configure-ip-access-list-premium.md @@ -0,0 +1,30 @@ +--- +title: Configure an IP Access List for {{{ .premium }}} +summary: Learn how to configure IP addresses that are allowed to access your {{{ .premium }}} instance. +--- + +# Configure an IP Access List for {{{ .premium }}} + +For each {{{ .premium }}} instance in TiDB Cloud, you can configure an IP access list to filter internet traffic trying to access the instance, which works similarly to a firewall access control list. After the configuration, only the clients and applications whose IP addresses are in the IP access list can connect to your {{{ .premium }}} instance. + +> **Note:** +> +> This document applies to **{{{ .premium }}}**. 
For instructions on configuring an IP access list for **{{{ .starter }}}** or **{{{ .essential }}}**, see [Configure {{{ .starter }}} or Essential Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md). + +To configure an IP access list for your {{{ .premium }}} instance, take the following steps: + +1. Navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page, and then click the name of your target instance to go to its overview page. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations. + +2. In the left navigation pane, click **Settings** > **Networking**. +3. On the **Networking** page, click **Enable** for **Public Endpoint** to make the instance accessible via a public endpoint, and then click **Add IP Address**. +4. In the displayed dialog, choose one of the following options: + + - **Allow access from anywhere**: allows all IP addresses to access TiDB Cloud. This option exposes your instance to the internet completely and is highly risky. + - **Use IP addresses** (recommended): you can add a list of IPs and CIDR addresses that are allowed to access TiDB Cloud via a SQL client. + +5. If you choose **Use IP addresses**, add IP addresses or CIDR ranges with an optional description. +6. Click **Confirm** to save your changes. diff --git a/tidb-cloud/premium/connect-to-premium-via-alibaba-cloud-private-endpoint.md b/tidb-cloud/premium/connect-to-premium-via-alibaba-cloud-private-endpoint.md new file mode 100644 index 0000000000000..258ad804051b5 --- /dev/null +++ b/tidb-cloud/premium/connect-to-premium-via-alibaba-cloud-private-endpoint.md @@ -0,0 +1,88 @@ +--- +title: Connect to {{{ .premium }}} via Alibaba Cloud Private Endpoint +summary: Learn how to connect to your {{{ .premium }}} instance via a private endpoint on Alibaba Cloud. 
+--- + +# Connect to {{{ .premium }}} via Alibaba Cloud Private Endpoint + +This document describes how to connect to your {{{ .premium }}} instance via a private endpoint on Alibaba Cloud. Connecting through a private endpoint enables secure and private communication between your services and your TiDB instance without using the public internet. + +> **Tip:** +> +> To learn how to connect to a {{{ .premium }}} instance via AWS PrivateLink, see [Connect to {{{ .premium }}} via AWS PrivateLink](/tidb-cloud/premium/connect-to-premium-via-aws-private-endpoint.md). + +## Restrictions + +- Currently, TiDB Premium supports private endpoint connections when the endpoint service is hosted on AWS or Alibaba Cloud. If the service is hosted on another cloud provider, the private endpoint is not applicable. +- Cross-region private endpoint connections are not supported. + +## Set up a private endpoint with Alibaba Cloud + +To connect to your Premium instance via a private endpoint, perform the following steps. + +### Step 1. Choose a TiDB instance + +1. On the [**TiDB Instances**](https://{{{.console-url}}}/instances) page, click the name of your target TiDB instance to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed. +3. In the **Connection Type** drop-down list, select **Private Endpoint**. +4. Take a note of **Service Name**, **Availability Zone ID**, and **Region ID**. + +### Step 2. Create a private endpoint on Alibaba Cloud + +To use the Alibaba Cloud Management Console to create a VPC interface endpoint, perform the following steps: + +1. Sign in to the [Alibaba Cloud Management Console](https://account.alibabacloud.com/login/login.htm). +2. Navigate to **VPC** > **Endpoints**. +3. Click the **Interface Endpoints** tab, and then click **Create Endpoint**. +4. Fill in the endpoint details: + - **Region**: select the same region as your TiDB Cloud instance. + - **Endpoint Name**: enter a name for the endpoint. 
+ - **Endpoint Type**: choose **Interface Endpoint**. + - **Endpoint Service**: select **Other Endpoint Services**. +5. In the **Endpoint Service Name** field, paste the service name you copied from TiDB Cloud. +6. Click **Verify**. A green check mark indicates that the service is valid. +7. Choose the **VPC**, **Security Group**, and **Zone** to associate with the endpoint. +8. Click **OK** to create the endpoint. +9. Wait until the endpoint status is **Active** and the connection status is **Connected**. + +After creating the interface endpoint, navigate to the **EndPoints** page and select the newly created endpoint. + +- In the **Basic Information** section, copy the **Endpoint ID**. You will use this value later as the *Endpoint Resource ID*. + +- In the **Domain name of Endpoint Service** section, copy the **Default Domain Name**. You will use this value later as the *Domain Name*. + + ![AliCloud private endpoint Information](/media/tidb-cloud/private-endpoint/alicloud-private-endpoint-info.png) + +### Step 3. Accept the endpoint and create the endpoint connection + +1. Return to the **Create Alibaba Cloud Private Endpoint Connection** dialog in the TiDB Cloud console. + +2. Paste the *Endpoint Resource ID* and *Domain Name* that you copied earlier into the corresponding fields. + +3. Click **Create Private Endpoint Connection** to accept the connection from your private endpoint. + +### Step 4. Connect to your TiDB instance + +After you have accepted the endpoint connection, you are redirected back to the connection dialog. + +1. Wait for the private endpoint connection status to become **Active** (approximately 5 minutes). To check the status, navigate to the **Networking** page by clicking **Settings** > **Networking** in the left navigation pane. + +2. In the **Connect With** drop-down list, select your preferred connection method. The corresponding connection string is displayed at the bottom of the dialog. + +3. 
Connect to your instance using the connection string. + +## Private endpoint status reference + +To view the statuses of private endpoints or private endpoint services, navigate to the **Networking** page by clicking **Settings** > **Networking** in the left navigation pane. + +The possible statuses of a private endpoint are explained as follows: + +- **Pending**: waiting for processing. +- **Active**: the private endpoint is ready for use. +- **Deleting**: the private endpoint is being deleted. +- **Failed**: the private endpoint creation fails. You can delete the private endpoint and create a new one. + +The possible statuses of a private endpoint service are explained as follows: + +- **Creating**: the endpoint service is being created, which takes 3 to 5 minutes. +- **Active**: the endpoint service is created, no matter whether the private endpoint is created or not. \ No newline at end of file diff --git a/tidb-cloud/premium/connect-to-premium-via-aws-private-endpoint.md b/tidb-cloud/premium/connect-to-premium-via-aws-private-endpoint.md new file mode 100644 index 0000000000000..eaf8775f3e014 --- /dev/null +++ b/tidb-cloud/premium/connect-to-premium-via-aws-private-endpoint.md @@ -0,0 +1,201 @@ +--- +title: Connect to {{{ .premium }}} via AWS PrivateLink +summary: Learn how to connect to your {{{ .premium }}} instance via private endpoint with AWS. +--- + +# Connect to {{{ .premium }}} via AWS PrivateLink + +This document describes how to connect to your {{{ .premium }}} instance via [AWS PrivateLink](https://aws.amazon.com/privatelink). + +> **Tip:** +> +> To learn how to connect to a {{{ .starter }}} or {{{ .essential }}} cluster via AWS PrivateLink, see [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). 
+ +TiDB Cloud supports highly secure and one-way access to the TiDB Cloud service hosted in an AWS VPC via [AWS PrivateLink](https://aws.amazon.com/privatelink), as if the service were in your own VPC. A private endpoint is exposed in your VPC and you can create a connection to the TiDB Cloud service via the endpoint with permission. + +Powered by AWS PrivateLink, the endpoint connection is secure and private, and does not expose your data to the public internet. In addition, the endpoint connection supports CIDR overlap and is easier for network management. + +The architecture of the private endpoint is as follows: + +![Private endpoint architecture](/media/tidb-cloud/aws-private-endpoint-arch.png) + +For more detailed definitions of the private endpoint and endpoint service, see the following AWS documents: + +- [What is AWS PrivateLink?](https://docs.aws.amazon.com/vpc/latest/privatelink/what-is-privatelink.html) +- [AWS PrivateLink concepts](https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html) + +## Restrictions + +- Only users with the `Organization Owner` role can create private endpoint connections. +- The private endpoint and the TiDB instance you want to connect to must be located in the same region. + +## Prerequisites + +Make sure that DNS hostnames and DNS resolution are both enabled in your AWS VPC settings. They are disabled by default when you create a VPC in the [AWS Management Console](https://console.aws.amazon.com/). + +## Set up a private endpoint connection and connect to your instance + +To connect to your {{{ .premium }}} instance via a private endpoint, follow these steps: + +1. [Select a TiDB instance](#step-1-select-a-tidb-instance) +2. [Create an AWS interface endpoint](#step-2-create-an-aws-interface-endpoint) +3. [Create a private endpoint connection](#step-3-create-a-private-endpoint-connection) +4. [Enable private DNS](#step-4-enable-private-dns) +5. 
[Connect to your TiDB instance](#step-5-connect-to-your-tidb-instance) + +If you have multiple instances, you need to repeat these steps for each instance that you want to connect to using AWS PrivateLink. + +### Step 1. Select a TiDB instance + +1. On the [**TiDB Instances**](https://tidbcloud.com/tidbs) page of your TiDB Cloud web console, click the name of your target TiDB instance to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed. +3. In the **Connection Type** drop-down list, select **Private Endpoint**, and then click **Create Private Endpoint Connection**. + +> **Note:** +> +> If you have already created a private endpoint connection, the active endpoint will appear in the connection dialog. To create additional private endpoint connections, navigate to the **Networking** page by clicking **Settings** > **Networking** in the left navigation pane. + +### Step 2. Create an AWS interface endpoint + +> **Note:** +> +> For each {{{ .premium }}} instance, the corresponding endpoint service is automatically created 3 to 4 minutes after the instance creation. + +If you see the `TiDB Private Link Service is ready` message, the corresponding endpoint service is ready. You can provide the following information to create the endpoint. + +1. Fill in the **Your VPC ID** and **Your Subnet IDs** fields. You can find these IDs from your [AWS Management Console](https://console.aws.amazon.com/). For multiple subnets, enter the IDs separated by spaces. +2. Click **Generate Command** to get the following endpoint creation command. + + ```bash + aws ec2 create-vpc-endpoint --vpc-id ${your_vpc_id} --region ${your_region} --service-name ${your_endpoint_service_name} --vpc-endpoint-type Interface --subnet-ids ${your_application_subnet_ids} + ``` + +Then, you can create an AWS interface endpoint either using the AWS CLI or using the [AWS Management Console](https://aws.amazon.com/console/). + + +
+ +To use the AWS CLI to create a VPC interface endpoint, perform the following steps: + +1. Copy the generated command and run it in your terminal. +2. Record the VPC endpoint ID you just created. + +> **Tip:** +> +> - Before running the command, you need to have AWS CLI installed and configured. See [AWS CLI configuration basics](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) for details. +> +> - If your service is spanning across more than three availability zones (AZs), you will get an error message indicating that the VPC endpoint service does not support the AZ of the subnet. This issue occurs when there is an extra AZ in your selected region in addition to the AZs where your TiDB instance is located. In this case, you can contact [PingCAP Technical Support](https://docs.pingcap.com/tidbcloud/tidb-cloud-support). + +
+
+ +To use the AWS Management Console to create a VPC interface endpoint, perform the following steps: + +1. Sign in to the [AWS Management Console](https://aws.amazon.com/console/) and open the Amazon VPC console at [https://console.aws.amazon.com/vpc/](https://console.aws.amazon.com/vpc/). +2. Click **Endpoints** in the navigation pane, and then click **Create Endpoint** in the upper-right corner. + + The **Create endpoint** page is displayed. + + ![Verify endpoint service](/media/tidb-cloud/private-endpoint/create-endpoint-2.png) + +3. In the **Endpoint settings** area, fill in a name tag if needed, and then select the **Endpoint services that use NLBs and GWLBs** option. +4. In the **Service settings** area, enter the service name `${your_endpoint_service_name}` from the generated command (`--service-name ${your_endpoint_service_name}`). +5. Click **Verify service**. +6. In the **Network settings** area, select your VPC in the drop-down list. +7. In the **Subnets** area, select the availability zones where your TiDB instance is located. + + > **Tip:** + > + > If your service is spanning across more than three availability zones (AZs), you might not be able to select AZs in the **Subnets** area. This issue occurs when there is an extra AZ in your selected region in addition to the AZs where your TiDB instance is located. In this case, contact [PingCAP Technical Support](https://docs.pingcap.com/tidbcloud/tidb-cloud-support). + +8. In the **Security groups** area, select your security group properly. + + > **Note:** + > + > Make sure the selected security group allows inbound access from your EC2 instances on port `4000` or a customer-defined port. + +9. Click **Create endpoint**. + +
+
+ +### Step 3. Create a private endpoint connection + +1. Go back to the TiDB Cloud console. +2. On the **Create AWS Private Endpoint Connection** page, enter your VPC endpoint ID. +3. Click **Create Private Endpoint Connection**. + +> **Tip:** +> +> You can view and manage private endpoint connections on the **Networking** page of your target TiDB instance. To access this page, click **Settings** > **Networking** in the left navigation pane. + +### Step 4. Enable private DNS + +Enable private DNS in AWS. You can either use the AWS CLI or the AWS Management Console. + + +
+ +To enable private DNS using your AWS CLI, copy the following `aws ec2 modify-vpc-endpoint` command from the **Create Private Endpoint Connection** page and run it in your AWS CLI. + +```bash +aws ec2 modify-vpc-endpoint --vpc-endpoint-id ${your_vpc_endpoint_id} --private-dns-enabled +``` + +Alternatively, you can find the command on the **Networking** page of your instance. Locate the private endpoint and click **...** > **Enable DNS** in the **Action** column. + +
+
+ +To enable private DNS in your AWS Management Console: + +1. Go to **VPC** > **Endpoints**. +2. Right-click your endpoint ID and select **Modify private DNS name**. +3. Select the **Enable for this endpoint** check box. +4. Click **Save changes**. + + ![Enable private DNS](/media/tidb-cloud/private-endpoint/enable-private-dns.png) + +
+
+ +### Step 5. Connect to your TiDB instance + +After you have accepted the private endpoint connection, you are redirected back to the connection dialog. + +1. Wait for the private endpoint connection status to change from **System Checking** to **Active** (approximately 5 minutes). +2. In the **Connect With** drop-down list, select your preferred connection method. The corresponding connection string is displayed at the bottom of the dialog. +3. Connect to your instance using the connection string. + +> **Tip:** +> +> If you cannot connect to the instance, the reason might be that the security group of your VPC endpoint in AWS is not properly set. See [this FAQ](#troubleshooting) for solutions. + +### Private endpoint status reference + +When you use private endpoint connections, the statuses of private endpoints and private endpoint services are displayed on the instance-level **Networking** page: + +1. Switch to your target instance using the combo box in the upper-left corner. +2. Click **Settings** > **Networking** in the left navigation pane. + +The possible statuses of a private endpoint are explained as follows: + +- **Not Configured**: The endpoint service is created but the private endpoint is not created yet. +- **Pending**: Waiting for processing. +- **Active**: Your private endpoint is ready to use. You cannot edit a private endpoint in this status. +- **Deleting**: The private endpoint is being deleted. +- **Failed**: The private endpoint creation fails. You can click **Edit** in that row to retry the creation. + +The possible statuses of a private endpoint service are explained as follows: + +- **Creating**: The endpoint service is being created, which takes 3 to 5 minutes. +- **Active**: The endpoint service is created, regardless of whether the private endpoint is created or not. +- **Deleting**: The endpoint service or the instance is being deleted, which takes 3 to 5 minutes. 
+ +## Troubleshooting + +### I cannot connect to a TiDB instance via a private endpoint after enabling private DNS. Why? + +You might need to properly set the security group for your VPC endpoint in the AWS Management Console. To do so, go to **VPC** > **Endpoints**, right-click your VPC endpoint, and select **Manage security groups**. Ensure that the selected security group allows inbound access from your EC2 instances on port `4000` or a customer-defined port. + +![Manage security groups](/media/tidb-cloud/private-endpoint/manage-security-groups.png) diff --git a/tidb-cloud/premium/connect-to-premium-via-public-connection.md b/tidb-cloud/premium/connect-to-premium-via-public-connection.md new file mode 100644 index 0000000000000..f3ee4b024bf66 --- /dev/null +++ b/tidb-cloud/premium/connect-to-premium-via-public-connection.md @@ -0,0 +1,45 @@ +--- +title: Connect to {{{ .premium }}} via Public Connection +summary: Learn how to connect to your {{{ .premium }}} via public connection. +--- + +# Connect to {{{ .premium }}} via Public Connection + +This document describes how to connect to your {{{ .premium }}} instance via public connection. The public connection exposes a public endpoint with traffic filters, so you can connect to your {{{ .premium }}} instance via a SQL client from your laptop. + +> **Tip:** +> +> - To learn how to connect to a {{{ .starter }}} or {{{ .essential }}} cluster via public connection, see [Connect to {{{ .starter }}} or Essential via Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md). +> - To learn how to connect to a TiDB Cloud Dedicated cluster via public endpoint, see [Connect to TiDB Cloud Dedicated via Public Connection](/tidb-cloud/connect-via-standard-connection.md). + +## Prerequisite: Configure IP access list + +For public connections, {{{ .premium }}} only allows client connections from addresses in the IP access list. 
If you have not configured the IP access list, follow the steps in [Configure an IP Access List](/tidb-cloud/premium/configure-ip-access-list-premium.md) to configure it before your first connection. + +## Connect to the instance + +To connect to a {{{ .premium }}} instance via public connection, take the following steps: + +1. Open the overview page of the target instance. + + 1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations. + + 2. Click the name of your target instance to go to its overview page. + +2. Click **Connect** in the upper-right corner. A connection dialog is displayed. + +3. In the connection dialog, select **Public** from the **Connection Type** drop-down list. + + If you have not configured the IP access list, click **Configure IP Access List** or follow the steps in [Configure an IP Access List](/tidb-cloud/premium/configure-ip-access-list-premium.md) to configure it before your first connection. + +4. Click **CA cert** to download CA cert for TLS connection to TiDB instances. The CA cert supports TLS 1.2 by default. + +5. Choose your preferred connection method, and then refer to the connection string and sample code on the tab to connect to your instance. + +## What's next + +After you have successfully connected to your TiDB instance, you can [explore SQL statements with TiDB](/basic-sql-operations.md). diff --git a/tidb-cloud/premium/connect-to-tidb-instance.md b/tidb-cloud/premium/connect-to-tidb-instance.md new file mode 100644 index 0000000000000..a15e269c66c94 --- /dev/null +++ b/tidb-cloud/premium/connect-to-tidb-instance.md @@ -0,0 +1,46 @@ +--- +title: Connect to Your {{{ .premium }}} Instance +summary: Learn how to connect to your {{{ .premium }}} instance via different methods. 
+--- + +# Connect to Your {{{ .premium }}} Instance + +This document describes how to connect to your {{{ .premium }}} instance. + +> **Tip:** +> +> To learn how to connect to a TiDB Cloud Dedicated cluster, see [Connect to Your TiDB Cloud Dedicated Cluster](/tidb-cloud/connect-to-tidb-cluster.md). + +## Connection methods + +After your {{{ .premium }}} instance is created on TiDB Cloud, you can connect to it via direct connections. + +Direct connections mean the MySQL native connection system over TCP. You can connect to your instance using any tool that supports MySQL connection, such as [MySQL client](https://dev.mysql.com/doc/refman/8.0/en/mysql.html). + +| Connection method | User interface | Scenario | +|--------------------|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Direct connections | SQL/ORM | Long-running environment, such as Java, Node.js, and Python. | + +## Network + +There are two network connection types for {{{ .premium }}}: + +- [Private endpoint](/tidb-cloud/premium/connect-to-premium-via-aws-private-endpoint.md) (recommended) + + Private endpoint connection provides a private endpoint to allow SQL clients in your VPC to securely access services over AWS PrivateLink, which provides highly secure and one-way access to database services with simplified network management. + +- [Public endpoint](/tidb-cloud/premium/connect-to-premium-via-public-connection.md) + + The standard connection exposes a public endpoint, so you can connect to your TiDB instance via a SQL client from your laptop. 
+ + + +The following table shows the network you can use: + +| Connection method | Network | Description | +|----------------------------|------------------------------|-------------------------------------------------------------------------------------------------------------------| +| Direct connections | Public or private endpoint | Direct connections can be made via both public and private endpoints. | + +## What's next + +After you have successfully connected to your TiDB instance, you can [explore SQL statements with TiDB](/basic-sql-operations.md). diff --git a/tidb-cloud/premium/create-tidb-instance-premium.md b/tidb-cloud/premium/create-tidb-instance-premium.md new file mode 100644 index 0000000000000..4e555836ff4ae --- /dev/null +++ b/tidb-cloud/premium/create-tidb-instance-premium.md @@ -0,0 +1,71 @@ +--- +title: Create a {{{ .premium }}} Instance +summary: Learn how to create a {{{ .premium }}} instance. +--- + +# Create a {{{ .premium }}} Instance + +This document describes how to create a {{{ .premium }}} instance in the [TiDB Cloud console](https://tidbcloud.com/). + +> **Note:** +> +> - Currently, {{{ .premium }}} is only available upon request. To request {{{ .premium }}}, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for {{{ .premium }}}" in the **Description** in the **Description** field, and then click **Submit**. +> - To learn how to create a TiDB Cloud Dedicated cluster, see [Create a TiDB Cloud Dedicated Cluster](/tidb-cloud/create-tidb-cluster.md). + +## Before you begin + +If you do not have a TiDB Cloud account, click [here](https://tidbcloud.com/signup) to sign up for an account. 
+ + + +- You can either sign up with email and password so that you can manage your password using TiDB Cloud, or sign up with your Google, GitHub, or Microsoft account. +- For AWS Marketplace users, you can also sign up through AWS Marketplace. To do that, search for `TiDB Cloud` in [AWS Marketplace](https://aws.amazon.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- For Azure Marketplace users, you can also sign up through Azure Marketplace. To do that, search for `TiDB Cloud` in [Azure Marketplace](https://azuremarketplace.microsoft.com), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- For Google Cloud Marketplace users, you can also sign up through Google Cloud Marketplace. To do that, search for `TiDB Cloud` in [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- For Alibaba Cloud Marketplace users, you can also sign up through Alibaba Cloud Marketplace. To do that, search for `TiDB Cloud` in [Alibaba Cloud Marketplace](https://marketplace.alibabacloud.com/), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. + + + + + +- You can either sign up with email and password so that you can manage your password using TiDB Cloud, or sign up with your Google, GitHub, or Microsoft account. +- For AWS Marketplace users, you can also sign up through AWS Marketplace. To do that, search for `TiDB Cloud` in [AWS Marketplace](https://aws.amazon.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- For Azure Marketplace users, you can also sign up through Azure Marketplace. 
To do that, search for `TiDB Cloud` in [Azure Marketplace](https://azuremarketplace.microsoft.com), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- For Google Cloud Marketplace users, you can also sign up through Google Cloud Marketplace. To do that, search for `TiDB Cloud` in [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. + + + +## Steps + +If you have the `Organization Owner` role, you can create a {{{ .premium }}} instance as follows: + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/), and then click **Switch to Private Preview** in the lower-left corner to open the **TiDB Instances** page for {{{ .premium }}}. + + > **Note:** + > + > If **Switch to Private Preview** is not visible in the lower-left corner of your TiDB Cloud console, it means that your organization has not been invited to the private preview of {{{ .premium }}}. In this case, you can click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for the private preview of {{{ .premium }}}" in the **Description** field, and then click **Submit**. + +2. On the **TiDB Instances** page, click **Create Instance**. +3. Enter a name for your {{{ .premium }}} instance. +4. Choose a cloud provider and a region where you want to host your instance. +5. In the **Capacity** area, set the maximum number of the Request Capacity Units (RCUs) for your instance. + + RCUs represent the compute resources provisioned for your workload. TiDB Cloud automatically scales your instance within this range based on demand. 
+ + > **Note:** + > + > You will be billed for the **Minimum Billing RCU** shown below the maximum number of RCUs, even if your actual usage is lower. The maximum RCU value must be set in increments of 100. + +6. For {{{ .premium }}} instances, only regional high availability is enabled, and it is not configurable. For more information, see [High Availability](/tidb-cloud/serverless-high-availability.md). + +7. Click **Create**. + + The instance creation process begins. If this is your first instance in the selected region, provisioning typically takes about 30 minutes. If the selected region already has existing instances, the process is faster and usually completes within about 1 minute. + +## What's next + +After your instance is created, follow the instructions in [Connect to TiDB Cloud via Public Endpoint](/tidb-cloud/premium/connect-to-premium-via-public-connection.md) to create a password for your instance. + +> **Note:** +> +> If you do not set a password, you cannot connect to the instance. diff --git a/tidb-cloud/premium/delete-tidb-instance.md b/tidb-cloud/premium/delete-tidb-instance.md new file mode 100644 index 0000000000000..882966fd21e52 --- /dev/null +++ b/tidb-cloud/premium/delete-tidb-instance.md @@ -0,0 +1,36 @@ +--- +title: Delete a {{{ .premium }}} instance +summary: Learn how to delete a {{{ .premium }}} instance. +--- + +# Delete a {{{ .premium }}} instance + +This document describes how to delete a {{{ .premium }}} instance. + +You can delete an instance at any time by performing the following steps: + +1. Navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page. +2. In the row of your target instance to be deleted, click **...**. + + > **Tip:** + > + > Alternatively, you can also click the name of the target instance to go to its overview page, and then click **...** in the upper-right corner. + +3. Click **Delete** in the drop-down menu. +4. 
In the deletion confirmation window, confirm the deletion: + + Enter `<organization name>/<instance name>` to ensure the instance is deleted correctly. + + If you want to restore the instance some time in the future, make sure that you have a backup of the instance. Otherwise, you cannot restore it. For more information about how to back up {{{ .premium }}} instances, see [Back Up and Restore {{{ .premium }}} Data](/tidb-cloud/premium/backup-and-restore-premium.md). + +5. Click **I understand, delete it**. + + Once you delete a backed up {{{ .premium }}} instance, the existing backup files of the instance are moved to the recycle bin. + + Automatic backups will expire and be automatically deleted once the retention period ends. The default retention period is 7 days if you don't modify it. + + > **Note:** + > + > Please be aware that backups will continue to incur charges until deleted. + + If you want to restore a {{{ .premium }}} instance from the recycle bin, see [Restore from Recycle Bin](/tidb-cloud/premium/backup-and-restore-premium.md#restore-from-recycle-bin). diff --git a/tidb-cloud/premium/import-csv-files-premium.md b/tidb-cloud/premium/import-csv-files-premium.md new file mode 100644 index 0000000000000..e17a169407dae --- /dev/null +++ b/tidb-cloud/premium/import-csv-files-premium.md @@ -0,0 +1,224 @@ +--- +title: Import CSV Files from Cloud Storage into {{{ .premium }}} +summary: Learn how to import CSV files from Amazon S3 or Alibaba Cloud Object Storage Service (OSS) into {{{ .premium }}} instances. +--- + +# Import CSV Files from Cloud Storage into {{{ .premium }}} + +This document describes how to import CSV files from Amazon Simple Storage Service (Amazon S3) or Alibaba Cloud Object Storage Service (OSS) into {{{ .premium }}} instances. + +> **Warning:** +> +> {{{ .premium }}} is currently available in **private preview** in select AWS regions. 
+> +> If Premium is not yet enabled for your organization, or if you need access in another cloud provider or region, click **Support** in the lower-left corner of the [TiDB Cloud console](https://tidbcloud.com/), or submit a request through the [Contact Us](https://www.pingcap.com/contact-us) form on the website. + +> **Tip:** +> +> - For {{{ .starter }}} or Essential, see [Import CSV Files from Cloud Storage into {{{ .starter }}} or Essential](/tidb-cloud/import-csv-files-serverless.md). +> - For {{{ .dedicated }}}, see [Import CSV Files from Cloud Storage into {{{ .dedicated }}}](/tidb-cloud/import-csv-files.md). + +## Limitations + +To ensure data consistency, {{{ .premium }}} allows importing CSV files into empty tables only. To import data into an existing table that already contains data, you can import the data into a temporary empty table by following this document, and then use the `INSERT SELECT` statement to copy the data to the target existing table. + +## Step 1. Prepare the CSV files + +1. If a CSV file is larger than 256 MiB, consider splitting it into smaller files, each with a size around 256 MiB. + + {{{ .premium }}} supports importing very large CSV files but performs best with multiple input files around 256 MiB in size. This is because {{{ .premium }}} can process multiple files in parallel, which can greatly improve the import speed. + +2. Name the CSV files as follows: + + - If a CSV file contains all data of an entire table, name the file in the `${db_name}.${table_name}.csv` format, which maps to the `${db_name}.${table_name}` table when you import the data. + - If the data of one table is separated into multiple CSV files, append a numeric suffix to these CSV files. For example, `${db_name}.${table_name}.000001.csv` and `${db_name}.${table_name}.000002.csv`. The numeric suffixes can be non-consecutive but must be in ascending order. You also need to add extra zeros before the number to ensure that all suffixes have the same length. 
+ - {{{ .premium }}} supports importing compressed files in the following formats: `.gzip`, `.gz`, `.zstd`, `.zst` and `.snappy`. If you want to import compressed CSV files, name the files in the `${db_name}.${table_name}.${suffix}.csv.${compress}` format, where `${suffix}` is optional and can be any integer such as '000001'. For example, if you want to import the `trips.000001.csv.gz` file to the `bikeshare.trips` table, you need to rename the file as `bikeshare.trips.000001.csv.gz`. + + > **Note:** + > + > - To achieve better performance, it is recommended to limit the size of each compressed file to 100 MiB. + > - The Snappy compressed file must be in the [official Snappy format](https://github.com/google/snappy). Other variants of Snappy compression are not supported. + > - For uncompressed files, if you cannot update the CSV filenames according to the preceding rules in some cases (for example, the CSV file links are also used by your other programs), you can keep the filenames unchanged and use the **Mapping Settings** in [Step 4](#step-4-import-csv-files) to import your source data to a single target table. + +## Step 2. Create the target table schemas + +Because CSV files do not contain schema information, before importing data from CSV files into {{{ .premium }}}, you need to create the table schemas using either of the following methods: + +- Method 1: In {{{ .premium }}}, create the target databases and tables for your source data. + +- Method 2: In the Amazon S3 or Alibaba Cloud Object Storage Service (OSS) directory where the CSV files are located, create the target table schema files for your source data as follows: + + 1. Create database schema files for your source data. + + If your CSV files follow the naming rules in [Step 1](#step-1-prepare-the-csv-files), the database schema files are optional for the data import. Otherwise, the database schema files are mandatory. 
+ + Each database schema file must be in the `${db_name}-schema-create.sql` format and contain a `CREATE DATABASE` DDL statement. With this file, {{{ .premium }}} will create the `${db_name}` database to store your data when you import the data. + + For example, if you create a `mydb-schema-create.sql` file that contains the following statement, {{{ .premium }}} will create the `mydb` database when you import the data. + + ```sql + CREATE DATABASE mydb; + ``` + + 2. Create table schema files for your source data. + + If you do not include the table schema files in the Amazon S3 or Alibaba Cloud Object Storage Service directory where the CSV files are located, {{{ .premium }}} will not create the corresponding tables for you when you import the data. + + Each table schema file must be in the `${db_name}.${table_name}-schema.sql` format and contain a `CREATE TABLE` DDL statement. With this file, {{{ .premium }}} will create the `${table_name}` table in the `${db_name}` database when you import the data. + + For example, if you create a `mydb.mytable-schema.sql` file that contains the following statement, {{{ .premium }}} will create the `mytable` table in the `mydb` database when you import the data. + + ```sql + CREATE TABLE mytable ( + ID INT, + REGION VARCHAR(20), + COUNT INT ); + ``` + + > **Note:** + > + > Each `${db_name}.${table_name}-schema.sql` file should only contain a single DDL statement. If the file contains multiple DDL statements, only the first one takes effect. + +## Step 3. Configure cross-account access + +To allow {{{ .premium }}} to access the CSV files in Amazon S3 or Alibaba Cloud Object Storage Service (OSS), do one of the following: + +- If your CSV files are located in Amazon S3, [configure Amazon S3 access](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access) for your TiDB instance. + + You can use either an AWS access key or a Role ARN to access your bucket. 
Once finished, make a note of the access key (including the access key ID and secret access key) or the Role ARN value as you will need it in [Step 4](#step-4-import-csv-files). + +- If your CSV files are located in Alibaba Cloud Object Storage Service (OSS), [configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/configure-external-storage-access.md#configure-alibaba-cloud-object-storage-service-oss-access) for your TiDB instance. + +## Step 4. Import CSV files + +To import the CSV files to {{{ .premium }}}, take the following steps: + + +
+ +1. Open the **Import** page for your target TiDB instance. + + 1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations and instances. + + 2. Click the name of your target TiDB instance to go to its overview page, and then click **Data** > **Import** in the left navigation pane. + +2. Click **Import data from Cloud Storage**. + +3. On the **Import Data from Cloud Storage** page, provide the following information: + + - **Storage Provider**: select **Amazon S3**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `s3://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `s3://sampledata/ingest/TableName.01.csv`. + - When importing multiple files, enter the source folder URI in the following format `s3://[bucket_name]/[data_source_folder]/`. For example, `s3://sampledata/ingest/`. + - **Credential**: you can use either an AWS Role ARN or an AWS access key to access your bucket. For more information, see [Configure Amazon S3 access](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access). + - **AWS Role ARN**: enter the AWS Role ARN value. If you need to create a new role, click **Click here to create a new one with AWS CloudFormation** and follow the guided steps to launch the provided template, acknowledge the IAM warning, create the stack, and copy the generated ARN back into {{{ .premium }}}. + - **AWS Access Key**: enter the AWS access key ID and AWS secret access key. + - **Test Bucket Access**: click this button after the credentials are in place to confirm that {{{ .premium }}} can reach the bucket. + - **Target Connection**: provide the TiDB username and password that will run the import. Optionally, click **Test Connection** to validate the credentials. + +4. Click **Next**. + +5. 
In the **Source Files Mapping** section, {{{ .premium }}} scans the bucket and proposes mappings between the source files and destination tables. + + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. + + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and {{{ .premium }}} automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. + + - Leave automatic mapping enabled to apply the [file naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to your source files and target tables. Keep **CSV** selected as the data format. + + - **Advanced options**: expand the panel to view the `Ignore compatibility checks (advanced)` toggle. Leave it disabled unless you intentionally want to bypass schema compatibility validation. + + + > **Note:** + > + > Manual mapping is coming soon. When the toggle becomes available, clear the automatic mapping option and configure the mapping manually: + > + > - **Source**: enter a filename pattern such as `TableName.01.csv`. Wildcards `*` and `?` are supported (for example, `my-data*.csv`). + > - **Target Database** and **Target Table**: choose the destination objects for the matched files. + +6. {{{ .premium }}} automatically scans the source path. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. + +7. When the import progress shows **Completed**, check the imported tables. + +
+ +
+ +1. Open the **Import** page for your target TiDB instance. + + 1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations and instances. + + 2. Click the name of your target TiDB instance to go to its overview page, and then click **Data** > **Import** in the left navigation pane. + +2. Click **Import data from Cloud Storage**. + +3. On the **Import Data from Cloud Storage** page, provide the following information: + + - **Storage Provider**: select **Alibaba Cloud OSS**. + - **Source Files URI**: + - When importing one file, enter the source file URI in the following format `oss://[bucket_name]/[data_source_folder]/[file_name].csv`. For example, `oss://sampledata/ingest/TableName.01.csv`. + - When importing multiple files, enter the source folder URI in the following format `oss://[bucket_name]/[data_source_folder]/`. For example, `oss://sampledata/ingest/`. + - **Credential**: you can use an AccessKey pair to access your bucket. For more information, see [Configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/configure-external-storage-access.md#configure-alibaba-cloud-object-storage-service-oss-access). + - **Test Bucket Access**: click this button after the credentials are in place to confirm that {{{ .premium }}} can reach the bucket. + - **Target Connection**: provide the TiDB username and password that will run the import. Optionally, click **Test Connection** to validate the credentials. + +4. Click **Next**. + +5. In the **Source Files Mapping** section, {{{ .premium }}} scans the bucket and proposes mappings between the source files and destination tables. 
+ + When a directory is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is selected by default. + + > **Note:** + > + > When a single file is specified in **Source Files URI**, the **Use [File naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) for automatic mapping** option is not displayed, and {{{ .premium }}} automatically populates the **Source** field with the file name. In this case, you only need to select the target database and table for data import. + + - Leave automatic mapping enabled to apply the [file naming conventions](/tidb-cloud/naming-conventions-for-data-import.md) to your source files and target tables. Keep **CSV** selected as the data format. + + - **Advanced options**: expand the panel to view the `Ignore compatibility checks (advanced)` toggle. Leave it disabled unless you intentionally want to bypass schema compatibility validation. + + + > **Note:** + > + > Manual mapping is coming soon. When the toggle becomes available, clear the automatic mapping option and configure the mapping manually: + > + > - **Source**: enter a filename pattern such as `TableName.01.csv`. Wildcards `*` and `?` are supported (for example, `my-data*.csv`). + > - **Target Database** and **Target Table**: choose the destination objects for the matched files. + +6. {{{ .premium }}} automatically scans the source path. Review the scan results, check the data files found and corresponding target tables, and then click **Start Import**. + +7. When the import progress shows **Completed**, check the imported tables. + +
+ +
+ +When you run an import task, if any unsupported or invalid conversions are detected, {{{ .premium }}} terminates the import job automatically and reports an importing error. + +If you get an importing error, do the following: + +1. Drop the partially imported table. +2. Check the table schema file. If there are any errors, correct the table schema file. +3. Check the data types in the CSV files. +4. Try the import task again. + +## Troubleshooting + +### Resolve warnings during data import + +After clicking **Start Import**, if you see a warning message such as `can't find the corresponding source files`, resolve this by providing the correct source file, renaming the existing one according to [Naming Conventions for Data Import](/tidb-cloud/naming-conventions-for-data-import.md), or using **Advanced Settings** to make changes. + +After resolving these issues, you need to import the data again. + +### Zero rows in the imported tables + +After the import progress shows **Completed**, check the imported tables. If the number of rows is zero, it means no data files matched the Bucket URI that you entered. In this case, resolve this issue by providing the correct source file, renaming the existing one according to [Naming Conventions for Data Import](/tidb-cloud/naming-conventions-for-data-import.md), or using **Advanced Settings** to make changes. After that, import those tables again. diff --git a/tidb-cloud/premium/import-from-s3-premium.md b/tidb-cloud/premium/import-from-s3-premium.md new file mode 100644 index 0000000000000..8b5059ff56b8b --- /dev/null +++ b/tidb-cloud/premium/import-from-s3-premium.md @@ -0,0 +1,78 @@ +--- +title: Import Data from Amazon S3 into {{{ .premium }}} +summary: Learn how to import CSV files from Amazon S3 into {{{ .premium }}} instances using the console wizard. 
+--- + +# Import Data from Amazon S3 into {{{ .premium }}} + +This document describes how to import CSV files from Amazon Simple Storage Service (Amazon S3) into {{{ .premium }}} instances. The steps reflect the current private preview user interface and serve as an initial framework for the upcoming public preview launch. + +> **Warning:** +> +> {{{ .premium }}} is currently available in **private preview** in select AWS regions. +> +> If Premium is not yet enabled for your organization, or if you need access in another cloud provider or region, click **Support** in the lower-left corner of the [TiDB Cloud console](https://tidbcloud.com/), or submit a request through the [Contact Us](https://www.pingcap.com/contact-us) form on the website. + +> **Tip:** +> +> - For {{{ .starter }}} or Essential, see [Import CSV Files from Cloud Storage into {{{ .starter }}} or Essential](/tidb-cloud/import-csv-files-serverless.md). +> - For {{{ .dedicated }}}, see [Import CSV Files from Cloud Storage into {{{ .dedicated }}}](/tidb-cloud/import-csv-files.md). + +## Limitations + +- To ensure data consistency, {{{ .premium }}} allows importing CSV files into empty tables only. If the target table already contains data, import into a staging table and then copy the rows using the `INSERT ... SELECT` statement. +- During the private preview, the user interface currently supports Amazon S3 as the only storage provider. Support for additional providers will be added in future releases. +- Each import job maps a single source pattern to one destination table. + +## Step 1. Prepare the CSV files + +1. If a CSV file is larger than 256 MiB, consider splitting it into smaller files around 256 MiB so {{{ .premium }}} can process them in parallel. +2. Name your CSV files according to the Dumpling naming conventions: + - Full-table files: use the `${db_name}.${table_name}.csv` format. + - Sharded files: append numeric suffixes, such as `${db_name}.${table_name}.000001.csv`. 
+ - Compressed files: use the `${db_name}.${table_name}.${suffix}.csv.${compress}` format. +3. Optional schema files (`${db_name}-schema-create.sql`, `${db_name}.${table_name}-schema.sql`) help {{{ .premium }}} create databases and tables automatically. + + + +## Step 2. Create target schemas (optional) + +If you want {{{ .premium }}} to create the databases and tables automatically, place the schema files generated by Dumpling in the same S3 directory. Otherwise, create the databases and tables manually in {{{ .premium }}} before running the import. + +## Step 3. Configure access to Amazon S3 + +To allow {{{ .premium }}} to read your bucket, use either of the following methods: + +- Provide an AWS Role ARN that trusts TiDB Cloud and grants the `s3:GetObject` and `s3:ListBucket` permissions on the relevant paths. +- Provide an AWS access key (access key ID and secret access key) with equivalent permissions. + +The wizard includes a helper link labeled **Click here to create a new one with AWS CloudFormation**. Follow this link if you need {{{ .premium }}} to pre-fill a CloudFormation stack that creates the role for you. + +## Step 4. Import CSV files from Amazon S3 + +1. In the [TiDB Cloud console](https://tidbcloud.com/tidbs), navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page, and then click the name of your TiDB instance. +2. In the left navigation pane, click **Data** > **Import**, and choose **Import data from Cloud Storage**. +3. In the **Source Connection** dialog: + - Set **Storage Provider** to **Amazon S3**. + - Enter the **Source Files URI** for a single file (`s3://bucket/path/file.csv`) or for a folder (`s3://bucket/path/`). + - Choose **AWS Role ARN** or **AWS Access Key** and provide the credentials. + - Click **Test Bucket Access** to validate connectivity. + +4. Click **Next** and provide the TiDB SQL username and password for the import job. Optionally, test the connection. +5. 
Review the automatically generated source-to-target mapping. Disable automatic mapping if you need to define custom patterns and destination tables. +6. Click **Next** to run the pre-check. Resolve any warnings about missing files or incompatible schemas. +7. Click **Start Import** to launch the job group. +8. Monitor the job statuses until they show **Completed**, then verify the imported data in TiDB Cloud. + +## Troubleshooting + +- If the pre-check reports zero files, verify the S3 path and IAM permissions. +- If jobs remain in **Preparing**, ensure that the destination tables are empty and the required schema files exist. +- Use the **Cancel** action to stop a job group if you need to adjust mappings or credentials. + +## Next steps + +- See [Import Data into {{{ .premium }}} using the MySQL Command-Line Client](/tidb-cloud/premium/import-with-mysql-cli-premium.md) for scripted imports. +- See [Troubleshoot Access Denied Errors during Data Import from Amazon S3](/tidb-cloud/troubleshoot-import-access-denied-error.md) for IAM-related problems. diff --git a/tidb-cloud/premium/import-with-mysql-cli-premium.md b/tidb-cloud/premium/import-with-mysql-cli-premium.md new file mode 100644 index 0000000000000..39185d496ad95 --- /dev/null +++ b/tidb-cloud/premium/import-with-mysql-cli-premium.md @@ -0,0 +1,179 @@ +--- +title: Import Data into {{{ .premium }}} using the MySQL Command-Line Client +summary: Learn how to import small CSV or SQL files into {{{ .premium }}} instances using the MySQL Command-Line Client (`mysql`). +--- + +# Import Data into {{{ .premium }}} using the MySQL Command-Line Client + +This document describes how to import data into {{{ .premium }}} using the [MySQL Command-Line Client](https://dev.mysql.com/doc/refman/8.0/en/mysql.html) (`mysql`). The following sections provide step-by-step instructions for importing data from SQL or CSV files. 
This process performs a logical import, where the MySQL Command-Line Client replays SQL statements from your local machine against TiDB Cloud. + +> **Warning:** +> +> {{{ .premium }}} is currently available in **private preview** in select AWS regions. +> +> If Premium is not yet enabled for your organization, or if you need access in another cloud provider or region, click **Support** in the lower-left corner of the [TiDB Cloud console](https://tidbcloud.com/), or submit a request through the [Contact Us](https://www.pingcap.com/contact-us) form on the website. + +> **Tip:** +> +> - Logical imports are best suited for relatively small SQL or CSV files. For faster, parallel imports from cloud storage or to process multiple files from [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview) exports, see [Import CSV Files from Cloud Storage into {{{ .premium }}}](/tidb-cloud/premium/import-csv-files-premium.md). +> - For {{{ .starter }}} or Essential, see [Import Data into {{{ .starter }}} or Essential via MySQL CLI](/tidb-cloud/import-with-mysql-cli-serverless.md). +> - For {{{ .dedicated }}}, see [Import Data into {{{ .dedicated }}} via MySQL CLI](/tidb-cloud/import-with-mysql-cli.md). + +## Prerequisites + +Before you can import data to a {{{ .premium }}} instance via the MySQL Command-Line Client, you need the following prerequisites: + +- You have access to your {{{ .premium }}} instance. +- Install the MySQL Command-Line Client (`mysql`) on your local computer. + +## Step 1. Connect to your {{{ .premium }}} instance + +Connect to your TiDB instance using the MySQL Command-Line Client. If this is your first time, perform the following steps to configure the network connection and generate the TiDB SQL `root` user password: + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**TiDB Instances**](https://tidbcloud.com/project/instances) page. Then, click the name of your target instance to go to its overview page. + +2. 
Click **Connect** in the upper-right corner. A connection dialog is displayed. + +3. Ensure that the configurations in the connection dialog match your operating environment. + + - **Connection Type** is set to `Public`. + - **Connect With** is set to `MySQL CLI`. + - **Operating System** matches your environment. + + > **Note:** + > + > {{{ .premium }}} instances have the public endpoint disabled by default. If you do not see the `Public` option, enable the public endpoint on the instance details page (under the **Network** tab), or ask an organization admin to enable it before proceeding. + +4. Click **Generate Password** to create a random password. If you have already configured a password, reuse that credential or rotate it before proceeding. + +## Step 2. Define the target database and table schema + +Before importing data, create the target table structure that matches your dataset. + +The following is an example SQL file (`products-schema.sql`) that creates a sample database and table. Update the database or table names to match your environment. + +```sql +CREATE DATABASE IF NOT EXISTS test; +USE test; + +CREATE TABLE products ( + product_id INT PRIMARY KEY, + product_name VARCHAR(255), + price DECIMAL(10, 2) +); +``` + +Run the schema file against your {{{ .premium }}} instance so the database and table exist before you load data in the next step. + +## Step 3. Import data from an SQL or CSV file + +Use the MySQL Command-Line Client to load data into the schema you created in Step 2. Replace the placeholders with your own file paths, credentials, and dataset as needed, then follow the workflow that matches your source format. + + +
+ +Do the following to import data from an SQL file: + +1. Provide an SQL file (for example, `products.sql`) that contains the data you want to import. This SQL file must include `INSERT` statements with data, similar to the following: + + ```sql + INSERT INTO products (product_id, product_name, price) VALUES + (1, 'Laptop', 999.99), + (2, 'Smartphone', 499.99), + (3, 'Tablet', 299.99); + ``` + +2. Use the following command to import data from the SQL file: + + ```bash + mysql --comments --connect-timeout 150 \ + -u '<username>' -h <host> -P 4000 -D test \ + --ssl-mode=VERIFY_IDENTITY --ssl-ca=<CA_path> \ + -p<password> < products.sql + ``` + + Replace the placeholder values (for example, `<username>`, `<host>`, `<password>`, `<CA_path>`, and the SQL file name) with your own connection details and file path. + +> **Note:** +> +> The sample schema creates a `test` database and the commands use `-D test`. Change both the schema file and the `-D` parameter if you plan to import into a different database. + + + +The SQL user you authenticate with must have the required privileges (for example, `CREATE` and `INSERT`) to define tables and load data into the target database. + + + +
+
+ +Do the following to import data from a CSV file: + +1. Ensure the target database and table exist in TiDB (for example, the `products` table you created in Step 2). + +2. Provide a sample CSV file (for example, `products.csv`) that contains the data you want to import. The following is an example: + + **products.csv:** + + ```csv + product_id,product_name,price + 1,Laptop,999.99 + 2,Smartphone,499.99 + 3,Tablet,299.99 + ``` + +3. Use the following command to import data from the CSV file: + + ```bash + mysql --comments --connect-timeout 150 \ + -u '<username>' -h <host> -P 4000 -D test \ + --ssl-mode=VERIFY_IDENTITY --ssl-ca=<CA_path> \ + -p<password> \ + -e "LOAD DATA LOCAL INFILE '<csv_path>' INTO TABLE products + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + IGNORE 1 LINES (product_id, product_name, price);" + ``` + + Replace the placeholder values (for example, `<username>`, `<host>`, `<password>`, `<CA_path>`, `<csv_path>`, and the table name) with your own connection details and dataset paths. + +> **Note:** +> +> For more syntax details about `LOAD DATA LOCAL INFILE`, see [`LOAD DATA`](/sql-statements/sql-statement-load-data.md). + +
+
+ +## Step 4. Validate the imported data + +After the import is complete, run basic queries to verify that the expected rows are present and the data is correct. + +Use the MySQL Command-Line Client to connect to the same database and run validation queries, such as counting rows and inspecting sample records: + +```bash +mysql --comments --connect-timeout 150 \ + -u '<username>' -h <host> -P 4000 -D test \ + --ssl-mode=VERIFY_IDENTITY --ssl-ca=<CA_path> \ + -p<password> \ + -e "SELECT COUNT(*) AS row_count FROM products; \ + SELECT * FROM products ORDER BY product_id LIMIT 5;" +``` + +Expected output (example): + +```text ++-----------+ +| row_count | ++-----------+ +| 3 | ++-----------+ ++------------+---------------+--------+ +| product_id | product_name | price | ++------------+---------------+--------+ +| 1 | Laptop | 999.99 | +| 2 | Smartphone | 499.99 | +| 3 | Tablet | 299.99 | ++------------+---------------+--------+ +``` + +Replace the placeholder values with your own connection details, and adjust the validation queries to suit the shape of your dataset. diff --git a/tidb-cloud/premium/manage-user-access-premium.md b/tidb-cloud/premium/manage-user-access-premium.md new file mode 100644 index 0000000000000..84ad85ae0df0f --- --- /dev/null +++ b/tidb-cloud/premium/manage-user-access-premium.md @@ -0,0 +1,246 @@ +--- +title: Identity Access Management for {{{ .premium }}} +summary: Learn how to manage identity access in {{{ .premium }}}. +--- + +# Identity Access Management for {{{ .premium }}} + +This document describes how to manage user access, roles, and permissions across organizations and TiDB instances in {{{ .premium }}}. + +Before you can use TiDB Cloud, [sign up for an account](https://tidbcloud.com/free-trial). You can either sign up with email and password to [manage your password in TiDB Cloud](/tidb-cloud/tidb-cloud-password-authentication.md), or choose your Google, GitHub, or Microsoft account for single sign-on (SSO) to TiDB Cloud. 
+ +## Organizations and TiDB instances + +{{{ .premium }}} uses a hierarchical structure of organizations and instances to help you manage users and TiDB instances efficiently. As an `Organization Owner`, you can create and manage multiple instances within your organization. + +For example: + +``` +- Your organization + - TiDB instance 1 + - TiDB instance 2 + - TiDB instance 3 + ... +``` + +In this structure: + +- Users can access an organization only if they are members of it. +- To access a TiDB instance, users need at least read permissions for that instance in the organization. + +For more information about user roles and permissions, see [User Roles](#user-roles). + +### Organizations + +An organization can include multiple TiDB instances. + +TiDB Cloud calculates billing at the organization level, with the billing details available for each instance. + +If you are an `Organization Owner`, you have full administrative privileges in your organization. + +For example, you can do the following: + +- Create TiDB instances for different purposes. +- Assign organization-level and instance-level roles to different users. +- Configure organization-wide settings such as time zone. + +### TiDB instances + +If you are an `Instance Manager`, you can manage settings and operations for a specific TiDB instance. + +For example, you can do the following: + +- Delete a TiDB instance when it is no longer needed. +- Modify instance configurations as needed. + +## User roles + +TiDB Cloud defines different user roles to control permissions at both the organization and TiDB instance levels. + +You can grant roles to users at the organization level or at the TiDB instance level. It is recommended to plan your hierarchy carefully to ensure least‑privilege access and maintain security. + +### Organization roles + +At the organization level, TiDB Cloud defines the following roles, in which `Organization Owner` can invite members and grant organization roles to members. 
+ +| Permission | `Organization Owner` | `Organization Billing Manager` | `Organization Billing Viewer` | `Organization Console Audit Manager` | `Organization Viewer` | +|---|---|---|---|---|---| +| Manage organization settings, such as TiDB instances, API keys, and time zones. | ✅ | ❌ | ❌ | ❌ | ❌ | +| Add or remove organization members, and edit organization roles. | ✅ | ❌ | ❌ | ❌ | ❌ | +| `Instance Manager` permissions for all TiDB instances in the organization. | ✅ | ❌ | ❌ | ❌ | ❌ | +| Manage payment information for the organization. | ✅ | ✅ | ❌ | ❌ | ❌ | +| View billing and use [Cost Explorer](/tidb-cloud/tidb-cloud-billing.md#cost-explorer). | ✅ | ✅ | ✅ | ❌ | ❌ | +| Manage [console audit logging](/tidb-cloud/tidb-cloud-console-auditing.md) for the organization. | ✅ | ❌ | ❌ | ✅ | ❌ | +| View all organization members. | ✅ | ❌ | ❌ | ❌ | ❌ | +| View organization name and time zone. | ✅ | ✅ | ✅ | ✅ | ✅ | + +> **Note:** +> +> - The `Organization Console Audit Manager` role manages audit logging in the TiDB Cloud console only, not database audit logging. + +### TiDB instance roles + +At the TiDB instance level, TiDB Cloud defines two roles: `Instance Manager` and `Instance Viewer`. + +> **Note:** +> +> - The `Organization Owner` automatically inherits all `Instance Manager` permissions for every instance in the organization. +> - Each TiDB instance role inherits all the permissions of the `Organization Viewer` role by default. +> - If a member in your organization does not have any TiDB instance roles, the member cannot access any TiDB instances in your organization. + +| Permission | `Instance Manager` | `Instance Viewer` | +|---|---|---| +| Manage TiDB instance settings | ✅ | ❌ | +| Manage [database audit logging](/tidb-cloud/tidb-cloud-auditing.md) of the TiDB instance. | ✅ | ❌ | +| Manage TiDB instance operations, such as TiDB instance creation, modification, and deletion. 
| ✅ | ❌ | +| Manage TiDB instance data, such as data import, data backup and restore, and data migration. | ✅ | ❌ | +| Manage [changefeeds](/tidb-cloud/changefeed-overview.md). | ✅ | ❌ | +| Review and reset the root password for the TiDB instance. | ✅ | ❌ | +| View the overview, backup records, metrics, events, and [changefeeds](/tidb-cloud/changefeed-overview.md) of the TiDB instance. | ✅ | ✅ | + +## Manage organization access + +### View and switch between organizations + +To view and switch between organizations, take the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com), click the combo box in the upper-left corner. The list of organizations you belong to is displayed. + + > **Tip:** + > + > - If you are currently on the page of a specific TiDB instance, after clicking the combo box in the upper-left corner, you also need to click ← in the combo box to return to the organization list. + > - If you are a member of multiple organizations, you can click the target organization name in the combo box to switch your account between organizations. + +2. To view the detailed information of your organization, such as the organization ID and time zone, click the organization name, and then click **Organization Settings** > **General** in the left navigation pane. + +### Set the time zone for your organization + +If you are in the `Organization Owner` role, you can modify the system display time according to your time zone. + +To change the local timezone setting, take the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target organization using the combo box in the upper-left corner. + +2. In the left navigation pane, click **Organization Settings** > **General**. + +3. In the **Time Zone** section, select your time zone from the drop-down list. + +4. Click **Update**. + +### Invite a user to your organization + +If you are in the `Organization Owner` role, you can invite users to your organization. 
+ +> **Note:** +> +> You can also [invite a user to access or manage a TiDB instance](#invite-a-user-to-access-or-manage-a-tidb-instance) directly as needed, which also makes the user your organization member. + +To invite a user to an organization, take the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target organization using the combo box in the upper-left corner. + +2. In the left navigation pane, click **Organization Settings** > **Users**. + +3. On the **Users** page, click **Invite User**. + +4. Enter the email address of the user to be invited, and then select an organization role for the user. + + > **Tip:** + > + > - The default role at the organization level is `Organization Viewer`. + > - If you want to invite multiple users at one time, you can enter multiple email addresses. + > - The invited user does not have access to any TiDB instances by default. To grant TiDB instance permissions to the user, see [Invite a user to access or manage a TiDB instance](#invite-a-user-to-access-or-manage-a-tidb-instance). + +5. If you only need to assign the user an organization role and do not need to assign any project or TiDB instance roles, disable the **Add access for projects and instances** option. + +6. Click **Invite**. Then the new user is successfully added into the user list. At the same time, an email is sent to the invited email address with a verification link. + +7. After receiving this email, the user needs to click the link in the email to verify the identity, and a new page shows. + +8. If the invited email address has not been used to sign up for a TiDB Cloud account, the user is directed to the sign-up page to create an account. + +> **Note:** +> +> The verification link in the email expires in 24 hours. If the user you want to invite does not receive the email, click **Resend**. 
+ +### Modify organization roles + +If you are in the `Organization Owner` role, you can modify organization roles of all members in your organization. + +To modify the organization role of a member, take the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target organization using the combo box in the upper-left corner. + +2. In the left navigation pane, click **Organization Settings** > **Users**. + +3. On the **Users** page, click **...** > **Edit Role** in the row of the target member. + +### Remove an organization member + +If you are in the `Organization Owner` role, you can remove organization members from your organization. + +To remove a member from an organization, take the following steps: + +> **Note:** +> +> If a member is removed from an organization, the TiDB instance access for the member is also removed. + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target organization using the combo box in the upper-left corner. + +2. In the left navigation pane, click **Organization Settings** > **Users**. + +3. On the **Users** page, click **...** > **Delete** in the row of the target member. + +## Manage TiDB instance access + +### Invite a user to access or manage a TiDB instance + +If you are in the `Organization Owner` role, you can invite users to access or manage your TiDB instances. + +> **Note:** +> +> When you invite a user not in your organization to access or manage your TiDB instance, the user automatically joins your organization as well. + +To invite a user to access or manage a TiDB instance, take the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target organization using the combo box in the upper-left corner. + +2. In the left navigation pane, click **Organization Settings** > **Users**. + +3. On the **Users** page, click **Invite User**. + +4. 
Enter the email address of the user to be invited, and then select an organization role for the user.
+
+5. Make sure the **Add access for projects and instances** option is enabled, click **Add access** in the **Instance access** section, and then select a TiDB instance role for the user.
+
+6. Click **Add access**. Then the new user is successfully added into the user list. At the same time, an email is sent to the invited email address with a verification link.
+
+7. After receiving this email, the user needs to click the link in the email to verify the identity, and a new page shows.
+
+8. If the invited email address has not been used to sign up for a TiDB Cloud account, the user is directed to the sign-up page to create an account.
+
+> **Note:**
+>
+> The verification link in the email expires in 24 hours. If the invited user does not receive the email, click **Resend**.
+
+### Modify TiDB instance roles
+
+If you are in the `Organization Owner` role, you can modify TiDB instance roles of all organization members in your organization.
+
+To modify the TiDB instance role of a member, take the following steps:
+
+1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target organization using the combo box in the upper-left corner.
+
+2. In the left navigation pane, click **Organization Settings** > **Users**.
+
+3. On the **Users** page, click **...** > **Edit Role** in the row of the target member.
+
+## Manage user profiles
+
+In TiDB Cloud, you can easily manage your profile, including your first name, last name, and phone number.
+
+1. In the [TiDB Cloud console](https://tidbcloud.com), click your account icon in the lower-left corner.
+
+2. Click **Account Settings**.
+
+3. In the displayed dialog, update the profile information, and then click **Update**.
\ No newline at end of file diff --git a/tidb-cloud/premium/migrate-from-op-tidb-premium.md b/tidb-cloud/premium/migrate-from-op-tidb-premium.md new file mode 100644 index 0000000000000..ec3a6dab98ea4 --- /dev/null +++ b/tidb-cloud/premium/migrate-from-op-tidb-premium.md @@ -0,0 +1,414 @@ +--- +title: Migrate from TiDB Self-Managed to {{{ .premium }}} +summary: Learn how to migrate data from TiDB Self-Managed to {{{ .premium }}}. +--- + +# Migrate from TiDB Self-Managed to {{{ .premium }}} + +This document describes how to migrate data from your TiDB Self-Managed clusters to {{{ .premium }}} (on AWS) instances using Dumpling and TiCDC. + +> **Warning:** +> +> {{{ .premium }}} is currently available in **private preview** in select AWS regions. +> +> If Premium is not yet enabled for your organization, or if you need access in another cloud provider or region, click **Support** in the lower-left corner of the [TiDB Cloud console](https://tidbcloud.com/), or submit a request through the [Contact Us](https://www.pingcap.com/contact-us) form on the website. + +The overall procedure is as follows: + +1. Build the environment and prepare the tools. +2. Migrate full data. The process is as follows: + 1. Export data from TiDB Self-Managed to Amazon S3 using Dumpling. + 2. Import data from Amazon S3 to {{{ .premium }}}. +3. Replicate incremental data using TiCDC. +4. Verify the migrated data. + +## Prerequisites + +It is recommended that you put the S3 bucket and the {{{ .premium }}} instance in the same region. Cross-region migration might incur additional cost for data conversion. 
+ +Before migration, you need to prepare the following: + +- An [AWS account](https://docs.aws.amazon.com/AmazonS3/latest/userguide/setting-up-s3.html#sign-up-for-aws-gsg) with administrator access +- An [AWS S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-bucket.html) +- [A TiDB Cloud account](/tidb-cloud/tidb-cloud-quickstart.md) with at least the [`Project Data Access Read-Write`](/tidb-cloud/manage-user-access.md#user-roles) access to your target {{{ .premium }}} instance hosted on AWS + +## Prepare tools + +You need to prepare the following tools: + +- Dumpling: a data export tool +- TiCDC: a data replication tool + +### Dumpling + +[Dumpling](https://docs.pingcap.com/tidb/dev/dumpling-overview) is a tool that exports data from TiDB or MySQL into SQL or CSV files. You can use Dumpling to export full data from TiDB Self-Managed. + +Before you deploy Dumpling, note the following: + +- It is recommended to deploy Dumpling on a new EC2 instance in the same VPC as your target TiDB instance. +- The recommended EC2 instance type is **c6g.4xlarge** (16 vCPU and 32 GiB memory). You can choose other EC2 instance types based on your needs. The Amazon Machine Image (AMI) can be Amazon Linux, Ubuntu, or Red Hat. + +You can deploy Dumpling by using TiUP or using the installation package. + +#### Deploy Dumpling using TiUP + +Use [TiUP](https://docs.pingcap.com/tidb/stable/tiup-overview) to deploy Dumpling: + +```bash +## Deploy TiUP +curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh +source /root/.bash_profile +## Deploy Dumpling and update to the latest version +tiup install dumpling +tiup update --self && tiup update dumpling +``` + +#### Deploy Dumpling using the installation package + +To deploy Dumpling using the installation package: + +1. Download the [toolkit package](https://docs.pingcap.com/tidb/stable/download-ecosystem-tools). + +2. Extract it to the target machine. 
You can get Dumpling using TiUP by running `tiup install dumpling`. Then, you can use `tiup dumpling ...` to run Dumpling. For more information, see [Dumpling introduction](https://docs.pingcap.com/tidb/stable/dumpling-overview#dumpling-introduction). + +#### Configure privileges for Dumpling + +You need the following privileges to export data from the upstream database: + +- SELECT +- RELOAD +- LOCK TABLES +- REPLICATION CLIENT +- PROCESS + +### Deploy TiCDC + +You need to [deploy TiCDC](https://docs.pingcap.com/tidb/dev/deploy-ticdc) to replicate incremental data from the upstream TiDB cluster to {{{ .premium }}}. + +1. Confirm whether the current TiDB version supports TiCDC. TiDB v4.0.8.rc.1 and later versions support TiCDC. You can check the TiDB version by executing `select tidb_version();` in the TiDB cluster. If you need to upgrade it, see [Upgrade TiDB Using TiUP](https://docs.pingcap.com/tidb/dev/deploy-ticdc#upgrade-ticdc-using-tiup). + +2. Add the TiCDC component to the TiDB cluster. See [Add or scale out TiCDC to an existing TiDB cluster using TiUP](https://docs.pingcap.com/tidb/dev/deploy-ticdc#add-or-scale-out-ticdc-to-an-existing-tidb-cluster-using-tiup). Edit the `scale-out.yml` file to add TiCDC: + + ```yaml + cdc_servers: + - host: 10.0.1.3 + gc-ttl: 86400 + data_dir: /tidb-data/cdc-8300 + - host: 10.0.1.4 + gc-ttl: 86400 + data_dir: /tidb-data/cdc-8300 + ``` + +3. Add the TiCDC component and check the status. + + ```shell + tiup cluster scale-out scale-out.yml + tiup cluster display + ``` + +## Migrate full data + +To migrate data from the TiDB Self-Managed cluster to {{{ .premium }}}, perform a full data migration as follows: + +1. Migrate data from the TiDB Self-Managed cluster to Amazon S3. +2. Migrate data from Amazon S3 to {{{ .premium }}}. + +### Migrate data from the TiDB Self-Managed cluster to Amazon S3 + +You need to migrate data from the TiDB Self-Managed cluster to Amazon S3 using Dumpling. 
+
+If your TiDB cluster is in a local IDC, or the network between the Dumpling server and Amazon S3 is not connected, you can export the files to the local storage first, and then upload them to Amazon S3 later.
+
+#### Step 1. Disable the GC mechanism of the upstream TiDB Self-Managed cluster temporarily
+
+To ensure that newly written data is not lost during incremental migration, you need to disable the upstream cluster's garbage collection (GC) mechanism before starting the migration to prevent the system from cleaning up historical data.
+
+Run the following command to disable GC:
+
+```sql
+SET GLOBAL tidb_gc_enable = FALSE;
+```
+
+Then run the following query to verify whether the setting takes effect. In the following example output, `0` indicates that GC is disabled:
+
+```sql
+SELECT @@global.tidb_gc_enable;
++-------------------------+
+| @@global.tidb_gc_enable |
++-------------------------+
+| 0                       |
++-------------------------+
+1 row in set (0.01 sec)
+```
+
+#### Step 2. Configure access permissions to the Amazon S3 bucket for Dumpling
+
+Create an access key in the AWS console. See [Create an access key](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) for details.
+
+1. Use your AWS account ID or account alias, your IAM user name, and your password to sign in to [the IAM console](https://console.aws.amazon.com/iam/home#/security_credentials).
+
+2. In the navigation bar on the upper right, choose your user name, and then click **My Security Credentials**.
+
+3. To create an access key, click **Create access key**. Then choose **Download .csv file** to save the access key ID and secret access key to a CSV file on your computer. Store the file in a secure location. You will not have access to the secret access key again after this dialog box closes. After you download the CSV file, choose **Close**. When you create an access key, the key pair is active by default, and you can use the pair right away.
+ + ![Create access key](/media/tidb-cloud/op-to-cloud-create-access-key01.png) + + ![Download CSV file](/media/tidb-cloud/op-to-cloud-create-access-key02.png) + +#### Step 3. Export data from the upstream TiDB cluster to Amazon S3 using Dumpling + +Do the following to export data from the upstream TiDB cluster to Amazon S3 using Dumpling: + +1. Configure the environment variables for Dumpling. + + ```shell + export AWS_ACCESS_KEY_ID=${AccessKey} + export AWS_SECRET_ACCESS_KEY=${SecretKey} + ``` + +2. Get the S3 bucket URI and region information from the AWS console. See [Create a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html) for details. + + The following screenshot shows how to get the S3 bucket URI information: + + ![Get the S3 URI](/media/tidb-cloud/op-to-cloud-copy-s3-uri.png) + + The following screenshot shows how to get the region information: + + ![Get the region information](/media/tidb-cloud/op-to-cloud-copy-region-info.png) + +3. Run Dumpling to export data to the Amazon S3 bucket. + + ```shell + dumpling \ + -u root \ + -P 4000 \ + -h 127.0.0.1 \ + -r 20000 \ + --filetype sql \ + -F 256MiB \ + -t 8 \ + -o "${S3 URI}" \ + --s3.region "${s3.region}" + ``` + + The `-t` option specifies the number of threads for the export. Increasing the number of threads improves the concurrency of Dumpling and the export speed, and also increases the database's memory consumption. Therefore, do not set this parameter to a very large number. + + For more information, see [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview#export-to-sql-files). + +4. Check the export data. Usually the exported data includes the following: + + - `metadata`: this file contains the start time of the export, and the location of the master binary log. 
+ - `{schema}-schema-create.sql`: the SQL file for creating the schema + - `{schema}.{table}-schema.sql`: the SQL file for creating the table + - `{schema}.{table}.{0001}.{sql|csv}`: data files + - `*-schema-view.sql`, `*-schema-trigger.sql`, `*-schema-post.sql`: other exported SQL files + +### Migrate data from Amazon S3 to {{{ .premium }}} + +After you export data from the TiDB Self-Managed cluster to Amazon S3, you need to migrate the data to {{{ .premium }}}. + +1. In the [TiDB Cloud console](https://tidbcloud.com/), get the Account ID and External ID of your target TiDB instance. + + 1. Navigate to the **TiDB Instances** page, and click the name of your target instance. + 2. In the left navigation pane, click **Data** > **Import**. + 3. Choose **Import data from Cloud Storage** > **Amazon S3**. + 4. Note down the **Account ID** and **External ID** displayed in the wizard. These values are embedded in the CloudFormation template. + +2. In the **Source Connection** dialog, select **AWS Role ARN**, then click **Click here to create a new one with AWS CloudFormation**, and follow the on-screen guidance. If your organization cannot launch CloudFormation stacks, see [Manually create the IAM role](#manually-create-the-iam-role-optional). + + 1. Open the pre-filled CloudFormation template in the AWS console. + 2. Provide a role name, review the permissions, and acknowledge the IAM warning. + 3. Create the stack and wait for the status to change to **CREATE_COMPLETE**. + 4. On the **Outputs** tab, copy the newly generated Role ARN. + 5. Return to {{{ .premium }}}, paste the Role ARN, and click **Confirm**. The wizard stores the ARN for subsequent import jobs. + +3. Continue with the remaining steps in the import wizard, and use the saved Role ARN when prompted. + +#### Manually create the IAM role (optional) + +If your organization cannot deploy CloudFormation stacks, create the access policy and IAM role manually: + +1. 
In AWS IAM, create a policy that grants the following actions on your bucket (and KMS key, if applicable):
+
+    - `s3:GetObject`
+    - `s3:GetObjectVersion`
+    - `s3:ListBucket`
+    - `s3:GetBucketLocation`
+    - `kms:Decrypt` (only when SSE-KMS encryption is enabled)
+
+    The following JSON template shows the required structure. Replace the placeholders with your bucket path, bucket ARN, and KMS key ARN (if needed).
+
+    ```json
+    {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": [
+                    "s3:GetObject",
+                    "s3:GetObjectVersion"
+                ],
+                "Resource": "arn:aws:s3:::<your-bucket>/<your-data-path>/*"
+            },
+            {
+                "Effect": "Allow",
+                "Action": [
+                    "s3:ListBucket",
+                    "s3:GetBucketLocation"
+                ],
+                "Resource": "arn:aws:s3:::<your-bucket>"
+            },
+            {
+                "Effect": "Allow",
+                "Action": [
+                    "kms:Decrypt"
+                ],
+                "Resource": "<your-kms-key-arn>"
+            }
+        ]
+    }
+    ```
+
+2. Create an IAM role that trusts {{{ .premium }}} by providing the **Account ID** and **External ID** you have noted down earlier. Then, attach the policy created in the previous step to this role.
+
+3. Copy the resulting Role ARN and enter it in the {{{ .premium }}} import wizard.
+
+4. Import data to {{{ .premium }}} by following [Import data from Amazon S3 into {{{ .premium }}}](/tidb-cloud/premium/import-from-s3-premium.md).
+
+## Replicate incremental data
+
+To replicate incremental data, do the following:
+
+1. Get the start time of the incremental data migration. For example, you can get it from the metadata file of the full data migration.
+
+    ![Start Time in Metadata](/media/tidb-cloud/start_ts_in_metadata.png)
+
+2. Grant TiCDC access to {{{ .premium }}}.
+
+    1. In the [TiDB Cloud console](https://tidbcloud.com/tidbs), navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page, and then click the name of your target TiDB instance to go to its overview page.
+    2. In the left navigation pane, click **Settings** > **Networking**.
+    3. On the **Networking** page, click **Add IP Address**.
+    4. 
In the displayed dialog, select **Use IP addresses**, click **+**, fill in the public IP address of the TiCDC component in the **IP Address** field, and then click **Confirm**. Now TiCDC can access {{{ .premium }}}. For more information, see [Configure an IP Access List](/tidb-cloud/configure-ip-access-list.md). + +3. Get the connection information of the downstream {{{ .premium }}} instance. + + 1. In the [TiDB Cloud console](https://tidbcloud.com/tidbs), navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page, and then click the name of your target TiDB instance to go to its overview page. + 2. Click **Connect** in the upper-right corner. + 3. In the connection dialog, select **Public** from the **Connection Type** drop-down list and select **General** from the **Connect With** drop-down list. + 4. From the connection information, you can get the host IP address and port of the instance. For more information, see [Connect via public connection](/tidb-cloud/connect-via-standard-connection.md). + +4. Create and run the incremental replication task. In the upstream cluster, run the following: + + ```shell + tiup cdc cli changefeed create \ + --pd=http://172.16.6.122:2379 \ + --sink-uri="tidb://root:123456@172.16.6.125:4000" \ + --changefeed-id="upstream-to-downstream" \ + --start-ts="431434047157698561" + ``` + + - `--pd`: the PD address of the upstream cluster. The format is: `[upstream_pd_ip]:[pd_port]` + - `--sink-uri`: the downstream address of the replication task. Configure `--sink-uri` according to the following format. Currently, the scheme supports `mysql`, `tidb`, `kafka`, `s3`, and `local`. + + ```shell + [scheme]://[userinfo@][host]:[port][/path]?[query_parameters] + ``` + + - `--changefeed-id`: the ID of the replication task. The format must match the ^[a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*$ regular expression. If this ID is not specified, TiCDC automatically generates a UUID (the version 4 format) as the ID. 
+
+    - `--start-ts`: specifies the starting TSO of the changefeed. From this TSO, the TiCDC cluster starts pulling data. The default value is the current time.
+
+    For more information, see [CLI and Configuration Parameters of TiCDC Changefeeds](https://docs.pingcap.com/tidb/dev/ticdc-changefeed-config).
+
+5. Enable the GC mechanism again in the upstream cluster. If no error or delay is found in incremental replication, enable the GC mechanism to resume garbage collection of the cluster.
+
+    Run the following command to enable GC:
+
+    ```sql
+    SET GLOBAL tidb_gc_enable = TRUE;
+    ```
+
+    Then run the following query to verify whether the setting takes effect. In the following example output, `1` indicates that GC is enabled:
+
+    ```sql
+    SELECT @@global.tidb_gc_enable;
+    +-------------------------+
+    | @@global.tidb_gc_enable |
+    +-------------------------+
+    | 1                       |
+    +-------------------------+
+    1 row in set (0.01 sec)
+    ```
+
+6. Verify the incremental replication task.
+
+    - If the message "Create changefeed successfully!" is displayed in the output, the replication task is created successfully.
+    - If the state is `normal`, the replication task is normal.
+
+        ```shell
+        tiup cdc cli changefeed list --pd=http://172.16.6.122:2379
+        ```
+
+        ![Changefeed status](/media/tidb-cloud/normal_status_in_replication_task.png)
+
+    - Verify the replication. Write a new record to the upstream cluster, and then check whether the record is replicated to the downstream {{{ .premium }}} instance.
+
+7. Set the same timezone for the upstream cluster and downstream instance. By default, {{{ .premium }}} sets the timezone to UTC. If the timezone is different between the upstream cluster and downstream instance, you need to set the same timezone for both.
+
+    1. In the upstream cluster, run the following command to check the timezone:
+
+        ```sql
+        SELECT @@global.time_zone;
+        ```
+
+    2. In the downstream instance, run the following command to set the timezone:
+
+        ```sql
+        SET GLOBAL time_zone = '+08:00';
+        ```
+
+    3. 
Check the timezone again to verify the setting: + + ```sql + SELECT @@global.time_zone; + ``` + +8. Back up the [query bindings](/sql-plan-management.md) in the upstream cluster and restore them in the downstream instance. You can use the following query to back up the query bindings: + + ```sql + SELECT DISTINCT(CONCAT('CREATE GLOBAL BINDING FOR ', original_sql,' USING ', bind_sql,';')) FROM mysql.bind_info WHERE status='enabled'; + ``` + + If you do not get any output, it means that no query bindings are used in the upstream cluster. In this case, you can skip this step. + + After you get the query bindings, run them in the downstream instance to restore the query bindings. + +9. Back up the user and privilege information in the upstream cluster and restore them in the downstream instance. You can use the following script to back up the user and privilege information. Note that you need to replace the placeholders with the actual values. + + ```shell + #!/bin/bash + + export MYSQL_HOST={tidb_op_host} + export MYSQL_TCP_PORT={tidb_op_port} + export MYSQL_USER=root + export MYSQL_PWD={root_password} + export MYSQL="mysql -u${MYSQL_USER} --default-character-set=utf8mb4" + + function backup_user_priv(){ + ret=0 + sql="SELECT CONCAT(user,':',host,':',authentication_string) FROM mysql.user WHERE user NOT IN ('root')" + for usr in `$MYSQL -se "$sql"`;do + u=`echo $usr | awk -F ":" '{print $1}'` + h=`echo $usr | awk -F ":" '{print $2}'` + p=`echo $usr | awk -F ":" '{print $3}'` + echo "-- Grants for '${u}'@'${h}';" + [[ ! -z "${p}" ]] && echo "CREATE USER IF NOT EXISTS '${u}'@'${h}' IDENTIFIED WITH 'mysql_native_password' AS '${p}' ;" + $MYSQL -se "SHOW GRANTS FOR '${u}'@'${h}';" | sed 's/$/;/g' + [ $? -ne 0 ] && ret=1 && break + done + return $ret + } + + backup_user_priv + ``` + + After you get the user and privilege information, run the generated SQL statements in the downstream TiDB instance to restore the user and privilege information. 
diff --git a/tidb-cloud/premium/set-up-sink-private-endpoint-premium.md b/tidb-cloud/premium/set-up-sink-private-endpoint-premium.md new file mode 100644 index 0000000000000..ff5c9ce8ecd5d --- /dev/null +++ b/tidb-cloud/premium/set-up-sink-private-endpoint-premium.md @@ -0,0 +1,111 @@ +--- +title: Set Up Private Endpoint for Changefeeds +summary: Learn how to set up a private endpoint for changefeeds. +--- + +# Set Up Private Endpoint for Changefeeds + +This document describes how to create a private endpoint for changefeeds in your {{{ .premium }}} instances, enabling you to securely stream data to self-hosted Kafka or MySQL through private connectivity. + +## Prerequisites + +- Check permissions for private endpoint creation +- Set up your network connection + +### Permissions + +Only users with any of the following roles in your organization can create private endpoints for changefeeds: + +- `Organization Owner` +- `Instance Manager` for the corresponding instance + +For more information about roles in TiDB Cloud, see [User roles](/tidb-cloud/premium/manage-user-access-premium.md#user-roles). + +### Network + +Private endpoints leverage the **Private Link** technology from cloud providers, enabling resources in your VPC to connect to services in other VPCs through private IP addresses, as if those services were hosted directly within your VPC. + + +
+ +If your changefeed downstream service is hosted on AWS, collect the following information: + +- The name of the Private Endpoint Service for your downstream service +- The availability zones (AZs) where your downstream service is deployed + +If the Private Endpoint Service is not available for your downstream service, follow [Step 2. Expose the Kafka cluster as Private Link Service](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md#step-2-expose-the-kafka-cluster-as-private-link-service) to set up the load balancer and the Private Link Service. + +
+ + + +
+ +If your changefeed downstream service is hosted on Alibaba Cloud, collect the following information: + +- The name of the Private Endpoint Service for your downstream service +- The availability zones (AZs) where your downstream service is deployed + +To grant TiDB Cloud VPC access, you must add the TiDB Cloud's Alibaba Cloud account ID to the allowlist of your endpoint service. + +If the Private Endpoint Service is not available for your downstream service, follow [Step 2. Expose the Kafka cluster as Private Link Service](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md#step-2-expose-the-kafka-cluster-as-private-link-service) to set up the load balancer and the Private Link Service. + +
+
+ +
+ +## Step 1. Open the Networking page for your instance + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/). + +2. On the [**TiDB Instances**](https://tidbcloud.com/tidbs) page, click the name of your target instance to go to its overview page. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations and instances. + +3. In the left navigation pane, click **Settings** > **Networking**. + +## Step 2. Configure the private endpoint for changefeeds + +The configuration steps vary depending on the cloud provider where your instance is deployed. + + +
+ +1. On the **Networking** page, click **Create Private Endpoint** in the **AWS Private Endpoint for Changefeed** section. +2. In the **Create Private Endpoint for Changefeed** dialog, enter a name for the private endpoint. +3. Follow the reminder to authorize the [AWS Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-accounts) of TiDB Cloud to create an endpoint. +4. Enter the **Endpoint Service Name** that you collected in the [Network](#network) section. +5. Select the **Number of AZs**. Ensure that the number of AZs and the AZ IDs match your Kafka deployment. +6. If this private endpoint is created for Apache Kafka, enable the **Advertised Listener for Kafka** option. +7. Configure the advertised listener for Kafka using either the **TiDB Managed** domain or the **Custom** domain. + + - To use the **TiDB Managed** domain for advertised listeners, enter a unique string in the **Domain Pattern** field, and then click **Generate**. TiDB will generate broker addresses with subdomains for each availability zone. + - To use your own **Custom** domain for advertised listeners, switch the domain type to **Custom**, enter the root domain in the **Custom Domain** field, click **Check**, and then specify the broker subdomains for each availability zone. + +8. Click **Create** to validate the configurations and create the private endpoint. + +
+ + + +
+ +1. On the **Networking** page, click **Create Private Endpoint** in the **Alibaba Cloud Private Endpoint for Changefeed** section. +2. In the **Create Private Endpoint for Changefeed** dialog, enter a name for the private endpoint. +3. Follow the reminder to add TiDB Cloud's Alibaba Cloud account ID to the allowlist of your endpoint service to grant TiDB Cloud VPC access. For more information, see [managing account IDs in the allowlist of an endpoint service](https://www.alibabacloud.com/help/en/privatelink/user-guide/add-and-manage-service-whitelists). +4. Enter the **Endpoint Service Name** that you collected in the [Network](#network) section. +5. Select the **Number of AZs**. Ensure that the number of AZs and the AZ IDs match your Kafka deployment. +6. If this private endpoint is created for Apache Kafka, enable the **Advertised Listener for Kafka** option. +7. Configure the advertised listener for Kafka using either the **TiDB Managed** domain or the **Custom** domain. + + - To use the **TiDB Managed** domain for advertised listeners, enter a unique string in the **Domain Pattern** field, and then click **Generate**. TiDB will generate broker addresses with subdomains for each availability zone. + - To use your own **Custom** domain for advertised listeners, switch the domain type to **Custom**, enter the root domain in the **Custom Domain** field, click **Check**, and then specify the broker subdomains for each availability zone. + +8. Click **Create** to validate the configurations and create the private endpoint. + +
+
+
diff --git a/tidb-cloud/premium/tidb-cloud-auditing-premium.md b/tidb-cloud/premium/tidb-cloud-auditing-premium.md new file mode 100644 index 0000000000000..ff903b5cfe250 --- /dev/null +++ b/tidb-cloud/premium/tidb-cloud-auditing-premium.md @@ -0,0 +1,286 @@ +--- +title: "{{{ .premium }}} Database Audit Logging" +summary: Learn how to audit an instance in {{{ .premium }}}. +--- + +# {{{ .premium }}} Database Audit Logging + +TiDB Cloud provides an audit logging feature that records user access activities of your database, such as executed SQL statements. + +To evaluate the effectiveness of user access policies and other information security measures of your organization, it is a security best practice to periodically analyze database audit logs. + +The audit logging feature is **disabled by default**. To audit a TiDB instance, you must first enable audit logging, and then configure auditing filter rules. + +> **Note:** +> +> Because audit logging consumes instance resources, be prudent about whether to audit an instance. + +## Prerequisites + +- You are using a {{{ .premium }}} instance. + + > **Note:** + > + > - Database audit logging is not available for {{{ .starter }}}. + > - For {{{ .essential }}}, see [Database Audit Logging (Beta) for {{{ .essential }}}](/tidb-cloud/essential-database-audit-logging.md). + > - For {{{ .dedicated }}}, see [{{{ .dedicated }}} Database Audit Logging](/tidb-cloud/tidb-cloud-auditing.md). + +- You must have the `Organization Owner` role in your organization. Otherwise, you cannot see the database audit-related options in the TiDB Cloud console. + +## Enable audit logging + +TiDB Cloud supports recording the audit logs of a {{{ .premium }}} instance to your cloud storage service. Before enabling database audit logging, configure your cloud storage service on the cloud provider where the instance is located. + +### Enable audit logging for TiDB on AWS + +To enable audit logging for AWS, take the following steps: + +#### Step 1. 
Create an Amazon S3 bucket
+
+Specify an Amazon S3 bucket in your organization-owned AWS account as the destination to which TiDB Cloud writes audit logs.
+
+> **Note:**
+>
+> Do not enable object lock on the AWS S3 bucket. Enabling object lock will prevent TiDB Cloud from pushing audit log files to S3.
+
+For more information, see [Creating a general purpose bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-bucket-overview.html) in the AWS User Guide.
+
+#### Step 2. Configure Amazon S3 access
+
+1. Get the TiDB Cloud Account ID and the External ID of the TiDB instance that you want to enable audit logging.
+
+    1. In the TiDB Cloud console, navigate to the [**TiDB Instances**](https://tidbcloud.com/instances) page.
+
+    2. Click the name of your target instance to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane.
+
+    3. On the **DB Audit Logging** page, click **Enable** in the upper-right corner.
+
+    4. In the **Database Audit Log Storage Configuration** dialog, locate the **AWS IAM Policy Settings** section, and record **TiDB Cloud Account ID** and **TiDB Cloud External ID** for later use.
+
+2. In the AWS Management Console, go to **IAM** > **Access Management** > **Policies**, and then check whether there is a storage bucket policy with the `s3:PutObject` write-only permission.
+
+    - If yes, record the matched storage bucket policy for later use.
+    - If not, go to **IAM** > **Access Management** > **Policies** > **Create Policy**, and define a bucket policy according to the following policy template.
+
+        ```json
+        {
+            "Version": "2012-10-17",
+            "Statement": [
+                {
+                    "Effect": "Allow",
+                    "Action": "s3:PutObject",
+                    "Resource": "<Your S3 bucket ARN>/*"
+                }
+            ]
+        }
+        ```
+
+        In the template, `<Your S3 bucket ARN>` is the Amazon Resource Name (ARN) of your S3 bucket where the audit log files are to be written. You can go to the **Properties** tab in your S3 bucket and get the ARN value in the **Bucket Overview** area. 
In the `"Resource"` field, you need to add `/*` after the ARN. For example, if the ARN is `arn:aws:s3:::tidb-cloud-test`, you need to configure the value of the `"Resource"` field as `"arn:aws:s3:::tidb-cloud-test/*"`. + +3. Go to **IAM** > **Access Management** > **Roles**, and then check whether a role whose trust entity corresponds to the TiDB Cloud Account ID and the External ID that you recorded earlier already exists. + + - If yes, record the matched role for later use. + - If not, click **Create role**, select **Another AWS account** as the trust entity type, and then enter the TiDB Cloud Account ID value into the **Account ID** field. Then, choose the **Require External ID** option and enter the TiDB Cloud External ID value into the **External ID** field. + +4. In **IAM** > **Access Management** > **Roles**, click the role name from the previous step to go to the **Summary** page, and then take the following steps: + + 1. Under the **Permissions** tab, check whether the recorded policy with the `s3:PutObject` write-only permission is attached to the role. If not, choose **Attach Policies**, search for the needed policy, and then click **Attach Policy**. + 2. Return to the **Summary** page and copy the **Role ARN** value to your clipboard. + +#### Step 3. Enable audit logging + +In the TiDB Cloud console, go back to the **Database Audit Log Storage Configuration** dialog where you got the TiDB Cloud account ID and the External ID values, and then take the following steps: + +1. In the **Bucket URI** field, enter the URI of your S3 bucket where the audit log files are to be written. +2. In the **Bucket Region** drop-down list, select the AWS region where the bucket is located. +3. In the **Role ARN** field, fill in the Role ARN value that you copied in [Step 2. Configure Amazon S3 access](#step-2-configure-amazon-s3-access). +4. Click **Test Connection and Next** to verify whether TiDB Cloud can access and write to the bucket. 
+
+    If it is successful, **The connection is successful** is displayed. Otherwise, check your access configuration.
+
+5. Click **Enable** to enable audit logging for the instance.
+
+    TiDB Cloud is ready to write audit logs for the specified instance to your Amazon S3 bucket.
+
+> **Note:**
+>
+> - After enabling audit logging, if you make any new changes to the bucket URI, location, or ARN, you must click **Test Connection** again to verify that TiDB Cloud can connect to the bucket. Then, click **Enable** to apply the changes.
+> - To remove TiDB Cloud's access to your Amazon S3, simply delete the trust policy granted to this instance in the AWS Management Console.
+
+
+### Enable audit logging for TiDB on Alibaba Cloud
+
+To enable database audit logging for TiDB Cloud on Alibaba Cloud, take the following steps:
+
+#### Step 1. Create an OSS bucket
+
+Create an Object Storage Service (OSS) bucket in your organization-owned Alibaba Cloud account as the destination to which TiDB Cloud writes audit logs.
+
+For more information, see [Create a bucket](https://www.alibabacloud.com/help/en/oss/user-guide/create-a-bucket-4) in the Alibaba Cloud Storage documentation.
+
+#### Step 2. Configure OSS access
+
+1. Get the Alibaba Cloud Service Account ID of the TiDB instance that you want to enable audit logging.
+
+    1. In the TiDB Cloud console, navigate to the [**TiDB Instances**](https://tidbcloud.com/instances) page.
+    2. Click the name of your target instance to go to its overview page, and then click **Settings** > **DB Audit Logging** in the left navigation pane.
+    3. On the **DB Audit Logging** page, click **Enable** in the upper-right corner.
+    4. In the **Database Audit Log Storage Configuration** dialog, locate the **Alibaba Cloud RAM Policy Settings** section, and record **TiDB Cloud Account ID** and **TiDB Cloud External ID** for later use.
+
+2. 
In the Alibaba Cloud console, go to **RAM** > **Permissions** > **Policies**, and then check whether a policy already exists with the `oss:PutObject` write-only permission for your audit log OSS bucket.
+
+    - If yes, record the policy name for later use.
+
+    - If not, click **Create Policy**, and define the policy using the following policy template.
+
+        ```json
+        {
+            "Version": "1",
+            "Statement": [
+                {
+                    "Effect": "Allow",
+                    "Action": [
+                        "oss:PutObject"
+                    ],
+                    "Resource": "acs:oss:*:*:<Your-Bucket-Name>/*"
+                }
+            ]
+        }
+        ```
+
+        Replace `<Your-Bucket-Name>` with the name of your OSS bucket where TiDB Cloud will write audit logs. For example, if your bucket name is `auditlog-bucket`, use: `"Resource": "acs:oss:*:*:auditlog-bucket/*"`.
+
+3. In the Alibaba Cloud console, go to **RAM** > **Identities** > **Roles**, and then check whether a role already exists whose **trusted entity** matches the TiDB Cloud Account ID and External ID you recorded earlier.
+
+    - If yes, record the role name for later use.
+
+    - If not, click **Create Role**, and then take the following steps.
+
+        1. In the role creation page, click **Switch to Policy Editor**.
+        2. Under **Principal**, choose **Cloud Account** and enter the **TiDB Cloud Account ID** in the field.
+        3. Under **Action**, select **sts:AssumeRole** from the drop-down list.
+        4. Click **Add condition**, and then configure the condition as follows:
+            - Set **Key** to `sts:ExternalId`.
+            - Set **Operator** to `StringEquals`.
+            - Set **Value** to the **TiDB Cloud External ID**.
+        5. Click **OK** to open the **Create Role** dialog.
+        6. Enter the role name in the **Role Name** field, and click **OK** to create the role.
+
+4. After the role is created, go to the **Permissions** tab and click **Grant Permission**.
+
+    In the dialog, configure the following settings:
+
+    - For **Resource Scope**, select **Account**.
+    - In the **Policy** field, select the OSS write policy created earlier.
+    - Click **Grant Permissions**.
+
+5. 
Copy the **Role ARN** (for example: `acs:ram:::role/tidb-cloud-audit-role`) for later use.
+
+#### Step 3. Enable audit logging
+
+In the TiDB Cloud console, go back to the **Database Audit Log Storage Configuration** dialog where you got the TiDB Cloud account ID, and then take the following steps:
+
+1. In the **Bucket URI** field, enter the URI of your OSS bucket. For example, `oss://tidb-cloud-audit-log`.
+2. In the **Bucket Region** field, select the Alibaba Cloud region where the bucket is located (recommended to match your TiDB instance region).
+3. In the **Role ARN** field, paste the Role ARN value copied in [Step 2. Configure OSS access](#step-2-configure-oss-access).
+4. Click **Test Connection** to verify whether TiDB Cloud can access and write to the OSS bucket.
+
+    - If it is successful, **The connection is successful** is displayed.
+    - If not, check the OSS bucket permissions, RAM role configuration, and policy.
+
+5. Click **Enable** to activate audit logging for the instance.
+
+    TiDB Cloud is ready to write audit logs for the specified instance to your OSS bucket.
+
+> **Note:**
+>
+> - After enabling audit logging, if you make any new changes to the bucket URI or location, you must click **Test Connection** again to verify that TiDB Cloud can connect to the bucket. Then, click **Enable** to apply the changes.
+> - To remove TiDB Cloud's access to your OSS bucket, delete the trust policy granted to this instance in the Alibaba Cloud console.
+
+
+
+## Specify auditing filter rules
+
+After enabling audit logging, you must specify auditing filter rules to control which user access events to capture and write to audit logs. If no filter rules are specified, TiDB Cloud does not log anything.
+
+To specify auditing filter rules for an instance, take the following steps:
+
+1. On the **DB Audit Logging** page, click **Add Filter Rule** in the **Log Filter Rules** section to add an audit filter rule.
+
+    You can add one audit rule at a time. 
Each rule specifies a user expression, database expression, table expression, and access type. You can add multiple audit rules to meet your auditing requirements.
+
+2. In the **Log Filter Rules** section, click **>** to expand and view the list of audit rules you have added.
+
+> **Note:**
+>
+> - The filter rules are regular expressions and case-sensitive. If you use the wildcard rule `.*`, all users, databases, or table events in the instance are logged.
+> - Because audit logging consumes instance resources, be prudent when specifying filter rules. To minimize the consumption, it is recommended that you specify filter rules to limit the scope of audit logging to specific database objects, users, and actions, where possible.
+
+## View audit logs
+
+By default, TiDB Cloud stores database audit log files in your storage service, so you need to read the audit log information from your storage service.
+
+TiDB Cloud audit logs are readable text files with the instance ID, internal ID, and log creation date incorporated into the fully qualified filenames.
+
+For example, `13796619446086334065/tidb-5m5z34/tidb-audit-2022-04-21T18-16-29.529.log`. In this example, `13796619446086334065` indicates the instance ID and `tidb-5m5z34` indicates the internal ID.
+
+## Disable audit logging
+
+If you no longer want to audit an instance, go to the page of the instance, click **Settings** > **DB Audit Logging**, and then toggle the audit setting in the upper-right corner to **Disable**.
+
+> **Note:**
+>
+> Each time the size of the log file reaches 10 MiB, the log file is pushed to the cloud storage bucket. Therefore, after audit logging is disabled, the log file whose size is smaller than 10 MiB will not be automatically pushed to the cloud storage bucket. To get the log file in this situation, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). 
+ +## Audit log fields + +For each database event record in audit logs, TiDB provides the following fields: + +> **Note:** +> +> In the following tables, the empty maximum length of a field means that the data type of this field has a well-defined constant length (for example, 4 bytes for INTEGER). + +| Col # | Field name | TiDB data type | Maximum length | Description | +|---|---|---|---|---| +| 1 | N/A | N/A | N/A | Reserved for internal use | +| 2 | N/A | N/A | N/A | Reserved for internal use | +| 3 | N/A | N/A | N/A | Reserved for internal use | +| 4 | ID | INTEGER | | Unique event ID | +| 5 | TIMESTAMP | TIMESTAMP | | Time of event | +| 6 | EVENT_CLASS | VARCHAR | 15 | Event type | +| 7 | EVENT_SUBCLASS | VARCHAR | 15 | Event subtype | +| 8 | STATUS_CODE | INTEGER | | Response status of the statement | +| 9 | COST_TIME | FLOAT | | Time consumed by the statement | +| 10 | HOST | VARCHAR | 16 | Server IP | +| 11 | CLIENT_IP | VARCHAR | 16 | Client IP | +| 12 | USER | VARCHAR | 17 | Login username | +| 13 | DATABASE | VARCHAR | 64 | Event-related database | +| 14 | TABLES | VARCHAR | 64 | Event-related table name | +| 15 | SQL_TEXT | VARCHAR | 64 KB | Masked SQL statement | +| 16 | ROWS | INTEGER | | Number of affected rows (`0` indicates that no rows are affected) | + +Depending on the EVENT_CLASS field value set by TiDB, database event records in audit logs also contain additional fields as follows: + +- If the EVENT_CLASS value is `CONNECTION`, database event records also contain the following fields: + + | Col # | Field name | TiDB data type | Maximum length | Description | + |---|---|---|---|---| + | 17 | CLIENT_PORT | INTEGER | | Client port number | + | 18 | CONNECTION_ID | INTEGER | | Connection ID | + | 19 | CONNECTION_TYPE | VARCHAR | 12 | Connection via `socket` or `unix-socket` | + | 20 | SERVER_ID | INTEGER | | TiDB server ID | + | 21 | SERVER_PORT | INTEGER | | The port that the TiDB server uses to listen to client communication via the MySQL 
protocol | + | 22 | SERVER_OS_LOGIN_USER | VARCHAR | 17 | The username of the TiDB process startup system | + | 23 | OS_VERSION | VARCHAR | N/A | The version of the operating system where the TiDB server is located | + | 24 | SSL_VERSION | VARCHAR | 6 | The current SSL version of TiDB | + | 25 | PID | INTEGER | | The PID of the TiDB process | + +- If the EVENT_CLASS value is `TABLE_ACCESS` or `GENERAL`, database event records also contain the following fields: + + | Col # | Field name | TiDB data type | Maximum length | Description | + |---|---|---|---|---| + | 17 | CONNECTION_ID | INTEGER | | Connection ID | + | 18 | COMMAND | VARCHAR | 14 | The command type of the MySQL protocol | + | 19 | SQL_STATEMENT | VARCHAR | 17 | The SQL statement type | + | 20 | PID | INTEGER | | The PID of the TiDB process | diff --git a/tidb-cloud/premium/tidb-cloud-billing-ticdc-ccu.md b/tidb-cloud/premium/tidb-cloud-billing-ticdc-ccu.md new file mode 100644 index 0000000000000..70be0ec5bd7c6 --- /dev/null +++ b/tidb-cloud/premium/tidb-cloud-billing-ticdc-ccu.md @@ -0,0 +1,47 @@ +--- +title: Changefeed Billing for {{{ .premium }}} +summary: Learn about billing for changefeeds in {{{ .premium }}}. +--- + +# Changefeed Billing for {{{ .premium }}} + +This document describes the billing details for changefeeds in {{{ .premium }}}. + +## CCU cost + +{{{ .premium }}} measures the capacity of [changefeeds](/tidb-cloud/changefeed-overview.md) in TiCDC Changefeed Capacity Units (CCUs). When you [create a changefeed](/tidb-cloud/changefeed-overview.md#create-a-changefeed) for an instance, you can select an appropriate specification. The higher the CCU, the better the replication performance. You will be charged for these TiCDC CCUs. 
+ +### Number of TiCDC CCUs + +The following table lists the specifications and corresponding replication performances for changefeeds: + +| Specification | Maximum replication performance | +|---------------|---------------------------------| +| 2 CCUs | 5,000 rows/s | +| 4 CCUs | 10,000 rows/s | +| 8 CCUs | 20,000 rows/s | +| 16 CCUs | 40,000 rows/s | +| 24 CCUs | 60,000 rows/s | +| 32 CCUs | 80,000 rows/s | +| 40 CCUs | 100,000 rows/s | +| 64 CCUs | 160,000 rows/s | +| 96 CCUs | 240,000 rows/s | +| 128 CCUs | 320,000 rows/s | +| 192 CCUs | 480,000 rows/s | +| 256 CCUs | 640,000 rows/s | +| 320 CCUs | 800,000 rows/s | +| 384 CCUs | 960,000 rows/s | + +> **Note:** +> +> The preceding performance data is for reference only and might vary in different scenarios. It is strongly recommended that you conduct a real workload test before using the changefeed feature in a production environment. For further assistance, contact [TiDB Cloud support](/tidb-cloud/tidb-cloud-support.md). + +### Price + +Currently, {{{ .premium }}} is in private preview. You can [contact our sales](https://www.pingcap.com/contact-us/) for pricing details. + +## Private Data Link cost + +If you choose the **Private Link** or **Private Service Connect** network connectivity method, additional **Private Data Link** costs will be incurred. These charges fall under the [Data Transfer Cost](https://www.pingcap.com/tidb-dedicated-pricing-details/#data-transfer-cost) category. + +The price of **Private Data Link** is **$0.01/GiB**, the same as **Data Processed** of [AWS Interface Endpoint pricing](https://aws.amazon.com/privatelink/pricing/#Interface_Endpoint_pricing), **Consumer data processing** of [Google Cloud Private Service Connect pricing](https://cloud.google.com/vpc/pricing#psc-forwarding-rules), and **Inbound/Outbound Data Processed** of [Azure Private Link pricing](https://azure.microsoft.com/en-us/pricing/details/private-link/). 
diff --git a/tidb-cloud/premium/tidb-cloud-tls-connect-to-premium.md b/tidb-cloud/premium/tidb-cloud-tls-connect-to-premium.md new file mode 100644 index 0000000000000..c5bd1ce12e018 --- /dev/null +++ b/tidb-cloud/premium/tidb-cloud-tls-connect-to-premium.md @@ -0,0 +1,55 @@ +--- +title: TLS Connections to {{{ .premium }}} +summary: Introduce TLS connections in {{{ .premium }}}. +--- + +# TLS Connections to {{{ .premium }}} + +On TiDB Cloud, establishing TLS connections is one of the basic security practices for connecting to {{{ .premium }}} instances. You can configure multiple TLS connections from your client, application, and development tools to your {{{ .premium }}} instance to protect data transmission security. For security reasons, {{{ .premium }}} only supports TLS 1.2 and TLS 1.3, and does not support TLS 1.0 or TLS 1.1. + +To ensure data security, the Certificate Authority (CA) certificate for your {{{ .premium }}} instance is hosted on [AWS Private Certificate Authority](https://aws.amazon.com/private-ca/). The private key of the CA certificate is stored in AWS-managed hardware security modules (HSMs) that meet [FIPS 140-2 Level 3](https://csrc.nist.gov/projects/cryptographic-module-validation-program/Certificate/3139) security standards. + +## Prerequisites + +- Log in to TiDB Cloud via [Password Authentication](/tidb-cloud/tidb-cloud-password-authentication.md) or [SSO Authentication](/tidb-cloud/tidb-cloud-sso-authentication.md), and then [Create a {{{ .premium }}} instance](/tidb-cloud/premium/create-tidb-instance-premium.md). + +- Set a password to access your instance in secure settings. + + To do so, you can navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page, click **...** in the row of your {{{ .premium }}} instance, and then select **Change Root Password**. 
In password settings, you can click **Auto-generate Password** to automatically generate a root password with a length of 16 characters, including numbers, uppercase and lowercase characters, and special characters. + +## Secure connection to a {{{ .premium }}} instance + +In the [TiDB Cloud console](https://tidbcloud.com/), you can get examples of different connection methods and connect to your {{{ .premium }}} instance as follows: + +1. Navigate to the [**TiDB Instances**](https://tidbcloud.com/tidbs) page, and then click the name of your {{{ .premium }}} instance to go to its overview page. + +2. Click **Connect** in the upper-right corner. A dialog is displayed. + +3. In the connection dialog, select **Public** from the **Connection Type** drop-down list. + + If you have not configured the IP access list, click **Configure IP Access List** to configure it before your first connection. For more information, see [Configure an IP access list](/tidb-cloud/premium/configure-ip-access-list-premium.md). + +4. Click **CA cert** to download CA cert for TLS connection to TiDB instances. The CA cert supports TLS 1.2 by default. + + > **Note:** + > + > - You can store the downloaded CA cert in the default storage path of your operating system, or specify another storage path. You need to replace the CA cert path in the code example with your own CA cert path in the subsequent steps. + > - {{{ .premium }}} does not force clients to use TLS connections, and user-defined configuration of the [`require_secure_transport`](/system-variables.md#require_secure_transport-new-in-v610) variable is currently not supported on {{{ .premium }}}. + +5. Choose your preferred connection method, and then refer to the connection string and sample code on the tab to connect to your instance. 
+ +## Manage root certificates for {{{ .premium }}} + +{{{ .premium }}} uses certificates from [AWS Private Certificate Authority](https://aws.amazon.com/private-ca/) as a CA for TLS connections between clients and {{{ .premium }}} instances. Usually, the private key of the CA certificate is stored securely in AWS-managed hardware security modules (HSMs) that meet [FIPS 140-2 Level 3](https://csrc.nist.gov/projects/cryptographic-module-validation-program/Certificate/3139) security standards. + +## FAQs + +### Which TLS versions are supported to connect to my {{{ .premium }}} instance? + +For security reasons, {{{ .premium }}} only supports TLS 1.2 and TLS 1.3, and does not support TLS 1.0 or TLS 1.1. See IETF [Deprecating TLS 1.0 and TLS 1.1](https://datatracker.ietf.org/doc/rfc8996/) for details. + +### Is two-way TLS authentication between my client and {{{ .premium }}} supported? + +No. + +{{{ .premium }}} only supports one-way TLS authentication, and does not support two-way TLS authentication currently. If you need two-way TLS authentication, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). diff --git a/tidb-cloud/releases/_index.md b/tidb-cloud/releases/_index.md new file mode 100644 index 0000000000000..8aa2b1df1a0af --- /dev/null +++ b/tidb-cloud/releases/_index.md @@ -0,0 +1,16 @@ +--- +title: TiDB Cloud Releases +summary: Learn about TiDB Cloud release notes and maintenance notifications. +--- + +# TiDB Cloud Releases + +[TiDB Cloud](https://www.pingcap.com/tidb/cloud/) is a fully managed Database-as-a-Service (DBaaS) that brings [TiDB](https://docs.pingcap.com/tidb/stable/overview), an open-source Hybrid Transactional and Analytical Processing (HTAP) database, to your cloud. TiDB Cloud offers an easy way to deploy and manage databases to let you focus on your applications, not the complexities of databases. This document provides an overview of TiDB Cloud release notes and maintenance notifications. 
+ +## Release notes + +TiDB Cloud release notes provide information about new features and improvements in each release. For detailed release notes, see [TiDB Cloud Release Notes](/tidb-cloud/releases/tidb-cloud-release-notes.md). + +## Maintenance notifications + +TiDB Cloud maintenance notifications provide information about scheduled maintenance activities that might affect your TiDB Cloud services. diff --git a/tidb-cloud/notification-2023-08-31-console-maintenance.md b/tidb-cloud/releases/notification-2023-08-31-console-maintenance.md similarity index 100% rename from tidb-cloud/notification-2023-08-31-console-maintenance.md rename to tidb-cloud/releases/notification-2023-08-31-console-maintenance.md diff --git a/tidb-cloud/notification-2023-09-26-console-maintenance.md b/tidb-cloud/releases/notification-2023-09-26-console-maintenance.md similarity index 92% rename from tidb-cloud/notification-2023-09-26-console-maintenance.md rename to tidb-cloud/releases/notification-2023-09-26-console-maintenance.md index 5e00966b1da98..ed1305080f35b 100644 --- a/tidb-cloud/notification-2023-09-26-console-maintenance.md +++ b/tidb-cloud/releases/notification-2023-09-26-console-maintenance.md @@ -20,7 +20,7 @@ This notification describes the details that you need to know about the [TiDB Cl ## Reason for maintenance -We're upgrading the management infrastructure of the TiDB Cloud Serverless to enhance performance and efficiency, delivering a better experience for all users. This is part of our ongoing commitment to providing high-quality services. +We're upgrading the management infrastructure of the {{{ .starter }}} to enhance performance and efficiency, delivering a better experience for all users. This is part of our ongoing commitment to providing high-quality services. 
## Impact diff --git a/tidb-cloud/notification-2023-11-14-scale-feature-maintenance.md b/tidb-cloud/releases/notification-2023-11-14-scale-feature-maintenance.md similarity index 100% rename from tidb-cloud/notification-2023-11-14-scale-feature-maintenance.md rename to tidb-cloud/releases/notification-2023-11-14-scale-feature-maintenance.md diff --git a/tidb-cloud/notification-2024-04-09-monitoring-features-maintenance.md b/tidb-cloud/releases/notification-2024-04-09-monitoring-features-maintenance.md similarity index 98% rename from tidb-cloud/notification-2024-04-09-monitoring-features-maintenance.md rename to tidb-cloud/releases/notification-2024-04-09-monitoring-features-maintenance.md index 111d47fcdb34b..acc17d4d89800 100644 --- a/tidb-cloud/notification-2024-04-09-monitoring-features-maintenance.md +++ b/tidb-cloud/releases/notification-2024-04-09-monitoring-features-maintenance.md @@ -30,7 +30,7 @@ During the maintenance window, the monitoring features in the following regions - Cloud Provider: Google Cloud, Region: Iowa (us-central1) - Cloud Provider: Google Cloud, Region: Taiwan (asia-east1) -- TiDB Cloud Serverless clusters: +- {{{ .starter }}} clusters: - Cloud Provider: AWS, Region: Frankfurt (eu-central-1) - Cloud Provider: AWS, Region: Oregon (us-west-2) diff --git a/tidb-cloud/notification-2024-04-11-dm-feature-maintenance.md b/tidb-cloud/releases/notification-2024-04-11-dm-feature-maintenance.md similarity index 100% rename from tidb-cloud/notification-2024-04-11-dm-feature-maintenance.md rename to tidb-cloud/releases/notification-2024-04-11-dm-feature-maintenance.md diff --git a/tidb-cloud/notification-2024-04-16-monitoring-features-maintenance.md b/tidb-cloud/releases/notification-2024-04-16-monitoring-features-maintenance.md similarity index 98% rename from tidb-cloud/notification-2024-04-16-monitoring-features-maintenance.md rename to tidb-cloud/releases/notification-2024-04-16-monitoring-features-maintenance.md index 
a83d40080a648..17d92e1d1b976 100644 --- a/tidb-cloud/notification-2024-04-16-monitoring-features-maintenance.md +++ b/tidb-cloud/releases/notification-2024-04-16-monitoring-features-maintenance.md @@ -23,7 +23,7 @@ During the maintenance window, the monitoring features in the following regions - Cloud Provider: AWS, Region: Tokyo (ap-northeast-1) - Cloud Provider: AWS, Region: N. Virginia (us-east-1) -- TiDB Cloud Serverless clusters: +- {{{ .starter }}} clusters: - Cloud Provider: AWS, Region: Tokyo (ap-northeast-1) - Cloud Provider: AWS, Region: N. Virginia (us-east-1) diff --git a/tidb-cloud/notification-2024-04-18-dm-feature-maintenance.md b/tidb-cloud/releases/notification-2024-04-18-dm-feature-maintenance.md similarity index 100% rename from tidb-cloud/notification-2024-04-18-dm-feature-maintenance.md rename to tidb-cloud/releases/notification-2024-04-18-dm-feature-maintenance.md diff --git a/tidb-cloud/notification-2024-09-15-console-maintenance.md b/tidb-cloud/releases/notification-2024-09-15-console-maintenance.md similarity index 100% rename from tidb-cloud/notification-2024-09-15-console-maintenance.md rename to tidb-cloud/releases/notification-2024-09-15-console-maintenance.md diff --git a/tidb-cloud/release-notes-2020.md b/tidb-cloud/releases/release-notes-2020.md similarity index 100% rename from tidb-cloud/release-notes-2020.md rename to tidb-cloud/releases/release-notes-2020.md diff --git a/tidb-cloud/release-notes-2021.md b/tidb-cloud/releases/release-notes-2021.md similarity index 97% rename from tidb-cloud/release-notes-2021.md rename to tidb-cloud/releases/release-notes-2021.md index c99c9d36a8811..e1365317b6f79 100644 --- a/tidb-cloud/release-notes-2021.md +++ b/tidb-cloud/releases/release-notes-2021.md @@ -40,7 +40,7 @@ Bug fixes: ## November 8, 2021 -* Launch [Developer Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless), which offers you a one-year free trial of TiDB Cloud +* Launch [Developer 
Tier](/tidb-cloud/select-cluster-tier.md#starter), which offers you a one-year free trial of TiDB Cloud Each Developer Tier cluster is a full-featured TiDB cluster and comes with the following: diff --git a/tidb-cloud/release-notes-2022.md b/tidb-cloud/releases/release-notes-2022.md similarity index 94% rename from tidb-cloud/release-notes-2022.md rename to tidb-cloud/releases/release-notes-2022.md index e279309a37fd2..7e85f66d6c712 100644 --- a/tidb-cloud/release-notes-2022.md +++ b/tidb-cloud/releases/release-notes-2022.md @@ -11,13 +11,13 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Currently, after upgrading the default TiDB version of all [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters from [v6.3.0](https://docs-archive.pingcap.com/tidb/v6.3/release-6.3.0) to [v6.4.0](https://docs-archive.pingcap.com/tidb/v6.4/release-6.4.0), the cold start becomes slower in certain circumstances. So we roll back the default TiDB version of all Serverless Tier clusters from v6.4.0 to v6.3.0, then fix the problem as soon as possible, and upgrade it later again. +- Currently, after upgrading the default TiDB version of all [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v6.3.0](https://docs.pingcap.com/tidb/stable/release-6.3.0) to [v6.4.0](https://docs.pingcap.com/tidb/stable/release-6.4.0), the cold start becomes slower in certain circumstances. So we roll back the default TiDB version of all Serverless Tier clusters from v6.4.0 to v6.3.0, then fix the problem as soon as possible, and upgrade it later again. ## December 27, 2022 **General changes** -- Upgrade the default TiDB version of all [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters from [v6.3.0](https://docs-archive.pingcap.com/tidb/v6.3/release-6.3.0) to [v6.4.0](https://docs-archive.pingcap.com/tidb/v6.4/release-6.4.0). 
+- Upgrade the default TiDB version of all [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v6.3.0](https://docs.pingcap.com/tidb/stable/release-6.3.0) to [v6.4.0](https://docs.pingcap.com/tidb/stable/release-6.4.0). - The point-in-time recovery (PITR) for Dedicated Tier clusters is now in General Availability (GA). @@ -110,7 +110,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c No matter whether you are new to TiDB Cloud or you already have a TiDB Cloud account, now you can link with your AWS or GCP billing account, which makes it easier to complete AWS or GCP Marketplace subscriptions. - For how to make the link, see [Billing from AWS Marketplace or Google Cloud Marketplace](/tidb-cloud/tidb-cloud-billing.md#billing-from-aws-marketplace-azure-marketplace-or-google-cloud-marketplace). + For how to make the link, see [Billing from Cloud Provider Marketplace](/tidb-cloud/tidb-cloud-billing.md#billing-from-cloud-provider-marketplace). ## November 22, 2022 @@ -169,7 +169,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -* Developer Tier is upgraded to [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless). Serverless Tier, a fully-managed, auto-scaling deployment of TiDB, is now available. It is still in beta and free to use. +* Developer Tier is upgraded to [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter). Serverless Tier, a fully-managed, auto-scaling deployment of TiDB, is now available. It is still in beta and free to use. * A Serverless Tier cluster still contains fully functional HTAP ability as Dedicated Tier clusters. * Serverless Tier offers you faster cluster creation time and instantaneous cold start time. Compared with Developer Tier, the creation time reduces from minutes to seconds. 
@@ -247,7 +247,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -* Upgrade the default TiDB version of new [Developer Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters from [v6.2.0](https://docs-archive.pingcap.com/tidb/v6.2/release-6.2.0) to [v6.3.0](https://docs-archive.pingcap.com/tidb/v6.3/release-6.3.0). +* Upgrade the default TiDB version of new [Developer Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v6.2.0](https://docs.pingcap.com/tidb/stable/release-6.2.0) to [v6.3.0](https://docs.pingcap.com/tidb/stable/release-6.3.0). **Console changes** @@ -292,7 +292,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c * The TiDB Cloud API (beta) is now available to all users. - You can start using the API by creating an API key in the TiDB Cloud console. For more information, refer to [API documentation](/tidb-cloud/api-overview.md). + You can start using the API by creating an API key in the TiDB Cloud console. For more information, refer to [API documentation](https://docs.pingcap.com/api/tidb-cloud-api-overview). ## September 15, 2022 @@ -310,7 +310,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c In the new design, the entrances of upgrade to Dedicated Tier, cluster connection, and data import are highlighted. -* Introduce Playground for [Developer Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +* Introduce Playground for [Developer Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters. Playground contains a pre-loaded dataset of GitHub events, which allows you to get started with TiDB Cloud by running queries instantly, without importing your data or connecting to a client. 
@@ -340,7 +340,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **API changes** -* Support increasing the storage of a TiKV or TiFlash node through the [TiDB Cloud API](/tidb-cloud/api-overview.md). You can use the `storage_size_gib` field of the API endpoint to do the scaling. +* Support increasing the storage of a TiKV or TiFlash node through the [TiDB Cloud API](https://docs.pingcap.com/api/tidb-cloud-api-overview). You can use the `storage_size_gib` field of the API endpoint to do the scaling. Currently, TiDB Cloud API is still in beta and only available upon request. @@ -374,7 +374,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -* Upgrade the default TiDB version of new [Developer Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters from [v6.1.0](https://docs.pingcap.com/tidb/stable/release-6.1.0) to [v6.2.0](https://docs-archive.pingcap.com/tidb/v6.2/release-6.2.0). +* Upgrade the default TiDB version of new [Developer Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v6.1.0](https://docs.pingcap.com/tidb/stable/release-6.1.0) to [v6.2.0](https://docs.pingcap.com/tidb/stable/release-6.2.0). **API changes** @@ -441,19 +441,19 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c ## July 26, 2022 -* Support automatic hibernation and resuming for new [Developer Tier clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless). +* Support automatic hibernation and resuming for new [Developer Tier clusters](/tidb-cloud/select-cluster-tier.md#starter). A Developer Tier cluster will not be deleted after 7 days of inactivity so you can still use it at any time until the one-year free trial ends. After 24 hours of inactivity, the Developer Tier cluster will hibernate automatically. 
To resume the cluster, either send a new connection to the cluster or click the **Resume** button in the TiDB Cloud console. The cluster will be resumed within 50 seconds and back to service automatically. -* Add a user name prefix limitation for new [Developer Tier clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless). +* Add a user name prefix limitation for new [Developer Tier clusters](/tidb-cloud/select-cluster-tier.md#starter). Whenever you use or set a database user name, you must include the prefix for your cluster in the user name. For more information, see [User name prefix](/tidb-cloud/select-cluster-tier.md#user-name-prefix). -* Disable the backup and restore feature for [Developer Tier clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless). +* Disable the backup and restore feature for [Developer Tier clusters](/tidb-cloud/select-cluster-tier.md#starter). The backup and restore feature (including both automatic backup and manual backup) is disabled for Developer Tier clusters. You can still use [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview) to export your data as a backup. -* Increase the storage size of a [Developer Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster from 500 MiB to 1 GiB. +* Increase the storage size of a [Developer Tier](/tidb-cloud/select-cluster-tier.md#starter) cluster from 500 MiB to 1 GiB. * Add breadcrumbs to the TiDB Cloud console to improve the navigation experience. * Support configuring multiple filter rules when you import data into TiDB Cloud. * Remove the **Traffic Filters** page from **Project Settings**, and remove the **Add Rules from Default Set** button from the **Connect to TiDB** dialog. @@ -526,7 +526,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c ## May 19, 2022 -* Add the support of the AWS region `Frankfurt` for [Developer Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster creation. 
+* Add the support of the AWS region `Frankfurt` for [Developer Tier](/tidb-cloud/select-cluster-tier.md#starter) cluster creation. ## May 18, 2022 @@ -544,13 +544,13 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c ## April 7, 2022 -* Upgrade TiDB Cloud to [TiDB v6.0.0](https://docs-archive.pingcap.com/tidb/v6.0/release-6.0.0-dmr) for Developer Tier. +* Upgrade TiDB Cloud to [TiDB v6.0.0](https://docs.pingcap.com/tidb/stable/release-6.0.0-dmr) for Developer Tier. ## March 31, 2022 TiDB Cloud is now in General Availability. You can [sign up](https://tidbcloud.com/signup) and select one of the following options: -* Get started with [Developer Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) for free. +* Get started with [Developer Tier](/tidb-cloud/select-cluster-tier.md#starter) for free. * Contact us to apply for a 14-day PoC trial for free. * Get full access with [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). diff --git a/tidb-cloud/release-notes-2023.md b/tidb-cloud/releases/release-notes-2023.md similarity index 92% rename from tidb-cloud/release-notes-2023.md rename to tidb-cloud/releases/release-notes-2023.md index c464fdb824131..c6414224482d7 100644 --- a/tidb-cloud/release-notes-2023.md +++ b/tidb-cloud/releases/release-notes-2023.md @@ -17,7 +17,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **Console changes** -- Enhance the connection experience for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless). +- Enhance the connection experience for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter). Refine the **Connect** dialog interface to offer TiDB Cloud Serverless users a smoother and more efficient connection experience. In addition, TiDB Cloud Serverless introduces more client types and allows you to select the desired branch for connection. 
@@ -35,9 +35,9 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **Console changes** -- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) supports monitoring SQL statement RU costs. +- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) supports monitoring SQL statement RU costs. - TiDB Cloud Serverless now provides detailed insights into each SQL statement's [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit). You can view both the **Total RU** and **Mean RU** costs per SQL statement. This feature helps you identify and analyze RU costs, offering opportunities for potential cost savings in your operations. + TiDB Cloud Serverless now provides detailed insights into each SQL statement's [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru). You can view both the **Total RU** and **Mean RU** costs per SQL statement. This feature helps you identify and analyze RU costs, offering opportunities for potential cost savings in your operations. To check your SQL statement RU details, navigate to the **Diagnosis** page of [your TiDB Cloud Serverless cluster](https://tidbcloud.com/project/clusters) and then click the **SQL Statement** tab. @@ -124,7 +124,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c - Support using TiDB Cloud Serverless branches in [Vercel Preview Deployments](https://vercel.com/docs/deployments/preview-deployments), with TiDB Cloud Vercel integration. - For more information, see [Connect with TiDB Cloud Serverless branching](/tidb-cloud/integrate-tidbcloud-with-vercel.md#connect-with-tidb-cloud-serverless-branching). + For more information, see [Connect with TiDB Cloud Serverless branching](/tidb-cloud/integrate-tidbcloud-with-vercel.md#connect-with-branching). 
## September 28, 2023 @@ -132,7 +132,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c - Introduce a TiDB Cloud Billing API endpoint to retrieve the bill for the given month of a specific organization. - This Billing API endpoint is released in TiDB Cloud API v1beta1, which is the latest API version of TiDB Cloud. For more information, refer to the [API documentation (v1beta1)](https://docs.pingcap.com/tidbcloud/api/v1beta1/billing). + This Billing API endpoint is released in TiDB Cloud API v1beta1, which is the latest API version of TiDB Cloud. For more information, refer to the [API documentation (v1beta1)](https://docs.pingcap.com/tidbcloud/api/v1beta1#tag/Billing). ## September 19, 2023 @@ -142,15 +142,15 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c The 2 vCPU option is no longer available on the **Create Cluster** page or the **Modify Cluster** page. -- Release [TiDB Cloud serverless driver (beta)](/tidb-cloud/serverless-driver.md) for JavaScript. +- Release [TiDB Cloud serverless driver (beta)](/develop/serverless-driver.md) for JavaScript. - TiDB Cloud serverless driver for JavaScript allows you to connect to your [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster over HTTPS. It is particularly useful in edge environments where TCP connections are limited, such as [Vercel Edge Function](https://vercel.com/docs/functions/edge-functions) and [Cloudflare Workers](https://workers.cloudflare.com/). + TiDB Cloud serverless driver for JavaScript allows you to connect to your [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) cluster over HTTPS. It is particularly useful in edge environments where TCP connections are limited, such as [Vercel Edge Function](https://vercel.com/docs/functions/edge-functions) and [Cloudflare Workers](https://workers.cloudflare.com/). 
- For more information, see [TiDB Cloud serverless driver (beta)](/tidb-cloud/serverless-driver.md). + For more information, see [TiDB Cloud serverless driver (beta)](/develop/serverless-driver.md). **Console changes** -- For [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters, you can get an estimation of cost in the **Usage This Month** panel or while setting up the spending limit. +- For [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters, you can get an estimation of cost in the **Usage This Month** panel or while setting up the spending limit. ## September 5, 2023 @@ -170,7 +170,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **Console changes** -- Introduce the **Events** page for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters, which provides the records of main changes to your cluster. +- Introduce the **Events** page for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters, which provides the records of main changes to your cluster. On this page, you can view the event history for the last 7 days and track important details such as the trigger time and the user who initiated an action. @@ -306,7 +306,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v6.5.3](https://docs.pingcap.com/tidb/v6.5/release-6.5.3) to [v7.1.1](https://docs.pingcap.com/tidb/v7.1/release-7.1.1). +- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v6.5.3](https://docs.pingcap.com/tidb/stable/release-6.5.3) to [v7.1.1](https://docs.pingcap.com/tidb/stable/release-7.1.1). 
**Console changes** @@ -348,7 +348,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) now is Generally Available. +- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) now is Generally Available. - Introduce TiDB Bot (beta), an OpenAI-powered chatbot that offers multi-language support, 24/7 real-time response, and integrated documentation access. @@ -360,7 +360,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c To use TiDB Bot, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and select **Ask TiDB Bot** to start a chat. -- Support [the branching feature (beta)](/tidb-cloud/branch-overview.md) for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Support [the branching feature (beta)](/tidb-cloud/branch-overview.md) for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters. TiDB Cloud lets you create branches for TiDB Cloud Serverless clusters. A branch for a cluster is a separate instance that contains a diverged copy of data from the original cluster. It provides an isolated environment, allowing you to connect to it and experiment freely without worrying about affecting the original cluster. @@ -374,7 +374,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Support point-in-time recovery (PITR) (beta) for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Support point-in-time recovery (PITR) (beta) for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters. You can now restore your TiDB Cloud Serverless cluster to any point in time within the last 90 days. This feature enhances the data recovery capability of TiDB Cloud Serverless clusters. 
For example, you can use PITR when data write errors occur and you want to restore the data to an earlier state. @@ -382,7 +382,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **Console changes** -- Enhance the **Usage This Month** panel on the cluster overview page for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters to provide a clearer view of your current resource usage. +- Enhance the **Usage This Month** panel on the cluster overview page for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters to provide a clearer view of your current resource usage. - Enhance the overall navigation experience by making the following changes: @@ -396,13 +396,13 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Remove the pre-built sample dataset for newly created [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Remove the pre-built sample dataset for newly created [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters. ## June 20, 2023 **General changes** -- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v6.5.2](https://docs.pingcap.com/tidb/v6.5/release-6.5.2) to [v6.5.3](https://docs.pingcap.com/tidb/v6.5/release-6.5.3). +- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v6.5.2](https://docs.pingcap.com/tidb/stable/release-6.5.2) to [v6.5.3](https://docs.pingcap.com/tidb/stable/release-6.5.3). ## June 13, 2023 @@ -420,7 +420,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c For more information, see [Size your cluster](/tidb-cloud/size-your-cluster.md). 
-- Extend the [monitoring metrics retention period](/tidb-cloud/built-in-monitoring.md#metrics-retention-policy) for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters from 3 days to 7 days. +- Extend the [monitoring metrics retention period](/tidb-cloud/built-in-monitoring.md#metrics-retention-policy) for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters from 3 days to 7 days. By extending the metrics retention period, now you have access to more historical data. This helps you identify trends and patterns of the cluster for better decision-making and faster troubleshooting. @@ -537,7 +537,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c For more information, see [Import CSV Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-csv-files.md) and [Import Sample Data](/tidb-cloud/import-sample-data.md). -- Support AWS PrivateLink-powered endpoint connection as a new network access management option for TiDB Cloud [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Support AWS PrivateLink-powered endpoint connection as a new network access management option for TiDB Cloud [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters. The private endpoint connection does not expose your data to the public internet. In addition, the endpoint connection supports CIDR overlap and is easier for network management. @@ -549,7 +549,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c To get a full list of the events that can be recorded, see [Logged events](/tidb-cloud/tidb-cloud-events.md#logged-events). -- Introduce the **SQL Statement** tab on the [**SQL Diagnosis**](/tidb-cloud/tune-performance.md) page for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. 
+- Introduce the **SQL Statement** tab on the [**SQL Diagnosis**](/tidb-cloud/tune-performance.md) page for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters. The **SQL Statement** tab provides the following: @@ -563,7 +563,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Support directly accessing the [Data Service endpoint](/tidb-cloud/tidb-cloud-glossary.md#endpoint) in the region where a TiDB [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster is located. +- Support directly accessing the [Data Service endpoint](/tidb-cloud/tidb-cloud-glossary.md#endpoint) in the region where a TiDB [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) cluster is located. For newly created Serverless Tier clusters, the endpoint URL now includes the cluster region information. By requesting the regional domain `.data.tidbcloud.com`, you can directly access the endpoint in the region where the TiDB cluster is located. @@ -575,22 +575,22 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- For the first five [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters in your organization, TiDB Cloud provides a free usage quota for each of them as follows: +- For the first five [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters in your organization, TiDB Cloud provides a free usage quota for each of them as follows: - Row storage: 5 GiB - - [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit): 50 million RUs per month + - [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru): 50 million RUs per month Until May 31, 2023, Serverless Tier clusters are still free, with a 100% discount off. After that, usage beyond the free quota will be charged. 
- You can easily [monitor your cluster usage or increase your usage quota](/tidb-cloud/manage-serverless-spend-limit.md#manage-spending-limit-for-tidb-cloud-serverless-scalable-clusters) in the **Usage This Month** area of your cluster **Overview** page. Once the free quota of a cluster is reached, the read and write operations on this cluster will be throttled until you increase the quota or the usage is reset upon the start of a new month. + You can easily [monitor your cluster usage or increase your usage quota](/tidb-cloud/manage-serverless-spend-limit.md) in the **Usage This Month** area of your cluster **Overview** page. Once the free quota of a cluster is reached, the read and write operations on this cluster will be throttled until you increase the quota or the usage is reset upon the start of a new month. - For more information about the RU consumption of different resources (including read, write, SQL CPU, and network egress), the pricing details, and the throttled information, see [TiDB Cloud Serverless Tier Pricing Details](https://www.pingcap.com/tidb-cloud-serverless-pricing-details). + For more information about the RU consumption of different resources (including read, write, SQL CPU, and network egress), the pricing details, and the throttled information, see [TiDB Cloud Serverless Tier Pricing Details](https://www.pingcap.com/tidb-cloud-starter-pricing-details). -- Support backup and restore for TiDB Cloud [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Support backup and restore for TiDB Cloud [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters. For more information, see [Back up and Restore TiDB Cluster Data](/tidb-cloud/backup-and-restore-serverless.md). 
-- Upgrade the default TiDB version of new [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v6.5.1](https://docs.pingcap.com/tidb/v6.5/release-6.5.1) to [v6.5.2](https://docs.pingcap.com/tidb/v6.5/release-6.5.2). +- Upgrade the default TiDB version of new [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v6.5.1](https://docs.pingcap.com/tidb/stable/release-6.5.1) to [v6.5.2](https://docs.pingcap.com/tidb/stable/release-6.5.2). - Provide a maintenance window feature to enable you to easily schedule and manage planned maintenance activities for [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. @@ -729,7 +729,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c For more information, see [Scale a changefeed](/tidb-cloud/changefeed-overview.md#scale-a-changefeed). -- Support replicating incremental data in real-time from a [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster in AWS to a [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster in the same project and same region. +- Support replicating incremental data in real-time from a [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster in AWS to a [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) cluster in the same project and same region. For more information, see [Sink to TiDB Cloud](/tidb-cloud/changefeed-sink-to-tidb-cloud.md). @@ -741,7 +741,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **Console changes** -- Release a new native web infrastructure for the [Slow Query](/tidb-cloud/tune-performance.md#slow-query) page of [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. 
+- Release a new native web infrastructure for the [Slow Query](/tidb-cloud/tune-performance.md#slow-query) page of [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters. With this new infrastructure, you can easily navigate through the [Slow Query](/tidb-cloud/tune-performance.md#slow-query) page and access the necessary information in a more intuitive and efficient manner. The new infrastructure also resolves many problems on UX, making the SQL diagnosis process more user-friendly. @@ -749,7 +749,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Introduce [Data Service (beta)](https://tidbcloud.com/project/data-service) for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters, which enables you to access data via an HTTPS request using a custom API endpoint. +- Introduce [Data Service (beta)](https://tidbcloud.com/project/data-service) for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters, which enables you to access data via an HTTPS request using a custom API endpoint. With Data Service, you can seamlessly integrate TiDB Cloud with any application or service that is compatible with HTTPS. The following are some common scenarios: @@ -784,7 +784,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c For more information, see [TiDB Cloud cluster events](/tidb-cloud/tidb-cloud-events.md). 
-- Add the **Database Status** tab to the **Monitoring** page for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters, which displays the following database-level metrics: +- Add the **Database Status** tab to the **Monitoring** page for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters, which displays the following database-level metrics: - QPS Per DB - Average Query Duration Per DB @@ -798,11 +798,11 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Upgrade the default TiDB version of new [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v6.5.0](https://docs.pingcap.com/tidb/v6.5/release-6.5.0) to [v6.5.1](https://docs.pingcap.com/tidb/v6.5/release-6.5.1). +- Upgrade the default TiDB version of new [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v6.5.0](https://docs.pingcap.com/tidb/stable/release-6.5.0) to [v6.5.1](https://docs.pingcap.com/tidb/stable/release-6.5.1). - Support modifying column names of the target table to be created by TiDB Cloud when uploading a local CSV file with a header row. - When importing a local CSV file with a header row to a [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster, if you need TiDB Cloud to create the target table and the column names in the header row do not follow the TiDB Cloud column naming conventions, you will see a warning icon next to the corresponding column name. To resolve the warning, you can move the cursor over the icon and follow the message to edit the existing column names or enter new column names. 
+ When importing a local CSV file with a header row to a [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) cluster, if you need TiDB Cloud to create the target table and the column names in the header row do not follow the TiDB Cloud column naming conventions, you will see a warning icon next to the corresponding column name. To resolve the warning, you can move the cursor over the icon and follow the message to edit the existing column names or enter new column names. For information about column naming conventions, see [Import local files](/tidb-cloud/tidb-cloud-import-local-files.md#import-local-files). @@ -810,13 +810,13 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Upgrade the default TiDB version of all [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters from [v6.4.0](https://docs.pingcap.com/tidb/v6.4/release-6.4.0) to [v6.6.0](https://docs.pingcap.com/tidb/v6.6/release-6.6.0). +- Upgrade the default TiDB version of all [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v6.4.0](https://docs.pingcap.com/tidb/stable/release-6.4.0) to [v6.6.0](https://docs.pingcap.com/tidb/stable/release-6.6.0). ## February 28, 2023 **General changes** -- Add the [SQL Diagnosis](/tidb-cloud/tune-performance.md) feature for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Add the [SQL Diagnosis](/tidb-cloud/tune-performance.md) feature for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters. With SQL Diagnosis, you can gain deep insights into SQL-related runtime status, which makes the SQL performance tuning more efficient. Currently, the SQL Diagnosis feature for Serverless Tier only provides slow query data. 
@@ -880,7 +880,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **Console changes** -- Release a new native web infrastructure on the Monitoring page of [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Release a new native web infrastructure on the Monitoring page of [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters. With the new infrastructure, you can easily navigate through the Monitoring page and access the necessary information in a more intuitive and efficient manner. The new infrastructure also resolves many problems on UX, making the monitoring process a lot more user-friendly. @@ -902,7 +902,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **Console changes** -- Introduce the **Monitoring** page for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Introduce the **Monitoring** page for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters. The **Monitoring** page provides a range of metrics and data, such as the number of SQL statements executed per second, the average duration of queries, and the number of failed queries, which helps you better understand the overall performance of SQL statements in your Serverless Tier cluster. @@ -930,7 +930,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c - Upgrade the default TiDB version of new [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v6.1.3](https://docs.pingcap.com/tidb/stable/release-6.1.3) to [v6.5.0](https://docs.pingcap.com/tidb/stable/release-6.5.0). -- For new sign-up users, TiDB Cloud will automatically create a free [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster so that you can quickly start a data exploration journey with TiDB Cloud. 
+- For new sign-up users, TiDB Cloud will automatically create a free [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) cluster so that you can quickly start a data exploration journey with TiDB Cloud. - Support a new AWS region for [Dedicated Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters: `Seoul (ap-northeast-2)`. @@ -944,7 +944,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Optimize the feature of importing data from local CSV files to TiDB to improve the user experience for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +- Optimize the feature of importing data from local CSV files to TiDB to improve the user experience for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters. - To upload a CSV file, now you can simply drag and drop it to the upload area on the **Import** page. - When creating an import task, if your target database or table does not exist, you can enter a name to let TiDB Cloud create it for you automatically. For the target table to be created, you can specify a primary key or select multiple fields to form a composite primary key. @@ -965,7 +965,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **Console changes** -- Rename SQL Editor (beta) to Chat2Query (beta) for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters and support generating SQL queries using AI. +- Rename SQL Editor (beta) to Chat2Query (beta) for [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters and support generating SQL queries using AI. In Chat2Query, you can either let AI generate SQL queries automatically or write SQL queries manually, and run SQL queries against databases without a terminal. 
@@ -991,7 +991,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c For more information, see [Use Grafana GUI dashboards to visualize the metrics](/tidb-cloud/monitor-prometheus-and-grafana-integration.md#step-3-use-grafana-gui-dashboards-to-visualize-the-metrics). -- Upgrade the default TiDB version of all [Serverless Tier](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters from [v6.3.0](https://docs.pingcap.com/tidb/v6.3/release-6.3.0) to [v6.4.0](https://docs.pingcap.com/tidb/v6.4/release-6.4.0). The cold start issue after upgrading the default TiDB version of Serverless Tier clusters to v6.4.0 has been resolved. +- Upgrade the default TiDB version of all [Serverless Tier](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v6.3.0](https://docs.pingcap.com/tidb/stable/release-6.3.0) to [v6.4.0](https://docs.pingcap.com/tidb/stable/release-6.4.0). The cold start issue after upgrading the default TiDB version of Serverless Tier clusters to v6.4.0 has been resolved. **Console changes** diff --git a/tidb-cloud/release-notes-2024.md b/tidb-cloud/releases/release-notes-2024.md similarity index 87% rename from tidb-cloud/release-notes-2024.md rename to tidb-cloud/releases/release-notes-2024.md index 50480e54047e2..1d91781a79072 100644 --- a/tidb-cloud/release-notes-2024.md +++ b/tidb-cloud/releases/release-notes-2024.md @@ -29,16 +29,14 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c - Introduce the Recovery Group feature (beta) for disaster recovery of [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters deployed on AWS. This feature enables you to replicate your databases between TiDB Cloud Dedicated clusters, ensuring rapid recovery in the event of a regional disaster. If you are in the Project Owner role, you can enable this feature by creating a new recovery group and assigning databases to the group. 
By replicating databases with recovery groups, you can improve disaster readiness, meet stricter availability SLAs, and achieve more aggressive Recovery Point Objectives (RPO) and Recovery Time Objectives (RTO). - - For more information, see [Get started with recovery groups](/tidb-cloud/recovery-group-get-started.md). ## November 26, 2024 **General changes** -- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.4](https://docs.pingcap.com/tidb/v7.5/release-7.5.4) to [v8.1.1](https://docs.pingcap.com/tidb/stable/release-8.1.1). +- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.4](https://docs.pingcap.com/tidb/stable/release-7.5.4) to [v8.1.1](https://docs.pingcap.com/tidb/stable/release-8.1.1). -- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) reduces costs for large data writes by up to 80% for the following scenarios: +- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) reduces costs for large data writes by up to 80% for the following scenarios: - When you perform write operations larger than 16 MiB in [autocommit mode](/transaction-overview.md#autocommit). - When you perform write operations larger than 16 MiB in [optimistic transaction model](/optimistic-transaction.md). @@ -90,7 +88,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.3](https://docs.pingcap.com/tidb/v7.5/release-7.5.3) to [v7.5.4](https://docs.pingcap.com/tidb/v7.5/release-7.5.4). 
+- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.3](https://docs.pingcap.com/tidb/stable/release-7.5.3) to [v7.5.4](https://docs.pingcap.com/tidb/stable/release-7.5.4). ## October 15, 2024 @@ -110,8 +108,8 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c TiDB Cloud CLI provides the following new features: - - Support SQL user management for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters via [`ticloud serverless sql-user`](/tidb-cloud/ticloud-serverless-sql-user-create.md). - - Allow disabling the public endpoint for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters in [`ticloud serverless create`](/tidb-cloud/ticloud-cluster-create.md) and [`ticloud serverless update`](/tidb-cloud/ticloud-serverless-update.md). + - Support SQL user management for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters via [`ticloud serverless sql-user`](/tidb-cloud/ticloud-serverless-sql-user-create.md). + - Allow disabling the public endpoint for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters in [`ticloud serverless create`](/tidb-cloud/ticloud-cluster-create.md) and [`ticloud serverless update`](/tidb-cloud/ticloud-serverless-update.md). - Add the [`ticloud auth whoami`](/tidb-cloud/ticloud-auth-whoami.md) command to get information about the current user when using OAuth authentication. - Support `--sql`, `--where`, and `--filter` flags in [`ticloud serverless export create`](/tidb-cloud/ticloud-serverless-export-create.md) to choose source tables flexibly. - Support exporting data to CSV and Parquet files. @@ -148,7 +146,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c Previously, TiDB Cloud only supported exporting data using the [TiDB Cloud CLI](/tidb-cloud/cli-reference.md). 
Now, you can easily export data from TiDB Cloud Serverless clusters to local files and Amazon S3 in the [TiDB Cloud console](https://tidbcloud.com/). - For more information, see [Export Data from TiDB Cloud Serverless](/tidb-cloud/serverless-export.md) and [Configure External Storage Access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md). + For more information, see [Export Data from TiDB Cloud Serverless](/tidb-cloud/serverless-export.md) and [Configure External Storage Access for TiDB Cloud Serverless](/tidb-cloud/configure-external-storage-access.md). - Enhance the connection experience for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. @@ -158,7 +156,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c For more information, see [Connect to TiDB Cloud Dedicated](/tidb-cloud/connect-to-tidb-cluster.md). -- Enhance the data import experience for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters: +- Enhance the data import experience for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters: - Refine the layout of the **Import** page with a clearer layout. - Unify the import steps for TiDB Cloud Serverless and TiDB Cloud Dedicated clusters. @@ -184,7 +182,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c This charge will appear under the existing **TiDB Cloud Dedicated - Data Transfer - Load Balancing** service in your [billing details](/tidb-cloud/tidb-cloud-billing.md#billing-details). 
-- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.2](https://docs.pingcap.com/tidb/v7.5/release-7.5.2) to [v7.5.3](https://docs.pingcap.com/tidb/v7.5/release-7.5.3). +- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.2](https://docs.pingcap.com/tidb/stable/release-7.5.2) to [v7.5.3](https://docs.pingcap.com/tidb/stable/release-7.5.3). **Console changes** @@ -198,7 +196,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c - [Data Service (beta)](https://tidbcloud.com/project/data-service) supports automatically generating vector search endpoints. - If your table contains [vector data types](/vector-search/vector-search-data-types.md), you can automatically generate a vector search endpoint that calculates vector distances based on your selected distance function. + If your table contains [vector data types](/ai/reference/vector-search-data-types.md), you can automatically generate a vector search endpoint that calculates vector distances based on your selected distance function. This feature enables seamless integration with AI platforms such as [Dify](https://docs.dify.ai/guides/tools) and [GPTs](https://openai.com/blog/introducing-gpts), enhancing your applications with advanced natural language processing and AI capabilities for more complex tasks and intelligent solutions. @@ -240,18 +238,18 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) supports vector search (beta). +- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) supports vector search (beta). 
The vector search (beta) feature provides an advanced search solution for performing semantic similarity searches across various data types, including documents, images, audio, and video. This feature enables developers to easily build scalable applications with generative artificial intelligence (AI) capabilities using familiar MySQL skills. Key features include: - - [Vector data types](/vector-search/vector-search-data-types.md), [vector index](/vector-search/vector-search-index.md), and [vector functions and operators](/vector-search/vector-search-functions-and-operators.md). - - Ecosystem integrations with [LangChain](/vector-search/vector-search-integrate-with-langchain.md), [LlamaIndex](/vector-search/vector-search-integrate-with-llamaindex.md), and [JinaAI](/vector-search/vector-search-integrate-with-jinaai-embedding.md). - - Programming language support for Python: [SQLAlchemy](/vector-search/vector-search-integrate-with-sqlalchemy.md), [Peewee](/vector-search/vector-search-integrate-with-peewee.md), and [Django ORM](/vector-search/vector-search-integrate-with-django-orm.md). - - Sample applications and tutorials: perform semantic searches for documents using [Python](/vector-search/vector-search-get-started-using-python.md) or [SQL](/vector-search/vector-search-get-started-using-sql.md). + - [Vector data types](/ai/reference/vector-search-data-types.md), [vector index](/ai/reference/vector-search-index.md), and [vector functions and operators](/ai/reference/vector-search-functions-and-operators.md). + - Ecosystem integrations with [LangChain](/ai/integrations/vector-search-integrate-with-langchain.md), [LlamaIndex](/ai/integrations/vector-search-integrate-with-llamaindex.md), and [JinaAI](/ai/integrations/vector-search-integrate-with-jinaai-embedding.md). 
+ - Programming language support for Python: [SQLAlchemy](/ai/integrations/vector-search-integrate-with-sqlalchemy.md), [Peewee](/ai/integrations/vector-search-integrate-with-peewee.md), and [Django ORM](/ai/integrations/vector-search-integrate-with-django-orm.md). + - Sample applications and tutorials: perform semantic searches for documents using [Python](/ai/quickstart-via-python.md) or [SQL](/ai/quickstart-via-sql.md). - For more information, see [Vector search (beta) overview](/vector-search/vector-search-overview.md). + For more information, see [Vector search (beta) overview](/ai/concepts/vector-search-overview.md). -- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) now offers weekly email reports for organization owners. +- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) now offers weekly email reports for organization owners. These reports provide insights into the performance and activity of your clusters. By receiving automatic weekly updates, you can stay informed about your clusters and make data-driven decisions to optimize your clusters. @@ -282,7 +280,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c For more information, see [TiFlash node storage](/tidb-cloud/size-your-cluster.md#tiflash-node-storage). -- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.1](https://docs.pingcap.com/tidb/v7.5/release-7.5.1) to [v7.5.2](https://docs.pingcap.com/tidb/v7.5/release-7.5.2). +- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.1](https://docs.pingcap.com/tidb/stable/release-7.5.1) to [v7.5.2](https://docs.pingcap.com/tidb/stable/release-7.5.2). 
## June 4, 2024 @@ -292,15 +290,13 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c This feature enables you to replicate your databases between TiDB Cloud Dedicated clusters, ensuring rapid recovery in the event of a regional disaster. If you are in the `Project Owner` role, you can enable this feature by creating a new recovery group and assigning databases to the group. By replicating databases with recovery groups, you can improve disaster readiness, meet stricter availability SLAs, and achieve more aggressive Recovery Point Objectives (RPO) and Recovery Time Objectives (RTO). - For more information, see [Get started with recovery groups](/tidb-cloud/recovery-group-get-started.md). - -- Introduce billing and metering (beta) for the [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) columnar storage [TiFlash](/tiflash/tiflash-overview.md). +- Introduce billing and metering (beta) for the [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) columnar storage [TiFlash](/tiflash/tiflash-overview.md). Until June 30, 2024, columnar storage in TiDB Cloud Serverless clusters remains free with a 100% discount. After this date, each TiDB Cloud Serverless cluster will include a free quota of 5 GiB for columnar storage. Usage beyond the free quota will be charged. For more information, see [TiDB Cloud Serverless pricing details](https://www.pingcap.com/tidb-serverless-pricing-details/#storage). -- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) supports [Time to live (TTL)](/time-to-live.md). +- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) supports [Time to live (TTL)](/time-to-live.md). 
## May 28, 2024 @@ -350,12 +346,11 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **CLI changes** -- Introduce [TiDB Cloud CLI 1.0.0-beta.1](https://github.com/tidbcloud/tidbcloud-cli), built upon the new [TiDB Cloud API](/tidb-cloud/api-overview.md). The new CLI brings the following new features: +- Introduce [TiDB Cloud CLI 1.0.0-beta.1](https://github.com/tidbcloud/tidbcloud-cli), built upon the new [TiDB Cloud API](https://docs.pingcap.com/api/tidb-cloud-api-overview). The new CLI brings the following new features: - [Export data from TiDB Cloud Serverless clusters](/tidb-cloud/serverless-export.md) - [Import data from local storage into TiDB Cloud Serverless clusters](/tidb-cloud/ticloud-import-start.md) - [Authenticate via OAuth](/tidb-cloud/ticloud-auth-login.md) - - [Ask questions via TiDB Bot](/tidb-cloud/ticloud-ai.md) Before upgrading your TiDB Cloud CLI, note that this new CLI is incompatible with previous versions. For example, `ticloud cluster` in CLI commands is now updated to `ticloud serverless`. For more information, see [TiDB Cloud CLI reference](/tidb-cloud/cli-reference.md). @@ -369,11 +364,11 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Introduce two service plans for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters: **Free** and **Scalable**. +- Introduce two service plans for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters: **Free** and **Scalable**. To meet different user requirements, TiDB Cloud Serverless offers the free and scalable service plans. Whether you are just getting started or scaling to meet the increasing application demands, these plans provide the flexibility and capabilities you need. - For more information, see [Cluster plans](/tidb-cloud/select-cluster-tier.md#cluster-plans). 
+ For more information, see [Cluster plans](/tidb-cloud/select-cluster-tier.md). - Modify the throttling behavior for TiDB Cloud Serverless clusters upon reaching their usage quota. Now, once a cluster reaches its usage quota, it immediately denies any new connection attempts, thereby ensuring uninterrupted service for existing operations. @@ -383,7 +378,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.0](https://docs.pingcap.com/tidb/v7.5/release-7.5.0) to [v7.5.1](https://docs.pingcap.com/tidb/v7.5/release-7.5.1). +- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.5.0](https://docs.pingcap.com/tidb/stable/release-7.5.0) to [v7.5.1](https://docs.pingcap.com/tidb/stable/release-7.5.1). **Console changes** @@ -405,7 +400,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c **General changes** -- Upgrade the TiDB version of [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters from [v6.6.0](https://docs.pingcap.com/tidb/v6.6/release-6.6.0) to [v7.1.3](https://docs.pingcap.com/tidb/v7.1/release-7.1.3). +- Upgrade the TiDB version of [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v6.6.0](https://docs.pingcap.com/tidb/stable/release-6.6.0) to [v7.1.3](https://docs.pingcap.com/tidb/stable/release-7.1.3). ## February 20, 2024 @@ -462,7 +457,7 @@ This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-c For more information, see [Organization SSO Authentication](/tidb-cloud/tidb-cloud-org-sso-authentication.md). 
-- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.1.1](https://docs.pingcap.com/tidb/v7.1/release-7.1.1) to [v7.5.0](https://docs.pingcap.com/tidb/v7.5/release-7.5.0). +- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v7.1.1](https://docs.pingcap.com/tidb/stable/release-7.1.1) to [v7.5.0](https://docs.pingcap.com/tidb/stable/release-7.5.0). - The dual region backup feature for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) is now in General Availability (GA). diff --git a/tidb-cloud/releases/release-notes-2025.md b/tidb-cloud/releases/release-notes-2025.md new file mode 100644 index 0000000000000..2e7ec50ebb765 --- /dev/null +++ b/tidb-cloud/releases/release-notes-2025.md @@ -0,0 +1,849 @@ +--- +title: TiDB Cloud Release Notes in 2025 +summary: Learn about the release notes of TiDB Cloud in 2025. +--- + +# TiDB Cloud Release Notes in 2025 + +This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-cloud/) in 2025. + +## December 30, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - Support TiProxy (Beta). + + TiProxy, the official proxy component of PingCAP, is now available in beta for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. It provides enhanced connection management and load balancing to improve database reliability and performance. + + Highlights: + + - Maintains persistent client connections during scaling operations and rolling upgrades. + - Evenly distributes traffic across TiDB nodes for better resource utilization. + + For more information, see [Overview of TiProxy](/tidb-cloud/tiproxy-overview-for-cloud.md). + +- **TiDB Cloud Essential** + + - Support changefeeds (Beta). 
+ + The changefeed feature is now available in beta in both the [TiDB Cloud console](https://tidbcloud.com) and [TiDB Cloud CLI](/tidb-cloud/cli-reference.md) for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential). It enables you to stream data from TiDB Cloud to other data services, currently supporting Apache Kafka and MySQL as destinations. + + - Support configuring private link connections for downstream resources. + + Private link connections are now available in both the [TiDB Cloud console](https://tidbcloud.com) and [TiDB Cloud CLI](/tidb-cloud/cli-reference.md) for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential). This feature enables you to establish private and direct connectivity between TiDB Cloud and your downstream resources (such as MySQL and Apache Kafka). It is tailored for integration with changefeeds and other dataflow services that initiate connections from TiDB Cloud to your infrastructure. + + For more information, see [Private Link Connections for Dataflow](/tidb-cloud/serverless-private-link-connection.md). + +## December 16, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - Support duplicating changefeeds. + + You can now duplicate an existing changefeed while retaining its key configurations and routing information. This feature enables you to quickly recreate a failed changefeed or create a new changefeed with similar settings, reducing setup time and operational effort. + + For more information, see [Duplicate a changefeed](/tidb-cloud/changefeed-overview.md#duplicate-a-changefeed). + +**Console changes** + +- **TiDB Cloud Starter** + + - Support seamless AI integration with the Model Context Protocol (MCP) for [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) clusters. + + TiDB Cloud Starter now supports MCP, providing a unified and secure way to connect your TiDB Cloud Starter cluster to popular AI tools including Cursor, Claude Code, VS Code, and WindSurf. 
You can set up your connection once and begin querying your data with AI tools in minutes. + + To access this feature, click **Use with AI Tools** in the upper-right corner of your [cluster](https://tidbcloud.com/project/clusters) overview page. + +## December 9, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v8.5.3](https://docs.pingcap.com/tidb/stable/release-8.5.3/) to [v8.5.4](https://docs.pingcap.com/tidb/stable/release-8.5.4/). + +**Console changes** + +- **TiDB Cloud Starter and TiDB Cloud Essential** + + - Add a unified **Integrations** page at the cluster level for [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) and [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) clusters. + + - Consolidate all third-party integrations on the **Integrations** page for your cluster. The following list outlines these integrations, grouped by use case: + - **Deploy**: AWS Lambda, Cloudflare Workers, Gitpod, Netlify, Terraform, WordPress + - **Data**: Airbyte, Amazon AppFlow, dbt Labs, Looker Studio, n8n, Zapier + - **GUIs**: DBeaver, JetBrains DataGrip, MySQL Workbench, Navicat, ProxySQL, Visual Studio Code + - **Java**: JDBC, Hibernate, MyBatis, Spring Boot + - **Go**: Go-MySQL-Driver, GORM + - **Python**: Django, mysqlclient, MySQL Connector/Python, peewee, PyMySQL, SQLAlchemy + - **Node.js**: mysql.js, Next.js, node-mysql2, Prisma, Sequelize, TypeORM + - **Ruby**: mysql2, Rails + - Move the [Vercel](/tidb-cloud/integrate-tidbcloud-with-vercel.md) and [AWS Bedrock](/ai/integrations/vector-search-integrate-with-amazon-bedrock.md) integration entries to the cluster level to improve discoverability. + - Add **Suggest Integration** for requesting new integrations. + +**API changes** + +- TiDB Cloud IAM API (v1beta1) supports listing console audit logs. 
+ + The [List audit logs](https://docs.pingcap.com/tidbcloud/api/v1beta1/iam/#tag/Audit-Log/paths/~1auditLogs/get) endpoint provides programmatic access to console audit logs. You can use this endpoint to automatically retrieve audit logs and schedule regular backups to meet security and compliance requirements. + + For more information, see [TiDB Cloud IAM API](https://docs.pingcap.com/tidbcloud/api/v1beta1/iam/). + +## December 2, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - The Prometheus integration is now generally available (GA) for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. + + TiDB Cloud now manages Prometheus integrations at the cluster level, offering more granular control and configuration. This feature enables you to seamlessly ship the metrics of your [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster to Prometheus, allowing for advanced alerting in a unified platform. + + For integration steps, see [Integrate TiDB Cloud with Prometheus and Grafana](/tidb-cloud/monitor-prometheus-and-grafana-integration.md). + + To migrate existing Prometheus integrations to the cluster level, see [Migrate Prometheus Integrations](/tidb-cloud/migrate-prometheus-metrics-integrations.md). + +## November 18, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - Enhance the changefeed summary to include complete configuration details. + + Previously, you had to pause a changefeed to check its configuration and then resume it. The **Changefeed** page now displays the complete configuration directly in the summary view. This update keeps the edit and view modes consistent and introduces a redesigned layout for better readability. With this update, you can review the current configuration more efficiently. + + For more information, see [Changefeed Overview](/tidb-cloud/changefeed-overview.md). 
+ +## November 11, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - When you restore a [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster from a backup to a new cluster, you can now select a node storage type for the new cluster, such as [Standard storage](/tidb-cloud/size-your-cluster.md#standard-storage), instead of using the default storage type. + + This feature enables you to either restore the original configuration exactly or choose a different storage type that better meets your needs. + + For more information, see [Restore data to a new cluster](/tidb-cloud/backup-and-restore.md#restore-data-to-a-new-cluster). + +## November 4, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - When you connect to a [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster hosted on Google Cloud via VPC peering, you can now configure an IP range size between `/16` and `/18` directly in the [TiDB Cloud console](https://tidbcloud.com/). You no longer need to contact TiDB Cloud support for this configuration. + + For more information, see [Connect to TiDB Cloud Dedicated via VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md). + + - TiDB Cloud Dedicated now provides clearer guidance and messaging for the 4 vCPU node size. Use this node size only for testing, learning, and exploring TiDB Cloud features in non-production environments. + + For more information, see [Determine Your TiDB Size](/tidb-cloud/size-your-cluster.md). + +## October 28, 2025 + +**General changes** + +- **TiDB Cloud Starter and TiDB Cloud Essential** + + To improve connection stability and prevent unexpected disconnections during TiDB server restarts or maintenance, it is recommended that you set the maximum lifetime of your database connections to less than 30 minutes. 
+ + For more information, see [Configure the lifetime of connections](/develop/dev-guide-connection-parameters.md#configure-the-lifetime-of-connections). + +**API changes** + +- **TiDB Cloud Dedicated** + + Introduce the following [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) API endpoints for managing third-party monitoring integrations: + + - List integrations + - Create an integration + - Delete an integration + + For more information, see [TiDB Cloud Dedicated API](https://docs.pingcap.com/tidbcloud/api/v1beta1/dedicated/). + +## October 21, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) enhances the private endpoint feature for [Changefeeds](/tidb-cloud/changefeed-overview.md) to simplify configuration, improve security, and provide greater flexibility for data sinks. + + - **Simplified configuration**: private endpoint creation is now independent of changefeed creation, allowing multiple changefeeds within the same project to share a single private endpoint, thereby reducing redundant configurations. + - **Private link sinks for MySQL**: offers a more secure way to sink data to MySQL, and now also supports sinking data directly to another TiDB Cloud Dedicated cluster via private link. + - **Custom domain support**: when using self-hosted Kafka services, you can configure custom domains for data sinks to enhance security and make advertised listener updates more flexible without requiring server restarts. + + For more information, see [Set Up Private Endpoint for Changefeeds](/tidb-cloud/set-up-sink-private-endpoint.md). + + - [Prometheus integrations (Preview)](/tidb-cloud/monitor-prometheus-and-grafana-integration.md) are now available for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. 
+ + TiDB Cloud now manages Prometheus integrations at the cluster level, offering more granular control and configuration. This feature enables you to seamlessly ship the metrics of your [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster to Prometheus, allowing for advanced alerting in a unified platform. + + For more information, see [Integrate TiDB Cloud with Prometheus and Grafana](/tidb-cloud/monitor-prometheus-and-grafana-integration.md). + +## October 14, 2025 + +**General changes** + +- **TiDB Cloud Starter** + + - [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) no longer supports database audit logging. + + Currently, only [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) and [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) support database audit logging. Existing TiDB Cloud Starter clusters currently using database audit logging are not affected. + + - [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) removes the in-place restore feature, which means you can no longer restore a backup directly to the same cluster. This change helps prevent accidental overwrites of active production data and potential data loss. + + To restore your data, you can [restore the backup to a new cluster](/tidb-cloud/backup-and-restore-serverless.md#perform-the-restore). After validating the restored data, switch your application to the new cluster. Previously restored data in existing clusters remains intact, and no action is required unless you perform a new restore. + + For safer restore and migration workflows with more control and flexibility, consider using [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential). 
+ + - The [**Metrics**](/tidb-cloud/built-in-monitoring.md#view-the-metrics-page) page for [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) adds the following metrics for faster diagnosis and capacity planning: + + - `Lock-wait (P95/P99)`: monitors lock wait time percentiles to surface contention hotspots. + - `Idle Connection Duration (P99 incl. not/in txn)`: identifies long-lived idle connections, both in-transaction and not-in-transaction, to adjust pooler limits and timeouts. + +- **TiDB Cloud Essential** + + - [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) is in public preview on AWS and Alibaba Cloud. + + For applications experiencing growing workloads and needing scalability in real time, TiDB Cloud Essential provides the flexibility and performance to keep pace with your business growth. + + + + For more information, see [TiDB Cloud Essential Now Available in Public Preview on AWS and Alibaba Cloud](https://www.pingcap.com/blog/tidb-cloud-essential-now-available-public-preview-aws-alibaba-cloud/). + + + + - Database audit logging is now available in the [TiDB Cloud console](https://tidbcloud.com) for TiDB Cloud Essential and supports customizing rotation settings. + + You can configure database audit logs to be stored in TiDB Cloud, Amazon S3, Google Cloud Storage, Azure Blob Storage, or Alibaba Cloud OSS. + + Currently, this feature is in beta. For more information, see [Database Audit Logging for TiDB Cloud Essential](/tidb-cloud/essential-database-audit-logging.md). + + - TiDB Cloud Essential adds a new event `ResourceLimitation` that notifies you when Request Capacity Units (RCUs) consumption of your cluster reaches the configured maximum multiple times within one hour. + + Usage exceeding the limit might be throttled. To avoid service impact, consider increasing the maximum RCU. + + For more information about events, see [TiDB Cloud Cluster Events](/tidb-cloud/tidb-cloud-events.md). 
+ + - The [**Metrics**](/tidb-cloud/built-in-monitoring.md#view-the-metrics-page) page for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) adds the following metrics for faster diagnosis and capacity planning: + + - `Capacity vs Usage (RU/s)`: visualizes provisioned Request Unit (RU) capacity versus actual RU consumption to spot headroom and tune autoscaling. + - `Lock-wait (P95/P99)`: monitors lock wait time percentiles to surface contention hotspots. + - `Idle Connection Duration (P99 incl. not/in txn)`: identifies long-lived idle connections, both in-transaction and not-in-transaction, to adjust pooler limits and timeouts. + + For more information, see [TiDB Cloud Built-in Metrics](/tidb-cloud/built-in-monitoring.md). + +## September 30, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - Datadog and New Relic integrations are now generally available (GA) for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. + + TiDB Cloud now manages Datadog and New Relic integrations at the cluster level, offering more granular control and configuration. This feature enables you to seamlessly ship the metrics of your [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster to Datadog or New Relic, allowing for advanced alerting in a unified platform. + + For integration steps, see [Integrate TiDB Cloud with Datadog](/tidb-cloud/monitor-datadog-integration.md) and [Integrate TiDB Cloud with New Relic](/tidb-cloud/monitor-new-relic-integration.md). + + To migrate existing Datadog and New Relic integrations to the cluster level, see [Migrate Datadog and New Relic Integrations](/tidb-cloud/migrate-metrics-integrations.md). + +## September 23, 2025 + +**General changes** + +- **TiDB Cloud Dedicated** + + - Support user-controlled splitting of `UPDATE` events in [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) changefeeds. 
+ + In TiDB Cloud Dedicated clusters, you can configure whether to keep `UPDATE` events as raw events or split them into separate `DELETE` and `INSERT` events. This feature provides greater flexibility for advanced replication scenarios. + + This feature is supported only for non-SQL destinations such as Apache Kafka and Amazon S3. For more information, see [Sink to Apache Kafka](/tidb-cloud/changefeed-sink-to-apache-kafka.md), [Sink to Apache Pulsar](/tidb-cloud/changefeed-sink-to-apache-pulsar.md), and [Sink to Cloud Storage](/tidb-cloud/changefeed-sink-to-cloud-storage.md). + + For more information about the splitting behavior, see [Split primary or unique key `UPDATE` events for non-MySQL sinks](https://docs.pingcap.com/tidb/stable/ticdc-split-update-behavior/#split-primary-or-unique-key-update-events-for-non-mysql-sinks). + + - Provide a new node size for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on Google Cloud: `32 vCPU, 64 GiB`. + + This new node size is available for TiDB nodes. + +## September 16, 2025 + +**General changes** + +- **TiDB Cloud Starter** + + - Upgrade the TiDB version of [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v7.5.2](https://docs.pingcap.com/tidb/stable/release-7.5.2) to [v7.5.6](https://docs.pingcap.com/tidb/stable/release-7.5.6). + +- **TiDB Cloud Dedicated** + + - Encryption at Rest with Customer-Managed Encryption Keys (CMEK) is available for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on Azure. + + This feature enables you to secure your data at rest by using an encryption key that you control. CMEK provides the following benefits: + + - Data security: you own and manage the encryption key, which ensures that your data is protected and under your control. + - Compliance: using CMEK helps you meet regulatory and compliance requirements for data encryption. 
+ - Flexibility: you can enable CMEK when you create a project and complete CMEK configurations before you create a cluster. + + To enable this feature, perform the following steps: + + 1. In the [TiDB Cloud console](https://tidbcloud.com), create a CMEK-enabled project. + 2. Complete the CMEK configuration for the project. + 3. Create a TiDB Cloud Dedicated cluster hosted on Azure in the same region as your CMEK configuration. + + For more information, see [Encryption at Rest Using Customer-Managed Encryption Keys on Azure](/tidb-cloud/tidb-cloud-encrypt-cmek-azure.md). + +## September 9, 2025 + +**High availability changes** + +- **TiDB Cloud Starter** + + - For newly created [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) clusters, only zonal high availability is enabled, and it is not configurable. + - For existing TiDB Cloud Starter clusters with regional high availability enabled before **September 9, 2025**, regional high availability remains supported and is not affected. + + + +- **TiDB Cloud Essential** + + - For newly created [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) clusters, regional high availability is enabled by default, and you can change it to zonal high availability as needed during cluster creation. + + For more information, see [High Availability in TiDB Cloud Starter and Essential](/tidb-cloud/serverless-high-availability.md). + + + +## September 2, 2025 + +**General changes** + + + +- **TiDB Cloud Essential** + + - Support three new Alibaba Cloud regions for [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) clusters: `Jakarta (ap-southeast-5)`, `Mexico (na-south-1)`, and `Tokyo (ap-northeast-1)`. 
+ +- **TiDB Cloud Dedicated** + + - Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v8.5.2](https://docs.pingcap.com/tidb/stable/release-8.5.2/) to [v8.5.3](https://docs.pingcap.com/tidb/stable/release-8.5.3/). + + + + + +- **TiDB Cloud Dedicated** + + - Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v8.5.2](https://docs.pingcap.com/tidb/stable/release-8.5.2/) to [v8.5.3](https://docs.pingcap.com/tidb/stable/release-8.5.3/). + + + +## August 26, 2025 + +**General changes** + +- **TiDB Cloud Starter** + + - Introduce Auto Embedding (Beta) in [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter), making it simple to convert your text into vectors without additional setup. This feature enables faster development of semantic search, RAG, reranking, and classification in TiDB Cloud with less integration overhead. + + - **Auto Embedding with popular LLM providers**: Amazon Titan, OpenAI, Cohere, Gemini, Jina AI, Hugging Face, and NVIDIA NIM. + - **Native integration with AWS Bedrock**: managed embedding models with a free quota, including Amazon Titan and Cohere text embedding models from AWS Bedrock. + - **SQL and Python support**, with code examples for creating, storing, and querying embeddings. + + For more information, see [Auto Embedding](https://docs.pingcap.com/tidbcloud/vector-search-auto-embedding-overview/?plan=starter). + +- **TiDB Cloud Dedicated** + + - [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) no longer supports the Index Insight (beta) feature. + + It is recommended that you use [Index Advisor](/index-advisor.md) instead, which is available for TiDB v8.5.0 and later versions. Index Advisor introduces the `RECOMMEND INDEX` SQL statement, which helps optimize your workload by recommending indexes that improve query performance. 
+ + - You can now manually disable the Point-in-time Restore feature on [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters with weekly backups enabled. + + This enhancement helps reduce costs for clusters that do not require Point-in-time Restore for high RPO protection. + + For more information, see [Back Up and Restore TiDB Cloud Dedicated Data](/tidb-cloud/backup-and-restore.md). + +## August 12, 2025 + +**General changes** + + + +- **TiDB Cloud Starter** + + - Rename "TiDB Cloud Serverless" to "TiDB Cloud Starter". + + The auto-scaling entry plan is now named "TiDB Cloud Starter" to better reflect its role for new users. All features, pricing, and the free usage quota remain unchanged. + + Starting August 12, 2025 (PDT), your existing Serverless clusters will appear as Starter in the [TiDB Cloud console](https://tidbcloud.com). Your connection strings, endpoints, and data will remain unchanged, so you do not need to make any code changes or schedule downtime. + + - TiDB Cloud Starter is in preview on Alibaba Cloud. + +- **TiDB Cloud Essential** + + [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) is in preview on Alibaba Cloud. + + TiDB Cloud Essential on Alibaba Cloud has been in a limited public preview since May 2025. This is the first time Essential is officially included in the release notes. At this stage, Essential on Alibaba Cloud offers a feature set aligned with Starter, available in the Alibaba Cloud Singapore region. + + How to try it: + + - From the [TiDB Cloud console](https://tidbcloud.com/), choose Alibaba Cloud as the cloud provider when creating a cluster to view the Essential option. + - You can also access Essential via the [Alibaba Cloud Marketplace listing](https://www.alibabacloud.com/en/marketplace/tidb?_p_lc=1). + + Next, we plan to expand region coverage on Alibaba Cloud and add AWS support. 
+ + If you try Essential on Alibaba Cloud during this preview, you can share feedback through our web console or join our community on [Slack](https://tidbcommunity.slack.com/archives/CH7TTLL7P) or [Discord](https://discord.gg/ukhXbn69Nx). + +- **TiDB Cloud Dedicated** + + - [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) on Google Cloud now supports more than 8 Google Private Service Connect (PSC) connections per region by optimizing the NAT subnet allocation strategy. + + For more information, see [Connect to a TiDB Cloud Dedicated Cluster via Google Cloud Private Service Connect](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md#restrictions). + + - Optimize [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) metrics: + + - In the [**Advanced**](/tidb-cloud/built-in-monitoring.md#advanced) category, add the **Affected Rows**, **Leader Count**, and **Region Count** metrics to improve diagnostics. + - In the [**Server**](/tidb-cloud/built-in-monitoring.md#server) category, refine the **TiKV IO Bps** metric to improve accuracy and consistency. + + For more information, see [TiDB Cloud Built-in Metrics](/tidb-cloud/built-in-monitoring.md). + + + + + +- **TiDB Cloud Starter** + + Rename "TiDB Cloud Serverless" to "TiDB Cloud Starter". + + The auto-scaling entry plan is now named "TiDB Cloud Starter" to better reflect its role for new users. All features, pricing, and the free usage quota remain unchanged. + + Starting August 12, 2025 (PDT), your existing Serverless clusters will appear as Starter in the [TiDB Cloud console](https://tidbcloud.com). Your connection strings, endpoints, and data will remain unchanged, so you do not need to make any code changes or schedule downtime. 
+ +- **TiDB Cloud Dedicated** + + - [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) on Google Cloud now supports more than 8 Google Private Service Connect (PSC) connections per region by optimizing the NAT subnet allocation strategy. + + For more information, see [Connect to a TiDB Cloud Dedicated Cluster via Google Cloud Private Service Connect](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md#restrictions). + + - Optimize [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) metrics: + + - In the [**Advanced**](/tidb-cloud/built-in-monitoring.md#advanced) category, add the **Affected Rows**, **Leader Count**, and **Region Count** metrics to improve diagnostics. + - In the [**Server**](/tidb-cloud/built-in-monitoring.md#server) category, refine the **TiKV IO Bps** metric to improve accuracy and consistency. + + For more information, see [TiDB Cloud Built-in Metrics](/tidb-cloud/built-in-monitoring.md). + + + +**API changes** + +- Introduce TiDB Cloud Dedicated API (v1beta1) for managing the following resources automatically and efficiently: + + - **Cluster**: manage your TiDB Cloud Dedicated clusters with greater flexibility. + - **Region**: show all available cloud regions in which you can deploy your TiDB Cloud Dedicated cluster. + - **Private endpoint connection**: set up secure and private connections for your clusters. + - **Import**: manage data import tasks for your clusters. + + For more information, see [TiDB Cloud Dedicated API](https://docs.pingcap.com/tidbcloud/api/v1beta1/dedicated/). + +- Introduce TiDB Cloud Starter and Essential API (v1beta1) for managing the following resources automatically and efficiently: + + - **Cluster**: manage your TiDB Cloud Starter or Essential clusters with greater flexibility. + - **Branch**: manage the branches of your clusters. + - **Export**: manage data export tasks for your clusters. 
+ - **Import**: manage data import tasks for your clusters. + + For more information, see [TiDB Cloud Starter and Essential API](https://docs.pingcap.com/tidbcloud/api/v1beta1/serverless/). + +- TiDB Cloud IAM API (v1beta1) supports role-based access control (RBAC) for API key management at both the organization and project levels. + + You can set API key roles at the organization level or project level to improve security and access control. + + For more information, see [TiDB Cloud IAM API](https://docs.pingcap.com/tidbcloud/api/v1beta1/iam/). + +## July 31, 2025 + +**General changes** + +- Enhanced Datadog and New Relic integrations are now available for preview. + + Key enhancements: + + - Rebuild the integration backend with the optimized isolation architecture to minimize metric gaps. + - Add more monitoring metrics based on user needs. + - Refine metric rules for better consistency. + + These enhancements deliver more accurate monitoring and strengthen the reliability of Datadog and New Relic integrations. + + Rollout plan: + + This preview version is now available to organizations without existing Datadog or New Relic integrations. For organizations with existing Datadog or New Relic integrations, we will proactively reach out to you to coordinate a suitable migration plan and timeline next month. + + For more information, see [Integrate TiDB Cloud with Datadog (Preview)](/tidb-cloud/monitor-datadog-integration.md) and [Integrate TiDB Cloud with New Relic (Preview)](/tidb-cloud/monitor-new-relic-integration.md). + +## July 22, 2025 + +**General changes** + +- Provide a new node size for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on Google Cloud: `32 vCPU, 128 GiB`. + + This new size is available for TiDB, TiKV, and TiFlash nodes. + +- Improve the TiKV scaling process in [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) to enhance cluster stability. 
+ + When you [change the vCPU and RAM size](/tidb-cloud/scale-tidb-cluster.md#change-vcpu-and-ram) of TiKV nodes, TiDB Cloud automatically checks whether the cluster's internal service requires additional capacity to support the new configuration. + + - If an expansion is required, TiDB Cloud prompts you for confirmation before proceeding. + - If the current internal service capacity is already larger than the required size after scaling, TiDB Cloud retains the existing configuration of the internal service to avoid unnecessary changes that might affect cluster stability. + +**Console changes** + +- Enhance the cloud storage data import experience for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters. + + The import process is now streamlined into a 3-step wizard with intelligent pre-checks. This new wizard guides you through connection setup, file mapping, and bucket scanning. With the scanning, TiDB Cloud shows you exactly which files will be imported and their target destinations before the import, significantly reducing configuration complexity and preventing import failures. + + For more information, see the following documentation: + + - [Import Sample Data into TiDB Cloud Serverless](/tidb-cloud/import-sample-data-serverless.md) + - [Import CSV Files from Cloud Storage into TiDB Cloud Serverless](/tidb-cloud/import-csv-files-serverless.md) + - [Import Apache Parquet Files from Cloud Storage into TiDB Cloud Serverless](/tidb-cloud/import-parquet-files-serverless.md) + +## July 15, 2025 + +**General changes** + +- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v8.1.2](https://docs.pingcap.com/tidb/stable/release-8.1.2/) to [v8.5.2](https://docs.pingcap.com/tidb/stable/release-8.5.2/). 
+ + Compared with v8.1.2, v8.5.2 includes new features, improvements, and bug fixes released in [v8.2.0-DMR](https://docs.pingcap.com/tidb/stable/release-8.2.0/), [v8.3.0-DMR](https://docs.pingcap.com/tidb/stable/release-8.3.0/), [v8.4.0-DMR](https://docs.pingcap.com/tidb/stable/release-8.4.0/), [v8.5.0](https://docs.pingcap.com/tidb/stable/release-8.5.0/), [v8.5.1](https://docs.pingcap.com/tidb/stable/release-8.5.1/), and [v8.5.2](https://docs.pingcap.com/tidb/stable/release-8.5.2/). + +- Support auditing the `BackupCompleted` event to enhance console audit logging for backup activities. + + This enhancement lets you log backup completion activities to meet security and compliance requirements. + + For more information, see [Console Audit Logging](/tidb-cloud/tidb-cloud-console-auditing.md). + +- Support filtering column values in [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) changefeeds. + + You can now use expressions to filter specific column values in changefeeds to exclude irrelevant data at the source. This feature enables fine-grained filtering of DML events, helping you reduce resource consumption and improve performance. + + For more information, see [Changefeed](/tidb-cloud/changefeed-overview.md). + +## June 24, 2025 + +**General changes** + +- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) database audit logging (beta) is now available upon request. This feature lets you record a history of user access details (such as any SQL statements executed) in logs. + + To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com) and click **Request Support**. Then, fill in "Apply for TiDB Cloud Serverless database audit logging" in the Description field and click **Submit**. + +- [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) supports user-controlled log redaction. 
+ + You can now enable or disable log redaction for your TiDB Cloud Dedicated clusters to manage the redaction status of cluster logs by yourself. + + For more information, see [User-Controlled Log Redaction](/tidb-cloud/tidb-cloud-log-redaction.md). + +- Encryption at Rest with Customer-Managed Encryption Keys (CMEK) is now generally available (GA) for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on AWS. + + This feature enables you to secure your data at rest by leveraging a symmetric encryption key that you manage through Key Management Service (KMS). + + For more information, see [Encryption at Rest Using Customer-Managed Encryption Keys on AWS](/tidb-cloud/tidb-cloud-encrypt-cmek-aws.md). + +## June 17, 2025 + +**General changes** + +- For [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, the maximum storage size of TiKV nodes with 16 vCPU and 32 vCPU is changed from **6144 GiB** to **4096 GiB**. + + For more information, see [TiKV node storage size](/tidb-cloud/size-your-cluster.md#tikv-node-storage-size). + +**Console changes** + +- Revamp the left navigation pane to improve the overall navigation experience. + + - A new icon is now available in the upper-left corner, letting you easily hide or show the left navigation pane whenever you need. + - A combo box is now available in the upper-left corner, letting you quickly switch between organizations, projects, and clusters, all from one central location. + + + + - The entries shown on the left navigation pane now dynamically adapt to your current selection in the combo box, helping you focus on the most relevant functionalities. + - For your quick access, **Support**, **Notification**, and your account entries are now consistently displayed at the bottom of the left navigation pane on all console pages. 
+ +## June 4, 2025 + +**General changes** + +- [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) on Microsoft Azure is now available in public preview. + + With this launch, TiDB Cloud now supports all three major public cloud platforms — AWS, Google Cloud, and Azure, which enables you to deploy TiDB Cloud Dedicated clusters wherever best fits your business needs and cloud strategy. + + - All core features available on AWS and Google Cloud are fully supported on Azure. + - Azure support is currently available in three regions: East US 2, Japan East, and Southeast Asia, with more regions coming soon. + - TiDB Cloud Dedicated clusters on Azure require TiDB version v7.5.3 or later. + + To quickly get started with TiDB Cloud Dedicated on Azure, see the following documentation: + + - [Create a TiDB Cloud Dedicated Cluster on Azure](/tidb-cloud/create-tidb-cluster.md) + - [Connect a TiDB Cloud Dedicated Cluster via Azure Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md) + - [Import Data into TiDB Cloud Dedicated Cluster on Azure](/tidb-cloud/import-csv-files.md) + +- The Prometheus integration provides more metrics to enhance monitoring capabilities of [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. + + Now you can integrate additional metrics, such as `tidbcloud_disk_read_latency` and `tidbcloud_kv_request_duration`, into Prometheus to track more aspects of your TiDB Cloud Dedicated performance. + + For more information on available metrics and how to enable them for both existing and new users, see [Integrate TiDB Cloud with Prometheus and Grafana (Beta)](/tidb-cloud/monitor-prometheus-and-grafana-integration.md#metrics-available-to-prometheus). + +- TiKV [Standard](/tidb-cloud/size-your-cluster.md#standard-storage) and [Performance](/tidb-cloud/size-your-cluster.md#performance-and-plus-storage) storage pricing is officially released. 
+ + The discount period ends at **00:00 UTC on June 5, 2025**. After that, the price returns to the standard price. For more information about TiDB Cloud Dedicated prices, see [TiDB Cloud Dedicated Pricing Details](https://www.pingcap.com/tidb-dedicated-pricing-details/#node-cost). + +**Console changes** + +- Enhance the interactive experience when configuring the size of TiFlash nodes of [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. + + You can now use a toggle switch to control the TiFlash configuration when creating a TiDB Cloud Dedicated cluster, which makes the configuration experience more intuitive and seamless. + +## May 27, 2025 + +**General changes** + +- Support streaming data to [Apache Pulsar](https://pulsar.apache.org) with changefeeds for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. + + This feature enables you to integrate your TiDB Cloud Dedicated cluster with a wider range of downstream systems, and accommodates additional data integration requirements. To use this feature, make sure that your TiDB Cloud Dedicated cluster version is v7.5.1 or later. + + For more information, see [Sink to Apache Pulsar](/tidb-cloud/changefeed-sink-to-apache-pulsar.md). + +## May 13, 2025 + +**General changes** + +- Full-text search (beta) is now available in [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) for AI applications. + + TiDB Cloud Serverless now supports full-text search (beta), enabling AI and Retrieval-Augmented Generation (RAG) applications to retrieve content by exact keywords. This complements vector search, which retrieves content by semantic similarity. Combining both methods significantly improves retrieval accuracy and answer quality in RAG workflows. Key features include: + + - Direct text search: query string columns directly without the need for embeddings. 
+ + - Multilingual support: automatically detects and analyzes text in multiple languages, even within the same table, without requiring language specification. + - Relevance-based ranking: results are ranked using the industry-standard BM25 algorithm for optimal relevance. + - Native SQL compatibility: seamlessly use SQL features such as filtering, grouping, and joining with full-text search. + + To get started, see [Full Text Search with SQL](/ai/guides/vector-search-full-text-search-sql.md) or [Full Text Search with Python](/ai/guides/vector-search-full-text-search-python.md). + +- Increase the maximum TiFlash node storage for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters: + + - For 8 vCPU TiFlash, from 2048 GiB to 4096 GiB + - For 32 vCPU TiFlash, from 4096 GiB to 8192 GiB + + This enhancement increases the analytics data storage capacity of your TiDB Cloud Dedicated cluster, improves workload scaling efficiency, and accommodates growing data requirements. + + For more information, see [TiFlash node storage](/tidb-cloud/size-your-cluster.md#tiflash-node-storage). + +- Enhance the maintenance window configuration experience by providing intuitive options to configure and reschedule maintenance tasks. + + For more information, see [Configure maintenance window](/tidb-cloud/configure-maintenance-window.md). + +- Extend the discount period for TiKV [Standard](/tidb-cloud/size-your-cluster.md#standard-storage) and [Performance](/tidb-cloud/size-your-cluster.md#performance-and-plus-storage) storage types. The promotion now ends on June 5, 2025. After this date, pricing will return to the standard rate. + +**Console changes** + +- Refine the **Backup Setting** page layout to improve the backup configuration experience in [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. + + For more information, see [Back Up and Restore TiDB Cloud Dedicated Data](/tidb-cloud/backup-and-restore.md). 
+ +## April 22, 2025 + +**General changes** + +- Data export to Alibaba Cloud OSS is now supported. + + [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters now support exporting data to [Alibaba Cloud Object Storage Service (OSS)](https://www.alibabacloud.com/en/product/object-storage-service) using an [AccessKey pair](https://www.alibabacloud.com/help/en/ram/user-guide/create-an-accesskey-pair). + + For more information, see [Export Data from TiDB Cloud Serverless](/tidb-cloud/serverless-export.md#alibaba-cloud-oss). + +- Upgrade the TiDB version of [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v7.1.3](https://docs.pingcap.com/tidb/stable/release-7.1.3) to [v7.5.2](https://docs.pingcap.com/tidb/stable/release-7.5.2). + +## April 15, 2025 + +**General changes** + +- Support importing data from [Alibaba Cloud Object Storage Service (OSS)](https://www.alibabacloud.com/en/product/object-storage-service) into [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters. + + This feature simplifies data migration to TiDB Cloud Serverless. You can use an AccessKey pair to authenticate. + + For more information, see the following documentation: + + - [Import CSV Files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud Serverless](/tidb-cloud/import-csv-files-serverless.md) + - [Import Apache Parquet Files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud Serverless](/tidb-cloud/import-parquet-files-serverless.md) + +## April 1, 2025 + +**General changes** + +- The [TiDB Node Groups](/tidb-cloud/tidb-node-group-overview.md) feature is now generally available (GA) for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on AWS and Google Cloud. 
+ + This feature enables **fine-grained computing resource isolation** within a single cluster, helping you optimize performance and resource allocation for multi-tenant or multi-workload scenarios. + + **Key benefits:** + + - **Resource isolation**: + + - Group TiDB nodes into logically isolated units, ensuring workloads in one group do not affect other groups. + - Prevent resource contention between applications or business units. + + - **Simplified management**: + + - Manage all node groups within a single cluster, reducing operational overhead. + - Scale groups independently based on demand. + + For more information about the benefits, see [the technical blog](https://www.pingcap.com/blog/tidb-cloud-node-groups-scaling-workloads-predictable-performance/). To get started, see [Manage TiDB Node Groups](/tidb-cloud/tidb-node-group-management.md). + +- Introduce the [Standard storage](/tidb-cloud/size-your-cluster.md#standard-storage) type for TiKV nodes in [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on AWS. + + The Standard storage type is ideal for most workloads, providing a balance between performance and cost efficiency. + + **Key benefits:** + + - **Improved performance**: Reserves sufficient disk resources for Raft logs, reducing I/O contention between Raft and data storage, thereby improving both the read and write performance of TiKV. + - **Enhanced stability**: Isolates critical Raft operations from data workloads, ensuring more predictable performance. + - **Cost efficiency**: Delivers higher performance at a competitive price compared with the previous storage type. + + **Availability:** + + The Standard storage type is automatically applied to new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters created on or after April 1, 2025, hosted on AWS, with supported versions (versions >= 7.5.5, 8.1.2, or 8.5.0). 
Existing clusters still use the previous [Basic storage](/tidb-cloud/size-your-cluster.md#basic-storage) type, and no migration is needed. + + The price of the Standard storage differs from that of the Basic storage. For more information, see [Pricing](https://www.pingcap.com/tidb-dedicated-pricing-details/). + +## March 25, 2025 + +**Console changes** + +- Support firewall rules for public endpoints in [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters. + + You can now configure firewall rules for TiDB Cloud Serverless clusters to control access via public endpoints. Specify allowed IP addresses or ranges directly in the [TiDB Cloud console](https://tidbcloud.com/) to enhance security. + + For more information, see [Configure TiDB Cloud Serverless Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md). + +## March 18, 2025 + +**General changes** + +- Support creating TiDB node groups for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters deployed on Google Cloud to enhance resource management flexibility. + + For more information, see [Overview of TiDB Node Group](/tidb-cloud/tidb-node-group-overview.md). + +- Support storing database audit log files in TiDB Cloud for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters deployed on AWS. + + You can download these audit log files directly from TiDB Cloud. Note that this feature is only available upon request. + + For more information, see [Database Audit Logging](/tidb-cloud/tidb-cloud-auditing.md). + +- Enhance TiDB Cloud account security by improving the management of multi-factor authentication (MFA). This feature applies to password-based logins for TiDB Cloud. + + For more information, see [Password Authentication](/tidb-cloud/tidb-cloud-password-authentication.md). 
+ +## February 18, 2025 + +**Console changes** + +- Introduce Connected Care, the new support services for TiDB Cloud. + + The Connected Care services are designed to strengthen your connection with TiDB Cloud through modern communication tools, proactive support, and advanced AI capabilities, delivering a seamless and customer-centric experience. + + The Connected Care services introduce the following features: + + - **Clinic service**: Advanced monitoring and diagnostics to optimize performance. + - **AI chat in IM**: Get immediate AI assistance through an instant message (IM) tool. + - **IM subscription for alerts and ticket updates**: Stay informed with alerts and ticket progress via IM. + - **IM interaction for support tickets**: Create and interact with support tickets through an IM tool. + + For more information, see [Connected Care Overview](/tidb-cloud/connected-care-overview.md). + +- Support importing data from GCS and Azure Blob Storage into [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters. + + TiDB Cloud Serverless now supports importing data from Google Cloud Storage (GCS) and Azure Blob Storage. You can use a Google Cloud service account key or an Azure shared access signature (SAS) token to authenticate. This feature simplifies data migration to TiDB Cloud Serverless. + + For more information, see [Import CSV Files from Amazon S3, GCS, or Azure Blob Storage into TiDB Cloud Serverless](/tidb-cloud/import-csv-files-serverless.md) and [Import Apache Parquet Files from Amazon S3, GCS, or Azure Blob Storage into TiDB Cloud Serverless](/tidb-cloud/import-parquet-files-serverless.md). + +## January 21, 2025 + +**Console changes** + +- Support importing a single local CSV file of up to 250 MiB per task to [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters, increased from the previous limit of 50 MiB. 
+ + For more information, see [Import Local Files to TiDB Cloud](/tidb-cloud/tidb-cloud-import-local-files.md). + +## January 14, 2025 + +**General changes** + +- Support a new AWS region for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters: `Jakarta (ap-southeast-3)`. + +- Introduce the Notification feature, which enables you to stay informed instantly with TiDB Cloud updates and alerts through the [TiDB Cloud console](https://tidbcloud.com/). + + For more information, see [Notifications](/tidb-cloud/notifications.md). + +## January 2, 2025 + +**General changes** + +- Support creating TiDB node groups for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters to enhance resource management flexibility. + + For more information, see [Overview of TiDB Node Group](/tidb-cloud/tidb-node-group-overview.md). + +- Support connecting [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters to generic Kafka in AWS and Google Cloud through Private Connect (beta). + + Private Connect leverages Private Link or Private Service Connect technologies from cloud providers to enable changefeeds in the TiDB Cloud VPC to connect to Kafka in customers' VPCs using private IP addresses, as if those Kafkas were hosted directly within the TiDB Cloud VPC. This feature helps prevent VPC CIDR conflicts and meets security compliance requirements. + + - For Apache Kafka in AWS, follow the instructions in [Set Up Self-Hosted Kafka Private Link Service in AWS](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md) to configure the network connection. + + - For Apache Kafka in Google Cloud, follow the instructions in [Set Up Self-Hosted Kafka Private Service Connect in Google Cloud](/tidb-cloud/setup-self-hosted-kafka-private-service-connect.md) to configure the network connection. 
+ + Note that using this feature incurs additional [Private Data Link costs](/tidb-cloud/tidb-cloud-billing-ticdc-rcu.md#private-data-link-cost). + + For more information, see [Changefeed Sink to Apache Kafka](/tidb-cloud/changefeed-sink-to-apache-kafka.md#network). + +- Introduce additional configurable options for Kafka changefeeds: + + - Support using the Debezium protocol. Debezium is a tool for capturing database changes. It converts each captured database change into a message called an event, and sends these events to Kafka. For more information, see [TiCDC Debezium Protocol](https://docs.pingcap.com/tidb/v8.1/ticdc-debezium). + + - Support defining a single partition dispatcher for all tables, or different partition dispatchers for different tables. + + - Introduce two new dispatcher types for the partition distribution of Kafka messages: timestamp and column value. + + For more information, see [Sink to Apache Kafka](/tidb-cloud/changefeed-sink-to-apache-kafka.md). + +- Enhance roles in TiDB Cloud: + + - Introduce the `Project Viewer` and `Organization Billing Viewer` roles to enhance granular access control on TiDB Cloud. + + - Rename the following roles: + + - `Organization Member` to `Organization Viewer` + - `Organization Billing Admin` to `Organization Billing Manager` + - `Organization Console Audit Admin` to `Organization Console Audit Manager` + + For more information, see [Identity Access Management](/tidb-cloud/manage-user-access.md#organization-roles). + +- Regional high availability (beta) for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) clusters. + + This feature is designed for workloads that require maximum infrastructure redundancy and business continuity. Key functions include: + + - Nodes are distributed across multiple availability zones to ensure high availability in the event of a zone failure. 
+ - Critical OLTP (Online Transactional Processing) components, such as PD and TiKV, are replicated across availability zones for redundancy. + - Automatic failover minimizes service disruption during a primary zone failure. + + This feature is currently available only in the AWS Tokyo (ap-northeast-1) region and can be enabled only during cluster creation. + + For more information, see [High Availability in TiDB Cloud Serverless](/tidb-cloud/serverless-high-availability.md). + +- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v8.1.1](https://docs.pingcap.com/tidb/stable/release-8.1.1) to [v8.1.2](https://docs.pingcap.com/tidb/stable/release-8.1.2). + +**Console changes** + +- Strengthen the data export service: + + - Support exporting data from [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#starter) to Google Cloud Storage and Azure Blob Storage through the [TiDB Cloud console](https://tidbcloud.com/). + + - Support exporting data in Parquet files through the [TiDB Cloud console](https://tidbcloud.com/). + + For more information, see [Export Data from TiDB Cloud Serverless](/tidb-cloud/serverless-export.md) and [Configure External Storage Access for TiDB Cloud Serverless](/tidb-cloud/configure-external-storage-access.md). \ No newline at end of file diff --git a/tidb-cloud/releases/tidb-cloud-release-notes.md b/tidb-cloud/releases/tidb-cloud-release-notes.md new file mode 100644 index 0000000000000..b8bd2b9c31ad8 --- /dev/null +++ b/tidb-cloud/releases/tidb-cloud-release-notes.md @@ -0,0 +1,114 @@ +--- +title: TiDB Cloud Release Notes in 2026 +summary: Learn about the release notes of TiDB Cloud in 2026. +aliases: ['/tidbcloud/supported-tidb-versions','/tidbcloud/release-notes'] +--- + +# TiDB Cloud Release Notes in 2026 + +This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-cloud/) in 2026. 
+ +## February 10, 2026 + +**General changes** + +- **TiDB Cloud Starter** + + - Upgrade the default TiDB version of new [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) clusters from [v7.5.6](https://docs.pingcap.com/tidb/stable/release-7.5.6) to [v8.5.3](https://docs.pingcap.com/tidb/stable/release-8.5.3). + +- **TiDB Cloud Essential** + + - Support built-in alerting. + + Built-in alerting enables you to subscribe to receive instant alerts through email, Slack, Zoom, Flashduty, and PagerDuty. You can also customize alerts by defining specific thresholds for each alert type. + + For more information, see [TiDB Cloud Built-in Alerting](/tidb-cloud/monitor-built-in-alerting.md). + +- **TiDB Cloud Dedicated** + + - Support Private Link connectivity for data imports from Azure Blob Storage. + + When importing data from Azure Blob Storage into a [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster, you can now select Private Link as the connectivity method to connect via an Azure private endpoint instead of the public internet. This feature enables secure, network-isolated data imports for storage accounts that restrict public access. + + For more information, see [Import Sample Data (SQL Files) from Cloud Storage](/tidb-cloud/import-sample-data.md), [Import CSV Files from Cloud Storage](/tidb-cloud/import-csv-files.md), and [Import Apache Parquet Files from Cloud Storage](/tidb-cloud/import-parquet-files.md). + + - Add "Enable/Disable Public Endpoint" events to the Console Audit Logging in TiDB Cloud for better security tracking. + +## February 3, 2026 + +**General changes** + +- **TiDB Cloud Dedicated** + + - Support sinking changefeed data to Azure Blob Storage. + + [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) now supports sinking changefeed data directly to Azure Blob Storage. 
This feature enables Azure-based users to archive change data efficiently for downstream analytics and long-term retention. It also reduces costs by eliminating the need for intermediate message queues and maintains format compatibility with existing Amazon S3 and Google Cloud Storage (GCS) sinks. + + For more information, see [Sink to Cloud Storage](/tidb-cloud/changefeed-sink-to-cloud-storage.md). + +## January 27, 2026 + +**General changes** + +- **TiDB Cloud Dedicated** + + - Support Flashduty and PagerDuty as alert subscription channels. + + These integrations are designed to streamline your incident management process and improve operational reliability. + + For more information, see [Subscribe via Flashduty](/tidb-cloud/monitor-alert-flashduty.md) and [Subscribe via PagerDuty](/tidb-cloud/monitor-alert-pagerduty.md). + +## January 20, 2026 + +**General changes** + +- **TiDB Cloud Starter** + + - Display real client IP addresses in the [Slow Query](/tidb-cloud/tune-performance.md#slow-query) view and the [`INFORMATION_SCHEMA.PROCESSLIST`](/information-schema/information-schema-processlist.md) table (beta). + + TiDB Cloud now supports client IP pass-through, enabling the Slow Query view and the `INFORMATION_SCHEMA.PROCESSLIST` table, to display the real client IP address instead of the Load Balancer (LB) IP. This feature helps accurately identify the true source of database requests for better troubleshooting and analysis. + + Currently, this feature is in beta and is available only in the AWS region `Frankfurt (eu-central-1)`. + +- **TiDB Cloud Essential** + + - Support data migration (beta). + + Now you can use the Data Migration feature in the [TiDB Cloud console](https://tidbcloud.com) to seamlessly migrate data from any MySQL-compatible database to your [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) clusters. 
+ + - Supported source databases include various MySQL-compatible systems, such as self-hosted MySQL, Amazon RDS, Alibaba Cloud RDS, and PolarDB. + - Supported connection methods for data migration include public connection and PrivateLink to ensure both ease of use and enterprise-grade security: + + - **Public connection**: quickly connects to your source database over the internet using secure and encrypted channels. + - **PrivateLink**: establishes a secure and private connection between your source VPC and TiDB Cloud, bypassing the public internet to ensure maximum data privacy and reduced network latency. + + Currently, the Data Migration feature only supports logical mode. + + For more information, see [Migrate Existing and Incremental Data Using Data Migration](/tidb-cloud/migrate-from-mysql-using-data-migration.md) and [Migrate Incremental Data Using Data Migration](/tidb-cloud/migrate-incremental-data-from-mysql-using-data-migration.md). + + - Display real client IP addresses in the [Slow Query](/tidb-cloud/tune-performance.md#slow-query) view, [DB audit logs](/tidb-cloud/essential-database-audit-logging.md), and the [`INFORMATION_SCHEMA.PROCESSLIST`](/information-schema/information-schema-processlist.md) table (beta) + + TiDB Cloud now supports client IP pass-through, enabling the Slow Query view, DB audit logs, and the `INFORMATION_SCHEMA.PROCESSLIST` table, to display the real client IP address instead of the Load Balancer (LB) IP. This feature helps accurately identify the true source of database requests for better troubleshooting and analysis. + + Currently, this feature is in beta and is available only in the AWS region `Frankfurt (eu-central-1)`. + +**Console changes** + +- Improve the support experience with plan-aware support options. + + The [TiDB Cloud console](https://tidbcloud.com/) now offers plan-aware support options to enhance the support experience across all subscription plans. 
These updates include: + + - **Plan-aware support redirection**: on the cluster overview page, selecting **Get Support** in the **Actions** column directs you to the most relevant resource based on your subscription plan. Users on the Basic plan are guided to the **Support Plan** panel, and users on paid plans are directed to the **Support Portal**. + - **Refined Help Center menu**: rename help menu items to **Support Options** and **Support Tickets** to better reflect available services. Add tooltips to clarify that technical support tickets are available only for paid plans. + - **Clear community support access**: within the **Support Plan** options, Slack and Discord are clearly identified as the primary technical support channels for Basic plan users. The following documentation is streamlined to clarify support channel policies and community access: [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md), [Connected Care Overview](/tidb-cloud/connected-care-overview.md), and [Connected Care Details](/tidb-cloud/connected-care-detail.md). + - **Action-oriented Support Plan UI**: redesign the **Support Plan** window to prioritize the support options available for your current subscription, rather than generic plan comparisons. This change helps you quickly identify how to get support based on your active plan. + + For more information, see [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). + +## January 15, 2026 + +**General changes** + +- **TiDB Cloud Dedicated** + + - Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v8.5.4](https://docs.pingcap.com/tidb/stable/release-8.5.4/) to [v8.5.5](https://docs.pingcap.com/tidb/stable/release-8.5.5/). 
diff --git a/tidb-cloud/scalability-concepts.md b/tidb-cloud/scalability-concepts.md index 8a2dd30b02707..efb66f22051e9 100644 --- a/tidb-cloud/scalability-concepts.md +++ b/tidb-cloud/scalability-concepts.md @@ -5,11 +5,11 @@ summary: Learn about scalability concepts for TiDB Cloud. # Scalability -TiDB Cloud Dedicated lets you adjust its compute and storage resources separately to match your data volume or workload changes. TiDB Cloud Dedicated can do scaling without any service disruption. This flexibility allows organizations to optimize their infrastructure costs while maintaining high performance and availability. +TiDB Cloud offers multiple deployment options with flexible scalability to meet the needs of different workloads. -> **Note:** -> -> [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) scales automatically based on your application's workload changes. However, you cannot manually scale a TiDB Cloud Serverless cluster. +- [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) is ideal for prototyping, development, and early-stage workloads. It provides a simplified, cost-effective way to get started with TiDB Cloud, with automatic scaling built in. +- [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) is built for production workloads that require more robust scalability and predictable performance under growing traffic or data volume. +- TiDB Cloud Dedicated lets you adjust its compute and storage resources separately to match your data volume or workload changes. TiDB Cloud Dedicated can do scaling without any service disruption. This flexibility allows organizations to optimize their infrastructure costs while maintaining high performance and availability. 
> **Tip:** > diff --git a/tidb-cloud/scale-tidb-cluster.md b/tidb-cloud/scale-tidb-cluster.md index 1348dd17c5b3d..2fec6e68ff546 100644 --- a/tidb-cloud/scale-tidb-cluster.md +++ b/tidb-cloud/scale-tidb-cluster.md @@ -7,7 +7,7 @@ summary: Learn how to scale your TiDB Cloud cluster. > **Note:** > -> - [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) scales automatically based on your application's workload changes. However, you cannot manually scale a TiDB Cloud Serverless cluster. +> - [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) and [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) scale automatically based on your application's workload changes. However, you cannot manually scale a {{{ .starter }}} or {{{ .essential }}} cluster. > - When a cluster is in the **MODIFYING** status, you cannot perform any new scaling operations on it. You can scale a TiDB cluster in the following dimensions: diff --git a/tidb-cloud/secure-connections-to-serverless-clusters.md b/tidb-cloud/secure-connections-to-serverless-clusters.md index 3d5134e11eda4..d5ff993cd32ac 100644 --- a/tidb-cloud/secure-connections-to-serverless-clusters.md +++ b/tidb-cloud/secure-connections-to-serverless-clusters.md @@ -1,21 +1,25 @@ --- -title: TLS Connections to TiDB Cloud Serverless -summary: Introduce TLS connections in TiDB Cloud Serverless. +title: TLS Connections to {{{ .starter }}} or Essential +summary: Introduce TLS connections in {{{ .starter }}} and {{{ .essential }}}. aliases: ['/tidbcloud/secure-connections-to-serverless-tier-clusters'] --- -# TLS Connections to TiDB Cloud Serverless +# TLS Connections to {{{ .starter }}} or Essential -Establishing a secure TLS connection between your client and your TiDB Cloud Serverless cluster is one of the basic security practices for connecting to your databases. The server certificate for TiDB Cloud Serverless is issued by an independent third-party certificate provider. 
You can easily connect to your TiDB Cloud Serverless cluster without downloading a server-side digital certificate. +Establishing a secure TLS connection between your client and your {{{ .starter }}} or {{{ .essential }}} cluster is one of the basic security practices for connecting to your databases. The server certificate for TiDB Cloud is issued by an independent third-party certificate provider. You can easily connect to your TiDB Cloud cluster without downloading a server-side digital certificate. + +> **Note:** +> +> To learn how to establish TLS connections to TiDB Cloud Dedicated clusters, see [TLS Connections to TiDB Cloud Dedicated](/tidb-cloud/tidb-cloud-tls-connect-to-dedicated.md). ## Prerequisites - Log in to TiDB Cloud via [Password Authentication](/tidb-cloud/tidb-cloud-password-authentication.md) or [SSO Authentication](/tidb-cloud/tidb-cloud-sso-authentication.md). -- [Create a TiDB Cloud Serverless cluster](/tidb-cloud/tidb-cloud-quickstart.md). +- [Create a TiDB Cloud cluster](/tidb-cloud/tidb-cloud-quickstart.md). -## TLS connection to a TiDB Cloud Serverless cluster +## TLS connection to a TiDB Cloud cluster -In the [TiDB Cloud console](https://tidbcloud.com/), you can get examples of different connection methods and connect to your TiDB Cloud Serverless cluster as follows: +In the [TiDB Cloud console](https://tidbcloud.com/), you can get examples of different connection methods and connect to your TiDB Cloud cluster as follows: 1. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your cluster to go to its overview page. @@ -23,27 +27,27 @@ In the [TiDB Cloud console](https://tidbcloud.com/), you can get examples of dif 3. In the dialog, keep the default setting of the connection type as `Public`, and select your preferred connection method and operating system. -4. 
If you have not set a password yet, click **Generate Password** to generate a random password for your TiDB Cloud Serverless cluster. The password will be automatically embedded in the sample connection string for connecting to your cluster easily. +4. If you have not set a password yet, click **Generate Password** to generate a random password for your cluster. The password will be automatically embedded in the sample connection string for connecting to your cluster easily. > **Note:** > > - The random password consists of 16 characters, including uppercase and lowercase letters, numbers, and special characters. > - After you close this dialog, the generated password will not show again, so you need to save the password in a secure location. If you forget it, you can click **Reset Password** in this dialog to reset it. - > - The TiDB Cloud Serverless cluster can be accessed through the internet. If you need to use the password elsewhere, it is recommended that you reset it to ensure database security. + > - The TiDB Cloud cluster can be accessed through the internet. If you need to use the password elsewhere, it is recommended that you reset it to ensure database security. 5. Connect to your cluster with the connection string. > **Note:** > - > When you connect to a TiDB Cloud Serverless cluster, you must include the prefix for your cluster in the user name and wrap the name with quotation marks. For more information, see [User name prefix](/tidb-cloud/select-cluster-tier.md#user-name-prefix). + > When you connect to a TiDB Cloud cluster, you must include the prefix for your cluster in the user name and wrap the name with quotation marks. For more information, see [User name prefix](/tidb-cloud/select-cluster-tier.md#user-name-prefix). 
## Root certificate management ### Root certificate issuance and validity -TiDB Cloud Serverless uses certificates from [Let's Encrypt](https://letsencrypt.org/) as a Certificate Authority (CA) for TLS connection between clients and TiDB Cloud Serverless clusters. Once the TiDB Cloud Serverless certificate expires, it will be automatically rotated without affecting the normal operations of your cluster and the established TLS secure connection. +TiDB Cloud uses certificates from [Let's Encrypt](https://letsencrypt.org/) as a Certificate Authority (CA) for TLS connection between clients and TiDB Cloud clusters. Once the TiDB Cloud certificate expires, it will be automatically rotated without affecting the normal operations of your cluster and the established TLS secure connection. -If the client uses the system's root CA stores by default, such as Java and Go, you can easily connect securely to TiDB Cloud Serverless clusters without specifying the path of CA roots. However, some drivers and ORMs do not use the system root CA stores. In those cases, you need to configure the CA root path of the drivers or ORMs to your system root CA stores. For example, when you use [mysqlclient](https://github.com/PyMySQL/mysqlclient) to connect a TiDB Cloud Serverless cluster in Python on macOS, you need to set `ca: /etc/ssl/cert.pem` in the `ssl` argument. +If the client uses the system's root CA stores by default, such as Java and Go, you can easily connect securely to TiDB Cloud clusters without specifying the path of CA roots. However, some drivers and ORMs do not use the system root CA stores. In those cases, you need to configure the CA root path of the drivers or ORMs to your system root CA stores. For example, when you use [mysqlclient](https://github.com/PyMySQL/mysqlclient) to connect a TiDB Cloud cluster in Python on macOS, you need to set `ca: /etc/ssl/cert.pem` in the `ssl` argument. 
If you are using a GUI client, such as DBeaver, which does not accept a certificate file with multiple certificates inside, you must download the [ISRG Root X1](https://letsencrypt.org/certs/isrgrootx1.pem) certificate. @@ -86,22 +90,22 @@ In different operating systems, the default storage paths of the root certificat Windows does not offer a specific path to the CA root. Instead, it uses the [registry](https://learn.microsoft.com/en-us/windows-hardware/drivers/install/local-machine-and-current-user-certificate-stores) to store certificates. For this reason, to specify the CA root path on Windows, take the following steps: 1. Download the [ISRG Root X1 certificate](https://letsencrypt.org/certs/isrgrootx1.pem) and then save it in a path you prefer, such as ``. -2. Use the path (``) as your CA root path when you connect to a TiDB Cloud Serverless cluster. +2. Use the path (``) as your CA root path when you connect to a TiDB Cloud cluster. ## FAQs -### Which TLS versions are supported to connect to my TiDB Cloud Serverless cluster? +### Which TLS versions are supported to connect to my TiDB Cloud cluster? -For security reasons, TiDB Cloud Serverless only supports TLS 1.2 and TLS 1.3, and does not support TLS 1.0 and TLS 1.1 versions. See IETF [Deprecating TLS 1.0 and TLS 1.1](https://datatracker.ietf.org/doc/rfc8996/) for details. +For security reasons, TiDB Cloud only supports TLS 1.2 and TLS 1.3, and does not support TLS 1.0 and TLS 1.1 versions. See IETF [Deprecating TLS 1.0 and TLS 1.1](https://datatracker.ietf.org/doc/rfc8996/) for details. -### Is two-way TLS authentication between my connection client and TiDB Cloud Serverless supported? +### Is two-way TLS authentication between my connection client and TiDB Cloud supported? No. -TiDB Cloud Serverless only supports one-way TLS authentication, which means your client uses the public key to verify the signature of your TiDB Cloud cluster certificate's private key while the cluster does not validate the client. 
+TiDB Cloud only supports one-way TLS authentication, which means your client uses the public key to verify the signature of your TiDB Cloud cluster certificate's private key while the cluster does not validate the client. -### Does TiDB Cloud Serverless have to configure TLS to establish a secure connection? +### Does TiDB Cloud have to configure TLS to establish a secure connection? -For standard connection, TiDB Cloud Serverless only allows TLS connections and prohibits non-SSL/TLS connections. The reason is that SSL/TLS is one of the most basic security measures for you to reduce the risk of data exposure to the internet when you connect to the TiDB Cloud Serverless cluster through the internet. +For standard connection, TiDB Cloud only allows TLS connections and prohibits non-SSL/TLS connections. The reason is that SSL/TLS is one of the most basic security measures for you to reduce the risk of data exposure to the internet when you connect to the TiDB Cloud cluster through the internet. For private endpoint connection, because it supports highly secure and one-way access to the TiDB Cloud service and does not expose your data to the public internet, configuring TLS is optional. diff --git a/tidb-cloud/security-concepts.md b/tidb-cloud/security-concepts.md index f4afd891004d3..c4da4ca74e275 100644 --- a/tidb-cloud/security-concepts.md +++ b/tidb-cloud/security-concepts.md @@ -164,17 +164,32 @@ TiDB Cloud ensures secure cluster connectivity and data transmission through rob ### Private endpoints + + +- Enables secure connectivity for SQL clients within your Virtual Private Cloud (VPC) to TiDB Cloud Dedicated clusters. 
+ +- Supported by [AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections.md), [Azure Private Link](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md), [Google Cloud Private Service Connect](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md), and [Alibaba Cloud Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md). + + + + + - Enables secure connectivity for SQL clients within your Virtual Private Cloud (VPC) to TiDB Cloud Dedicated clusters. - Supported by [AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections.md), [Azure Private Link](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md), and [Google Cloud Private Service Connect](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md). + + **Best practices:** Use private endpoints in production to minimize public exposure and review configurations regularly. ### TLS (Transport Layer Security) - Encrypts communication between clients and servers to secure data transmission. -- Setup guides available for both [Serverless](/tidb-cloud/secure-connections-to-serverless-clusters.md) and [Dedicated](/tidb-cloud/tidb-cloud-tls-connect-to-dedicated.md) clusters. +- Setup guides: + + - [TLS Connections to {{{ .starter }}} or Essential](/tidb-cloud/secure-connections-to-serverless-clusters.md) + - [TLS Connections to TiDB Cloud Dedicated](/tidb-cloud/tidb-cloud-tls-connect-to-dedicated.md) **Best practices:** Ensure TLS certificates are current and rotate them periodically. @@ -204,7 +219,7 @@ TiDB Cloud safeguards static data with advanced encryption capabilities, ensurin - Encrypts static data and backups with CMEK keys when enabled. -- For TiDB Cloud Dedicated clusters without CMEK, TiDB Cloud uses escrow keys; TiDB Cloud Serverless clusters rely exclusively on escrow keys. 
+- For TiDB Cloud Dedicated clusters without CMEK, TiDB Cloud uses escrow keys; {{{ .starter }}} and {{{ .essential }}} clusters rely exclusively on escrow keys. **Best practices:** @@ -214,7 +229,7 @@ TiDB Cloud safeguards static data with advanced encryption capabilities, ensurin - Leverage CMEK for industries requiring strict compliance, such as HIPAA and GDPR. -For more information, see [Encryption at Rest Using Customer-Managed Encryption Keys](/tidb-cloud/tidb-cloud-encrypt-cmek.md). +For more information, see [Encryption at Rest Using Customer-Managed Encryption Keys on AWS](/tidb-cloud/tidb-cloud-encrypt-cmek-aws.md) and [Encryption at Rest Using Customer-Managed Encryption Keys on Azure](/tidb-cloud/tidb-cloud-encrypt-cmek-azure.md). ## Audit logging diff --git a/tidb-cloud/security-overview.md b/tidb-cloud/security-overview.md new file mode 100644 index 0000000000000..db53f208ed40e --- /dev/null +++ b/tidb-cloud/security-overview.md @@ -0,0 +1,40 @@ +--- +title: Security Overview +summary: Learn about the comprehensive security framework of TiDB Cloud, including identity management, network isolation, data protection, access control, and auditing. +--- + +# Security Overview + +TiDB Cloud provides a comprehensive and flexible security framework, covering all stages of the data lifecycle. The platform offers full protection across identity and access management, network security and isolation, data access control, database access control, and audit logging. + +## Identity and access management + +TiDB Cloud supports multiple authentication methods, including [email and password login](/tidb-cloud/tidb-cloud-password-authentication.md), [standard SSO](/tidb-cloud/tidb-cloud-sso-authentication.md), and [organization-level SSO](/tidb-cloud/tidb-cloud-org-sso-authentication.md). + +TiDB Cloud provides layered role and permission management, and you can enable multi-factor authentication (MFA) to strengthen account security. 
Flexible [identity and access controls](/tidb-cloud/manage-user-access.md) let you manage project and resource access with fine-grained permissions, ensuring that you can maintain the principle of least privilege. + +## Network security and isolation + +TiDB Cloud provides private endpoints, VPC Peering, and IP access lists for network isolation and access control. + +You can encrypt all communications using TLS to ensure the confidentiality and integrity of data in transit. Network access controls ensure that only authorized sources can access cluster resources, enhancing overall security. + +## Data access control + +For cluster types that support Customer-Managed Encryption Keys (CMEK), TiDB Cloud provides encryption for both data at rest and backups. + +Combined with robust key management mechanisms, you can control the lifecycle and usage of encryption keys, further enhancing data security and compliance. + +For more information, see [Encryption at Rest Using Customer-Managed Encryption Keys on AWS](/tidb-cloud/tidb-cloud-encrypt-cmek-aws.md) and [Encryption at Rest Using Customer-Managed Encryption Keys on Azure](/tidb-cloud/tidb-cloud-encrypt-cmek-azure.md). + +## Database access control + +TiDB Cloud provides a user- and role-based access control mechanism, combining static and dynamic privileges. You can assign roles to users to manage and distribute permissions in a more fine-grained way. + +For [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, you can [configure and manage the root account password](/tidb-cloud/configure-security-settings.md) and restrict access through [IP access lists](/tidb-cloud/configure-ip-access-list.md) to protect sensitive accounts. + +## Audit logging + +TiDB Cloud provides audit logs for both console and database operations to support activity tracking, compliance monitoring, and security incident investigation. 
+ +Audit logs record your actions, operation times, and sources, providing reliable evidence for enterprise security management. diff --git a/tidb-cloud/select-cluster-tier.md b/tidb-cloud/select-cluster-tier.md index dd3a41fb3b561..6728ee5673fa2 100644 --- a/tidb-cloud/select-cluster-tier.md +++ b/tidb-cloud/select-cluster-tier.md @@ -1,49 +1,38 @@ --- -title: Select Your Cluster Tier -summary: Learn how to select your cluster tier on TiDB Cloud. +title: Select Your Cluster Plan +summary: Learn how to select your cluster plan on TiDB Cloud. aliases: ['/tidbcloud/developer-tier-cluster'] --- -# Select Your Cluster Tier +# Select Your Cluster Plan -The cluster tier determines the throughput and performance of your cluster. +The cluster plan determines the throughput and performance of your cluster. -TiDB Cloud provides the following two options of cluster tiers. Before creating a cluster, you need to consider which option suits your need better. +TiDB Cloud provides the following options of cluster plans. Whether you are just getting started or scaling to meet the increasing application demands, these service plans provide the flexibility and capability you need. Before creating a cluster, you need to consider which option suits your needs better. -- [TiDB Cloud Serverless](#tidb-cloud-serverless) +- [TiDB Cloud Starter](#starter) +- [{{{ .essential }}}](#essential) - [TiDB Cloud Dedicated](#tidb-cloud-dedicated) -## TiDB Cloud Serverless - - -TiDB Cloud Serverless is a fully managed, multi-tenant TiDB offering. It delivers an instant, autoscaling MySQL-compatible database and offers a generous free tier and consumption based billing once free limits are exceeded. - -### Cluster plans +> **Note:** +> +> Some TiDB Cloud features are partially supported or not supported on {{{ .starter }}} and {{{ .essential }}}. See [{{{ .starter }}} and Essential Limitations](/tidb-cloud/serverless-limitations.md) for details.
-TiDB Cloud Serverless offers two service plans to meet different user requirements. Whether you are just getting started or scaling to meet the increasing application demands, these service plans provide the flexibility and capability you need. +## {{{ .starter }}} {#starter} -#### Free cluster plan +TiDB Cloud Starter is a fully managed, multi-tenant TiDB offering. It delivers an instant, autoscaling MySQL-compatible database and offers a generous free quota and consumption-based billing once free limits are exceeded. -The free cluster plan is ideal for those who are getting started with TiDB Cloud Serverless. It provides developers and small teams with the following essential features: +The free cluster plan is ideal for those who are getting started with {{{ .starter }}}. It provides developers and small teams with the following essential features: - **No cost**: This plan is completely free, with no credit card required to get started. - **Storage**: Provides an initial 5 GiB of row-based storage and 5 GiB of columnar storage. -- **Request Units**: Includes 50 million [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit) for database operations. -- **Easy upgrade**: Offers a smooth transition to the [scalable cluster plan](#scalable-cluster-plan) as your needs grow. - -#### Scalable cluster plan - -For applications experiencing growing workloads and needing scalability in real time, the scalable cluster plan provides the flexibility and performance to keep pace with your business growth with the following features: - -- **Enhanced capabilities**: Includes all capabilities of the free cluster plan, along with the capacity to handle larger and more complex workloads, as well as advanced security features. - -- **Automatic scaling**: Automatically adjusts storage and computing resources to efficiently meet changing workload demands.
-- **Predictable pricing**: Although this plan requires a credit card, you are only charged for the resources you use, ensuring cost-effective scalability. +- **Request Units**: Includes 50 million [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru) for database operations. ### Usage quota -For each organization in TiDB Cloud, you can create a maximum of five [free clusters](#free-cluster-plan) by default. To create more TiDB Cloud Serverless clusters, you need to add a credit card and create [scalable clusters](#scalable-cluster-plan) for the usage. +For each organization in TiDB Cloud, you can create a maximum of five free {{{ .starter }}} clusters by default. To create more {{{ .starter }}} clusters, you need to add a credit card and specify a spending limit. -For the first five TiDB Cloud Serverless clusters in your organization, whether they are free or scalable, TiDB Cloud provides a free usage quota for each of them as follows: +For the first five {{{ .starter }}} clusters in your organization, whether they are free or scalable, TiDB Cloud provides a free usage quota for each of them as follows: - Row-based storage: 5 GiB - Columnar storage: 5 GiB @@ -53,13 +42,22 @@ A Request Unit (RU) is a unit of measure used to represent the amount of resourc Once a cluster reaches its usage quota, it immediately denies any new connection attempts until you [increase the quota](/tidb-cloud/manage-serverless-spend-limit.md#update-spending-limit) or the usage is reset upon the start of a new month. Existing connections established before reaching the quota will remain active but will experience throttling. For example, when the row-based storage of a cluster exceeds 5 GiB for a free cluster, the cluster automatically restricts any new connection attempts. 
-To learn more about the RU consumption of different resources (including read, write, SQL CPU, and network egress), the pricing details, and the throttled information, see [TiDB Cloud Serverless Pricing Details](https://www.pingcap.com/tidb-cloud-serverless-pricing-details). +To learn more about the RU consumption of different resources (including read, write, SQL CPU, and network egress), the pricing details, and the throttled information, see [{{{ .starter }}} Pricing Details](https://www.pingcap.com/tidb-cloud-starter-pricing-details/). + +## {{{ .essential }}} {#essential} + +For applications experiencing growing workloads and needing scalability in real time, the Essential cluster plan provides the flexibility and performance to keep pace with your business growth with the following features: -### User name prefix +- **Enhanced capabilities**: includes all capabilities of the Starter plan, along with the capacity to handle larger and more complex workloads, as well as advanced security features. +- **Automatic scaling**: automatically adjusts storage and computing resources to efficiently meet changing workload demands. +- **High availability**: built-in fault tolerance and redundancy ensure your applications remain available and resilient, even during infrastructure failures. +- **Predictable pricing**: billed based on storage and Request Capacity Units (RCUs) of the compute resources, offering transparent, usage-based pricing that scales with your needs, so you only pay for what you use without surprises. + +## User name prefix -For each TiDB Cloud Serverless cluster, TiDB Cloud generates a unique prefix to distinguish it from other clusters. +For each {{{ .starter }}} or {{{ .essential }}} cluster, TiDB Cloud generates a unique prefix to distinguish it from other clusters. Whenever you use or set a database user name, you must include the prefix in the user name. For example, assume that the prefix of your cluster is `3pTAoNNegb47Uc8`. 
@@ -71,7 +69,7 @@ Whenever you use or set a database user name, you must include the prefix in the > **Note:** > - > TiDB Cloud Serverless requires TLS connection. To find the CA root path on your system, see [Root certificate default path](/tidb-cloud/secure-connections-to-serverless-clusters.md#root-certificate-default-path). + > {{{ .starter }}} and {{{ .essential }}} require TLS connection. To find the CA root path on your system, see [Root certificate default path](/tidb-cloud/secure-connections-to-serverless-clusters.md#root-certificate-default-path). - To create a database user: @@ -85,10 +83,6 @@ To get the prefix for your cluster, take the following steps: 2. Click the name of your target cluster to go to its overview page, and then click **Connect** in the upper-right corner. A connection dialog is displayed. 3. In the dialog, get the prefix from the connection string. -### TiDB Cloud Serverless special terms and conditions - -Some of TiDB Cloud features are partially supported or not supported on TiDB Cloud Serverless. See [TiDB Cloud Serverless Limitations](/tidb-cloud/serverless-limitations.md) for details. - ## TiDB Cloud Dedicated TiDB Cloud Dedicated is for production use with the benefits of cross-zone high availability, horizontal scaling, and [HTAP](https://en.wikipedia.org/wiki/Hybrid_transactional/analytical_processing). diff --git a/tidb-cloud/serverless-audit-logging.md b/tidb-cloud/serverless-audit-logging.md deleted file mode 100644 index 3df88aecfbf68..0000000000000 --- a/tidb-cloud/serverless-audit-logging.md +++ /dev/null @@ -1,230 +0,0 @@ ---- -title: TiDB Cloud Serverless Database Audit Logging -summary: Learn about how to audit a TiDB Cloud Serverless cluster in TiDB Cloud. ---- - -# TiDB Cloud Serverless Database Audit Logging (Beta) - -TiDB Cloud Serverless provides you with a database audit logging feature to record a history of user access details (such as any SQL statements executed) in logs. 
- -> **Note:** -> -> Currently, the database audit logging feature is only available upon request. To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com) and click **Request Support**. Then, fill in "Apply for TiDB Cloud Serverless database audit logging" in the **Description** field and click **Submit**. - -To assess the effectiveness of user access policies and other information security measures of your organization, it is a security best practice to conduct a periodic analysis of the database audit logs. - -The audit logging feature is disabled by default. To audit a cluster, you need to enable audit logging for it. - -## Enable audit logging - -To enable audit logging for a TiDB Cloud Serverless cluster, use the [TiDB Cloud CLI](/tidb-cloud/ticloud-auditlog-config.md). - -```shell -ticloud serverless audit-log config -c --enabled -``` - -To disable audit logging for a TiDB Cloud Serverless cluster, use the [TiDB Cloud CLI](/tidb-cloud/ticloud-auditlog-config.md). - -```shell -ticloud serverless audit-log config -c --enabled=false -``` - -> **Note:** -> -> Only enabling audit logging will not generate audit logs. You need to configure filters to specify what events to log. For more information, see [Manage audit logging filter rules](#manage-audit-logging-filter-rules). - -## Manage audit logging filter rules - -To filter the audit logging, you need to create a filter rule to specify which events to log. You can use the [TiDB Cloud CLI](/tidb-cloud/ticloud-auditlog-filter-create.md) to manage the filter rules. - -The filter rule contains the following fields: - -- `users`: A list of user names to filter audit events. You can use the wildcard `%` to match any user name. -- `filters`: A list of filter objects. Each filter object can contain the following fields: - - - `classes`: A list of event classes to filter audit events. For example, `["QUERY", "EXECUTE"]`. - - `tables`: A list of table filters. 
For more information, see [Table filters]. - - `statusCodes`: A list of status codes to filter audit events. `1` means success, `0` means failure. - -Here is the summary of all event classes in database audit logging: - -| Event Class | Description | Parent-class | -|---------------|--------------------------------------------------------------------------------------------------|---------------| -| CONNECTION | Record all operations related to connections, such as handshaking, connections, disconnections, connection reset, and changing users | - | -| CONNECT | Record all operations of the handshaking in connections | CONNECTION | -| DISCONNECT | Record all operations of the disconnections | CONNECTION | -| CHANGE_USER | Record all operations of changing users | CONNECTION | -| QUERY | Record all operations of SQL statements, including all errors about querying and modifying data | - | -| TRANSACTION | Record all operations related to transactions, such as `BEGIN`, `COMMIT`, and `ROLLBACK` | QUERY | -| EXECUTE | Record all operations of the `EXECUTE` statements | QUERY | -| QUERY_DML | Record all operations of the DML statements, including `INSERT`, `REPLACE`, `UPDATE`, `DELETE`, and `LOAD DATA` | QUERY | -| INSERT | Record all operations of the `INSERT` statements | QUERY_DML | -| REPLACE | Record all operations of the `REPLACE` statements | QUERY_DML | -| UPDATE | Record all operations of the `UPDATE` statements | QUERY_DML | -| DELETE | Record all operations of the `DELETE` statements | QUERY_DML | -| LOAD DATA | Record all operations of the `LOAD DATA` statements | QUERY_DML | -| SELECT | Record all operations of the `SELECT` statements | QUERY | -| QUERY_DDL | Record all operations of the DDL statements | QUERY | -| AUDIT | Record all operations related to setting TiDB database auditing, including setting system variables and calling system functions | - | -| AUDIT_FUNC_CALL | Record all operations of calling system functions related to TiDB database auditing 
| AUDIT | - -### Create a filter rule - -To create a filter rule that captures all audit logs, run the following command: - -```shell -ticloud serverless audit-log filter create --cluster-id --name --rule '{"users":["%@%"],"filters":[{}]}' -``` - -To create a filter rule that filters ALL EXECUTE events, run the following command: - -```shell -ticloud serverless audit-log filter create --cluster-id --name --rule '{"users":["%@%"],"filters":[{"classes":["EXECUTE"]]}' -``` - -### Update a filter rule - -To disable a filter rule, run the following command: - -```shell -ticloud serverless audit-log filter update --cluster-id --name --enabled=false -``` - -To update a filter rule, run the following command: - -```shell -ticloud serverless audit-log filter update --cluster-id --name --rule '{"users":["%@%"],"filters":[{"classes":["QUERY"],"tables":["test.t"]}]}' -``` - -Note that you need to pass the complete `--rule` field when updating. - -### Delete a filter rule - -To delete a filter rule, run the following command: - -```shell -ticloud serverless audit-log filter delete --cluster-id --name -``` - -## Configure audit logging - -### Data redaction - -TiDB Cloud Serverless redacts sensitive data in the audit logs by default. Take the following SQL statement as an example: - -```sql -INSERT INTO `test`.`users` (`id`, `name`, `password`) VALUES (1, 'Alice', '123456'); -``` - -It is redacted as follows: - -```sql -INSERT INTO `test`.`users` (`id`, `name`, `password`) VALUES ( ... ); -``` - -If you want to disable redaction, use the [TiDB Cloud CLI](/tidb-cloud/ticloud-auditlog-config.md). - -```shell -ticloud serverless audit-log config --cluster-id --unredacted -``` - -### Log file rotation - -TiDB Cloud Serverless generates a new audit log file when either of the following conditions is met: - -- The size of the current log file reaches 100 MiB. -- One hour has passed since the previous log generation. 
Depending on the internal scheduling mechanism, log generation might be delayed by a few minutes. - -> **Note:** -> -> Currently, Log file rotation settings are not configurable. TiDB Cloud Serverless automatically rotates the audit log files based on the preceding conditions. - -## Access audit logging - -TiDB Cloud Serverless audit logs are stored as readable text files named `YYYY-MM-DD-.log`. - -Currently, audit logs are stored within TiDB Cloud for 365 days. After this period, logs are automatically deleted. - -> **Note:** -> -> Contact [TiDB Cloud Support](https://docs.pingcap.com/tidbcloud/tidb-cloud-support) if you need to save audit logs in external storage (such as AWS S3, Azure Blob Storage, and Google Cloud Storage). - -To view and download audit logs, use the [TiDB Cloud CLI](/tidb-cloud/ticloud-auditlog-download.md): - -```shell -ticloud serverless audit-log download --cluster-id --output-path --start-date --end-date -``` - -- `start-date`: The start date of the audit log you want to download in the format of `YYYY-MM-DD`, for example `2025-01-01`. -- `end-date`: The end date of the audit log you want to download in the format of `YYYY-MM-DD`, for example `2025-01-01`. - -> **Note:** -> -> TiDB Cloud Serverless does not guarantee sequential ordering of audit logs. The log file named `YYYY-MM-DD-.log` might contain the audit logs in previous days. -> If you want to retrieve all logs from a specific date (for example, January 1, 2025), specifying `--start-date 2025-01-01` and `--end-date 2025-01-02` usually works. But under extreme conditions, you might need to download all log files and order them by the `TIME` field. 
- -## Audit logging fields - -For each database event record in audit logs, TiDB provides the following fields: - -### General information - -All classes of audit logs contain the following information: - -| Field | Description | -|---------------|-----------------------------------------------------------------------------------------------| -| ID | The unique identifier that identifies the audit record of an operation | -| TIME | The timestamp of the audit record | -| EVENT | The event classes of the audit record. Multiple event types are separated by commas (`,`) | -| USER | The username of the audit record | -| ROLES | The roles of the user at the time of the operation | -| CONNECTION_ID | The identifier of the user's connection | -| TABLES | The accessed tables related to this audit record | -| STATUS_CODE | The status code of the audit record. `1` means success, and `0` means failure. | -| KEYSPACE_NAME | The keyspace name of the audit record. | -| SERVERLESS_TENANT_ID | The ID of the serverless tenant that the cluster belongs to. | -| SERVERLESS_TSERVERLESS_PROJECT_ID | The ID of the serverless project that the cluster belongs to. | -| SERVERLESS_CLUSTER_ID | The ID of the serverless cluster that the audit record belongs to. | -| REASON | The error message of the audit record. Only recorded when an error occurs during the operation. | - -### SQL statement information - -When the event class is `QUERY` or a subclass of `QUERY`, the audit logs contain the following information: - -| Field | Description | -|----------------|---------------------------------------------------------------------------------------------------------------| -| CURRENT_DB | The name of the current database. | -| SQL_TEXT | The executed SQL statements. If audit log redaction is enabled, the redacted SQL statements are recorded. | -| EXECUTE_PARAMS | The parameters for the `EXECUTE` statements. Recorded only when the event classes include `EXECUTE` and redaction is disabled. 
| -| AFFECTED_ROWS | The number of affected rows of the SQL statements. Recorded only when the event classes include `QUERY_DML`. | - -### Connection information - -When the event class is `CONNECTION` or a subclass of `CONNECTION`, the audit logs contain the following information: - -| Field | Description | -|-----------------|-----------------------------------------------------------------------------------------------| -| CURRENT_DB | The name of the current database. When the event classes include DISCONNECT, this information is not recorded. | -| CONNECTION_TYPE | The type of connection, including Socket, UnixSocket, and SSL/TLS. | -| PID | The process ID of the current connection. | -| SERVER_VERSION | The current version of the connected TiDB server. | -| SSL_VERSION | The current version of SSL in use. | -| HOST_IP | The current IP address of the connected TiDB server. | -| HOST_PORT | The current port of the connected TiDB server. | -| CLIENT_IP | The current IP address of the client. | -| CLIENT_PORT | The current port of the client. | - -### Audit operation information - -When the event class is `AUDIT` or a subclass of `AUDIT`, the audit logs contain the following information: - -| Field | Description | -|----------------|---------------------------------------------------------------------------------------------------------------| -| AUDIT_OP_TARGET| The objects of the setting related to TiDB database auditing. | -| AUDIT_OP_ARGS | The arguments of the setting related to TiDB database auditing. | - -## Audit logging limitations - -- Audit logging is only available via TiDB Cloud CLI at present. -- Audit logs can only be stored in TiDB Cloud at present. -- TiDB Cloud Serverless does not guarantee the sequential order of audit logs, which means you might have to review all log files to view the latest events. To sort the logs chronologically, you can use the `TIME` field in the audit logs. 
diff --git a/tidb-cloud/serverless-export.md b/tidb-cloud/serverless-export.md index 4fcc67ad645da..d0c5efb4b8f94 100644 --- a/tidb-cloud/serverless-export.md +++ b/tidb-cloud/serverless-export.md @@ -1,15 +1,15 @@ --- -title: Export Data from TiDB Cloud Serverless -summary: Learn how to export data from TiDB Cloud Serverless clusters. +title: Export Data from {{{ .starter }}} or Essential +summary: Learn how to export data from {{{ .starter }}} or {{{ .essential }}} clusters. --- -# Export Data from TiDB Cloud Serverless +# Export Data from {{{ .starter }}} or Essential -TiDB Cloud Serverless Export (Beta) is a service that enables you to export data from a TiDB Cloud Serverless cluster to a local file or an external storage service. You can use the exported data for backup, migration, data analysis, or other purposes. +TiDB Cloud enables you to export data from a {{{ .starter }}} or Essential cluster to a local file or an external storage service. You can use the exported data for backup, migration, data analysis, or other purposes. -While you can also export data using tools such as [mysqldump](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html) and TiDB [Dumpling](https://docs.pingcap.com/tidb/dev/dumpling-overview), TiDB Cloud Serverless Export offers a more convenient and efficient way to export data from a TiDB Cloud Serverless cluster. It brings the following benefits: +While you can also export data using tools such as [mysqldump](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html) and TiDB [Dumpling](https://docs.pingcap.com/tidb/dev/dumpling-overview), the export feature provided by TiDB Cloud offers a more convenient and efficient way to export data from a cluster. It brings the following benefits: -- Convenience: the export service provides a simple and easy-to-use way to export data from a TiDB Cloud Serverless cluster, eliminating the need for additional tools or resources. 
+- Convenience: the export service provides a simple and easy-to-use way to export data from a cluster, eliminating the need for additional tools or resources. - Isolation: the export service uses separate computing resources, ensuring isolation from the resources used by your online services. - Consistency: the export service ensures the consistency of the exported data without causing locks, which does not affect your online services. @@ -35,7 +35,7 @@ You can export data to the following locations: ### A local file -To export data from a TiDB Cloud Serverless cluster to a local file, you need to export data [using the TiDB Cloud console](#export-data-to-a-local-file) or [using the TiDB Cloud CLI](/tidb-cloud/ticloud-serverless-export-create.md), and then download the exported data using the TiDB Cloud CLI. +To export data from a TiDB Cloud cluster to a local file, you need to export data [using the TiDB Cloud console](#export-data-to-a-local-file) or [using the TiDB Cloud CLI](/tidb-cloud/ticloud-serverless-export-create.md), and then download the exported data using the TiDB Cloud CLI. Exporting data to a local file has the following limitations: @@ -52,7 +52,7 @@ To export data to Amazon S3, you need to provide the following information: - [An access key](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html): make sure the access key has the `s3:PutObject` and `s3:ListBucket` permissions. - [A role ARN](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html): make sure the role ARN (Amazon Resource Name) has the `s3:PutObject` and `s3:ListBucket` permissions. Note that only clusters hosted on AWS support the role ARN. -For more information, see [Configure Amazon S3 access](/tidb-cloud/serverless-external-storage.md#configure-amazon-s3-access). +For more information, see [Configure Amazon S3 access](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access). 
### Google Cloud Storage @@ -61,7 +61,7 @@ To export data to Google Cloud Storage, you need to provide the following inform - URI: `gs:////` - Access credential: a **base64 encoded** [service account key](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) for your bucket. Make sure the service account key has the `storage.objects.create` permission. -For more information, see [Configure GCS access](/tidb-cloud/serverless-external-storage.md#configure-gcs-access). +For more information, see [Configure GCS access](/tidb-cloud/configure-external-storage-access.md#configure-gcs-access). ### Azure Blob Storage @@ -70,16 +70,16 @@ To export data to Azure Blob Storage, you need to provide the following informat - URI: `azure://.blob.core.windows.net///` or `https://.blob.core.windows.net///` - Access credential: a [shared access signature (SAS) token](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview) for your Azure Blob Storage container. Make sure the SAS token has the `Read` and `Write` permissions on the `Container` and `Object` resources. -For more information, see [Configure Azure Blob Storage access](/tidb-cloud/serverless-external-storage.md#configure-azure-blob-storage-access). +For more information, see [Configure Azure Blob Storage access](/tidb-cloud/configure-external-storage-access.md#configure-azure-blob-storage-access). ### Alibaba Cloud OSS To export data to Alibaba Cloud OSS, you need to provide the following information: - URI: `oss:////` -- Access credential: An [AccessKey pair](https://www.alibabacloud.com/help/en/ram/user-guide/create-an-accesskey-pair) for your Alibaba Cloud account. Make sure the AccessKey pair has the `oss:PutObject`, `oss:ListBuckets` and `oss:GetBucketInfo` permissions to allow data export to the OSS bucket. +- Access credential: An [AccessKey pair](https://www.alibabacloud.com/help/en/ram/user-guide/create-an-accesskey-pair) for your Alibaba Cloud account. 
Make sure the AccessKey pair has the `oss:PutObject` and `oss:GetBucketInfo` permissions. -For more information, see [Configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/serverless-external-storage.md#configure-alibaba-cloud-object-storage-service-oss-access). +For more information, see [Configure Alibaba Cloud Object Storage Service (OSS) access](/tidb-cloud/configure-external-storage-access.md#configure-alibaba-cloud-object-storage-service-oss-access). ## Export options @@ -126,9 +126,9 @@ You can compress the exported Parquet data using the following algorithms: ### Data conversion -When exporting data to the Parquet format, the data conversion between TiDB Cloud Serverless and Parquet is as follows: +When exporting data to the Parquet format, the data conversion between TiDB and Parquet is as follows: -| TiDB Cloud Serverless Type | Parquest primitive type | Parquet logical type | +| TiDB data type | Parquet primitive type | Parquet logical type | |----------------------------|-------------------------|----------------------------------------------| | VARCHAR | BYTE_ARRAY | String(UTF8) | | TIME | BYTE_ARRAY | String(UTF8) | @@ -242,7 +242,7 @@ When exporting data to the Parquet format, the data conversion between TiDB Clou - **Compression**: choose **Gzip**, **Snappy**, **Zstd**, or **None**. - **Folder URI**: enter the URI of the Amazon S3 with the `s3:////` format. - **Bucket Access**: choose one of the following access credentials and then fill in the credential information: - - **AWS Role ARN**: enter the role ARN that has the permission to access the bucket. It is recommended to create the role ARN with AWS CloudFormation. For more information, see [Configure External Storage Access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-amazon-s3-access). + - **AWS Role ARN**: enter the role ARN that has the permission to access the bucket. It is recommended to create the role ARN with AWS CloudFormation.
For more information, see [Configure Amazon S3 access](/tidb-cloud/configure-external-storage-access.md#configure-amazon-s3-access). - **AWS Access Key**: enter the access key ID and access key secret that have the permission to access the bucket. 4. Click **Export**. @@ -323,7 +323,7 @@ ticloud serverless export create -c --target-type GCS --gcs.uri .blob.core.windows.net///` format. - - **SAS Token**: enter the SAS token that has the permission to access the container. It is recommended to create a SAS token with the [Azure ARM template](https://learn.microsoft.com/en-us/azure/azure-resource-manager/templates/). For more information, see [Configure External Storage Access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md#configure-azure-blob-storage-access). + - **SAS Token**: enter the SAS token that has the permission to access the container. It is recommended to create a SAS token with the [Azure ARM template](https://learn.microsoft.com/en-us/azure/azure-resource-manager/templates/). For more information, see [Configure Azure Blob Storage access](/tidb-cloud/configure-external-storage-access.md#configure-azure-blob-storage-access). 4. Click **Export**. @@ -416,13 +416,15 @@ ticloud serverless export cancel -c -e ## Export speed -The export speed depends on your [cluster plan](/tidb-cloud/select-cluster-tier.md#cluster-plans). For details, see the following table: +The export speed depends on your [cluster plan](/tidb-cloud/select-cluster-tier.md): -| Plan | Export speed | -|:-------------------|:-------------------| -| Free cluster plan | Up to 25 MiB/s | -| Scalable cluster plan | Up to 100 MiB/s | +- **{{{ .starter }}}**: + + - If the spending limit is set to 0, the export speed is up to 25 MiB/s. + - If the spending limit is greater than 0, the export speed is up to 100 MiB/s. + +- **{{{ .essential }}}**: up to 100 MiB/s. ## Pricing -The export service is free during the beta period. 
You only need to pay for the [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit) generated during the export process of successful or canceled tasks. For failed export tasks, you will not be charged. +The export service is free during the beta period. You only need to pay for the [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru) generated during the export process of successful or canceled tasks. For failed export tasks, you will not be charged. diff --git a/tidb-cloud/serverless-faqs.md b/tidb-cloud/serverless-faqs.md index db37af82e20c8..470a0ad66dc1d 100644 --- a/tidb-cloud/serverless-faqs.md +++ b/tidb-cloud/serverless-faqs.md @@ -1,52 +1,67 @@ --- -title: TiDB Cloud Serverless FAQs -summary: Learn about the most frequently asked questions (FAQs) relating to TiDB Cloud Serverless. +title: "{{{ .starter }}} FAQs" +summary: Learn about the most frequently asked questions (FAQs) relating to {{{ .starter }}}. aliases: ['/tidbcloud/serverless-tier-faqs'] --- -# TiDB Cloud Serverless FAQs +# {{{ .starter }}} FAQs -This document lists the most frequently asked questions about TiDB Cloud Serverless. +This document lists the most frequently asked questions about {{{ .starter }}}. ## General FAQs -### What is TiDB Cloud Serverless? +### What is {{{ .starter }}}? -TiDB Cloud Serverless offers the TiDB database with full HTAP capabilities for you and your organization. It is a fully managed, auto-scaling deployment of TiDB that lets you start using your database immediately, develop and run your application without caring about the underlying nodes, and automatically scale based on your application's workload changes. +{{{ .starter }}} offers the TiDB database with full HTAP capabilities for you and your organization. 
It is a fully managed, auto-scaling deployment of TiDB that lets you start using your database immediately, develop and run your application without caring about the underlying nodes, and automatically scale based on your application's workload changes. -### How do I get started with TiDB Cloud Serverless? +### What is the relationship between TiDB Cloud Starter and TiDB Cloud Serverless? -Get started with the 5-minute [TiDB Cloud Quick Start](/tidb-cloud/tidb-cloud-quickstart.md). +TiDB Cloud Starter is the new name for TiDB Cloud Serverless, effective August 12, 2025. -### How many TiDB Cloud Serverless clusters can I create in TiDB Cloud? +Before it was renamed to Starter, the Serverless tier of TiDB Cloud served as the entry point for thousands of developers, providing a production-ready database that scales automatically, starts in seconds, and costs nothing until you exceed a generous free quota. -For each organization in TiDB Cloud, you can create a maximum of five [free clusters](/tidb-cloud/select-cluster-tier.md#free-cluster-plan) by default. To create more TiDB Cloud Serverless clusters, you need to add a credit card and create [scalable clusters](/tidb-cloud/select-cluster-tier.md#scalable-cluster-plan) for the usage. +While "serverless" accurately reflects how the service works behind the scenes, many first-time users found the term abstract and overloaded with different meanings. -### Are all TiDB Cloud features fully supported on TiDB Cloud Serverless? +To make the purpose of this entry tier clearer, we’ve renamed it to Starter, the fastest way to begin building with TiDB Cloud. Everything you know about the Serverless tier remains the same: -Some of TiDB Cloud features are partially supported or not supported on TiDB Cloud Serverless. For more information, see [TiDB Cloud Serverless Limitations and Quotas](/tidb-cloud/serverless-limitations.md). 
+- A fully managed database with both row-based and columnar storage, ideal for hybrid OLTP and OLAP workloads. +- Automatic and request-driven scaling, no capacity planning or manual tuning required. +- Built-in vector search and full-text search to power GenAI retrieval, chatbots, and other AI applications. +- Always-free monthly quota for up to five clusters per organization (5 GiB row data + 5 GiB columnar data + 50 million [RUs](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru) per cluster). -### When will TiDB Cloud Serverless be available on cloud platforms other than AWS, such as Google Cloud or Azure? +### How do I get started with {{{ .starter }}}? -We are actively working on expanding TiDB Cloud Serverless to other cloud platforms, including Google Cloud and Azure. However, we do not have an exact timeline for now as we currently focus on filling gaps and ensuring seamless functionality across all environments. Rest assured, we are working hard to make TiDB Cloud Serverless available on more cloud platforms, and we will keep our community updated as we progress. +Get started with the 5-minute [TiDB Cloud Quick Start](/tidb-cloud/tidb-cloud-quickstart.md). -### I created a Developer Tier cluster before TiDB Cloud Serverless was available. Can I still use my cluster? +### How many {{{ .starter }}} clusters can I create in TiDB Cloud? -Yes, your Developer Tier cluster has been automatically migrated to the TiDB Cloud Serverless cluster, providing you with an improved user experience without any disruptions to your prior usage. +For each organization in TiDB Cloud, you can create a maximum of five [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) clusters by default. To create more {{{ .starter }}} clusters, you need to add a credit card and set the [spending limit](/tidb-cloud/manage-serverless-spend-limit.md) for the usage. -### What is columnar storage in TiDB Cloud Serverless?
+### Are all TiDB Cloud features fully supported on {{{ .starter }}}? -Columnar storage in TiDB Cloud Serverless acts as an additional replica of row-based storage, ensuring strong consistency. Unlike traditional row-based storage, which stores data in rows, columnar storage organizes data in columns, optimizing it for data analytics tasks. +Some of TiDB Cloud features are partially supported or not supported on {{{ .starter }}}. For more information, see [{{{ .starter }}} Limitations and Quotas](/tidb-cloud/serverless-limitations.md). + +### When will {{{ .starter }}} be available on cloud platforms other than AWS, such as Google Cloud or Azure? + +We are actively working on expanding {{{ .starter }}} to other cloud platforms, including Google Cloud and Azure. However, we do not have an exact timeline for now as we currently focus on filling gaps and ensuring seamless functionality across all environments. Rest assured, we are working hard to make {{{ .starter }}} available on more cloud platforms, and we will keep our community updated as we progress. + +### I created a Developer Tier cluster before {{{ .starter }}} was available. Can I still use my cluster? + +Yes, your Developer Tier cluster has been automatically migrated to the {{{ .starter }}} cluster, providing you with an improved user experience without any disruptions to your prior usage. + +### What is columnar storage in {{{ .starter }}}? + +Columnar storage in {{{ .starter }}} acts as an additional replica of row-based storage, ensuring strong consistency. Unlike traditional row-based storage, which stores data in rows, columnar storage organizes data in columns, optimizing it for data analytics tasks. Columnar storage is a key feature that enables the Hybrid Transactional and Analytical Processing (HTAP) capabilities of TiDB by seamlessly blending transactional and analytical workloads. -To efficiently manage columnar storage data, TiDB Cloud Serverless uses a separate elastic TiFlash engine. 
During query execution, the optimizer guides the cluster to automatically decide whether to retrieve data from row-based or columnar storage. +To efficiently manage columnar storage data, {{{ .starter }}} uses a separate elastic TiFlash engine. During query execution, the optimizer guides the cluster to automatically decide whether to retrieve data from row-based or columnar storage. -### When should I use columnar storage in TiDB Cloud Serverless? +### When should I use columnar storage in {{{ .starter }}}? -Consider using columnar storage in TiDB Cloud Serverless in the following scenarios: +Consider using columnar storage in {{{ .starter }}} in the following scenarios: - Your workload involves analytical tasks that require efficient data scanning and aggregation. - You prioritize improved performance, especially for analytics workloads. @@ -54,9 +69,9 @@ Consider using columnar storage in TiDB Cloud Serverless in the following scenar In these scenarios, columnar storage can significantly improve query performance and provide a seamless experience for mixed workloads in your system. -### How to use columnar storage in TiDB Cloud Serverless? +### How to use columnar storage in {{{ .starter }}}? -Using columnar storage in TiDB Cloud Serverless is similar to using it in TiFlash. You can enable columnar storage at both the table and database levels: +Using columnar storage in {{{ .starter }}} is similar to using it in TiFlash. You can enable columnar storage at both the table and database levels: - Table level: Assign a TiFlash replica to a table to enable columnar storage for that specific table. - Database level: Configure TiFlash replicas for all tables in a database to use columnar storage across the entire database. @@ -65,41 +80,45 @@ Once a TiFlash replica is set up for a table, TiDB automatically replicates data For more information about how to set up TiFlash replicas, see [Create TiFlash replicas](/tiflash/create-tiflash-replicas.md). 
+### Why is my connection disconnected after being idle for several minutes? + +When you connect via a Public Endpoint, your connection passes through various network providers and intermediate devices. These devices might have their own short idle timeouts, which can interrupt your connection prematurely. For more information, see [Connection limitations](/tidb-cloud/serverless-limitations.md#connection). + ## Billing and metering FAQs ### What are Request Units? -TiDB Cloud Serverless adopts a pay-as-you-go model, meaning that you only pay for the storage space and cluster usage. In this model, all cluster activities such as SQL queries, bulk operations, and background jobs are quantified in [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit). RU is an abstract measurement for the size and intricacy of requests initiated on your cluster. For more information, see [TiDB Cloud Serverless Pricing Details](https://www.pingcap.com/tidb-cloud-serverless-pricing-details/). +{{{ .starter }}} adopts a pay-as-you-go model, meaning that you only pay for the storage space and cluster usage. In this model, all cluster activities such as SQL queries, bulk operations, and background jobs are quantified in [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru). RU is an abstract measurement for the size and intricacy of requests initiated on your cluster. For more information, see [{{{ .starter }}} Pricing Details](https://www.pingcap.com/tidb-cloud-starter-pricing-details/). -### Is there any free plan available for TiDB Cloud Serverless? +### Is there any free plan available for {{{ .starter }}}? 
-For the first five TiDB Cloud Serverless clusters in your organization, TiDB Cloud provides a free usage quota for each of them as follows: +For the first five {{{ .starter }}} clusters in your organization, TiDB Cloud provides a free usage quota for each of them as follows: - Row-based storage: 5 GiB - Columnar storage: 5 GiB -- [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit): 50 million RUs per month +- [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru): 50 million RUs per month -If you are using a scalable cluster, usage beyond the free quota will be charged. For a free cluster, once the free quota is reached, the read and write operations on this cluster will be throttled until you upgrade to a scalable cluster or the usage is reset upon the start of a new month. +If the monthly spending limit is set for a {{{ .starter }}} cluster, usage beyond the free quota will be charged. For a free cluster, once the free quota is reached, the read and write operations on this cluster will be throttled until you set a monthly spending limit or the usage is reset upon the start of a new month. -For more information, see [TiDB Cloud Serverless usage quota](/tidb-cloud/select-cluster-tier.md#usage-quota). +For more information, see [{{{ .starter }}} usage quota](/tidb-cloud/select-cluster-tier.md#usage-quota). ### What are the limitations of the free plan? -Under the free plan, cluster performance is limited due to non-scalable resources. This results in a restriction on memory allocation per query to 256 MiB and might cause observable bottlenecks in request units (RUs) per second. To maximize cluster performance and avoid these limitations, you can upgrade to a [scalable cluster](/tidb-cloud/select-cluster-tier.md#scalable-cluster-plan). +Under the free plan, cluster performance is limited due to non-scalable resources. 
This results in a restriction on memory allocation per query to 256 MiB and might cause observable bottlenecks in request units (RUs) per second. To maximize cluster performance and avoid these limitations, you can [set a monthly spending limit](/tidb-cloud/manage-serverless-spend-limit.md) for your {{{ .starter }}} cluster. ### How can I estimate the number of RUs required by my workloads and plan my monthly budget? To get the RU consumption of individual SQL statements, you can use the [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md#ru-request-unit-consumption) SQL statement. However, it is important to note that the RUs usage returned in `EXPLAIN ANALYZE` does not incorporate egress RUs, as egress usage is measured separately in the gateway, which is unknown to the TiDB server. -To get the RUs and storage used by your cluster, view the **Usage this month** pane on your cluster overview page. With your past resource usage data and real-time resource usage in this pane, you can track your cluster's resource consumption and estimate a reasonable spending limit. If the free quota cannot meet your requirement, you can upgrade to a [scalable cluster](/tidb-cloud/select-cluster-tier.md#scalable-cluster-plan) and edit the spending limit. For more information, see [TiDB Cloud Serverless usage quota](/tidb-cloud/select-cluster-tier.md#usage-quota). +To get the RUs and storage used by your cluster, view the **Usage this month** pane on your cluster overview page. With your past resource usage data and real-time resource usage in this pane, you can track your cluster's resource consumption and estimate a reasonable spending limit. If the free quota cannot meet your requirement, you can edit the spending limit for additional resources. For more information, see [{{{ .starter }}} usage quota](/tidb-cloud/select-cluster-tier.md#usage-quota). ### How can I optimize my workload to minimize the number of RUs consumed? 
Ensure that your queries have been carefully optimized for optimal performance by following the guidelines in [Optimizing SQL Performance](/develop/dev-guide-optimize-sql-overview.md). To identify the SQL statements that consume the most RUs, navigate to the [**Diagnosis**](/tidb-cloud/tune-performance.md#view-the-diagnosis-page) page of your cluster, and then check the **SQL Statements** tab, where you can observe SQL execution and view the top statements sorted by **Total RU** or **Mean RU**. For more information, see [Statement Analysis](/tidb-cloud/tune-performance.md#statement-analysis). In addition, minimizing the amount of egress traffic is also crucial for reducing RUs consumption. To achieve this, it is recommended to return only the necessary columns and rows in your query, which in turn helps reduce network egress traffic. This can be achieved by carefully selecting and filtering the columns and rows to be returned, thereby optimizing network utilization. -### How storage is metered for TiDB Cloud Serverless? +### How storage is metered for {{{ .starter }}}? -The storage is metered based on the amount of data stored in a TiDB Cloud Serverless cluster, measured in GiB per month. It is calculated by multiplying the total size of all the tables and indexes (excluding data compression or replicas) with the number of hours the data is stored in that month. +The storage is metered based on the amount of data stored in a {{{ .starter }}} cluster, measured in GiB per month. It is calculated by multiplying the total size of all the tables and indexes (excluding data compression or replicas) with the number of hours the data is stored in that month. ### Why does the storage usage size remain unchanged after dropping a table or database immediately? @@ -107,7 +126,7 @@ This is because TiDB retains dropped tables and databases for a certain period o ### Why are there RU consumptions when I'm not actively running any queries? 
-RU consumptions can occur in various scenarios. One common scenario is during background queries, such as synchronizing schema changes between TiDB instances. Another scenario is when certain web console features generate queries, like loading schemas. These processes use RUs even without explicit user triggers. +RU consumptions can occur in various scenarios. One common scenario is during background queries, including but not limited to synchronizing schema changes between TiDB instances, executing DDL jobs, refreshing privileges, refreshing SQL bindings, and refreshing global variables. Another scenario is when certain web console features generate queries, like loading schemas. These processes use RUs even without explicit user triggers. ### Why is there a spike in RU usage when my workload is steady? @@ -115,21 +134,21 @@ A spike in RU usage can occur due to necessary background jobs in TiDB. These jo ### What happens when my cluster exhausts its free quota or exceeds its spending limit? -Once a cluster reaches its free quota or spending limit, the cluster immediately denies any new connection attempts until the quota is increased or the usage is reset at the start of a new month. Existing connections established before reaching the quota will remain active but will experience throttling. For more information, see [TiDB Cloud Serverless Limitations and Quotas](/tidb-cloud/serverless-limitations.md#usage-quota). +Once a cluster reaches its free quota or spending limit, the cluster immediately denies any new connection attempts until the quota is increased or the usage is reset at the start of a new month. Existing connections established before reaching the quota will remain active but will experience throttling. For more information, see [{{{ .starter }}} Limitations and Quotas](/tidb-cloud/serverless-limitations.md#usage-quota). ### Why do I observe spikes in RU usage while importing data? 
-During the data import process of a TiDB Cloud Serverless cluster, RU consumption occurs only when the data is successfully imported, which leads to spikes in RU usage. +During the data import process of a {{{ .starter }}} cluster, RU consumption occurs only when the data is successfully imported, which leads to spikes in RU usage. -### What costs are involved when using columnar storage in TiDB Cloud Serverless? +### What costs are involved when using columnar storage in {{{ .starter }}}? -The pricing for columnar storage in TiDB Cloud Serverless is similar to that for row-based storage. When you use columnar storage, an additional replica is created to store your data (without indexes). The replication of data from row-based to columnar storage does not incur extra charges. +The pricing for columnar storage in {{{ .starter }}} is similar to that for row-based storage. When you use columnar storage, an additional replica is created to store your data (without indexes). The replication of data from row-based to columnar storage does not incur extra charges. -For detailed pricing information, see [TiDB Cloud Serverless pricing details](https://www.pingcap.com/tidb-serverless-pricing-details/). +For detailed pricing information, see [{{{ .starter }}} pricing details](https://www.pingcap.com/tidb-cloud-starter-pricing-details/). ### Is using columnar storage more expensive? -Columnar storage in TiDB Cloud Serverless incurs additional costs due to the extra replica, which requires more storage and resources for data replication. However, columnar storage becomes more cost-effective when running analytical queries. +Columnar storage in {{{ .starter }}} incurs additional costs due to the extra replica, which requires more storage and resources for data replication. However, columnar storage becomes more cost-effective when running analytical queries. 
According to the TPC-H benchmark test, the cost of running analytic queries on columnar storage is about one-third of the cost when using row-based storage. @@ -137,17 +156,17 @@ Therefore, while there might be an initial cost due to the extra replica, the re ## Security FAQs -### Is my TiDB Cloud Serverless shared or dedicated? +### Is my {{{ .starter }}} shared or dedicated? The serverless technology is designed for multi-tenancy and the resources used by all clusters are shared. To get managed TiDB service with isolated infrastructure and resources, you can upgrade it to [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated). -### How does TiDB Cloud Serverless ensure security? +### How does {{{ .starter }}} ensure security? -- Your connections are encrypted by Transport Layer Security (TLS). For more information about using TLS to connect to TiDB Cloud Serverless, see [TLS Connection to TiDB Cloud Serverless](/tidb-cloud/secure-connections-to-serverless-clusters.md). -- All persisted data on TiDB Cloud Serverless is encrypted-at-rest using the tool of the cloud provider that your cluster is running in. +- Your connections are encrypted by Transport Layer Security (TLS). For more information about using TLS to connect to {{{ .starter }}}, see [TLS Connection to {{{ .starter }}}](/tidb-cloud/secure-connections-to-serverless-clusters.md). +- All persisted data on {{{ .starter }}} is encrypted-at-rest using the tool of the cloud provider that your cluster is running in. ## Maintenance FAQ ### Can I upgrade the version of TiDB that my cluster is running on? -No. TiDB Cloud Serverless clusters are upgraded automatically as we roll out new TiDB versions on TiDB Cloud. You can see what version of TiDB your cluster is running in the [TiDB Cloud console](https://tidbcloud.com/project/clusters) or in the latest [release note](https://docs.pingcap.com/tidbcloud/tidb-cloud-release-notes). 
Alternatively, you can also connect to your cluster and use `SELECT version()` or `SELECT tidb_version()` to check the TiDB version. +No. {{{ .starter }}} clusters are upgraded automatically as we roll out new TiDB versions on TiDB Cloud. You can see what version of TiDB your cluster is running in the [TiDB Cloud console](https://tidbcloud.com/project/clusters) or in the latest [release note](https://docs.pingcap.com/tidbcloud/tidb-cloud-release-notes). Alternatively, you can also connect to your cluster and use `SELECT version()` or `SELECT tidb_version()` to check the TiDB version. diff --git a/tidb-cloud/serverless-high-availability.md b/tidb-cloud/serverless-high-availability.md index e288e9329f771..be95f68373a4a 100644 --- a/tidb-cloud/serverless-high-availability.md +++ b/tidb-cloud/serverless-high-availability.md @@ -1,31 +1,69 @@ --- -title: High Availability in TiDB Cloud Serverless -summary: Learn about the high availability architecture of TiDB Cloud Serverless. Discover Zonal and Regional High Availability options, automated backups, failover processes, and how TiDB ensures data durability and business continuity. +title: High Availability in TiDB Cloud +summary: Learn about the high availability architecture of TiDB Cloud. Discover Zonal and Regional High Availability options, automated backups, failover processes, and how TiDB ensures data durability and business continuity. --- -# High Availability in TiDB Cloud Serverless +# High Availability in TiDB Cloud -TiDB Cloud Serverless is designed with robust mechanisms to maintain high availability and data durability by default, preventing single points of failure and ensuring continuous service even in the face of disruptions. As a fully managed service based on the battle-tested TiDB Open Source product, it inherits TiDB's core high availability (HA) features and augments them with additional cloud-native capabilities. 
+TiDB Cloud is designed with robust mechanisms to maintain high availability and data durability by default, preventing single points of failure and ensuring continuous service even in the face of disruptions. As a fully managed service based on the battle-tested TiDB Open Source product, it inherits TiDB's core high availability (HA) features and augments them with additional cloud-native capabilities. + +> **Note:** +> +> - This document is only applicable for {{{ .starter }}}, {{{ .essential }}}, and {{{ .premium }}}. +> - For high availability in TiDB Cloud Dedicated, see [High Availability in TiDB Cloud Dedicated](/tidb-cloud/high-availability-with-multi-az.md). ## Overview TiDB ensures high availability and data durability using the Raft consensus algorithm. This algorithm consistently replicates data changes across multiple nodes, allowing TiDB to handle read and write requests even in the event of node failures or network partitions. This approach provides both high data durability and fault tolerance. -TiDB Cloud Serverless extends these capabilities with two types of high availability to meet different operational requirements: +TiDB Cloud extends these capabilities with zonal high availability and regional high availability to meet different operational requirements. -- **Zonal high availability (default)**: This option places all nodes within a single availability zone, reducing network latency. It ensures high availability without requiring application-level redundancy across zones, making it suitable for applications that prioritize low latency within a single zone. Zonal high availability is available in all regions that support TiDB Cloud Serverless. For more information, see [Zonal high availability architecture](#zonal-high-availability-architecture).
+ -- **Regional high availability (beta)**: This option distributes nodes across multiple availability zones, offering maximum infrastructure isolation and redundancy. It provides the highest level of availability but requires application-level redundancy across zones. It is recommended to choose this option if you need maximum availability protection against infrastructure failures within a zone. Note that it increases latency and might incur cross-zone data transfer fees. This feature is available in selected regions with multi-availability zone support and can only be enabled during cluster creation. For more information, see [Regional high availability architecture](#regional-high-availability-architecture). +> **Note:** +> +> - For {{{ .starter }}} clusters, only zonal high availability is enabled, and it is not configurable. +> - For {{{ .essential }}} clusters hosted in the AWS Tokyo (ap-northeast-1) region or any Alibaba Cloud region, regional high availability is enabled by default. You can change it to zonal high availability as needed during cluster creation. For {{{ .essential }}} clusters hosted in other regions, only zonal high availability is enabled, and it is not configurable. -## Zonal high availability architecture + + + > **Note:** > -> Zonal high availability is the default option and is available in all AWS regions that support TiDB Cloud Serverless. +> - For {{{ .starter }}} clusters, only zonal high availability is enabled, and it is not configurable. +> - For {{{ .premium }}} clusters, only regional high availability is enabled, and it is not configurable. +> - For {{{ .essential }}} clusters hosted in the AWS Tokyo (ap-northeast-1) region or any Alibaba Cloud region, regional high availability is enabled by default. You can change it to zonal high availability as needed during cluster creation. For {{{ .essential }}} clusters hosted in other regions, only zonal high availability is enabled, and it is not configurable. 
+ + + +- **Zonal high availability**: This option places all nodes within a single availability zone, reducing network latency. It ensures high availability without requiring application-level redundancy across zones, making it suitable for applications that prioritize low latency within a single zone. For more information, see [Zonal high availability architecture](#zonal-high-availability-architecture). + +- **Regional high availability (beta)**: This option distributes nodes across multiple availability zones, offering maximum infrastructure isolation and redundancy. It provides the highest level of availability but requires application-level redundancy across zones. It is recommended to choose this option if you need maximum availability protection against infrastructure failures within a zone. Note that it increases latency and might incur cross-zone data transfer fees. This feature is available in regions with more than three availability zones and can only be enabled during cluster creation. For more information, see [Regional high availability architecture](#regional-high-availability-architecture). + +## Zonal high availability architecture When you create a cluster with the default zonal high availability, all components, including Gateway, TiDB, TiKV, and TiFlash compute/write nodes, run in the same availability zone. The placement of these components in the data plane offer infrastructure redundancy with virtual machine pools, which minimizes failover time and network latency due to colocation. 
-![TiDB Cloud Serverless zonal high availability](/media/tidb-cloud/serverless-zonal-high-avaliability-aws.png) + + +- The following diagram shows the architecture of zonal high availability on AWS: + + ![zonal high availability on AWS](/media/tidb-cloud/zonal-high-avaliability-aws.png) + +- The following diagram shows the architecture of zonal high availability on Alibaba Cloud: + + ![zonal high availability on Alibaba Cloud](/media/tidb-cloud/zonal-high-avaliability-alibaba-cloud.png) + + + + + +The following diagram shows the architecture of zonal high availability on AWS: + +![zonal high availability on AWS](/media/tidb-cloud/zonal-high-avaliability-aws.png) + + In zonal high availability architecture: @@ -35,14 +73,28 @@ In zonal high availability architecture: ### Failover process -TiDB Cloud Serverless ensures a transparent failover process for your applications. During a failover: +TiDB Cloud ensures a transparent failover process for your applications. During a failover: + + - A new replica is created to replace the failed one. -- Servers providing storage services recover local caches from persisted data on Amazon S3, restoring the system to a consistent state with the replicas. +- Servers providing storage services recover local caches from persisted data on Amazon S3 or Alibaba Cloud OSS (depending on your cloud provider), restoring the system to a consistent state with the replicas. + +In the storage layer, persisted data is regularly pushed to Amazon S3 or Alibaba Cloud OSS (depending on your cloud provider) for high durability. Moreover, immediate updates are not only replicated across multiple TiKV servers but also stored on the EBS of each server, which further replicates the data for additional durability. TiDB automatically resolves issues by backing off and retrying in milliseconds, ensuring the failover process remains seamless for client applications. + + + + + +- A new replica is created to replace the failed one. 
+ +- Servers providing storage services recover local caches from persisted data on Amazon S3, restoring the system to a consistent state with the replicas. In the storage layer, persisted data is regularly pushed to Amazon S3 for high durability. Moreover, immediate updates are not only replicated across multiple TiKV servers but also stored on the EBS of each server, which further replicates the data for additional durability. TiDB automatically resolves issues by backing off and retrying in milliseconds, ensuring the failover process remains seamless for client applications. + + The gateway and computing layers are stateless, so failover involves restarting them elsewhere immediately. Applications should implement retry logic for their connections. While the zonal setup provides high availability, it cannot handle an entire zone failure. If the zone becomes unavailable, downtime will occur until the zone and its dependent services are restored. ## Regional high availability architecture @@ -51,10 +103,27 @@ When you create a cluster with regional high availability, critical OLTP (Online > **Note:** > -> - Regional high availability is currently in beta and only available in the AWS Tokyo (`ap-northeast-1`) region. -> - You can enable regional high availability only during cluster creation. +> Regional high availability is currently in beta.
+ + + +- The following diagram shows the architecture of regional high availability on AWS: + + ![regional high availability on AWS](/media/tidb-cloud/regional-high-avaliability-aws.png) + +- The following diagram shows the architecture of regional high availability on Alibaba Cloud: + + ![regional high availability on Alibaba Cloud](/media/tidb-cloud/regional-high-avaliability-alibaba-cloud.png) -![TiDB Cloud Serverless regional high availability](/media/tidb-cloud/serverless-regional-high-avaliability-aws.png) + + + + +The following diagram shows the architecture of regional high availability on AWS: + +![regional high availability](/media/tidb-cloud/regional-high-avaliability-aws.png) + + In regional high availability architecture: @@ -66,7 +135,7 @@ In regional high availability architecture: In the rare event of a primary zone failure scenario, which could be caused by a natural disaster, configuration change, software issue, or hardware failure, critical OLTP workload components, including Gateway and TiDB, are automatically launched in the standby availability zone. Traffic is automatically redirected to the standby zone to ensure swift recovery and maintain business continuity. -TiDB Cloud Serverless minimizes service disruption and ensures business continuity during a primary zone failure by performing the following actions: +TiDB Cloud minimizes service disruption and ensures business continuity during a primary zone failure by performing the following actions: - Automatically create new replicas of Gateway and TiDB in the standby availability zone. - Use the elastic load balancer to detect active gateway replicas in the standby availability zone and redirect OLTP traffic from the failed primary zone. @@ -79,23 +148,35 @@ Applications are unaffected by failures in non-primary zones and remain unaware Database backups are essential for business continuity and disaster recovery, helping to protect your data from corruption or accidental deletion. 
With backups, you can restore your database to a specific point in time within the retention period, minimizing data loss and downtime. -TiDB Cloud Serverless provides robust automated backup mechanisms to ensure continuous data protection: +TiDB Cloud provides robust automated backup mechanisms to ensure continuous data protection: - **Daily full backups**: A full backup of your database is created once a day, capturing the entire database state. - **Continuous transaction log backups**: Transaction logs are backed up continuously, approximately every 5 minutes, though the exact frequency depends on database activity. These automated backups enable you to restore your database either from a full backup or from a specific point in time by combining full backups with continuous transaction logs. This flexibility ensures that you can recover your database to a precise point just before an incident occurs. + + +> **Note:** +> +> Automatic backups, including snapshot-based and continuous backups for Point-in-Time Recovery (PITR), are performed on Amazon S3 or Alibaba Cloud OSS (depending on your cloud provider), which provides regional-level high durability. + + + + + > **Note:** > > Automatic backups, including snapshot-based and continuous backups for Point-in-Time Recovery (PITR), are performed on Amazon S3, which provides regional-level high durability. + + ## Impact on sessions during failures During a failure, ongoing transactions on the failed server might be interrupted. Although failover is transparent to applications, you must implement logic to handle recoverable failures during active transactions. Different failure scenarios are handled as follows: -- **TiDB failures**: If a TiDB instance fails, client connections are unaffected because TiDB Cloud Serverless automatically reroutes traffic through the gateway. 
While transactions on the failed TiDB instance might be interrupted, the system ensures that committed data is preserved, and new transactions are handled by another available TiDB instance. -- **Gateway failures**: If the Gateway fails, client connections are disrupted. However, TiDB Cloud Serverless gateways are stateless and can restart immediately in a new zone or server. Traffic is automatically redirected to the new gateway, minimizing downtime. +- **TiDB failures**: If a TiDB instance fails, client connections are unaffected because TiDB Cloud automatically reroutes traffic through the gateway. While transactions on the failed TiDB instance might be interrupted, the system ensures that committed data is preserved, and new transactions are handled by another available TiDB instance. +- **Gateway failures**: If the Gateway fails, client connections are disrupted. However, TiDB Cloud gateways are stateless and can restart immediately in a new zone or server. Traffic is automatically redirected to the new gateway, minimizing downtime. It is recommended to implement retry logic in your application to handle recoverable failures. For implementation details, refer to your driver or ORM documentation (for example, [JDBC](https://dev.mysql.com/doc/connector-j/en/connector-j-config-failover.html)). diff --git a/tidb-cloud/serverless-limitations.md b/tidb-cloud/serverless-limitations.md index 799eae047278f..81ce6a1b483bf 100644 --- a/tidb-cloud/serverless-limitations.md +++ b/tidb-cloud/serverless-limitations.md @@ -1,32 +1,37 @@ --- -title: TiDB Cloud Serverless Limitations and Quotas -summary: Learn about the limitations of TiDB Cloud Serverless. +title: Limitations and Quotas of {{{ .starter }}} and Essential +summary: Learn about the limitations of {{{ .starter }}} and Essential.
aliases: ['/tidbcloud/serverless-tier-limitations'] --- -# TiDB Cloud Serverless Limitations and Quotas +# Limitations and Quotas of {{{ .starter }}} and Essential -TiDB Cloud Serverless works with almost all workloads that TiDB supports, but there are some feature differences between TiDB Self-Managed or TiDB Cloud Dedicated clusters and TiDB Cloud Serverless clusters. This document describes the limitations of TiDB Cloud Serverless. +{{{ .starter }}} and Essential work with almost all workloads that TiDB supports, but there are some feature differences compared with TiDB Self-Managed or TiDB Cloud Dedicated clusters. This document describes the limitations of {{{ .starter }}} and {{{ .essential }}}. -We are constantly filling in the feature gaps between TiDB Cloud Serverless and TiDB Cloud Dedicated. If you require these features or capabilities in the gap, use [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) or [contact us](https://www.pingcap.com/contact-us/?from=en) for a feature request. +We are constantly filling in the feature gaps between {{{ .starter }}}/Essential and TiDB Cloud Dedicated. If you require these features or capabilities in the gap, use [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) or [contact us](https://www.pingcap.com/contact-us/?from=en) for a feature request. ## Limitations ### Audit logs -- [Database audit logging](/tidb-cloud/tidb-cloud-auditing.md) is currently unavailable. +- [Database audit logging](/tidb-cloud/essential-database-audit-logging.md) is currently unavailable for {{{ .starter }}} clusters. ### Connection -- Only [Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md) and [Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) can be used. You cannot use [VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md) to connect to TiDB Cloud Serverless clusters.  
-- No [IP Access list](/tidb-cloud/configure-ip-access-list.md) support. +- Only [Public Endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md) and [Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) can be used. You cannot use [VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md) to connect to {{{ .starter }}} or {{{ .essential }}} clusters. +- No [Firewall Rules](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md) support for Private Endpoint. +- Your database client connections might be terminated unexpectedly if they remain open for more than 30 minutes. This can occur when a TiDB server shuts down, restarts, or undergoes maintenance, potentially causing application disruptions. To avoid this issue, configure a maximum connection lifetime. It is recommended to start with 5 minutes and increase it gradually if it affects tail latency. For more information, see [Recommended settings for connection pools](/develop/dev-guide-connection-parameters.md). + +> **Note:** +> +> Due to a [limitation of AWS Global Accelerator](https://docs.aws.amazon.com/global-accelerator/latest/dg/introduction-how-it-works.html#about-idle-timeout), the idle timeout for a Public Endpoint connection on AWS is 340 seconds. For the same reason, you cannot use TCP keep-alive packets to keep the connection open. ### Encryption -- Data persisted in your TiDB Cloud Serverless cluster is encrypted using the encryption tool provided by the cloud provider that manages your cluster. For [scalable clusters](/tidb-cloud/select-cluster-tier.md#scalable-cluster-plan), an optional second layer of encryption is available during the cluster creation process, providing an additional level of security beyond the default encryption at rest. -- Using [customer-managed encryption keys (CMEK)](/tidb-cloud/tidb-cloud-encrypt-cmek.md) is currently unavailable. 
+- Data persisted in your {{{ .starter }}} or {{{ .essential }}} cluster is encrypted using the encryption tool provided by the cloud provider that manages your cluster. For {{{ .starter }}} (with spending limit > 0) and {{{ .essential }}} clusters, an optional second layer of encryption is available during the cluster creation process, providing an additional level of security beyond the default encryption at rest. +- Using [customer-managed encryption keys (CMEK)](/tidb-cloud/tidb-cloud-encrypt-cmek-aws.md) is currently unavailable. ### Maintenance window @@ -34,22 +39,22 @@ We are constantly filling in the feature gaps between TiDB Cloud Serverless and ### Monitoring and diagnosis -- [Third-party Monitoring integrations](/tidb-cloud/third-party-monitoring-integrations.md) are currently unavailable. +- [Third-party Monitoring integrations](/tidb-cloud/third-party-monitoring-integrations.md) are currently unavailable. - [Built-in Alerting](/tidb-cloud/monitor-built-in-alerting.md) is currently unavailable. - [Key Visualizer](/tidb-cloud/tune-performance.md#key-visualizer) is currently unavailable. ### Self-service upgrades -- TiDB Cloud Serverless is a fully managed deployment of TiDB. Major and minor version upgrades of TiDB Cloud Serverless are handled by TiDB Cloud and therefore cannot be initiated by users. +- {{{ .starter }}} and {{{ .essential }}} are fully managed deployments of TiDB. Major and minor version upgrades of {{{ .starter }}} and {{{ .essential }}} are handled by TiDB Cloud and therefore cannot be initiated by users. ### Stream data -- [Changefeed](/tidb-cloud/changefeed-overview.md) is not supported for TiDB Cloud Serverless currently. -- [Data Migration](/tidb-cloud/migrate-from-mysql-using-data-migration.md) is not supported for TiDB Cloud Serverless currently. +- [Changefeed](/tidb-cloud/changefeed-overview.md) is not supported for {{{ .starter }}} currently. 
+- [Data Migration](/tidb-cloud/migrate-from-mysql-using-data-migration.md) is not supported for {{{ .starter }}} currently. ### Time to live (TTL) -- In TiDB Cloud Serverless, the [`TTL_JOB_INTERVAL`](/time-to-live.md#ttl-job) attribute for a table is fixed at `15m` and cannot be modified. This means that TiDB Cloud Serverless schedules a background job every 15 minutes to clean up expired data. +- In {{{ .starter }}} and {{{ .essential }}}, the [`TTL_JOB_INTERVAL`](/time-to-live.md#ttl-job) attribute for a table is fixed at `15m` and cannot be modified. This means that {{{ .starter }}} and {{{ .essential }}} schedule a background job every 15 minutes to clean up expired data. ### Others @@ -58,20 +63,20 @@ We are constantly filling in the feature gaps between TiDB Cloud Serverless and ## Usage quota -For each organization in TiDB Cloud, you can create a maximum of five [free clusters](/tidb-cloud/select-cluster-tier.md#free-cluster-plan) by default. To create more TiDB Cloud Serverless clusters, you need to add a credit card and create [scalable clusters](/tidb-cloud/select-cluster-tier.md#scalable-cluster-plan) for the usage. +For each organization in TiDB Cloud, you can create a maximum of five [free {{{ .starter }}} clusters](/tidb-cloud/select-cluster-tier.md#starter) by default. To create more {{{ .starter }}} clusters, you need to add a credit card and [set a monthly spending limit](/tidb-cloud/manage-serverless-spend-limit.md) for the usage. 
-For the first five TiDB Cloud Serverless clusters in your organization, whether they are free or scalable, TiDB Cloud provides a free usage quota for each of them as follows: +For the first five {{{ .starter }}} clusters in your organization, TiDB Cloud provides a free usage quota for each of them as follows: - Row-based storage: 5 GiB - Columnar storage: 5 GiB -- [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit): 50 million RUs per month +- [Request Units (RUs)](/tidb-cloud/tidb-cloud-glossary.md#request-unit-ru): 50 million RUs per month -The Request Unit (RU) is a unit of measurement used to track the resource consumption of a query or transaction. It is a metric that allows you to estimate the computational resources required to process a specific request in the database. The request unit is also the billing unit for TiDB Cloud Serverless service. +The Request Unit (RU) is a unit of measurement used to track the resource consumption of a query or transaction. It is a metric that allows you to estimate the computational resources required to process a specific request in the database. The request unit is also the billing unit for {{{ .starter }}} service. Once a cluster reaches its usage quota, it immediately denies any new connection attempts until you [increase the quota](/tidb-cloud/manage-serverless-spend-limit.md#update-spending-limit) or the usage is reset upon the start of a new month. Existing connections established before reaching the quota will remain active but will experience throttling. -To learn more about the RU consumption of different resources (including read, write, SQL CPU, and network egress), the pricing details, and the throttled information, see [TiDB Cloud Serverless Pricing Details](https://www.pingcap.com/tidb-cloud-serverless-pricing-details). 
+To learn more about the RU consumption of different resources (including read, write, SQL CPU, and network egress), the pricing details, and the throttled information, see [{{{ .starter }}} Pricing Details](https://www.pingcap.com/tidb-cloud-starter-pricing-details/). -If you want to create a TiDB Cloud Serverless cluster with an additional quota, you can select the scalable cluster plan and edit the spending limit on the cluster creation page. For more information, see [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md). +If you want to create a {{{ .starter }}} cluster with an additional quota, you can set the monthly spending limit on the cluster creation page. For more information, see [Create a {{{ .starter }}} cluster](/tidb-cloud/create-tidb-cluster-serverless.md). -After creating a TiDB Cloud Serverless cluster, you can still check and edit the spending limit on your cluster overview page. For more information, see [Manage Spending Limit for TiDB Cloud Serverless Clusters](/tidb-cloud/manage-serverless-spend-limit.md). +After creating a {{{ .starter }}} cluster, you can still check and edit the spending limit on your cluster overview page. For more information, see [Manage Spending Limit for {{{ .starter }}} Clusters](/tidb-cloud/manage-serverless-spend-limit.md). diff --git a/tidb-cloud/serverless-private-link-connection-to-alicloud-rds.md b/tidb-cloud/serverless-private-link-connection-to-alicloud-rds.md new file mode 100644 index 0000000000000..fbb6a9c393493 --- /dev/null +++ b/tidb-cloud/serverless-private-link-connection-to-alicloud-rds.md @@ -0,0 +1,93 @@ +--- +title: Connect to Alibaba Cloud ApsaraDB RDS for MySQL via a Private Link Connection +summary: Learn how to connect to an Alibaba Cloud ApsaraDB RDS for MySQL instance using an Alibaba Cloud Endpoint Service private link connection. 
+--- + +# Connect to Alibaba Cloud ApsaraDB RDS for MySQL via a Private Link Connection + +This document describes how to connect a {{{ .essential }}} cluster to an [Alibaba Cloud ApsaraDB RDS for MySQL](https://www.alibabacloud.com/en/product/apsaradb-for-rds-mysql) instance using an [Alibaba Cloud Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md). + +## Prerequisites + +- You have an existing ApsaraDB RDS for MySQL instance or the permissions required to create one. + +- Verify that your account has the following permissions to manage networking components: + + - Manage load balancer + - Manage endpoint services + +- Your {{{ .essential }}} cluster is on Alibaba Cloud, and it is active. Retrieve and save the following details for later use: + + - Alibaba Cloud account ID + - Availability Zones (AZ) + +To view the Alibaba Cloud account ID and availability zones, do the following: + +1. In the [TiDB Cloud console](https://tidbcloud.com), navigate to the cluster overview page of the TiDB cluster, and then click **Settings** > **Networking** in the left navigation pane. +2. In the **Private Link Connection For Dataflow** area, click **Create Private Link Connection**. +3. In the displayed dialog, you can find the Alibaba Cloud account ID and availability zones. + +## Step 1. Set up an ApsaraDB RDS for MySQL instance + +Identify an Alibaba Cloud ApsaraDB RDS for MySQL that you want to use, or [create a new RDS](https://www.alibabacloud.com/help/en/rds/apsaradb-rds-for-mysql/step-1-create-an-apsaradb-rds-for-mysql-instance-and-configure-databases). + +Your ApsaraDB RDS for MySQL instance must meet the following requirements: + +- Region match: the instance must reside in the same Alibaba Cloud region as your {{{ .essential }}} cluster. +- AZ (Availability Zone) availability: the availability zones must overlap with those of your {{{ .essential }}} cluster. 
+- Network accessibility: the instance must be configured with a proper IP allowlist and be accessible within the VPC. + +> **Note** +> +> Cross-region connections for ApsaraDB RDS for MySQL are not supported. + +## Step 2. Expose the ApsaraDB RDS for MySQL instance as an endpoint service + +You need to set up the load balancer and the endpoint service in the Alibaba Cloud console. + +### Step 2.1. Set up the load balancer + +Set up the load balancer in the same region as your ApsaraDB RDS for MySQL as follows: + +1. Go to [Server Groups](https://slb.console.alibabacloud.com/nlb/ap-southeast-1/server-groups) to create a server group. Provide the following information: + + - **Server Group Type**: select `IP` + - **VPC**: enter the VPC where your ApsaraDB RDS for MySQL is located + - **Backend Server Protocol**: select `TCP` + +2. Click the created server group to add backend servers, and then add the IP address of your ApsaraDB RDS for MySQL instance. + + You can ping the RDS endpoint to get the IP address. + +3. Go to [NLB](https://slb.console.alibabacloud.com/nlb) to create a network load balancer. Provide the following information: + + - **Network Type**: select `Internal-facing` + - **VPC**: select the VPC where your ApsaraDB RDS for MySQL is located + - **Zone**: it must overlap with your {{{ .essential }}} cluster + - **IP Version**: select `IPv4` + +4. Find the load balancer you created, and then click **Create Listener**. Provide the following information: + + - **Listener Protocol**: select `TCP` + - **Listener Port**: enter the database port, for example, `3306` for MySQL + - **Server Group**: choose the server group you created in the previous step + +### Step 2.2. Set up an endpoint service + +To set up the endpoint service in the same region as your ApsaraDB RDS for MySQL, take the following steps: + +1. Go to [Endpoint Service](https://vpc.console.alibabacloud.com/endpointservice) to create an endpoint service. 
Provide the following information: + + - **Service Resource Type**: select `NLB` + - **Select Service Resource**: select all zones that the NLB is in, and choose the NLB that you created in the previous step + - **Automatically Accept Endpoint Connections**: it is recommended to choose `No` + +2. Go to the details page of the endpoint service, and copy the **Endpoint Service Name**, for example, `com.aliyuncs.privatelink..xxxxx`. You need to use it for TiDB Cloud later. + +3. On the details page of the endpoint service, click the **Service Whitelist** tab, click **Add to Whitelist**, and then enter the Alibaba Cloud account ID that you obtained in [Prerequisites](#prerequisites) + +## Step 3. Create a private link connection in TiDB Cloud + +You can create a private link connection using the TiDB Cloud console or the TiDB Cloud CLI. + +For more information, see [Create an Alibaba Cloud Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md#create-an-alibaba-cloud-endpoint-service-private-link-connection). diff --git a/tidb-cloud/serverless-private-link-connection-to-aws-confluent.md b/tidb-cloud/serverless-private-link-connection-to-aws-confluent.md new file mode 100644 index 0000000000000..77d2da048d9de --- /dev/null +++ b/tidb-cloud/serverless-private-link-connection-to-aws-confluent.md @@ -0,0 +1,74 @@ +--- +title: Connect to Confluent Cloud on AWS via a Private Link Connection +summary: Learn how to connect to a Confluent Cloud Dedicated cluster on AWS using an AWS Endpoint Service private link connection. +--- + +# Connect to Confluent Cloud on AWS via a Private Link Connection + +This document describes how to connect a {{{ .essential }}} cluster to a [Confluent Cloud Dedicated cluster](https://docs.confluent.io/cloud/current/clusters/cluster-types.html) on AWS using an [AWS Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md). 
+ +> **Note** +> +> Among all Confluent Cloud cluster types on AWS, only Confluent Cloud Dedicated clusters support private link connections. + +## Prerequisites + +- You have a [Confluent Cloud](https://confluent.cloud/) account. + +- Your {{{ .essential }}} is hosted on AWS, and it is active. Retrieve and save the following details for later use: + + - AWS Account ID + - Availability Zones (AZ) + +To view the AWS account ID and availability zones, do the following: + +1. In the [TiDB Cloud console](https://tidbcloud.com), navigate to the cluster overview page of the TiDB cluster, and then click **Settings** > **Networking** in the left navigation pane. +2. In the **Private Link Connection For Dataflow** area, click **Create Private Link Connection**. +3. In the displayed dialog, you can find the AWS account ID and availability zones. + +## Step 1. Set up a Confluent Cloud network + +Identify a Confluent Cloud network that you want to use, or [create a new Confluent Cloud network on AWS](https://docs.confluent.io/cloud/current/networking/ccloud-network/aws.html#create-ccloud-network-aws). + +The Confluent Cloud network must meet the following requirements: + +- Type: the network must be a **PrivateLink** network. +- Region match: the network must reside in the same AWS region as your {{{ .essential }}} cluster. +- AZ (Availability Zone) availability: the availability zones of the network must overlap with those of your {{{ .essential }}} cluster. + +To get the unique name of the Confluent Cloud network, take the following steps: + +1. In the [Confluent Cloud Console](https://confluent.cloud/), navigate to the [**Environments**](https://confluent.cloud/environments) page, and then click the environment where your Confluent Cloud network is located. +2. Click **Network management** and choose **For dedicated clusters** to find the network you created. +3. Go to the **Network overview** page to obtain the DNS subdomain of the Confluent Cloud network. +4. 
Extract the unique name of your Confluent Cloud network from the DNS subdomain. For example, if the DNS subdomain is `use1-az1.domnprzqrog.us-east-1.aws.confluent.cloud`, then the unique name is `domnprzqrog.us-east-1`. +5. Save the unique name for later use. + +## Step 2. Add a PrivateLink Access to the network + +Add a PrivateLink Access to the network you identified or set up in [Step 1](#step-1-set-up-a-confluent-cloud-network). For more information, see [Add a PrivateLink Access in Confluent Cloud](https://docs.confluent.io/cloud/current/networking/private-links/aws-privatelink.html#add-a-privatelink-access-in-ccloud). + +During the process, you need to: + +- Provide the TiDB Cloud AWS account ID that you obtain in [Prerequisites](#prerequisites). +- Save the `VPC Service Endpoint` provided by Confluent Cloud for later use, usually in the `com.amazonaws.vpce..vpce-svc-xxxxxxxxxxxxxxxxx` format. + +## Step 3. Create a Confluent Cloud Dedicated cluster under the network + +Create a Confluent Cloud Dedicated cluster under the existing network you set up in [Step 1](#step-1-set-up-a-confluent-cloud-network). For more information, see [Create a dedicated cluster in Confluent Cloud](https://docs.confluent.io/cloud/current/clusters/create-cluster.html#create-ak-clusters). + +## Step 4. Create a private link connection in TiDB Cloud + +To create a private link connection in TiDB Cloud, do the following: + +1. Create a private link connection in TiDB Cloud using the `VPC Service Endpoint` from Confluent Cloud. + + For more information, see [Create an AWS Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md#create-an-aws-endpoint-service-private-link-connection). + + > **Note:** + > + > For Confluent Cloud Dedicated clusters on AWS, you do not need to go to the detail page of your endpoint service on the AWS console to manually accept the endpoint connection request from TiDB Cloud. Confluent Cloud processes it automatically. + +2. 
Attach the Confluent Cloud service domains to the private link connection so that dataflow services in TiDB Cloud can access the Confluent cluster. + + For more information, see [Attach domains to a private link connection](/tidb-cloud/serverless-private-link-connection.md#attach-domains-to-a-private-link-connection). diff --git a/tidb-cloud/serverless-private-link-connection-to-aws-rds.md b/tidb-cloud/serverless-private-link-connection-to-aws-rds.md new file mode 100644 index 0000000000000..efb618239a566 --- /dev/null +++ b/tidb-cloud/serverless-private-link-connection-to-aws-rds.md @@ -0,0 +1,112 @@ +--- +title: Connect to Amazon RDS via a Private Link Connection +summary: Learn how to connect to an Amazon RDS instance using an AWS Endpoint Service private link connection. +--- + +# Connect to Amazon RDS via a Private Link Connection + +This document describes how to connect a {{{ .essential }}} cluster to an [Amazon RDS](https://aws.amazon.com/rds/) instance using an [AWS Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md). + +## Prerequisites + +- You have an existing Amazon RDS instance or the permissions required to create one. + +- Your account has the following permissions to manage networking components: + + - Manage security groups + - Manage load balancer + - Manage endpoint services + +- Your {{{ .essential }}} is hosted on AWS, and it is active. Retrieve and save the following details for later use: + + - AWS Account ID + - Availability Zones (AZ) + +To view the AWS account ID and availability zones, do the following: + +1. In the [TiDB Cloud console](https://tidbcloud.com), navigate to the cluster overview page of the TiDB cluster, and then click **Settings** > **Networking** in the left navigation pane. +2. In the **Private Link Connection For Dataflow** area, click **Create Private Link Connection**. +3. In the displayed dialog, you can find the AWS account ID and availability zones. + +## Step 1. 
Set up the Amazon RDS instance + +Identify an Amazon RDS instance to use, or [create a new one](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CreateDBInstance.html). + +The Amazon RDS instance must meet the following requirements: + +- Region match: the instance must reside in the same AWS region as your {{{ .essential }}} cluster. +- The [subnet group](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html#USER_VPC.Subnets) of your Amazon RDS instance must have availability zones that overlap with those of your {{{ .essential }}} cluster. +- Set your Amazon RDS instance with a proper security group, and ensure that it is accessible within the VPC. For example, you can create a security group with the following rules: + + - An inbound rule that allows MySQL/Aurora: + - Type: `MySQL/Aurora` + - Source: `Anywhere-IPv4` + + - An outbound rule that allows MySQL/Aurora: + - Type: `MySQL/Aurora` + - Destination: `Anywhere-IPv4` + +> **Note** +> +> To connect to a cross-region RDS instance, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). + +## Step 2. Expose the Amazon RDS instance as an endpoint service + +You need to set up the load balancer and the AWS Endpoint Service in the AWS console. + +### Step 2.1. Set up the load balancer + +To set up the load balancer in the same region as your RDS, take the following steps: + +1. Go to [Target groups](https://console.aws.amazon.com/ec2/home#CreateTargetGroup) to create a target group. Provide the following information: + + - **Target type**: select `IP addresses`. + - **Protocol and Port**: set the protocol to `TCP` and the port to your database port, for example `3306` for MySQL. + - **IP address type**: select `IPv4`. + - **VPC**: select the VPC where your RDS is located. + - **Register targets**: register the IP addresses of your Amazon RDS instance. You can ping the RDS endpoint to get the IP address. 
+ + For more information, see [Create a target group for your Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-target-group.html). + +2. Go to [Load balancers](https://console.aws.amazon.com/ec2/home#LoadBalancers) to create a network load balancer. Provide the following information: + + - **Schema**: select `Internal` + - **Load balancer IP address type**: select `IPv4` + - **VPC**: select the VPC where your RDS is located + - **Availability Zones**: select the availability zones that overlap with your {{{ .essential }}} cluster + - **Security groups**: create a new security group with the following rules: + - An inbound rule that allows MySQL/Aurora: + - Type: `MySQL/Aurora` + - Source: `Anywhere-IPv4` + + - An outbound rule that allows MySQL/Aurora: + - Type: `MySQL/Aurora` + - Destination: `Anywhere-IPv4` + + - **Listeners and routing**: + - **Protocol and Port**: set the protocol to `TCP` and the port to your database port, for example `3306` for MySQL + - **Target group**: select the target group that you created in the previous step + + For more information, see [Create a Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-network-load-balancer.html). + +### Step 2.2. Set up the AWS Endpoint Service + +To set up the endpoint service in the same region as your RDS, take the following steps: + +1. Go to [Endpoint services](https://console.aws.amazon.com/vpcconsole/home#EndpointServices) to create an endpoint service. Provide the following information: + + - **Load balancer type**: select `Network` + - **Available load balancers**: enter the load balancer you created in the previous step + - **Supported Regions**: leave it empty if you do not have cross-region requirements + - **Require acceptance for endpoint**: it is recommended to select `Acceptance required` + - **Supported IP address types**: select `IPv4` + +2. 
Go to the details page of the endpoint service, and then copy the endpoint service name, in the format of `com.amazonaws.vpce.<region>.vpce-svc-xxxxxxxxxxxxxxxxx`. You need to provide it to TiDB Cloud. + +3. On the details page of the endpoint service, click the **Allow principals** tab, and then add the AWS account ID that you obtained in [Prerequisites](#prerequisites) to the allowlist, for example, `arn:aws:iam::<account_id>:root`. + +## Step 3. Create an AWS Endpoint Service private link connection in TiDB Cloud + +You can create a private link connection using the TiDB Cloud console or the TiDB Cloud CLI. + +For more information, see [Create an AWS Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md#create-an-aws-endpoint-service-private-link-connection). diff --git a/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-alicloud.md b/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-alicloud.md new file mode 100644 index 0000000000000..64539e2b7d0a8 100644 --- /dev/null +++ b/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-alicloud.md @@ -0,0 +1,672 @@ +--- +title: Connect to Alibaba Cloud Self-Hosted Kafka via Private Link Connection +summary: Learn how to connect to an Alibaba Cloud self-hosted Kafka using an Alibaba Cloud Endpoint Service private link connection. +--- + +# Connect to Alibaba Cloud Self-Hosted Kafka via a Private Link Connection + +This document describes how to connect a {{{ .essential }}} cluster to a self-hosted Kafka cluster in Alibaba Cloud, using an [Alibaba Cloud Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md). + +The mechanism works as follows: + +1. The private link connection connects to your Alibaba Cloud endpoint service using the bootstrap port, which returns broker external addresses defined in `advertised.listeners`. +2. The private link connection connects to your endpoint service with broker external addresses. 
+3. The Alibaba Cloud endpoint service forwards requests to your load balancers. +4. Load balancers forward requests to the corresponding Kafka brokers based on port mapping. + +For example, the port mapping is as follows: + +| Broker external address port | Load balancer listener port | Load balancer backend server | +|----------------------------|------------------------------|-------------| +| 9093 | 9093 | broker-node1:39092| +| 9094 | 9094 | broker-node2:39092| +| 9095 | 9095 | broker-node3:39092| + +## Prerequisites + +- Ensure that you have a Kafka cluster or have the following permissions to set up one. + + - Manage ECS nodes + - Manage VPC and vSwitch + - Connect to ECS nodes to configure Kafka nodes + +- Ensure that you have the following permissions to set up a load balancer and endpoint service in your Alibaba Cloud account. + + - Manage load balancers + - Manage endpoint services + +- Your {{{ .essential }}} is hosted on Alibaba Cloud, and it is active. Retrieve and save the following details for later use: + + - Alibaba Cloud account ID + - Availability Zones (AZ) + +To view the Alibaba Cloud account ID and availability zones, do the following: + +1. In the [TiDB Cloud console](https://tidbcloud.com), navigate to the cluster overview page of the TiDB cluster, and then click **Settings** > **Networking** in the left navigation pane. +2. In the **Private Link Connection For Dataflow** area, click **Create Private Link Connection**. +3. In the displayed dialog, you can find the Alibaba Cloud account ID and availability zones. + +The following table shows an example of the deployment information. + +| Information | Value | Note | +|--------|-----------------|---------------------------| +| Region | `ap-southeast-1` | N/A | +| TiDB Cloud Alibaba Cloud account | `` | N/A | +| AZ IDs |
<ul><li>`ap-southeast-1a`</li><li>`ap-southeast-1b`</li><li>`ap-southeast-1c`</li></ul>
| N/A | +| Kafka Advertised Listener Pattern | <broker_id>.unique_name.alicloud.plc.tidbcloud.com:<port>| `unique_name` is a placeholder and will be replaced with the actual value in [Step 4](#step-4-replace-the-unique-name-placeholder-in-kafka-configuration) | + +## Step 1. Set up a Kafka cluster + +If you need to deploy a new cluster, follow the instructions in [Deploy a new Kafka cluster](#deploy-a-new-kafka-cluster). + +If you need to expose an existing cluster, follow the instructions in [Reconfigure a running Kafka cluster](#reconfigure-a-running-kafka-cluster). + +### Deploy a new Kafka cluster + +#### 1. Set up the Kafka VPC + +The Kafka VPC requires the following: + +- Three private vSwitches for brokers, one for each AZ. +- One public vSwitch in any AZ with a bastion node that can connect to the internet and three private vSwitches, which makes it easy to set up the Kafka cluster. In a production environment, you might have your own bastion node that can connect to the Kafka VPC. + +Take the following steps to create the Kafka VPC. + +**1.1. Create the Kafka VPC** + +1. Go to [Alibaba Cloud Console > VPC dashboard](https://vpc.console.alibabacloud.com/vpc), and switch to the region in which you want to deploy Kafka. + +2. Click **Create VPC**. Fill in the information on the **VPC settings** page as follows. + + 1. Enter **Name**, for example, `Kafka VPC`. + 2. Select the region in which you want to set up the private link connection in TiDB Cloud. + 3. Select **Manually enter an IPv4 CIDR block**, and enter the IPv4 CIDR, for example, `10.0.0.0/16`. + 4. Create a vSwitch and configure IPv4 CIDR for each AZ in which you want to deploy Kafka brokers. For example: + + - broker-ap-southeast-1a vSwitch in `ap-southeast-1a`: 10.0.0.0/18 + - broker-ap-southeast-1b vSwitch in `ap-southeast-1b`: 10.0.64.0/18 + - broker-ap-southeast-1c vSwitch in `ap-southeast-1c`: 10.0.128.0/18 + - bastion vSwitch in `ap-southeast-1a`: 10.0.192.0/18 + + 5. 
Use the default values for other options. Click **Ok**. + +3. On the VPC detail page, take note of the VPC ID, for example, `vpc-t4nfx2vcqazc862e9fg06`. + +#### 2. Set up Kafka brokers + +**2.1. Create a bastion node** + +Go to the [ECS console](https://ecs.console.alibabacloud.com/home#/). Create the bastion node in the bastion vSwitch. + +- **Network and Zone**: `Kafka VPC` and `bastion` vSwitch. +- **Instance and Image**: `ecs.t5-lc1m2.small` instance type and `Alibaba Cloud Linux` image. +- **Network and Security Groups**: select `Assign Public IPv4 Address`. +- **Key pair**: `kafka-vpc-key-pair`. Create a new key pair named `kafka-vpc-key-pair`. Download `kafka-vpc-key-pair.pem` to your local machine for later configuration. +- **Security Group**: create a new security group to allow SSH login from anywhere. You can narrow the rule for safety in the production environment. +- **Instance Name**: `bastion-node`. + +**2.2. Create broker nodes** + +Go to the [ECS console](https://ecs.console.alibabacloud.com/home#/). Create three broker nodes in vSwitches, one for each AZ. + +- Broker 1 in vSwitch `broker-ap-southeast-1a` + + - **Network and Zone**: `Kafka VPC` and `broker-ap-southeast-1a` vSwitch + - **Instance and Image**: `ecs.t5-lc1m2.small` instance type and `Alibaba Cloud Linux` image + - **Key pair**: reuse `kafka-vpc-key-pair`. + - **Instance Name**: `broker-node1` + - **Security Group**: create a new security group to allow all TCP from Kafka VPC. You can narrow the rule for safety in the production environment. Inbound rule: + - **Protocol**: `TCP` + - **Port range**: `All` + - **Source**: `10.0.0.0/16` + +- Broker 2 in vSwitch `broker-ap-southeast-1b` + + - **Network and Zone**: `Kafka VPC` and `broker-ap-southeast-1b` vSwitch + - **Instance and Image**: `ecs.t5-lc1m2.small` instance type and `Alibaba Cloud Linux` image + - **Key pair**: reuse `kafka-vpc-key-pair`. 
+ - **Instance Name**: `broker-node2` + - **Security Group**: create a new security group to allow all TCP from Kafka VPC. You can narrow the rule for safety in the production environment. Inbound rule: + - **Protocol**: `TCP` + - **Port range**: `All` + - **Source**: `10.0.0.0/16` + +- Broker 3 in vSwitch `broker-ap-southeast-1c` + + - **Network and Zone**: `Kafka VPC` and `broker-ap-southeast-1c` vSwitch + - **Instance and Image**: `ecs.t5-lc1m2.small` instance type and `Alibaba Cloud Linux` image + - **Key pair**: reuse `kafka-vpc-key-pair`. + - **Instance Name**: `broker-node3` + - **Security Group**: create a new security group to allow all TCP from Kafka VPC. You can narrow the rule for safety in the production environment. Inbound rule: + - **Protocol**: `TCP` + - **Port range**: `All` + - **Source**: `10.0.0.0/16` + +**2.3. Prepare Kafka runtime binaries** + +1. Go to the detail page of the bastion node. Get the **Public IPv4 address**. Use SSH to log in to the node with the previously downloaded `kafka-vpc-key-pair.pem`. + + ```shell + chmod 400 kafka-vpc-key-pair.pem + scp -i "kafka-vpc-key-pair.pem" kafka-vpc-key-pair.pem root@{bastion_public_ip}:~/ # replace {bastion_public_ip} with the IP address of your bastion node + ssh -i "kafka-vpc-key-pair.pem" root@{bastion_public_ip} + ``` + +2. Download binaries to the bastion node. + + ```shell + # Download Kafka and OpenJDK, and then extract the files. You can choose the binary version based on your preference. + wget https://archive.apache.org/dist/kafka/3.7.1/kafka_2.13-3.7.1.tgz + tar -zxf kafka_2.13-3.7.1.tgz + wget https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_linux-x64_bin.tar.gz + tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz + ``` + +3. Copy binaries to each broker node from the bastion node. 
+ + ```shell + # Replace {broker-node1-ip} with your broker-node1 IP address + scp -i "kafka-vpc-key-pair.pem" kafka_2.13-3.7.1.tgz root@{broker-node1-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" root@{broker-node1-ip} "tar -zxf kafka_2.13-3.7.1.tgz" + scp -i "kafka-vpc-key-pair.pem" openjdk-22.0.2_linux-x64_bin.tar.gz root@{broker-node1-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" root@{broker-node1-ip} "tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz" + + # Replace {broker-node2-ip} with your broker-node2 IP address + scp -i "kafka-vpc-key-pair.pem" kafka_2.13-3.7.1.tgz root@{broker-node2-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" root@{broker-node2-ip} "tar -zxf kafka_2.13-3.7.1.tgz" + scp -i "kafka-vpc-key-pair.pem" openjdk-22.0.2_linux-x64_bin.tar.gz root@{broker-node2-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" root@{broker-node2-ip} "tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz" + + # Replace {broker-node3-ip} with your broker-node3 IP address + scp -i "kafka-vpc-key-pair.pem" kafka_2.13-3.7.1.tgz root@{broker-node3-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" root@{broker-node3-ip} "tar -zxf kafka_2.13-3.7.1.tgz" + scp -i "kafka-vpc-key-pair.pem" openjdk-22.0.2_linux-x64_bin.tar.gz root@{broker-node3-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" root@{broker-node3-ip} "tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz" + ``` + +**2.4. Set up Kafka nodes on each broker node** + +**2.4.1 Set up a KRaft Kafka cluster with three nodes** + +Each node will act as a broker and controller role. Do the following for each broker: + +1. For the `listeners` item, all three brokers are the same and act as broker and controller roles: + + 1. Configure the same CONTROLLER listener for all **controller** role nodes. If you only want to add the **broker** role nodes, you do not need the CONTROLLER listener in `server.properties`. + 2. Configure two **broker** listeners, `INTERNAL` for internal access and `EXTERNAL` for external access from TiDB Cloud. + +2. 
For the `advertised.listeners` item, do the following: + + 1. Configure an INTERNAL advertised listener for every broker with the internal IP of the broker node. Advertised internal Kafka clients use this address to visit the broker. + 2. Configure an EXTERNAL advertised listener based on **Kafka Advertised Listener Pattern** you get from TiDB Cloud for each broker node to help TiDB Cloud differentiate between different brokers. Different EXTERNAL advertised listeners help the Kafka client from TiDB Cloud route requests to the right broker. + + - `` differentiates brokers from Kafka Private Link Service access points. Plan a port range for EXTERNAL advertised listeners of all brokers. These ports do not have to be actual ports listened to by brokers. They are ports listened to by the load balancer for Private Link Service that will forward requests to different brokers. + - `AZ ID` in **Kafka Advertised Listener Pattern** indicates where the broker is deployed. TiDB Cloud will route requests to different endpoint DNS names based on the AZ ID. + + It is recommended to configure different broker IDs for different brokers to make it easy for troubleshooting. + +3. The planning values are as follows: + + - **CONTROLLER port**: `29092` + - **INTERNAL port**: `9092` + - **EXTERNAL**: `39092` + - **EXTERNAL advertised listener ports range**: `9093~9095` + +**2.4.2. Create a configuration file** + +Use SSH to log in to every broker node. Create a configuration file `~/config/server.properties` with the following content. + +```properties +# brokers in ap-southeast-1a + +# broker-node1 ~/config/server.properties +# 1. Replace {broker-node1-ip}, {broker-node2-ip}, {broker-node3-ip} with the actual IP addresses. +# 2. Configure EXTERNAL in "advertised.listeners" based on the "Kafka Advertised Listener Pattern" in the "Prerequisites" section. +# 2.1 The pattern is ".unique_name.alicloud.plc.tidbcloud.com:". 
+# 2.2 If there are more broker role nodes, you can configure them in the same way. +process.roles=broker,controller +node.id=1 +controller.quorum.voters=1@{broker-node1-ip}:29092,2@{broker-node2-ip}:29092,3@{broker-node3-ip}:29092 +listeners=INTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29092,EXTERNAL://0.0.0.0:39092 +inter.broker.listener.name=INTERNAL +advertised.listeners=INTERNAL://{broker-node1-ip}:9092,EXTERNAL://b1.unique_name.alicloud.plc.tidbcloud.com:9093 +controller.listener.names=CONTROLLER +listener.security.protocol.map=INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL +log.dirs=./data +``` + +```properties +# brokers in ap-southeast-1b + +# broker-node2 ~/config/server.properties +# 1. Replace {broker-node1-ip}, {broker-node2-ip}, {broker-node3-ip} with the actual IP addresses. +# 2. Configure EXTERNAL in "advertised.listeners" based on the "Kafka Advertised Listener Pattern" in the "Prerequisites" section. +# 2.1 The pattern is ".unique_name.alicloud.plc.tidbcloud.com:". +# 2.2 If there are more broker role nodes, you can configure them in the same way. +process.roles=broker,controller +node.id=2 +controller.quorum.voters=1@{broker-node1-ip}:29092,2@{broker-node2-ip}:29092,3@{broker-node3-ip}:29092 +listeners=INTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29092,EXTERNAL://0.0.0.0:39092 +inter.broker.listener.name=INTERNAL +advertised.listeners=INTERNAL://{broker-node2-ip}:9092,EXTERNAL://b2.unique_name.alicloud.plc.tidbcloud.com:9094 +controller.listener.names=CONTROLLER +listener.security.protocol.map=INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL +log.dirs=./data +``` + +```properties +# brokers in ap-southeast-1c + +# broker-node3 ~/config/server.properties +# 1. Replace {broker-node1-ip}, {broker-node2-ip}, {broker-node3-ip} with the actual IP addresses. +# 2. 
Configure EXTERNAL in "advertised.listeners" based on the "Kafka Advertised Listener Pattern" in the "Prerequisites" section. +# 2.1 The pattern is "<broker_id>.unique_name.alicloud.plc.tidbcloud.com:<port>". +# 2.2 If there are more broker role nodes, you can configure them in the same way. +process.roles=broker,controller +node.id=3 +controller.quorum.voters=1@{broker-node1-ip}:29092,2@{broker-node2-ip}:29092,3@{broker-node3-ip}:29092 +listeners=INTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29092,EXTERNAL://0.0.0.0:39092 +inter.broker.listener.name=INTERNAL +advertised.listeners=INTERNAL://{broker-node3-ip}:9092,EXTERNAL://b3.unique_name.alicloud.plc.tidbcloud.com:9095 +controller.listener.names=CONTROLLER +listener.security.protocol.map=INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL +log.dirs=./data +``` + +**2.4.3 Start Kafka brokers** + +Create a script, and then execute it to start the Kafka broker in each broker node. + +```shell +#!/bin/bash + +# Get the directory of the current script +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +# Set JAVA_HOME to the Java installation within the script directory +export JAVA_HOME="$SCRIPT_DIR/jdk-22.0.2" +# Define the vars +KAFKA_DIR="$SCRIPT_DIR/kafka_2.13-3.7.1/bin" +KAFKA_STORAGE_CMD=$KAFKA_DIR/kafka-storage.sh +KAFKA_START_CMD=$KAFKA_DIR/kafka-server-start.sh +KAFKA_DATA_DIR=$SCRIPT_DIR/data +KAFKA_LOG_DIR=$SCRIPT_DIR/log +KAFKA_CONFIG_DIR=$SCRIPT_DIR/config + +# Cleanup step, which makes it easy for multiple experiments +# Find all Kafka process IDs +KAFKA_PIDS=$(ps aux | grep 'kafka.Kafka' | grep -v grep | awk '{print $2}') +if [ -z "$KAFKA_PIDS" ]; then + echo "No Kafka processes are running." +else + # Kill each Kafka process + echo "Killing Kafka processes with PIDs: $KAFKA_PIDS" + for PID in $KAFKA_PIDS; do + kill -9 $PID + echo "Killed Kafka process with PID: $PID" + done + echo "All Kafka processes have been killed." 
+fi + +rm -rf $KAFKA_DATA_DIR +mkdir -p $KAFKA_DATA_DIR +rm -rf $KAFKA_LOG_DIR +mkdir -p $KAFKA_LOG_DIR + +# Magic id: BRl69zcmTFmiPaoaANybiw, you can use your own +$KAFKA_STORAGE_CMD format -t "BRl69zcmTFmiPaoaANybiw" -c "$KAFKA_CONFIG_DIR/server.properties" > $KAFKA_LOG_DIR/server_format.log +LOG_DIR=$KAFKA_LOG_DIR nohup $KAFKA_START_CMD "$KAFKA_CONFIG_DIR/server.properties" & +``` + +**2.5. Test the cluster setting in the bastion node** + +1. Test the Kafka bootstrap. + + ```shell + export JAVA_HOME=~/jdk-22.0.2 + + # Bootstrap from INTERNAL listener + ./kafka_2.13-3.7.1/bin/kafka-broker-api-versions.sh --bootstrap-server {one_of_broker_ip}:9092 | grep 9092 + # Expected output (the actual order might be different) + {broker-node1-ip}:9092 (id: 1 rack: null) -> ( + {broker-node2-ip}:9092 (id: 2 rack: null) -> ( + {broker-node3-ip}:9092 (id: 3 rack: null) -> ( + + # Bootstrap from EXTERNAL listener + ./kafka_2.13-3.7.1/bin/kafka-broker-api-versions.sh --bootstrap-server {one_of_broker_ip}:39092 + # Expected output for the last 3 lines (the actual order might be different) + # The difference in the output from "bootstrap from INTERNAL listener" is that exceptions or errors might occur because advertised listeners cannot be resolved in Kafka VPC. + # We will make them resolvable on the TiDB Cloud side and route requests to the right broker when you create a changefeed that connects to this Kafka cluster via Private Link. + b1.unique_name.alicloud.plc.tidbcloud.com:9093 (id: 1 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + b2.unique_name.alicloud.plc.tidbcloud.com:9094 (id: 2 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + b3.unique_name.alicloud.plc.tidbcloud.com:9095 (id: 3 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + ``` + +2. Create a producer script `produce.sh` in the bastion node. + + ```shell + #!/bin/bash + BROKER_LIST=$1 # "{broker_address1},{broker_address2}..." 
+ + # Get the directory of the current script + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + # Set JAVA_HOME to the Java installation within the script directory + export JAVA_HOME="$SCRIPT_DIR/jdk-22.0.2" + # Define the Kafka directory + KAFKA_DIR="$SCRIPT_DIR/kafka_2.13-3.7.1/bin" + TOPIC="test-topic" + + # Create a topic if it does not exist + create_topic() { + echo "Creating topic if it does not exist..." + $KAFKA_DIR/kafka-topics.sh --create --topic $TOPIC --bootstrap-server $BROKER_LIST --if-not-exists --partitions 3 --replication-factor 3 + } + + # Produce messages to the topic + produce_messages() { + echo "Producing messages to the topic..." + for ((chrono=1; chrono <= 10; chrono++)); do + message="Test message "$chrono + echo "Create "$message + echo $message | $KAFKA_DIR/kafka-console-producer.sh --broker-list $BROKER_LIST --topic $TOPIC + done + } + create_topic + produce_messages + ``` + +3. Create a consumer script `consume.sh` in the bastion node. + + ```shell + #!/bin/bash + + BROKER_LIST=$1 # "{broker_address1},{broker_address2}..." + + # Get the directory of the current script + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + # Set JAVA_HOME to the Java installation within the script directory + export JAVA_HOME="$SCRIPT_DIR/jdk-22.0.2" + # Define the Kafka directory + KAFKA_DIR="$SCRIPT_DIR/kafka_2.13-3.7.1/bin" + TOPIC="test-topic" + CONSUMER_GROUP="test-group" + # Consume messages from the topic + consume_messages() { + echo "Consuming messages from the topic..." + $KAFKA_DIR/kafka-console-consumer.sh --bootstrap-server $BROKER_LIST --topic $TOPIC --from-beginning --timeout-ms 5000 --consumer-property group.id=$CONSUMER_GROUP + } + consume_messages + ``` + +4. Execute `produce.sh` and `consume.sh` to verify that the Kafka cluster is running. These scripts will also be reused for later network connection testing. The script will create a topic with `--partitions 3 --replication-factor 3`. 
Ensure that all these three brokers contain data. Ensure that the script will connect to all three brokers to guarantee that network connection will be tested. + + ```shell + # Test write message. + sh produce.sh {one_of_broker_ip}:9092 + ``` + + ```shell + # Expected output + Creating topic if it does not exist... + + Producing messages to the topic... + Create Test message 1 + >>Create Test message 2 + >>Create Test message 3 + >>Create Test message 4 + >>Create Test message 5 + >>Create Test message 6 + >>Create Test message 7 + >>Create Test message 8 + >>Create Test message 9 + >>Create Test message 10 + ``` + + ```shell + # Test read message + sh consume.sh {one_of_broker_ip}:9092 + ``` + + ```shell + # Expected example output (the actual message order might be different) + Consuming messages from the topic... + Test message 3 + Test message 4 + Test message 5 + Test message 9 + Test message 10 + Test message 6 + Test message 8 + Test message 1 + Test message 2 + Test message 7 + [2024-11-01 08:54:27,547] ERROR Error processing message, terminating consumer process: (kafka.tools.ConsoleConsumer$) + org.apache.kafka.common.errors.TimeoutException + Processed a total of 10 messages + ``` + +### Reconfigure a running Kafka cluster + +Ensure that your Kafka cluster is deployed in the same region and AZs as the TiDB cluster. If any brokers are in different AZs, move them to the correct ones. + +#### 1. Configure the EXTERNAL listener for brokers + +The following configuration applies to a Kafka KRaft cluster. The ZK mode configuration is similar. + +1. Plan configuration changes. + + 1. Configure an EXTERNAL **listener** for every broker for external access from TiDB Cloud. Select a unique port as the EXTERNAL port, for example, `39092`. + 2. Configure an EXTERNAL **advertised listener** based on **Kafka Advertised Listener Pattern** you get from TiDB Cloud for every broker node to help TiDB Cloud differentiate between different brokers. 
Different EXTERNAL advertised listeners help Kafka clients from TiDB Cloud route requests to the right broker. + + - `` differentiates brokers from Kafka Private Link Service access points. Plan a port range for EXTERNAL advertised listeners of all brokers, for example, `range from 9093`. These ports do not have to be actual ports listened to by brokers. They are ports listened to by the load balancer for Private Link Service that will forward requests to different brokers. + + It is recommended to configure different broker IDs for different brokers to make it easy for troubleshooting. + +2. Use SSH to log in to each broker node. Modify the configuration file of each broker with the following content: + + ```properties + # brokers in ap-southeast-1a + + # Add EXTERNAL listener + listeners=INTERNAL:...,EXTERNAL://0.0.0.0:39092 + + # Add EXTERNAL advertised listeners based on the "Kafka Advertised Listener Pattern" in "Prerequisites" section + # 1. The pattern is ".unique_name.alicloud.plc.tidbcloud.com:" + # 2. So the EXTERNAL can be "b1.unique_name.alicloud.plc.tidbcloud.com:9093", replace with "b" prefix plus "node.id" properties, replace with a unique port(9093) in EXTERNAL advertised listener ports range + advertised.listeners=...,EXTERNAL://b1.unique_name.alicloud.plc.tidbcloud.com:9093 + + # Configure EXTERNAL map + listener.security.protocol.map=...,EXTERNAL:PLAINTEXT + ``` + + ```properties + # brokers in ap-southeast-1b + + # Add EXTERNAL listener + listeners=INTERNAL:...,EXTERNAL://0.0.0.0:39092 + + # Add EXTERNAL advertised listeners based on the "Kafka Advertised Listener Pattern" in "Prerequisites" section + # 1. The pattern is ".unique_name.alicloud.plc.tidbcloud.com:" + # 2. So the EXTERNAL can be "b2.unique_name.alicloud.plc.tidbcloud.com:9094". Replace with "b" prefix plus "node.id" properties, and replace with a unique port(9094) in EXTERNAL advertised listener ports range. 
+   advertised.listeners=...,EXTERNAL://b2.unique_name.alicloud.plc.tidbcloud.com:9094 + + # Configure EXTERNAL map + listener.security.protocol.map=...,EXTERNAL:PLAINTEXT + ``` + + ```properties + # brokers in ap-southeast-1c + + # Add EXTERNAL listener + listeners=INTERNAL:...,EXTERNAL://0.0.0.0:39092 + + # Add EXTERNAL advertised listeners based on the "Kafka Advertised Listener Pattern" in "Prerequisites" section + # 1. The pattern is "<broker_id>.unique_name.alicloud.plc.tidbcloud.com:<port>" + # 2. So the EXTERNAL can be "b3.unique_name.alicloud.plc.tidbcloud.com:9095". Replace <broker_id> with "b" prefix plus "node.id" properties, and replace <port> with a unique port(9095) in EXTERNAL advertised listener ports range. + advertised.listeners=...,EXTERNAL://b3.unique_name.alicloud.plc.tidbcloud.com:9095 + + # Configure EXTERNAL map + listener.security.protocol.map=...,EXTERNAL:PLAINTEXT + ``` + +3. After you reconfigure all the brokers, restart your Kafka brokers one by one. + +#### 2. Test EXTERNAL listener settings in your internal network + +You can download the Kafka and OpenJDK on your Kafka client node. + +```shell +# Download Kafka and OpenJDK, and then extract the files. You can choose the binary version based on your preference. +wget https://archive.apache.org/dist/kafka/3.7.1/kafka_2.13-3.7.1.tgz +tar -zxf kafka_2.13-3.7.1.tgz +wget https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_linux-x64_bin.tar.gz +tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz +``` + +Execute the following script to test if the bootstrap works as expected. + +```shell +export JAVA_HOME=/root/jdk-22.0.2 + +# Bootstrap from the EXTERNAL listener +./kafka_2.13-3.7.1/bin/kafka-broker-api-versions.sh --bootstrap-server {one_of_broker_ip}:39092 + +# Expected output for the last 3 lines (the actual order might be different) +# There will be some exceptions or errors because advertised listeners cannot be resolved in your Kafka network. 
+# We will make them resolvable on the TiDB Cloud side and route requests to the right broker when you create a changefeed that connects to this Kafka cluster via Private Link. +b1.ap-southeast-1a.unique_name.alicloud.plc.tidbcloud.com:9093 (id: 1 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException +b2.ap-southeast-1b.unique_name.alicloud.plc.tidbcloud.com:9094 (id: 2 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException +b3.ap-southeast-1c.unique_name.alicloud.plc.tidbcloud.com:9095 (id: 3 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException +``` + +## Step 2. Expose the Kafka cluster as a private link service + +### 1. Set up the load balancer + +Create a network load balancer with four server groups with different ports. One server group is for bootstrap, and the others will map to different brokers. + +1. bootstrap server group => 9092 => broker-node1:39092,broker-node2:39092,broker-node3:39092 +2. broker server group 1 => 9093 => broker-node1:39092 +3. broker server group 2 => 9094 => broker-node2:39092 +4. broker server group 3 => 9095 => broker-node3:39092 + +If you have more broker role nodes, you need to add more mappings. Ensure that you have at least one node in the bootstrap target group. It is recommended to add three nodes, one for each AZ for resilience. + +Do the following to set up the load balancer: + +1. Go to [Server Groups](https://slb.console.alibabacloud.com/nlb/ap-southeast-1/server-groups) to create four server groups. 
+ + - Bootstrap server group + + - **Server Group Type**: select `Server` + - **Server Group Name**: `bootstrap-server-group` + - **VPC**: `Kafka VPC` + - **Backend Server Protocol**: select `TCP` + - **Backend servers**: click the created server group and add backend servers, including `broker-node1:39092`, `broker-node2:39092`, and `broker-node3:39092` + + - Broker server group 1 + + - **Server Group Type**: select `Server` + - **Server Group Name**: `broker-server-group-1` + - **VPC**: `Kafka VPC` + - **Backend Server Protocol**: select `TCP` + - **Backend servers**: click the created server group and add the backend server `broker-node1:39092` + + - Broker server group 2 + + - **Server Group Type**: select `Server` + - **Server Group Name**: `broker-server-group-2` + - **VPC**: `Kafka VPC` + - **Backend Server Protocol**: select `TCP` + - **Backend servers**: click the created server group and add the backend server `broker-node2:39092` + + - Broker server group 3 + + - **Server Group Type**: select `Server` + - **Server Group Name**: `broker-server-group-3` + - **VPC**: `Kafka VPC` + - **Backend Server Protocol**: select `TCP` + - **Backend servers**: click the created server group and add the backend server `broker-node3:39092` + +2. Go to [NLB](https://slb.console.alibabacloud.com/nlb) to create a network load balancer. + + - **Network Type**: select `Internal-facing` + - **VPC**: `Kafka VPC` + - **Zone**: + - `ap-southeast-1a` with `broker-ap-southeast-1a vswitch` + - `ap-southeast-1b` with `broker-ap-southeast-1b vswitch` + - `ap-southeast-1c` with `broker-ap-southeast-1c vswitch` + - **IP Version**: select `IPv4` + - **Instance Name**: `kafka-nlb` + - Click **Create Now** to create the load balancer. + +3. Find the load balancer you created, and then click **Create Listener** to create four TCP listeners. 
+ + - Bootstrap server group + + - **Listener Protocol**: select `TCP` + - **Listener Port**: `9092` + - **Server Group**: choose the server group `bootstrap-server-group` created previously. + + - Broker server group 1 + + - **Listener Protocol**: select `TCP` + - **Listener Port**: `9093` + - **Server Group**: choose the server group `broker-server-group-1` created previously. + + - Broker server group 2 + + - **Listener Protocol**: select `TCP` + - **Listener Port**: `9094` + - **Server Group**: choose the server group `broker-server-group-2` created previously. + + - Broker server group 3 + + - **Listener Protocol**: select `TCP` + - **Listener Port**: `9095` + - **Server Group**: choose the server group `broker-server-group-3` created previously. + +4. Test the load balancer in the bastion node. This example only tests the Kafka bootstrap. Because the load balancer is listening on the Kafka EXTERNAL listener, the addresses of EXTERNAL advertised listeners can not be resolved in the bastion node. Note down the `kafka-lb` DNS name from the load balancer detail page, for example `nlb-o21d6wyjknamw8hjxb.ap-southeast-1.nlb.aliyuncsslbintl.com`. Execute the script in the bastion node. + + ```shell + # Replace {lb_dns_name} to your actual value + export JAVA_HOME=~/jdk-22.0.2 + ./kafka_2.13-3.7.1/bin/kafka-broker-api-versions.sh --bootstrap-server {lb_dns_name}:9092 + + # Expected output for the last 3 lines (the actual order might be different) + b1.unique_name.alicloud.plc.tidbcloud.com:9093 (id: 1 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + b2.unique_name.alicloud.plc.tidbcloud.com:9094 (id: 2 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + b3.unique_name.alicloud.plc.tidbcloud.com:9095 (id: 3 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + ``` + +### 2. Set up an Alibaba Cloud endpoint service + +Set up the endpoint service in the same region. + +1. 
Go to [Endpoint service](https://vpc.console.alibabacloud.com/endpointservice) to create an endpoint service. + + - **Service Resource Type**: select `NLB` + - **Select Service Resource**: select all zones that NLB is in, and choose the NLB that you created in the previous step + - **Automatically Accept Endpoint Connections**: it is recommended to choose `No` + +2. Go to the details page of the endpoint service, and copy the **Endpoint Service Name**, for example, `com.aliyuncs.privatelink..xxxxx`. You need to use it for TiDB Cloud later. + +3. On the detail page of the endpoint service, click the **Service Whitelist** tab, click **Add to Whitelist**, and then enter the Alibaba Cloud account ID that you obtained in [Prerequisites](#prerequisites). + +## Step 3. Create a private link connection in TiDB Cloud + +To create a private link connection in TiDB Cloud, do the following: + +1. Create a private link connection in TiDB Cloud using the Alibaba Cloud endpoint service name you obtained from [Step 2](#2-set-up-an-alibaba-cloud-endpoint-service) (for example, `com.aliyuncs.privatelink..xxxxx`). + + For more information, see [Create an Alibaba Cloud Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md#create-an-alibaba-cloud-endpoint-service-private-link-connection). + +2. Attach domains to the private link connection so that dataflow services in TiDB Cloud can access the Kafka cluster. + + For more information, see [Attach domains to a private link connection](/tidb-cloud/serverless-private-link-connection.md#attach-domains-to-a-private-link-connection). Note that in the **Attach Domains** dialog, you need to choose **TiDB Cloud Managed** as the domain type, and copy the unique name of the generated domain for later use. + +## Step 4. Replace the unique name placeholder in Kafka configuration + +1. 
Go back to your Kafka broker nodes, replace the `unique_name` placeholder in `advertised.listeners` configuration of each broker with the actual unique name you get from the previous step. +2. After you reconfigure all the brokers, restart your Kafka brokers one by one. + +Now, you can use this private link connection and 9092 as the bootstrap port to connect to your Kafka cluster from TiDB Cloud. \ No newline at end of file diff --git a/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-aws.md b/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-aws.md new file mode 100644 index 0000000000000..6905f6e061532 --- /dev/null +++ b/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-aws.md @@ -0,0 +1,737 @@ +--- +title: Connect to AWS Self-Hosted Kafka via Private Link Connection +summary: Learn how to connect to an AWS Self-Hosted Kafka using an AWS Endpoint Service private link connection. +--- + +# Connect to AWS Self-Hosted Kafka via Private Link Connection + +This document describes how to connect a {{{ .essential }}} cluster to a self-hosted Kafka cluster in AWS using an [AWS Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md). + +The mechanism works as follows: + +1. The private link connection connects to your AWS endpoint service using the bootstrap broker address, which returns the addresses and ports of all Kafka brokers. +2. TiDB Cloud uses the returned broker addresses and ports to establish connections through the private link connection. +3. The AWS endpoint service forwards requests to your load balancers. +4. Load balancers route requests to the corresponding Kafka brokers based on port mapping. 
+ +## Prerequisites + +- Ensure that you have the following permissions to set up a Kafka cluster in your AWS account: + + - Manage EC2 instances + - Manage VPCs + - Manage subnets + - Connect to EC2 instances to configure Kafka nodes + +- Ensure that you have the following permissions to set up a load balancer and endpoint service in your AWS account: + + - Manage security groups + - Manage load balancers + - Manage endpoint services + +- Your {{{ .essential }}} is hosted on AWS, and it is active. Retrieve and save the following details for later use: + + - AWS Account ID + - Availability Zones (AZs) + +To view the AWS account ID and availability zones, do the following: + +1. In the [TiDB Cloud console](https://tidbcloud.com), navigate to the cluster overview page of your TiDB cluster, and then click **Settings** > **Networking** in the left navigation pane. +2. In the **Private Link Connection For Dataflow** area, click **Create Private Link Connection**. +3. In the displayed dialog, you can find the AWS account ID and availability zones. + +The following table shows an example of the deployment information. + +| Information | Value | Note | +|--------|-----------------|---------------------------| +| Region | Oregon (`us-west-2`) | N/A | +| Principal of TiDB Cloud AWS Account | `arn:aws:iam:::root` | N/A | +| AZ IDs |
  • `usw2-az1`
  • `usw2-az2`
  • `usw2-az3`
| Align AZ IDs to AZ names in your AWS account.
Example:
  • `usw2-az1` => `us-west-2a`
  • `usw2-az2` => `us-west-2c`
  • `usw2-az3` => `us-west-2b`
| +| Kafka Advertised Listener Pattern |
  • `usw2-az1` => `<broker_id>.usw2-az1.unique_name.aws.plc.tidbcloud.com:<port>`
  • `usw2-az2` => `<broker_id>.usw2-az2.unique_name.aws.plc.tidbcloud.com:<port>`
  • `usw2-az3` => `<broker_id>.usw2-az3.unique_name.aws.plc.tidbcloud.com:<port>`
| Map AZ names to AZ-specified patterns. Make sure that you configure the right pattern for the broker in a specific AZ later.
  • `us-west-2a` => `<broker_id>.usw2-az1.unique_name.aws.plc.tidbcloud.com:<port>`
  • `us-west-2c` => `<broker_id>.usw2-az2.unique_name.aws.plc.tidbcloud.com:<port>`
  • `us-west-2b` => `<broker_id>.usw2-az3.unique_name.aws.plc.tidbcloud.com:<port>`
`unique_name` is a placeholder and will be replaced with the actual value in [Step 4](#step-4-replace-the-unique-name-placeholder-in-kafka-configuration). | + +## Step 1. Set up a Kafka cluster + +If you need to deploy a new cluster, follow the instructions in [Deploy a new Kafka cluster](#deploy-a-new-kafka-cluster). + +If you need to expose an existing cluster, follow the instructions in [Reconfigure a running Kafka cluster](#reconfigure-a-running-kafka-cluster). + +### Deploy a new Kafka cluster + +#### 1. Set up the Kafka VPC + +The Kafka VPC requires the following: + +- Three private subnets for brokers, one for each AZ. +- One public subnet in any AZ with a bastion node that can connect to the internet and three private subnets, which makes it easy to set up the Kafka cluster. In a production environment, you might have your own bastion node that can connect to the Kafka VPC. + +Before creating subnets, create subnets in AZs based on the mappings of AZ IDs and AZ names. Take the following mapping as an example. + +- `usw2-az1` => `us-west-2a` +- `usw2-az2` => `us-west-2c` +- `usw2-az3` => `us-west-2b` + +Create private subnets in the following AZs: + +- `us-west-2a` +- `us-west-2c` +- `us-west-2b` + +Take the following steps to create the Kafka VPC. + +**1.1. Create the Kafka VPC** + +1. Go to [AWS Console > VPC dashboard](https://console.aws.amazon.com/vpcconsole/home?#vpcs:), and switch to the region in which you want to deploy Kafka. + +2. Click **Create VPC**. Fill in the information on the **VPC settings** page as follows. + + 1. Select **VPC only**. + 2. Enter a tag in **Name tag**, for example, `Kafka VPC`. + 3. Select **IPv4 CIDR manual input**, and enter the IPv4 CIDR, for example, `10.0.0.0/16`. + 4. Use the default values for other options. Click **Create VPC**. + 5. On the VPC detail page, take note of the VPC ID, for example, `vpc-01f50b790fa01dffa`. + +**1.2. Create private subnets in the Kafka VPC** + +1. 
Go to the [Subnets Listing page](https://console.aws.amazon.com/vpcconsole/home?#subnets:). +2. Click **Create subnet**. +3. Select **VPC ID** (`vpc-01f50b790fa01dffa` in this example) that you noted down before. +4. Add three subnets with the following information. It is recommended that you put the AZ IDs in the subnet names to make it easy to configure the brokers later, because TiDB Cloud requires encoding the AZ IDs in the broker's `advertised.listener` configuration. + + - Subnet1 in `us-west-2a` + - **Subnet name**: `broker-usw2-az1` + - **Availability Zone**: `us-west-2a` + - **IPv4 subnet CIDR block**: `10.0.0.0/18` + + - Subnet2 in `us-west-2c` + - **Subnet name**: `broker-usw2-az2` + - **Availability Zone**: `us-west-2c` + - **IPv4 subnet CIDR block**: `10.0.64.0/18` + + - Subnet3 in `us-west-2b` + - **Subnet name**: `broker-usw2-az3` + - **Availability Zone**: `us-west-2b` + - **IPv4 subnet CIDR block**: `10.0.128.0/18` + +5. Click **Create subnet**. The **Subnets Listing** page is displayed. + +**1.3. Create the public subnet in the Kafka VPC** + +1. Click **Create subnet**. +2. Select **VPC ID** (`vpc-01f50b790fa01dffa` in this example) that you noted down before. +3. Add the public subnet in any AZ with the following information: + + - **Subnet name**: `bastion` + - **IPv4 subnet CIDR block**: `10.0.192.0/18` + +4. Configure the bastion subnet to the Public subnet. + + 1. Go to [VPC dashboard > Internet gateways](https://console.aws.amazon.com/vpcconsole/home#igws:). Create an Internet Gateway with the name `kafka-vpc-igw`. + 2. On the **Internet gateways Detail** page, in **Actions**, click **Attach to VPC** to attach the Internet Gateway to the Kafka VPC. + 3. Go to [VPC dashboard > Route tables](https://console.aws.amazon.com/vpcconsole/home#CreateRouteTable:). 
Create a route table to the Internet Gateway in Kafka VPC and add a new route with the following information: + + - **Name**: `kafka-vpc-igw-route-table` + - **VPC**: `Kafka VPC` + - **Route**: + - **Destination**: `0.0.0.0/0` + - **Target**: `Internet Gateway`, `kafka-vpc-igw` + + 4. Attach the route table to the bastion subnet. On the **Detail** page of the route table, click **Subnet associations > Edit subnet associations** to add the bastion subnet and save changes. + +#### 2. Set up Kafka brokers + +**2.1. Create a bastion node** + +Go to the [EC2 Listing page](https://console.aws.amazon.com/ec2/home#Instances:). Create the bastion node in the bastion subnet. + +- **Name**: `bastion-node` +- **Amazon Machine Image**: `Amazon Linux` +- **Instance Type**: `t2.small` +- **Key pair**: `kafka-vpc-key-pair`. Create a new key pair named `kafka-vpc-key-pair`. Download `kafka-vpc-key-pair.pem` to your local machine for later configuration. +- Network settings + + - **VPC**: `Kafka VPC` + - **Subnet**: `bastion` + - **Auto-assign public IP**: `Enable` + - **Security Group**: create a new security group allow SSH login from anywhere. You can narrow the rule for safety in the production environment. + +**2.2. Create broker nodes** + +Go to the [EC2 Listing page](https://console.aws.amazon.com/ec2/home#Instances:). Create three broker nodes in broker subnets, one for each AZ. + +- Broker 1 in subnet `broker-usw2-az1` + + - **Name**: `broker-node1` + - **Amazon Machine Image**: `Amazon Linux` + - **Instance Type**: `t2.large` + - **Key pair**: reuse `kafka-vpc-key-pair` + - Network settings + + - **VPC**: `Kafka VPC` + - **Subnet**: `broker-usw2-az1` + - **Auto-assign public IP**: `Disable` + - **Security Group**: create a new security group to allow all TCP from Kafka VPC. You can narrow the rule for safety in the production environment. 
+ - **Protocol**: `TCP` + - **Port range**: `0 - 65535` + - **Source**: `10.0.0.0/16` + +- Broker 2 in subnet `broker-usw2-az2` + + - **Name**: `broker-node2` + - **Amazon Machine Image**: `Amazon Linux` + - **Instance Type**: `t2.large` + - **Key pair**: reuse `kafka-vpc-key-pair` + - Network settings + + - **VPC**: `Kafka VPC` + - **Subnet**: `broker-usw2-az2` + - **Auto-assign public IP**: `Disable` + - **Security Group**: create a new security group to allow all TCP from Kafka VPC. You can narrow the rule for safety in the production environment. + - **Protocol**: `TCP` + - **Port range**: `0 - 65535` + - **Source**: `10.0.0.0/16` + +- Broker 3 in subnet `broker-usw2-az3` + + - **Name**: `broker-node3` + - **Amazon Machine Image**: `Amazon Linux` + - **Instance Type**: `t2.large` + - **Key pair**: reuse `kafka-vpc-key-pair` + - Network settings + + - **VPC**: `Kafka VPC` + - **Subnet**: `broker-usw2-az3` + - **Auto-assign public IP**: `Disable` + - **Security Group**: create a new security group to allow all TCP from Kafka VPC. You can narrow the rule for safety in the production environment. + - **Protocol**: `TCP` + - **Port range**: `0 - 65535` + - **Source**: `10.0.0.0/16` + +**2.3. Prepare Kafka runtime binaries** + +1. Go to the detail page of the bastion node. Get the **Public IPv4 address**. Use SSH to log in to the node with the previously downloaded `kafka-vpc-key-pair.pem`. + + ```shell + chmod 400 kafka-vpc-key-pair.pem + ssh -i "kafka-vpc-key-pair.pem" ec2-user@{bastion_public_ip} # replace {bastion_public_ip} with the IP address of your bastion node, for example, 54.186.149.187 + scp -i "kafka-vpc-key-pair.pem" kafka-vpc-key-pair.pem ec2-user@{bastion_public_ip}:~/ + ``` + +2. Download binaries. + + ```shell + # Download Kafka and OpenJDK, and then extract the files. You can choose the binary version based on your preference. 
+ wget https://archive.apache.org/dist/kafka/3.7.1/kafka_2.13-3.7.1.tgz + tar -zxf kafka_2.13-3.7.1.tgz + wget https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_linux-x64_bin.tar.gz + tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz + ``` + +3. Copy binaries to each broker node. + + ```shell + # Replace {broker-node1-ip} with your broker-node1 IP address + scp -i "kafka-vpc-key-pair.pem" kafka_2.13-3.7.1.tgz ec2-user@{broker-node1-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" ec2-user@{broker-node1-ip} "tar -zxf kafka_2.13-3.7.1.tgz" + scp -i "kafka-vpc-key-pair.pem" openjdk-22.0.2_linux-x64_bin.tar.gz ec2-user@{broker-node1-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" ec2-user@{broker-node1-ip} "tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz" + + # Replace {broker-node2-ip} with your broker-node2 IP address + scp -i "kafka-vpc-key-pair.pem" kafka_2.13-3.7.1.tgz ec2-user@{broker-node2-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" ec2-user@{broker-node2-ip} "tar -zxf kafka_2.13-3.7.1.tgz" + scp -i "kafka-vpc-key-pair.pem" openjdk-22.0.2_linux-x64_bin.tar.gz ec2-user@{broker-node2-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" ec2-user@{broker-node2-ip} "tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz" + + # Replace {broker-node3-ip} with your broker-node3 IP address + scp -i "kafka-vpc-key-pair.pem" kafka_2.13-3.7.1.tgz ec2-user@{broker-node3-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" ec2-user@{broker-node3-ip} "tar -zxf kafka_2.13-3.7.1.tgz" + scp -i "kafka-vpc-key-pair.pem" openjdk-22.0.2_linux-x64_bin.tar.gz ec2-user@{broker-node3-ip}:~/ + ssh -i "kafka-vpc-key-pair.pem" ec2-user@{broker-node3-ip} "tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz" + ``` + +**2.4. Set up Kafka nodes on each broker node** + +**2.4.1 Set up a KRaft Kafka cluster with three nodes** + +Each node will act as a broker and controller role. Do the following for each broker: + +1. 
For the `listeners` item, all three brokers are the same and act as broker and controller roles: + + 1. Configure the same CONTROLLER listener for all **controller** role nodes. If you only want to add the **broker** role nodes, you do not need the CONTROLLER listener in `server.properties`. + 2. Configure two **broker** listeners, `INTERNAL` for internal access and `EXTERNAL` for external access from TiDB Cloud. + +2. For the `advertised.listeners` item, do the following: + + 1. Configure an INTERNAL advertised listener for every broker with the internal IP of the broker node. Advertised internal Kafka clients use this address to visit the broker. + 2. Configure an EXTERNAL advertised listener based on **Kafka Advertised Listener Pattern** you get from TiDB Cloud for each broker node to help TiDB Cloud differentiate between different brokers. Different EXTERNAL advertised listeners help the Kafka client from TiDB Cloud route requests to the right broker. + + - `` differentiates brokers from Kafka Private Link Service access points. Plan a port range for EXTERNAL advertised listeners of all brokers. These ports do not have to be actual ports listened to by brokers. They are ports listened to by the load balancer for Private Link Service that will forward requests to different brokers. + - `AZ ID` in **Kafka Advertised Listener Pattern** indicates where the broker is deployed. TiDB Cloud will route requests to different endpoint DNS names based on the AZ ID. + + It is recommended to configure different broker IDs for different brokers to make it easy for troubleshooting. + +3. The planning values are as follows: + + - **CONTROLLER port**: `29092` + - **INTERNAL port**: `9092` + - **EXTERNAL**: `39092` + - **EXTERNAL advertised listener ports range**: `9093~9095` + +**2.4.2. Create a configuration file** + +Use SSH to log in to every broker node. Create a configuration file `~/config/server.properties` with the following content. 
+ +```properties +# brokers in usw2-az1 + +# broker-node1 ~/config/server.properties +# 1. Replace {broker-node1-ip}, {broker-node2-ip}, {broker-node3-ip} with the actual IP addresses. +# 2. Configure EXTERNAL in "advertised.listeners" based on the "Kafka Advertised Listener Pattern" in the "Prerequisites" section. +# 2.1 The pattern for AZ(ID: usw2-az1) is ".usw2-az1.unique_name.aws.plc.tidbcloud.com:". +# 2.2 So the EXTERNAL can be "b1.usw2-az1.unique_name.aws.plc.tidbcloud.com:9093". Replace with "b" prefix plus "node.id" properties, and replace with a unique port (9093) in the port range of the EXTERNAL advertised listener. +# 2.3 If there are more broker role nodes in the same AZ, you can configure them in the same way. +process.roles=broker,controller +node.id=1 +controller.quorum.voters=1@{broker-node1-ip}:29092,2@{broker-node2-ip}:29092,3@{broker-node3-ip}:29092 +listeners=INTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29092,EXTERNAL://0.0.0.0:39092 +inter.broker.listener.name=INTERNAL +advertised.listeners=INTERNAL://{broker-node1-ip}:9092,EXTERNAL://b1.usw2-az1.unique_name.aws.plc.tidbcloud.com:9093 +controller.listener.names=CONTROLLER +listener.security.protocol.map=INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL +log.dirs=./data +``` + +```properties +# brokers in usw2-az2 + +# broker-node2 ~/config/server.properties +# 1. Replace {broker-node1-ip}, {broker-node2-ip}, {broker-node3-ip} with the actual IP addresses. +# 2. Configure EXTERNAL in "advertised.listeners" based on the "Kafka Advertised Listener Pattern" in the "Prerequisites" section. +# 2.1 The pattern for AZ(ID: usw2-az2) is ".usw2-az2.unique_name.aws.plc.tidbcloud.com:". +# 2.2 So the EXTERNAL can be "b2.usw2-az2.unique_name.aws.plc.tidbcloud.com:9094". Replace with "b" prefix plus "node.id" properties, and replace with a unique port (9094) in the port range of the EXTERNAL advertised listener. 
+# 2.3 If there are more broker role nodes in the same AZ, you can configure them in the same way. +process.roles=broker,controller +node.id=2 +controller.quorum.voters=1@{broker-node1-ip}:29092,2@{broker-node2-ip}:29092,3@{broker-node3-ip}:29092 +listeners=INTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29092,EXTERNAL://0.0.0.0:39092 +inter.broker.listener.name=INTERNAL +advertised.listeners=INTERNAL://{broker-node2-ip}:9092,EXTERNAL://b2.usw2-az2.unique_name.aws.plc.tidbcloud.com:9094 +controller.listener.names=CONTROLLER +listener.security.protocol.map=INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL +log.dirs=./data +``` + +```properties +# brokers in usw2-az3 + +# broker-node3 ~/config/server.properties +# 1. Replace {broker-node1-ip}, {broker-node2-ip}, {broker-node3-ip} with the actual IP addresses. +# 2. Configure EXTERNAL in "advertised.listeners" based on the "Kafka Advertised Listener Pattern" in the "Prerequisites" section. +# 2.1 The pattern for AZ(ID: usw2-az3) is ".usw2-az3.unique_name.aws.plc.tidbcloud.com:". +# 2.2 So the EXTERNAL can be "b3.usw2-az3.unique_name.aws.plc.tidbcloud.com:9095". Replace with "b" prefix plus "node.id" properties, and replace with a unique port (9095) in the port range of the EXTERNAL advertised listener. +# 2.3 If there are more broker role nodes in the same AZ, you can configure them in the same way. 
+process.roles=broker,controller +node.id=3 +controller.quorum.voters=1@{broker-node1-ip}:29092,2@{broker-node2-ip}:29092,3@{broker-node3-ip}:29092 +listeners=INTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29092,EXTERNAL://0.0.0.0:39092 +inter.broker.listener.name=INTERNAL +advertised.listeners=INTERNAL://{broker-node3-ip}:9092,EXTERNAL://b3.usw2-az3.unique_name.aws.plc.tidbcloud.com:9095 +controller.listener.names=CONTROLLER +listener.security.protocol.map=INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL +log.dirs=./data +``` + +**2.4.3 Start Kafka brokers** + +Create a script, and then execute it to start the Kafka broker in each broker node. + +```shell +#!/bin/bash + +# Get the directory of the current script +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +# Set JAVA_HOME to the Java installation within the script directory +export JAVA_HOME="$SCRIPT_DIR/jdk-22.0.2" +# Define the vars +KAFKA_DIR="$SCRIPT_DIR/kafka_2.13-3.7.1/bin" +KAFKA_STORAGE_CMD=$KAFKA_DIR/kafka-storage.sh +KAFKA_START_CMD=$KAFKA_DIR/kafka-server-start.sh +KAFKA_DATA_DIR=$SCRIPT_DIR/data +KAFKA_LOG_DIR=$SCRIPT_DIR/log +KAFKA_CONFIG_DIR=$SCRIPT_DIR/config + +# Cleanup step, which makes it easy for multiple experiments +# Find all Kafka process IDs +KAFKA_PIDS=$(ps aux | grep 'kafka.Kafka' | grep -v grep | awk '{print $2}') +if [ -z "$KAFKA_PIDS" ]; then + echo "No Kafka processes are running." +else + # Kill each Kafka process + echo "Killing Kafka processes with PIDs: $KAFKA_PIDS" + for PID in $KAFKA_PIDS; do + kill -9 $PID + echo "Killed Kafka process with PID: $PID" + done + echo "All Kafka processes have been killed." 
+fi + +rm -rf $KAFKA_DATA_DIR +mkdir -p $KAFKA_DATA_DIR +rm -rf $KAFKA_LOG_DIR +mkdir -p $KAFKA_LOG_DIR + +# Magic id: BRl69zcmTFmiPaoaANybiw, you can use your own +$KAFKA_STORAGE_CMD format -t "BRl69zcmTFmiPaoaANybiw" -c "$KAFKA_CONFIG_DIR/server.properties" > $KAFKA_LOG_DIR/server_format.log +LOG_DIR=$KAFKA_LOG_DIR nohup $KAFKA_START_CMD "$KAFKA_CONFIG_DIR/server.properties" & +``` + +**2.5. Test the cluster setting in the bastion node** + +1. Test the Kafka bootstrap. + + ```shell + export JAVA_HOME=/home/ec2-user/jdk-22.0.2 + + # Bootstrap from INTERNAL listener + ./kafka_2.13-3.7.1/bin/kafka-broker-api-versions.sh --bootstrap-server {one_of_broker_ip}:9092 | grep 9092 + # Expected output (the actual order might be different) + {broker-node1-ip}:9092 (id: 1 rack: null) -> ( + {broker-node2-ip}:9092 (id: 2 rack: null) -> ( + {broker-node3-ip}:9092 (id: 3 rack: null) -> ( + + # Bootstrap from EXTERNAL listener + ./kafka_2.13-3.7.1/bin/kafka-broker-api-versions.sh --bootstrap-server {one_of_broker_ip}:39092 + # Expected output for the last 3 lines (the actual order might be different) + # The difference in the output from "bootstrap from INTERNAL listener" is that exceptions or errors might occur because advertised listeners cannot be resolved in Kafka VPC. + # We will make them resolvable on the TiDB Cloud side and route requests to the right broker when you create a changefeed that connects to this Kafka cluster via Private Link. + b1.usw2-az1.unique_name.aws.plc.tidbcloud.com:9093 (id: 1 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + b2.usw2-az2.unique_name.aws.plc.tidbcloud.com:9094 (id: 2 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + b3.usw2-az3.unique_name.aws.plc.tidbcloud.com:9095 (id: 3 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + ``` + +2. Create a producer script `produce.sh` in the bastion node. 
+ + ```shell + #!/bin/bash + BROKER_LIST=$1 # "{broker_address1},{broker_address2}..." + + # Get the directory of the current script + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + # Set JAVA_HOME to the Java installation within the script directory + export JAVA_HOME="$SCRIPT_DIR/jdk-22.0.2" + # Define the Kafka directory + KAFKA_DIR="$SCRIPT_DIR/kafka_2.13-3.7.1/bin" + TOPIC="test-topic" + + # Create a topic if it does not exist + create_topic() { + echo "Creating topic if it does not exist..." + $KAFKA_DIR/kafka-topics.sh --create --topic $TOPIC --bootstrap-server $BROKER_LIST --if-not-exists --partitions 3 --replication-factor 3 + } + + # Produce messages to the topic + produce_messages() { + echo "Producing messages to the topic..." + for ((chrono=1; chrono <= 10; chrono++)); do + message="Test message "$chrono + echo "Create "$message + echo $message | $KAFKA_DIR/kafka-console-producer.sh --broker-list $BROKER_LIST --topic $TOPIC + done + } + create_topic + produce_messages + ``` + +3. Create a consumer script `consume.sh` in the bastion node. + + ```shell + #!/bin/bash + + BROKER_LIST=$1 # "{broker_address1},{broker_address2}..." + + # Get the directory of the current script + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + # Set JAVA_HOME to the Java installation within the script directory + export JAVA_HOME="$SCRIPT_DIR/jdk-22.0.2" + # Define the Kafka directory + KAFKA_DIR="$SCRIPT_DIR/kafka_2.13-3.7.1/bin" + TOPIC="test-topic" + CONSUMER_GROUP="test-group" + # Consume messages from the topic + consume_messages() { + echo "Consuming messages from the topic..." + $KAFKA_DIR/kafka-console-consumer.sh --bootstrap-server $BROKER_LIST --topic $TOPIC --from-beginning --timeout-ms 5000 --consumer-property group.id=$CONSUMER_GROUP + } + consume_messages + ``` + +4. Execute `produce.sh` and `consume.sh` to verify that the Kafka cluster is running. These scripts will also be reused for later network connection testing. 
The script will create a topic with `--partitions 3 --replication-factor 3`. Ensure that all these three brokers contain data. Ensure that the script will connect to all three brokers to guarantee that network connection will be tested. + + ```shell + # Test write message. + ./produce.sh {one_of_broker_ip}:9092 + ``` + + ```shell + # Expected output + Creating topic if it does not exist... + + Producing messages to the topic... + Create Test message 1 + >>Create Test message 2 + >>Create Test message 3 + >>Create Test message 4 + >>Create Test message 5 + >>Create Test message 6 + >>Create Test message 7 + >>Create Test message 8 + >>Create Test message 9 + >>Create Test message 10 + ``` + + ```shell + # Test read message + ./consume.sh {one_of_broker_ip}:9092 + ``` + + ```shell + # Expected example output (the actual message order might be different) + Consuming messages from the topic... + Test message 3 + Test message 4 + Test message 5 + Test message 9 + Test message 10 + Test message 6 + Test message 8 + Test message 1 + Test message 2 + Test message 7 + [2024-11-01 08:54:27,547] ERROR Error processing message, terminating consumer process: (kafka.tools.ConsoleConsumer$) + org.apache.kafka.common.errors.TimeoutException + Processed a total of 10 messages + ``` + +### Reconfigure a running Kafka cluster + +Ensure that your Kafka cluster is deployed in the same region and AZs as the TiDB cluster. If any brokers are in different AZs, move them to the correct ones. + +#### 1. Configure the EXTERNAL listener for brokers + +The following configuration applies to a Kafka KRaft cluster. The ZK mode configuration is similar. + +1. Plan configuration changes. + + 1. Configure an EXTERNAL **listener** for every broker for external access from TiDB Cloud. Select a unique port as the EXTERNAL port, for example, `39092`. + 2. 
Configure an EXTERNAL **advertised listener** based on **Kafka Advertised Listener Pattern** you get from TiDB Cloud for every broker node to help TiDB Cloud differentiate between different brokers. Different EXTERNAL advertised listeners help Kafka clients from TiDB Cloud route requests to the right broker. + + - `` differentiates brokers from Kafka Private Link Service access points. Plan a port range for EXTERNAL advertised listeners of all brokers, for example, `range from 9093`. These ports do not have to be actual ports listened to by brokers. They are ports listened to by the load balancer for Private Link Service that will forward requests to different brokers. + - `AZ ID` in **Kafka Advertised Listener Pattern** indicates where the broker is deployed. TiDB Cloud will route requests to different endpoint DNS names based on the AZ ID. + + It is recommended to configure different broker IDs for different brokers to make it easy for troubleshooting. + +2. Use SSH to log in to each broker node. Modify the configuration file of each broker with the following content: + + ```properties + # brokers in usw2-az1 + + # Add EXTERNAL listener + listeners=INTERNAL:...,EXTERNAL://0.0.0.0:39092 + + # Add EXTERNAL advertised listeners based on the "Kafka Advertised Listener Pattern" in "Prerequisites" section + # 1. The pattern for AZ(ID: usw2-az1) is ".usw2-az1.unique_name.aws.plc.tidbcloud.com:" + # 2. 
So the EXTERNAL can be "b1.usw2-az1.unique_name.aws.plc.tidbcloud.com:9093", replace with "b" prefix plus "node.id" properties, replace with a unique port(9093) in EXTERNAL advertised listener ports range + advertised.listeners=...,EXTERNAL://b1.usw2-az1.unique_name.aws.plc.tidbcloud.com:9093 + + # Configure EXTERNAL map + listener.security.protocol.map=...,EXTERNAL:PLAINTEXT + ``` + + ```properties + # brokers in usw2-az2 + + # Add EXTERNAL listener + listeners=INTERNAL:...,EXTERNAL://0.0.0.0:39092 + + # Add EXTERNAL advertised listeners based on the "Kafka Advertised Listener Pattern" in "Prerequisites" section + # 1. The pattern for AZ(ID: usw2-az2) is ".usw2-az2.unique_name.aws.plc.tidbcloud.com:" + # 2. So the EXTERNAL can be "b2.usw2-az2.unique_name.aws.plc.tidbcloud.com:9094". Replace with "b" prefix plus "node.id" properties, and replace with a unique port(9094) in EXTERNAL advertised listener ports range. + advertised.listeners=...,EXTERNAL://b2.usw2-az2.unique_name.aws.plc.tidbcloud.com:9094 + + # Configure EXTERNAL map + listener.security.protocol.map=...,EXTERNAL:PLAINTEXT + ``` + + ```properties + # brokers in usw2-az3 + + # Add EXTERNAL listener + listeners=INTERNAL:...,EXTERNAL://0.0.0.0:39092 + + # Add EXTERNAL advertised listeners based on the "Kafka Advertised Listener Pattern" in "Prerequisites" section + # 1. The pattern for AZ(ID: usw2-az3) is ".usw2-az3.unique_name.aws.plc.tidbcloud.com:" + # 2. So the EXTERNAL can be "b2.usw2-az3.unique_name.aws.plc.tidbcloud.com:9095". Replace with "b" prefix plus "node.id" properties, and replace with a unique port(9095) in EXTERNAL advertised listener ports range. + advertised.listeners=...,EXTERNAL://b3.usw2-az3.unique_name.aws.plc.tidbcloud.com:9095 + + # Configure EXTERNAL map + listener.security.protocol.map=...,EXTERNAL:PLAINTEXT + ``` + +3. After you reconfigure all the brokers, restart your Kafka brokers one by one. + +#### 2. 
Test EXTERNAL listener settings in your internal network + +You can download the Kafka and OpenJDK on your Kafka client node. + +```shell +# Download Kafka and OpenJDK, and then extract the files. You can choose the binary version based on your preference. +wget https://archive.apache.org/dist/kafka/3.7.1/kafka_2.13-3.7.1.tgz +tar -zxf kafka_2.13-3.7.1.tgz +wget https://download.java.net/java/GA/jdk22.0.2/c9ecb94cd31b495da20a27d4581645e8/9/GPL/openjdk-22.0.2_linux-x64_bin.tar.gz +tar -zxf openjdk-22.0.2_linux-x64_bin.tar.gz +``` + +Execute the following script to test if the bootstrap works as expected. + +```shell +export JAVA_HOME=/home/ec2-user/jdk-22.0.2 + +# Bootstrap from the EXTERNAL listener +./kafka_2.13-3.7.1/bin/kafka-broker-api-versions.sh --bootstrap-server {one_of_broker_ip}:39092 + +# Expected output for the last 3 lines (the actual order might be different) +# There will be some exceptions or errors because advertised listeners cannot be resolved in your Kafka network. +# We will make them resolvable on the TiDB Cloud side and route requests to the right broker when you create a changefeed that connects to this Kafka cluster via Private Link. +b1.usw2-az1.unique_name.aws.plc.tidbcloud.com:9093 (id: 1 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException +b2.usw2-az2.unique_name.aws.plc.tidbcloud.com:9094 (id: 2 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException +b3.usw2-az3.unique_name.aws.plc.tidbcloud.com:9095 (id: 3 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException +``` + +## Step 2. Expose the Kafka cluster as a private link service + +### 1. Set up the load balancer + +Create a network load balancer with four target groups with different ports. One target group is for bootstrap, and the others will map to different brokers. + +1. bootstrap target group => 9092 => broker-node1:39092,broker-node2:39092,broker-node3:39092 +2. broker target group 1 => 9093 => broker-node1:39092 +3. 
broker target group 2 => 9094 => broker-node2:39092 +4. broker target group 3 => 9095 => broker-node3:39092 + +If you have more broker role nodes, you need to add more mappings. Ensure that you have at least one node in the bootstrap target group. It is recommended to add three nodes, one for each AZ for resilience. + +Do the following to set up the load balancer: + +1. Go to [Target groups](https://console.aws.amazon.com/ec2/home#CreateTargetGroup:) to create four target groups. + + - Bootstrap target group + + - **Target type**: `Instances` + - **Target group name**: `bootstrap-target-group` + - **Protocol**: `TCP` + - **Port**: `9092` + - **IP address type**: `IPv4` + - **VPC**: `Kafka VPC` + - **Health check protocol**: `TCP` + - **Register targets**: `broker-node1:39092`, `broker-node2:39092`, `broker-node3:39092` + + - Broker target group 1 + + - **Target type**: `Instances` + - **Target group name**: `broker-target-group-1` + - **Protocol**: `TCP` + - **Port**: `9093` + - **IP address type**: `IPv4` + - **VPC**: `Kafka VPC` + - **Health check protocol**: `TCP` + - **Register targets**: `broker-node1:39092` + + - Broker target group 2 + + - **Target type**: `Instances` + - **Target group name**: `broker-target-group-2` + - **Protocol**: `TCP` + - **Port**: `9094` + - **IP address type**: `IPv4` + - **VPC**: `Kafka VPC` + - **Health check protocol**: `TCP` + - **Register targets**: `broker-node2:39092` + + - Broker target group 3 + + - **Target type**: `Instances` + - **Target group name**: `broker-target-group-3` + - **Protocol**: `TCP` + - **Port**: `9095` + - **IP address type**: `IPv4` + - **VPC**: `Kafka VPC` + - **Health check protocol**: `TCP` + - **Register targets**: `broker-node3:39092` + +2. Go to [Load balancers](https://console.aws.amazon.com/ec2/home#LoadBalancers:) to create a network load balancer. 
+ + - **Load balancer name**: `kafka-lb` + - **Schema**: `Internal` + - **Load balancer IP address type**: `IPv4` + - **VPC**: `Kafka VPC` + - **Availability Zones**: + - `usw2-az1` with `broker-usw2-az1 subnet` + - `usw2-az2` with `broker-usw2-az2 subnet` + - `usw2-az3` with `broker-usw2-az3 subnet` + - **Security groups**: create a new security group with the following rules. + - Inbound rule allows all TCP from Kafka VPC: Type - `{ports of target groups}`, for example, `9092-9095`; Source - `{CIDR of TiDB Cloud}`. To get the CIDR of TiDB Cloud in the region, switch to your target project using the combo box in the upper-left corner of the [TiDB Cloud console](https://tidbcloud.com), click **Project Settings** > **Network Access** in the left navigation pane, and then click **Project CIDR** > **AWS**. + - Outbound rule allows all TCP to Kafka VPC: Type - `All TCP`; Destination - `Anywhere-IPv4` + - Listeners and routing: + - Protocol: `TCP`; Port: `9092`; Forward to: `bootstrap-target-group` + - Protocol: `TCP`; Port: `9093`; Forward to: `broker-target-group-1` + - Protocol: `TCP`; Port: `9094`; Forward to: `broker-target-group-2` + - Protocol: `TCP`; Port: `9095`; Forward to: `broker-target-group-3` + +3. Test the load balancer in the bastion node. This example only tests the Kafka bootstrap. Because the load balancer is listening on the Kafka EXTERNAL listener, the addresses of EXTERNAL advertised listeners can not be resolved in the bastion node. Note down the `kafka-lb` DNS name from the load balancer detail page, for example `kafka-lb-77405fa57191adcb.elb.us-west-2.amazonaws.com`. Execute the script in the bastion node. 
+ + ```shell + # Replace {lb_dns_name} to your actual value + export JAVA_HOME=/home/ec2-user/jdk-22.0.2 + ./kafka_2.13-3.7.1/bin/kafka-broker-api-versions.sh --bootstrap-server {lb_dns_name}:9092 + + # Expected output for the last 3 lines (the actual order might be different) + b1.usw2-az1.unique_name.aws.plc.tidbcloud.com:9093 (id: 1 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + b2.usw2-az2.unique_name.aws.plc.tidbcloud.com:9094 (id: 2 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + b3.usw2-az3.unique_name.aws.plc.tidbcloud.com:9095 (id: 3 rack: null) -> ERROR: org.apache.kafka.common.errors.DisconnectException + + # You can also try bootstrap in other ports 9093/9094/9095. It will succeed probabilistically because NLB in AWS resolves LB DNS to the IP address of any availability zone and disables cross-zone load balancing by default. + # If you enable cross-zone load balancing in LB, it will succeed. However, it is unnecessary and might cause additional cross-AZ traffic. + ``` + +### 2. Set up an AWS endpoint service + +1. Go to [Endpoint service](https://console.aws.amazon.com/vpcconsole/home#EndpointServices:). Click **Create endpoint service** to create a Private Link service for the Kafka load balancer. + + - **Name**: `kafka-pl-service` + - **Load balancer type**: `Network` + - **Load balancers**: `kafka-lb` + - **Included Availability Zones**: `usw2-az1`,`usw2-az2`, `usw2-az3` + - **Require acceptance for endpoint**: `Acceptance required` + - **Enable private DNS name**: `No` + +2. Note down the **Service name**. You need to provide it to TiDB Cloud, for example `com.amazonaws.vpce.us-west-2.vpce-svc-0f49e37e1f022cd45`. + +3. On the detail page of the kafka-pl-service, click the **Allow principals** tab, and then add the AWS account ID that you obtained in [Prerequisites](#prerequisites) to the allowlist, for example, `arn:aws:iam:::root`. + +## Step 3. 
Create a private link connection in TiDB Cloud + +To create a private link connection in TiDB Cloud, do the following: + +1. Create a private link connection in TiDB Cloud using the AWS endpoint service name you obtained from [Step 2](#2-set-up-an-aws-endpoint-service) (for example, `com.amazonaws.vpce..vpce-svc-xxxx`). + + For more information, see [Create an AWS Endpoint Service private link connection](/tidb-cloud/serverless-private-link-connection.md#create-an-aws-endpoint-service-private-link-connection). + +2. Attach domains to the private link connection so that dataflow services in TiDB Cloud can access the Kafka cluster. + + For more information, see [Attach domains to a private link connection](/tidb-cloud/serverless-private-link-connection.md#attach-domains-to-a-private-link-connection). Note that in the **Attach Domains** dialog, you need to choose **TiDB Cloud Managed** as the domain type, and copy the unique name of the generated domain for later use. + +## Step 4. Replace the unique name placeholder in Kafka configuration + +1. Go back to your Kafka broker nodes, replace the `unique_name` placeholder in `advertised.listeners` configuration of each broker with the actual unique name you get from the previous step. +2. After you reconfigure all the brokers, restart your Kafka brokers one by one. + +Now, you can use this private link connection and 9092 as the bootstrap port to connect to your Kafka cluster from TiDB Cloud. diff --git a/tidb-cloud/serverless-private-link-connection.md b/tidb-cloud/serverless-private-link-connection.md new file mode 100644 index 0000000000000..61d43b530d8b5 --- /dev/null +++ b/tidb-cloud/serverless-private-link-connection.md @@ -0,0 +1,286 @@ +--- +title: Private Link Connections for Dataflow +summary: Learn how to set up private link connections for Dataflow. 
+--- + +# Private Link Connections for Dataflow + +Dataflow services in TiDB Cloud, such as Changefeed and Data Migration (DM), require reliable connectivity to external resources such as RDS instances and Kafka clusters. While public endpoints are supported, private link connections provide a superior alternative by offering higher efficiency, lower latency, and enhanced security. + +Private link connections enable direct connectivity between {{{ .essential }}} and your target resources. This ensures that data traveling from TiDB Cloud to your databases on other cloud platforms remains entirely within private network boundaries, significantly reducing the network attack surface and ensuring consistent throughput for critical dataflow workloads. + +## Private link connection types + +Private link connections for dataflow are available in different types, depending on the cloud provider and the service you want to access. Each type enables secure and private network access between your TiDB Cloud cluster and external resources (for example, RDS or Kafka) in the same cloud environment. + +### AWS Endpoint Service + +This type of private link connection enables TiDB Cloud clusters on **AWS** to connect to your [AWS endpoint service](https://docs.aws.amazon.com/vpc/latest/privatelink/create-endpoint-service.html) powered by AWS PrivateLink. + +The private link connection can access various AWS services, such as RDS instances and Kafka services, by associating them with the endpoint service. + +### Alibaba Cloud Endpoint Service + +This type of private link connection enables TiDB Cloud clusters on **Alibaba Cloud** to connect to your [Alibaba Cloud endpoint service](https://www.alibabacloud.com/help/en/privatelink/share-your-service/#51976edba8no7) powered by Alibaba Cloud PrivateLink. + +The private link connection can access various Alibaba Cloud services, such as RDS instances and Kafka services, by associating them with the endpoint service. 
+ +## Create an AWS Endpoint Service private link connection + +You can create an AWS Endpoint Service private link connection using the TiDB Cloud console or the TiDB Cloud CLI. + +Ensure that the AWS endpoint service: + +- Resides in the same region as your TiDB Cloud cluster. +- Add the TiDB Cloud account ID to the **Allow principals** list. +- Has availability zones that overlap with your TiDB Cloud cluster. + +You can get the account ID and availability zones information at the bottom of the **Create Private Link Connection** dialog, or by running the following command: + +```shell +ticloud serverless private-link-connection zones --cluster-id +``` + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **Networking** in the left navigation pane. + +3. In the **Private Link Connection For Dataflow** area, click **Create Private Link Connection**. + +4. In the **Create Private Link Connection** dialog, enter the required information: + + - **Private Link Connection Name**: enter a name for the private link connection. + - **Connection Type**: select **AWS Endpoint Service**. If this option is not displayed, ensure that your cluster is created on AWS. + - **Endpoint Service Name**: enter your AWS endpoint service name, for example, `com.amazonaws.vpce..vpce-svc-xxxxxxxxxxxxxxxxx`. + +5. Click **Create**. + +6. Go to the detail page of your endpoint service on the [AWS console](https://console.aws.amazon.com). In the **Endpoint Connections** tab, accept the endpoint connection request from TiDB Cloud. + +
+ +
+ +To create a private link connection using the TiDB Cloud CLI: + +1. Run the following command: + + ```shell + ticloud serverless private-link-connection create -c --display-name --type AWS_ENDPOINT_SERVICE --aws.endpoint-service-name + ``` + +2. Go to the detail page of your endpoint service on the [AWS console](https://console.aws.amazon.com). In the **Endpoint Connections** tab, accept the endpoint connection request from TiDB Cloud. + +
+
+ +## Create an Alibaba Cloud Endpoint Service private link connection + +You can create an Alibaba Cloud Endpoint Service private link connection using the TiDB Cloud console or the TiDB Cloud CLI. + +Ensure that the Alibaba Cloud endpoint service: + +- Resides in the same region as your TiDB Cloud cluster. +- Add the TiDB Cloud account ID to the **Service Whitelist**. +- Has availability zones that overlap with your TiDB Cloud cluster. + +You can get the account ID and available zones information at the bottom of the **Create Private Link Connection** dialog, or by running the following command: + +```shell +ticloud serverless private-link-connection zones --cluster-id +``` + + +
+ +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **Networking** in the left navigation pane. + +3. In the **Private Link Connection For Dataflow** area, click **Create Private Link Connection**. + +4. In the **Create Private Link Connection** dialog, enter the required information: + + - **Private Link Connection Name**: enter a name for the private link connection. + - **Connection Type**: select **Alibaba Cloud Endpoint Service**. If this option is not displayed, ensure that your cluster is created on Alibaba Cloud. + - **Endpoint Service Name**: enter the Alibaba Cloud endpoint service name, for example, `com.aliyuncs.privatelink..epsrv-xxxxxxxxxxxxxxxxx`. + +5. Click **Create**. + +6. Go to the detail page of your endpoint service on the [Alibaba Cloud console](https://console.alibabacloud.com). In the **Endpoint Connections** tab, allow the endpoint connection request from TiDB Cloud. + +
+ +
+ +To create a private link connection using the TiDB Cloud CLI: + +1. Run the following command: + + ```shell + ticloud serverless private-link-connection create -c --display-name --type ALICLOUD_ENDPOINT_SERVICE --alicloud.endpoint-service-name + ``` + +2. Go to the detail page of your endpoint service on the [Alibaba Cloud console](https://console.alibabacloud.com). In the **Endpoint Connections** tab, allow the endpoint connection request from TiDB Cloud. + +
+
+ +## Attach domains to a private link connection + +You can attach domains to a private link connection. When a domain is attached to the private link connection, all traffic from TiDB Cloud dataflow services to this domain will be routed to this private link connection. It is useful when your service provides custom domains to clients at runtime, such as Kafka advertised listeners. + +Different private link connection types support attaching different domain types. The following table shows supported domain types for each private link connection type. + +| Private link connection type | Supported domain type | +|--------------------------------|-------------------------------------------| +| AWS Endpoint Service |
  • TiDB Cloud managed (`aws.tidbcloud.com`)
  • Confluent Dedicated (`aws.confluent.cloud`)
| +| Alibaba Cloud Endpoint Service | TiDB Cloud managed (`alicloud.tidbcloud.com`) | + +If your domain is not included in this table, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) to request support. + +You can attach domains to a private link connection using the TiDB Cloud console or the TiDB Cloud CLI. + + +
+ +To attach domains to a private link connection using the TiDB Cloud console, do the following: + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **Networking** in the left navigation pane. + +3. In the **Private Link Connection For Dataflow** area, choose the target private link connection, and then click **...**. + +4. Click **Attach Domains**. + +5. In the **Attach Domains** dialog, choose the domain type: + + - **TiDB Cloud Managed**: the domains will be generated automatically by TiDB Cloud. In the name of a generated domain, you can get the unique name for the domain. For example, if a generated domain is `*.use1-az1.dvs6nl5jgveztmla3pxkxgh76i.aws.plc.tidbcloud.com`, then the unique name is `dvs6nl5jgveztmla3pxkxgh76i`. Click **Attach Domains** to confirm. + - **Confluent Cloud**: enter the unique name provided by the Confluent Cloud Dedicated cluster to generate the domains, and then click **Attach Domains** to confirm. Refer to [Connect to Confluent Cloud via a Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-aws-confluent.md#step-1-set-up-a-confluent-cloud-network) for more information about how to get the unique name. + +
+ +
+ +To attach a TiDB Cloud managed domain using the TiDB Cloud CLI, do the following: + +1. Use `dry run` to preview the domains to be attached. It outputs a unique name for the next step. + + ```shell + ticloud serverless private-link-connection attach-domains -c --private-link-connection-id --type TIDBCLOUD_MANAGED --dry-run + ``` + +2. Attach the domains with the unique name from the previous step. + + ```shell + ticloud serverless private-link-connection attach-domains -c --private-link-connection-id --type TIDBCLOUD_MANAGED --unique-name + ``` + +To attach a Confluent Cloud domain, run the following command: + +```shell +ticloud serverless private-link-connection attach-domains -c --private-link-connection-id --type CONFLUENT --unique-name +``` + +
+
+ +## Detach domains from a private link connection + +You can detach domains from a private link connection using the TiDB Cloud console or the TiDB Cloud CLI. + + +
+ +To detach domains from a private link connection using the TiDB Cloud console, do the following: + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **Networking** in the left navigation pane. + +3. In the **Private Link Connection For Dataflow** area, choose the target private link connection, and then click **...**. + +4. Click **Detach Domains**, and then confirm the detachment. + +
+ +
+ +To detach domains from a private link connection using the TiDB Cloud CLI, do the following: + +1. Get the private link connection details to find the `attach-domain-id`: + + ```shell + ticloud serverless private-link-connection get -c --private-link-connection-id + ``` + +2. Detach the domain by the `attach-domain-id`: + + ```shell + ticloud serverless private-link-connection detach-domains -c --private-link-connection-id --attach-domain-id + ``` + +
+
+ +## Delete a private link connection + +You can delete a private link connection using the TiDB Cloud console or the TiDB Cloud CLI. + + +
+ +To delete a private link connection using the TiDB Cloud console, do the following: + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/) and navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +2. Click the name of your target cluster to go to its overview page, and then click **Settings** > **Networking** in the left navigation pane. + +3. In the **Private Link Connection For Dataflow** area, choose the target private link connection, and then click **...**. + +4. Click **Delete**, and then confirm the deletion. + +
+ +
+ +To delete a private link connection, run the following command: + +```shell +ticloud serverless private-link-connection delete -c --private-link-connection-id +``` + +
+
+ +## See also + +- [Connect to Confluent Cloud via a Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-aws-confluent.md) +- [Connect to Amazon RDS via a Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-aws-rds.md) +- [Connect to Alibaba Cloud ApsaraDB RDS for MySQL via a Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-alicloud-rds.md) +- [Connect to AWS Self-Hosted Kafka via Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-aws.md) +- [Connect to Alibaba Cloud Self-Hosted Kafka via a Private Link Connection](/tidb-cloud/serverless-private-link-connection-to-self-hosted-kafka-in-alicloud.md) diff --git a/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md b/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md new file mode 100644 index 0000000000000..4af012f718bad --- /dev/null +++ b/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md @@ -0,0 +1,64 @@ +--- +title: Connect to {{{ .starter }}} or Essential via Alibaba Cloud Private Endpoint +summary: Learn how to connect to your TiDB Cloud cluster via Alibaba Cloud private endpoint. +--- + +# Connect to {{{ .starter }}} or Essential via Alibaba Cloud Private Endpoint + +This tutorial walks you through the steps to connect to your {{{ .starter }}} or Essential cluster via a private endpoint on Alibaba Cloud. Connecting through a private endpoint allows secure and private communication between your services and your TiDB Cloud cluster without using the public internet. + +> **Tip:** +> +> To learn how to connect to a {{{ .starter }}} or Essential cluster via AWS PrivateLink, see [Connect to TiDB Cloud via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). + +## Restrictions + +- Currently, {{{ .starter }}} and {{{ .essential }}} support private endpoint connections when the endpoint service is hosted on AWS or Alibaba Cloud. 
If the service is hosted on another cloud provider, the private endpoint is not applicable. +- Cross-region private endpoint connections is not supported. + +## Set up a private endpoint with Alibaba Cloud + +To connect to your {{{ .starter }}} or {{{ .essential }}} cluster via a private endpoint, follow these steps: + +1. [Choose a TiDB cluster](#step-1-choose-a-tidb-cluster) +2. [Create a private endpoint on Alibaba Cloud](#step-2-create-a-private-endpoint-on-alibaba-cloud) +3. [Connect to your TiDB cluster using the private endpoint](#step-3-connect-to-your-tidb-cluster-using-the-private-endpoint) + +### Step 1. Choose a TiDB cluster + +1. On the [**Clusters**](https://{{{.console-url}}}/project/clusters) page, click the name of your target TiDB Cloud cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed. +3. In the **Connection Type** drop-down list, select **Private Endpoint**. +4. Take a note of **Service Name**, **Availability Zone ID**, and **Region ID**. + +### Step 2. Create a private endpoint on Alibaba Cloud + +To use the Alibaba Cloud Management Console to create a VPC interface endpoint, perform the following steps: + +1. Sign in to the [Alibaba Cloud Management Console](https://account.alibabacloud.com/login/login.htm). +2. Navigate to **VPC** > **Endpoints**. +3. Under the **Interface Endpoints** tab, click **Create Endpoint**. +4. Fill out the endpoint information: + - **Region**: select the same region as your TiDB Cloud cluster. + - **Endpoint Name**: choose a name for the endpoint. + - **Endpoint Type**: select **Interface Endpoint**. + - **Endpoint Service**: select **Other Endpoint Services**. + +5. In the **Endpoint Service Name** field, paste the service name you copied from TiDB Cloud. +6. Click **Verify**. A green check will appear if the service is valid. +7. Choose the **VPC**, **Security Group**, and **Zone** to use for the endpoint. +8. Click **OK** to create the endpoint. 
+9. Wait for the endpoint status to become **Active** and the connection status to become **Connected**. + +### Step 3: Connect to your TiDB cluster using the private endpoint + +After you have created the interface endpoint, go back to the TiDB Cloud console and take the following steps: + +1. On the [**Clusters**](https://{{{.console-url}}}/project/clusters) page, click the name of your target cluster to go to its overview page. +2. Click **Connect** in the upper-right corner. A connection dialog is displayed. +3. In the **Connection Type** drop-down list, select **Private Endpoint**. +4. In the **Connect With** drop-down list, select your preferred connection method. The corresponding connection string is displayed at the bottom of the dialog. + + For the host, go to the **Endpoint Details** page in Alibaba Cloud, and copy the **Domain Name of Endpoint Service** as your host. + +5. Connect to your cluster with the connection string. diff --git a/tidb-cloud/set-up-private-endpoint-connections-on-azure.md b/tidb-cloud/set-up-private-endpoint-connections-on-azure.md index 58b1621a761ba..cb0d0d68e08a9 100644 --- a/tidb-cloud/set-up-private-endpoint-connections-on-azure.md +++ b/tidb-cloud/set-up-private-endpoint-connections-on-azure.md @@ -7,11 +7,27 @@ summary: Learn how to connect to TiDB Cloud Dedicated Cluster via Azure Private This document describes how to connect to your TiDB Cloud Dedicated cluster via [Azure Private Link](https://learn.microsoft.com/en-us/azure/private-link/private-link-overview). + + > **Tip:** > > - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with AWS, see [Connect to a TiDB Cloud Dedicated Cluster via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections.md). 
> - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with Google Cloud, see [Connect to a TiDB Cloud Dedicated Cluster via Google Cloud Private Service Connect](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md) -> - To learn how to connect to a TiDB Cloud Serverless cluster via private endpoint, see [Connect to TiDB Cloud Serverless via Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). +> - To learn how to connect to a {{{ .starter }}} or {{{ .essential }}} cluster via private endpoint, see the following documents: +> - [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) +> - [Connect to {{{ .starter }}} or Essential via Alibaba Cloud Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md) + + + + + +> **Tip:** +> +> - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with AWS, see [Connect to a TiDB Cloud Dedicated Cluster via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections.md). +> - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with Google Cloud, see [Connect to a TiDB Cloud Dedicated Cluster via Google Cloud Private Service Connect](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md) +> - To learn how to connect to a {{{ .starter }}} or {{{ .essential }}} cluster via private endpoint, see [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). + + TiDB Cloud supports highly secure and one-way access to the TiDB Cloud service hosted in an Azure virtual network via [Azure Private Link](https://learn.microsoft.com/en-us/azure/private-link/private-link-overview), as if the service were in your own virtual network. 
You can create a private endpoint in your virtual network, and then connect to the TiDB Cloud service via the endpoint with permission. diff --git a/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md b/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md index 9bebab77f3538..82cfad4bcb797 100644 --- a/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md +++ b/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md @@ -7,11 +7,27 @@ summary: Learn how to connect to your TiDB Cloud cluster via Google Cloud Privat This document describes how to connect to your TiDB Cloud Dedicated cluster via [Private Service Connect](https://cloud.google.com/vpc/docs/private-service-connect). Google Cloud Private Service Connect is a private endpoint service provided by Google Cloud. + + > **Tip:** > > - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with AWS, see [Connect to a TiDB Cloud Dedicated Cluster via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections.md). > - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with Azure, see [Connect to a TiDB Cloud Dedicated Cluster via Azure Private Link](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md). -> - To learn how to connect to a TiDB Cloud Serverless cluster via private endpoint, see [Connect to TiDB Cloud Serverless via Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). 
+> - To learn how to connect to a {{{ .starter }}} or {{{ .essential }}} cluster via private endpoint, see the following documents: +> - [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) +> - [Connect to {{{ .starter }}} or Essential via Alibaba Cloud Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md) + + + + + +> **Tip:** +> +> - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with AWS, see [Connect to a TiDB Cloud Dedicated Cluster via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections.md). +> - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with Azure, see [Connect to a TiDB Cloud Dedicated Cluster via Azure Private Link](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md). +> - To learn how to connect to a {{{ .starter }}} or {{{ .essential }}} cluster via private endpoint, see [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). + + TiDB Cloud supports highly secure and one-way access to the TiDB Cloud service hosted in a Google Cloud VPC via [Private Service Connect](https://cloud.google.com/vpc/docs/private-service-connect). You can create an endpoint and use it to connect to the TiDB Cloud service . @@ -32,7 +48,12 @@ For more detailed definitions of the private endpoint and endpoint service, see - Only the `Organization Owner` and `Project Owner` roles can create Google Cloud Private Service Connect endpoints. - Each TiDB cluster can handle connections from up to 10 endpoints. - Each Google Cloud project can have up to 10 endpoints connecting to a TiDB Cluster. -- You can create up to 8 TiDB Cloud Dedicated clusters hosted on Google Cloud in a project with the endpoint service configured. 
+- Starting August 12, 2025, the maximum number of Google Private Service Connect (PSC) connections you can create per region for TiDB Cloud Dedicated clusters on Google Cloud depends on the NAT subnet CIDR block size: + - `/20`: up to 7 PSC connections per region + - `/19`: up to 23 PSC connections per region + - `/18`: up to 55 PSC connections per region + - `/17`: up to 119 PSC connections per region + - `/16`: up to 247 PSC connections per region - The private endpoint and the TiDB cluster to be connected must be located in the same region. - Egress firewall rules must permit traffic to the internal IP address of the endpoint. The [implied allow egress firewall rule](https://cloud.google.com/firewall/docs/firewalls#default_firewall_rules) permits egress to any destination IP address. - If you have created egress deny firewall rules in your VPC network, or if you have created hierarchical firewall policies that modify the implied allowed egress behavior, access to the endpoint might be affected. In this case, you need to create a specific egress allow firewall rule or policy to permit traffic to the internal destination IP address of the endpoint. diff --git a/tidb-cloud/set-up-private-endpoint-connections-serverless.md b/tidb-cloud/set-up-private-endpoint-connections-serverless.md index 15065dd1ba09f..a55c18dd54508 100644 --- a/tidb-cloud/set-up-private-endpoint-connections-serverless.md +++ b/tidb-cloud/set-up-private-endpoint-connections-serverless.md @@ -1,11 +1,11 @@ --- -title: Connect to TiDB Cloud Serverless via Private Endpoint +title: Connect to {{{ .starter }}} or Essential via AWS PrivateLink summary: Learn how to connect to your TiDB Cloud cluster via private endpoint. --- -# Connect to TiDB Cloud Serverless via Private Endpoint +# Connect to {{{ .starter }}} or Essential via AWS PrivateLink -This document describes how to connect to your TiDB Cloud Serverless cluster via private endpoint. 
+This document describes how to connect to your {{{ .starter }}} or {{{ .essential }}} cluster via AWS PrivateLink. > **Tip:** > @@ -28,8 +28,8 @@ For more detailed definitions of the private endpoint and endpoint service, see ## Restrictions -- Currently, TiDB Cloud supports private endpoint connection to TiDB Cloud Serverless only when the endpoint service is hosted in AWS. If the service is hosted in Google Cloud, the private endpoint is not applicable. -- Private endpoint connection across regions is not supported. +- Currently, TiDB Cloud supports AWS PrivateLink connections only when the endpoint service is hosted in AWS. If the service is hosted in other cloud providers, the AWS PrivateLink connection is not applicable. +- Cross-region private endpoint connections are not supported. ## Prerequisites @@ -37,7 +37,7 @@ Make sure that DNS hostnames and DNS resolution are both enabled in your AWS VPC ## Set up a private endpoint with AWS -To connect to your TiDB Cloud Serverless cluster via a private endpoint, follow these steps: +To connect to your {{{ .starter }}} or {{{ .essential }}} cluster via a private endpoint, follow these steps: 1. [Choose a TiDB cluster](#step-1-choose-a-tidb-cluster) 2. [Create an AWS interface endpoint](#step-2-create-an-aws-interface-endpoint) @@ -45,14 +45,14 @@ To connect to your TiDB Cloud Serverless cluster via a private endpoint, follow ### Step 1. Choose a TiDB cluster -1. On the [**Clusters**](https://tidbcloud.com/project/clusters) page, click the name of your target TiDB Cloud Serverless cluster to go to its overview page. +1. On the [**Clusters**](https://tidbcloud.com/project/clusters) page, click the name of your target {{{ .starter }}} or {{{ .essential }}} cluster to go to its overview page. 2. Click **Connect** in the upper-right corner. A connection dialog is displayed. 3. In the **Connection Type** drop-down list, select **Private Endpoint**. 4. 
Take a note of **Service Name**, **Availability Zone ID**, and **Region ID**. > **Note:** > - > You only need to create one private endpoint per AWS region, which can be shared by all TiDB Cloud Serverless clusters located in the same region. + > You only need to create one private endpoint per AWS region, which can be shared by all {{{ .starter }}} or {{{ .essential }}} clusters located in the same region. ### Step 2. Create an AWS interface endpoint diff --git a/tidb-cloud/set-up-private-endpoint-connections.md b/tidb-cloud/set-up-private-endpoint-connections.md index 18f74fa20250e..ecc47ba6cca76 100644 --- a/tidb-cloud/set-up-private-endpoint-connections.md +++ b/tidb-cloud/set-up-private-endpoint-connections.md @@ -9,7 +9,7 @@ This document describes how to connect to your TiDB Cloud Dedicated cluster via > **Tip:** > -> - To learn how to connect to a TiDB Cloud Serverless cluster via private endpoint, see [Connect to TiDB Cloud Serverless via Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). +> - To learn how to connect to a {{{ .starter }}} or {{{ .essential }}} cluster via AWS PrivateLink, see [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md). > - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with Azure, see [Connect to a TiDB Cloud Dedicated Cluster via Azure Private Link](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md). > - To learn how to connect to a TiDB Cloud Dedicated cluster via private endpoint with Google Cloud, see [Connect to a TiDB Cloud Dedicated Cluster via Google Cloud Private Service Connect](/tidb-cloud/set-up-private-endpoint-connections-on-google-cloud.md). @@ -28,8 +28,8 @@ For more detailed definitions of the private endpoint and endpoint service, see ## Restrictions -- Only the `Organization Owner` and the `Project Owner` roles can create private endpoints. 
-- The private endpoint and the TiDB cluster to be connected must be located in the same region. +- Only users with the `Organization Owner` or `Project Owner` role can create private endpoints. +- The private endpoint and the TiDB cluster you want to connect to must be located in the same region. In most scenarios, you are recommended to use private endpoint connection over VPC peering. However, in the following scenarios, you should use VPC peering instead of private endpoint connection: @@ -120,7 +120,7 @@ To use the AWS Management Console to create a VPC interface endpoint, perform th > **Note:** > - > Make sure the selected security group allows inbound access from your EC2 instances on Port 4000 or a customer-defined port. + > Make sure the selected security group allows inbound access from your EC2 instances on port `4000` or a customer-defined port. 9. Click **Create endpoint**. @@ -153,7 +153,7 @@ To enable private DNS using your AWS CLI, copy the following `aws ec2 modify-vpc aws ec2 modify-vpc-endpoint --vpc-endpoint-id ${your_vpc_endpoint_id} --private-dns-enabled ``` -Alternatively, you can find the command on the **Networking** page of your cluster. Locate the private endpoint and click **...*** > **Enable DNS** in the **Action** column. +Alternatively, you can find the command on the **Networking** page of your cluster. Locate the private endpoint and click **...** > **Enable DNS** in the **Action** column.
@@ -184,7 +184,7 @@ After you have accepted the private endpoint connection, you are redirected back ### Private endpoint status reference -When you use private endpoint connections, the statuses of private endpoints or private endpoint services are displayed on the following pages: +When you use private endpoint connections, the statuses of private endpoints and private endpoint services are displayed on the following pages: - Cluster-level **Networking** page: switch to your target cluster using the combo box in the upper-left corner, and then click **Settings** > **Networking** in the left navigation pane. - Project-level **Network Access** page: switch to your target project using the combo box in the upper-left corner, and then click **Project Settings** > **Network Access** in the left navigation pane. @@ -195,7 +195,7 @@ The possible statuses of a private endpoint are explained as follows: - **Pending**: Waiting for processing. - **Active**: Your private endpoint is ready to use. You cannot edit the private endpoint of this status. - **Deleting**: The private endpoint is being deleted. -- **Failed**: The private endpoint creation fails. You can click **Edit** of that row to retry the creation. +- **Failed**: The private endpoint creation fails. You can click **Edit** in that row to retry the creation. The possible statuses of a private endpoint service are explained as follows: @@ -207,6 +207,6 @@ The possible statuses of a private endpoint service are explained as follows: ### I cannot connect to a TiDB cluster via a private endpoint after enabling private DNS. Why? -You might need to properly set the security group for your VPC endpoint in the AWS Management Console. Go to **VPC** > **Endpoints**. Right-click your VPC endpoint and select the proper **Manage security groups**. A proper security group within your VPC that allows inbound access from your EC2 instances on Port 4000 or a customer-defined port. 
+You might need to properly set the security group for your VPC endpoint in the AWS Management Console. To do so, go to **VPC** > **Endpoints**, right-click your VPC endpoint, and select **Manage security groups**. Ensure that the selected security group allows inbound access from your EC2 instances on port `4000` or a customer-defined port. ![Manage security groups](/media/tidb-cloud/private-endpoint/manage-security-groups.png) diff --git a/tidb-cloud/set-up-sink-private-endpoint.md b/tidb-cloud/set-up-sink-private-endpoint.md new file mode 100644 index 0000000000000..b70b45da96ce0 --- /dev/null +++ b/tidb-cloud/set-up-sink-private-endpoint.md @@ -0,0 +1,127 @@ +--- +title: Set Up Private Endpoint for Changefeeds +summary: Learn how to set up a private endpoint for changefeeds. +--- + +# Set Up Private Endpoint for Changefeeds + +This document describes how to create a private endpoint for changefeeds in your [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, enabling you to securely stream data to self-hosted Kafka or MySQL through private connectivity. + +## Restrictions + +Within the same VPC, each Private Endpoint Service in AWS, Service Attachment in Google Cloud, or Private Link Service in Azure can have up to 5 private endpoints. If this limit is exceeded, remove any unused private endpoints before creating new ones. + +## Prerequisites + +- Check permissions for private endpoint creation +- Set up your network connection + +### Permissions + +Only users with any of the following roles in your organization can create private endpoints for changefeeds: + +- `Organization Owner` +- `Project Owner` +- `Project Data Access Read-Write` + +For more information about roles in TiDB Cloud, see [User roles](/tidb-cloud/manage-user-access.md#user-roles). 
+ +### Network + +Private endpoints leverage **Private Link** or **Private Service Connect** technologies from cloud providers, enabling resources in your VPC to connect to services in other VPCs through private IP addresses, as if those services were hosted directly within your VPC. + + +
+ +If your changefeed downstream service is hosted on AWS, collect the following information: + +- The name of the Private Endpoint Service for your downstream service +- The availability zones (AZs) where your downstream service is deployed + +If the Private Endpoint Service is not available for your downstream service, follow [Step 2. Expose the Kafka cluster as Private Link Service](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md#step-2-expose-the-kafka-cluster-as-private-link-service) to set up the load balancer and the Private Link Service. + +
+ +
+ +If your changefeed downstream service is hosted on Google Cloud, collect the Service Attachment information of your downstream service. + +If Service Attachment is not available for your downstream service, follow [Step 2. Expose Kafka-proxy as Private Service Connect Service](/tidb-cloud/setup-self-hosted-kafka-private-service-connect.md#step-2-expose-kafka-proxy-as-private-service-connect-service) to get the Service Attachment information. + +
+ +
+ +If your changefeed downstream service is hosted on Azure, collect the alias of the Private Link Service of your downstream service. + +If the Private Endpoint Service is not available for your downstream service, follow [Step 2. Expose the Kafka cluster as Private Link Service](/tidb-cloud/setup-azure-self-hosted-kafka-private-link-service.md#step-2-expose-the-kafka-cluster-as-private-link-service) to set up the load balancer and the Private Link Service. + +
+
+ +## Step 1. Open the Networking page for your cluster + +1. Log in to the [TiDB Cloud console](https://tidbcloud.com/). + +2. On the [**Clusters**](https://tidbcloud.com/project/clusters) page, click the name of your target cluster to go to its overview page. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + +3. In the left navigation pane, click **Settings** > **Networking**. + +## Step 2. Configure the private endpoint for changefeeds + +The configuration steps vary depending on the cloud provider where your cluster is deployed. + + +
+ +1. On the **Networking** page, click **Create Private Endpoint** in the **AWS Private Endpoint for Changefeed** section. +2. In the **Create Private Endpoint for Changefeed** dialog, enter a name for the private endpoint. +3. Follow the reminder to authorize the [AWS Principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-accounts) of TiDB Cloud to create an endpoint. +4. Enter the **Endpoint Service Name** that you collected in the [Network](#network) section. +5. Select the **Number of AZs**. Ensure that the number of AZs and the AZ IDs match your Kafka deployment. +6. If this private endpoint is created for Apache Kafka, enable the **Advertised Listener for Kafka** option. +7. Configure the advertised listener for Kafka using either the **TiDB Managed** domain or the **Custom** domain. + + - To use the **TiDB Managed** domain for advertised listeners, enter a unique string in the **Domain Pattern** field, and then click **Generate**. TiDB will generate broker addresses with subdomains for each availability zone. + - To use your own **Custom** domain for advertised listeners, switch the domain type to **Custom**, enter the root domain in the **Custom Domain** field, click **Check**, and then specify the broker subdomains for each availability zone. + +8. Click **Create** to validate the configurations and create the private endpoint. + +
+ +
+ +1. On the **Networking** page, click **Create Private Endpoint** in the **Google Cloud Private Endpoint for Changefeed** section. +2. In the **Create Private Endpoint for Changefeed** dialog, enter a name for the private endpoint. +3. Follow the reminder to authorize the [Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects) of TiDB Cloud to pre-approve endpoint creation, or manually approve the endpoint connection request when you receive it. +4. Enter the **Service Attachment** that you collected in the [Network](#network) section. +5. If this private endpoint is created for Apache Kafka, enable the **Advertised Listener for Kafka** option. +6. Configure the advertised listener for Kafka using either the **TiDB Managed** domain or the **Custom** domain. + + - To use the **TiDB Managed** domain for advertised listeners, enter a unique string in the **Domain Pattern** field, and then click **Generate**. TiDB will generate broker addresses with subdomains for each availability zone. + - To use your own **Custom** domain for advertised listeners, switch the domain type to **Custom**, enter the root domain in the **Custom Domain** field, click **Check**, and then specify the broker subdomains for each availability zone. + +7. Click **Create** to validate the configurations and create the private endpoint. + +
+ +
+ +1. On the **Networking** page, click **Create Private Endpoint** in the **Azure Private Endpoint for Changefeed** section. +2. In the **Create Private Endpoint for Changefeed** dialog, enter a name for the private endpoint. +3. Follow the reminder to authorize the Azure subscription of TiDB Cloud or allow anyone with your alias to access your Private Link service before creating the changefeed. For more information about Private Link service visibility, see [Control service exposure](https://learn.microsoft.com/en-us/azure/private-link/private-link-service-overview#control-service-exposure) in Azure documentation. +4. Enter the **Alias of Private Link Service** that you collected in the [Network](#network) section. +5. If this private endpoint is created for Apache Kafka, enable the **Advertised Listener for Kafka** option. +6. Configure the advertised listener for Kafka using either the **TiDB Managed** domain or the **Custom** domain. + + - To use the **TiDB Managed** domain for advertised listeners, enter a unique string in the **Domain Pattern** field, and then click **Generate**. TiDB will generate broker addresses with subdomains for each availability zone. + - To use your own **Custom** domain for advertised listeners, switch the domain type to **Custom**, enter the root domain in the **Custom Domain** field, click **Check**, and then specify the broker subdomains for each availability zone. + +7. Click **Create** to validate the configurations and create the private endpoint. + +
+
diff --git a/tidb-cloud/set-up-vpc-peering-connections.md b/tidb-cloud/set-up-vpc-peering-connections.md index 87259ad7c34d1..59b41e57a50dc 100644 --- a/tidb-cloud/set-up-vpc-peering-connections.md +++ b/tidb-cloud/set-up-vpc-peering-connections.md @@ -7,7 +7,7 @@ summary: Learn how to connect to TiDB Cloud Dedicated via VPC peering. > **Note:** > -> VPC peering connection is only available for TiDB Cloud Dedicated clusters hosted on AWS and Google Cloud. You cannot use VPC peering to connect to [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on Azure and [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +> VPC peering connection is only available for TiDB Cloud Dedicated clusters hosted on AWS and Google Cloud. To connect your application to TiDB Cloud via VPC peering, you need to set up [VPC peering](/tidb-cloud/tidb-cloud-glossary.md#vpc-peering) with TiDB Cloud. This document walks you through setting up VPC peering connections [on AWS](#set-up-vpc-peering-on-aws) and [on Google Cloud](#set-up-vpc-peering-on-google-cloud) and connecting to TiDB Cloud via a VPC peering. diff --git a/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md b/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md index e183587662098..f76340f2f521a 100644 --- a/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md +++ b/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md @@ -23,7 +23,9 @@ The document provides an example of connecting to a Kafka Private Link service d ## Prerequisites -1. Ensure that you have the following authorization to set up a Kafka Private Link service in your own AWS account. + + +1. Ensure that you have the following authorization to set up a Kafka Private Link service in your own AWS account. - Manage EC2 nodes - Manage VPC @@ -48,6 +50,31 @@ The document provides an example of connecting to a Kafka Private Link service d 1. 
Input a unique random string. It can only include numbers or lowercase letters. You will use it to generate **Kafka Advertised Listener Pattern** later. 2. Click **Check usage and generate** to check if the random string is unique and generate **Kafka Advertised Listener Pattern** that will be used to assemble the EXTERNAL advertised listener for Kafka brokers. + + + +1. Ensure that you have the following authorization to set up a Kafka Private Link service in your own AWS account. + + - Manage EC2 nodes + - Manage VPC + - Manage subnets + - Manage security groups + - Manage load balancer + - Manage endpoint services + - Connect to EC2 nodes to configure Kafka nodes + +2. [Create a {{{ .premium }}} instance](/tidb-cloud/premium/create-tidb-instance-premium.md) if you do not have one. + +3. Get the Kafka deployment information from your {{{ .premium }}} instance. + + 1. In the [TiDB Cloud console](https://tidbcloud.com), navigate to the instance overview page of the TiDB instance, and then click **Data** > **Changefeed** in the left navigation pane. + 2. On the overview page, find the region of the TiDB instance. Ensure that your Kafka cluster will be deployed to the same region. + 3. To create a changefeed, refer to the tutorials: + + - [Sink to Apache Kafka](/tidb-cloud/changefeed-sink-to-apache-kafka.md) + + + Note down all the deployment information. You need to use it to configure your Kafka Private Link service later. The following table shows an example of the deployment information. @@ -154,15 +181,15 @@ Take the following steps to create the Kafka VPC. Go to the [EC2 Listing page](https://console.aws.amazon.com/ec2/home#Instances:). Create the bastion node in the bastion subnet. - **Name**: `bastion-node` -- **Amazon Machine Image**: `Amazon linux` +- **Amazon Machine Image**: `Amazon Linux` - **Instance Type**: `t2.small` -- **Key pair**: `kafka-vpc-key-pair`. Create a new key pair named `kafka-vpc-key-pair`. 
Download **kafka-vpc-key-pair.pem** to your local for later configuration. +- **Key pair**: `kafka-vpc-key-pair`. Create a new key pair named `kafka-vpc-key-pair`. Download `kafka-vpc-key-pair.pem` to your local machine for later configuration. - Network settings - **VPC**: `Kafka VPC` - **Subnet**: `bastion` - **Auto-assign public IP**: `Enable` - - **Security Group**: create a new security group allow SSH login from anywhere. You can narrow the rule for safety in the production environment. + - **Security Group**: create a new security group to allow SSH login from anywhere. You can narrow the rule for safety in the production environment. **2.2. Create broker nodes** @@ -171,7 +198,7 @@ Go to the [EC2 Listing page](https://console.aws.amazon.com/ec2/home#Instances:) - Broker 1 in subnet `broker-usw2-az1` - **Name**: `broker-node1` - - **Amazon Machine Image**: `Amazon linux` + - **Amazon Machine Image**: `Amazon Linux` - **Instance Type**: `t2.large` - **Key pair**: reuse `kafka-vpc-key-pair` - Network settings @@ -187,7 +214,7 @@ Go to the [EC2 Listing page](https://console.aws.amazon.com/ec2/home#Instances:) - Broker 2 in subnet `broker-usw2-az2` - **Name**: `broker-node2` - - **Amazon Machine Image**: `Amazon linux` + - **Amazon Machine Image**: `Amazon Linux` - **Instance Type**: `t2.large` - **Key pair**: reuse `kafka-vpc-key-pair` - Network settings @@ -203,7 +230,7 @@ Go to the [EC2 Listing page](https://console.aws.amazon.com/ec2/home#Instances:) - Broker 3 in subnet `broker-usw2-az3` - **Name**: `broker-node3` - - **Amazon Machine Image**: `Amazon linux` + - **Amazon Machine Image**: `Amazon Linux` - **Instance Type**: `t2.large` - **Key pair**: reuse `kafka-vpc-key-pair` - Network settings @@ -523,8 +550,17 @@ LOG_DIR=$KAFKA_LOG_DIR nohup $KAFKA_START_CMD "$KAFKA_CONFIG_DIR/server.properti ### Reconfigure a running Kafka cluster + + Ensure that your Kafka cluster is deployed in the same region and AZs as the TiDB cluster. 
If any brokers are in different AZs, move them to the correct ones. + + + +Ensure that your Kafka cluster is deployed in the same region and AZs as the TiDB instance. If any brokers are in different AZs, move them to the correct ones. + + + #### 1. Configure the EXTERNAL listener for brokers The following configuration applies to a Kafka KRaft cluster. The ZK mode configuration is similar. @@ -729,7 +765,7 @@ Do the following to set up the load balancer: ## Step 3. Connect from TiDB Cloud -1. Return to the [TiDB Cloud console](https://tidbcloud.com) to create a changefeed for the cluster to connect to the Kafka cluster by **Private Link**. For more information, see [Sink to Apache Kafka](/tidb-cloud/changefeed-sink-to-apache-kafka.md). +1. Return to the [TiDB Cloud console](https://tidbcloud.com) to create a changefeed for the clusterinstance to connect to the Kafka cluster by **Private Link**. For more information, see [Sink to Apache Kafka](/tidb-cloud/changefeed-sink-to-apache-kafka.md). 2. When you proceed to **Configure the changefeed target > Connectivity Method > Private Link**, fill in the following fields with corresponding values and other fields as needed. diff --git a/tidb-cloud/size-your-cluster.md b/tidb-cloud/size-your-cluster.md index 43fb92bce9261..7584ae528aeb6 100644 --- a/tidb-cloud/size-your-cluster.md +++ b/tidb-cloud/size-your-cluster.md @@ -9,7 +9,7 @@ This document describes how to determine the size of a TiDB Cloud Dedicated clus > **Note:** > -> You cannot change the size of a [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster. +> You cannot change the size of a {{{ .starter }}} or {{{ .essential }}} cluster. ## Size TiDB @@ -39,6 +39,8 @@ The supported vCPU and RAM sizes include the following: > - The node count of TiDB can only be set to 1 or 2, and the node count of TiKV is fixed to 3. > - 4 vCPU TiDB can only be used with 4 vCPU TiKV. > - TiFlash is unavailable. 
+> +> The **4 vCPU, 16 GiB** size of TiDB is designed for learning, testing, and trial purposes. It is suitable for pre-production environments or small, non-critical workloads. However, it is **NOT** recommended for full-scale production due to performance limitations. If you need lower costs and an SLA guarantee for production, consider using the [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) cluster plan. ### TiDB node count @@ -103,6 +105,8 @@ The supported vCPU and RAM sizes include the following: > - The node count of TiDB can only be set to 1 or 2, and the node count of TiKV is fixed to 3. > - 4 vCPU TiKV can only be used with 4 vCPU TiDB. > - TiFlash is unavailable. +> +> The **4 vCPU, 16 GiB** size of TiKV is designed for learning, testing, and trial purposes. It is suitable for pre-production environments or small, non-critical workloads. However, it is **NOT** recommended for full-scale production due to performance limitations. If you need lower costs and an SLA guarantee for production, consider using the [TiDB Cloud Essential](/tidb-cloud/select-cluster-tier.md#essential) cluster plan. ### TiKV node count @@ -207,7 +211,7 @@ The Standard storage type is applied automatically to new clusters hosted on AWS #### Performance and Plus storage -The Performance and Plus storage provide higher performance and stability, with pricing that reflects these enhanced capabilities. Currently, these two storage types are only available upon request for clusters deployed on AWS. To request the Performance or Plus storage, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com) and click **Request Support**. Then, fill in "Apply for TiKV storage type" in the **Description** field and click **Submit**. +The Performance and Plus storage provide higher performance and stability, with pricing that reflects these enhanced capabilities. 
Currently, these two storage types are only available upon request for clusters deployed on AWS. To request the Performance or Plus storage, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for TiKV storage type" in the **Description** field, and then click **Submit**. ## Size TiFlash @@ -265,4 +269,4 @@ The Basic storage is ideal for most workloads, providing a balance between perfo #### Plus storage -The Plus storage provides higher performance and stability, with pricing that reflects these enhanced capabilities. Currently, this storage type is only available upon request for clusters deployed on AWS. To request it, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com) and click **Request Support**. Then, fill in "Apply for TiFlash storage type" in the **Description** field and click **Submit**. \ No newline at end of file +The Plus storage provides higher performance and stability, with pricing that reflects these enhanced capabilities. Currently, this storage type is only available upon request for clusters deployed on AWS. To request it, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for TiFlash storage type" in the **Description** field, and then click **Submit**. 
\ No newline at end of file diff --git a/tidb-cloud/sql-proxy-account.md b/tidb-cloud/sql-proxy-account.md index 6b2314d630aba..814b24c5303b2 100644 --- a/tidb-cloud/sql-proxy-account.md +++ b/tidb-cloud/sql-proxy-account.md @@ -50,8 +50,8 @@ In some cases, the SQL proxy account username is exactly the same as the TiDB Cl | ----------- | ------------ | --------------- | | TiDB Cloud Dedicated | <= 32 characters | Full email address | | TiDB Cloud Dedicated | > 32 characters | `prefix($email, 23)_prefix(base58(sha1($email)), 8)` | -| TiDB Cloud Serverless | <= 15 characters | `serverless_unique_prefix + "." + email` | -| TiDB Cloud Serverless | > 15 characters | `serverless_unique_prefix + "." + prefix($email, 6)_prefix(base58(sha1($email)), 8)` | +| {{{ .starter }}} | <= 15 characters | `serverless_unique_prefix + "." + email` | +| {{{ .starter }}} | > 15 characters | `serverless_unique_prefix + "." + prefix($email, 6)_prefix(base58(sha1($email)), 8)` | Examples: @@ -59,12 +59,12 @@ Examples: | ----------- | ----- | -------- | | TiDB Cloud Dedicated | `user@pingcap.com` | `user@pingcap.com` | | TiDB Cloud Dedicated | `longemailaddressexample@pingcap.com` | `longemailaddressexample_48k1jwL9` | -| TiDB Cloud Serverless | `u1@pingcap.com` | `{user_name_prefix}.u1@pingcap.com` | -| TiDB Cloud Serverless | `longemailaddressexample@pingcap.com` | `{user_name_prefix}.longem_48k1jwL9`| +| {{{ .starter }}} | `u1@pingcap.com` | `{user_name_prefix}.u1@pingcap.com` | +| {{{ .starter }}} | `longemailaddressexample@pingcap.com` | `{user_name_prefix}.longem_48k1jwL9`| > **Note:** > -> In the preceding table, `{user_name_prefix}` is a unique prefix generated by TiDB Cloud to distinguish TiDB Cloud Serverless clusters. For details, see the [user name prefix](/tidb-cloud/select-cluster-tier.md#user-name-prefix) of TiDB Cloud Serverless clusters. +> In the preceding table, `{user_name_prefix}` is a unique prefix generated by TiDB Cloud to distinguish {{{ .starter }}} clusters. 
For details, see the [user name prefix](/tidb-cloud/select-cluster-tier.md#user-name-prefix) of {{{ .starter }}} clusters. ## SQL proxy account password diff --git a/tidb-cloud/starter/_index.md b/tidb-cloud/starter/_index.md new file mode 100644 index 0000000000000..1793557b416d8 --- /dev/null +++ b/tidb-cloud/starter/_index.md @@ -0,0 +1,140 @@ +--- +title: TiDB Cloud Documentation +hide_sidebar: true +hide_commit: true +summary: TiDB Cloud is a fully-managed Database-as-a-Service (DBaaS) that brings everything great about TiDB to your cloud. It offers guides, samples, and references for learning, trying, developing, maintaining, migrating, monitoring, tuning, securing, billing, integrating, and referencing. +--- + + + + + +[Why TiDB Cloud](https://docs.pingcap.com/tidbcloud/tidb-cloud-intro/?plan=starter) + +[Key Concepts](https://docs.pingcap.com/tidbcloud/key-concepts/?plan=starter) + +[FAQ](https://docs.pingcap.com/tidbcloud/tidb-cloud-faq/?plan=starter) + + + + + +[Try Out TiDB Cloud](https://docs.pingcap.com/tidbcloud/tidb-cloud-quickstart/?plan=starter) + +[Try Out TiDB + AI](https://docs.pingcap.com/tidbcloud/vector-search-get-started-using-python/?plan=starter) + +[Try Out HTAP](https://docs.pingcap.com/tidbcloud/tidb-cloud-htap-quickstart/?plan=starter) + +[Try Out TiDB Cloud CLI](https://docs.pingcap.com/tidbcloud/get-started-with-cli/?plan=starter) + + + + + +[Developer Guide Overview](https://docs.pingcap.com/tidbcloud/dev-guide-overview/?plan=starter) + +[Quick Start](https://docs.pingcap.com/tidbcloud/dev-guide-build-cluster-in-cloud/?plan=starter) + +[Example Application](https://docs.pingcap.com/tidbcloud/dev-guide-sample-application-spring-boot/?plan=starter) + + + + + +[Create a Cluster](https://docs.pingcap.com/tidbcloud/create-tidb-cluster-serverless/?plan=starter) + +[Connect to a Cluster](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster-serverless/?plan=starter) + +[Use an HTAP 
Cluster](https://docs.pingcap.com/tidbcloud/tiflash-overview/?plan=starter) + +[Back Up and Restore Data](https://docs.pingcap.com/tidbcloud/backup-and-restore-serverless/?plan=starter) + +[Use API (Beta)](https://docs.pingcap.com/tidbcloud/api-overview/?plan=starter) + +[Use TiDB Cloud CLI](https://docs.pingcap.com/tidbcloud/get-started-with-cli/?plan=starter) + + + + + +[From Amazon RDS for Oracle](https://docs.pingcap.com/tidbcloud/migrate-from-oracle-using-aws-dms/?plan=starter) + +[Import Sample Data](https://docs.pingcap.com/tidbcloud/import-sample-data-serverless/?plan=starter) + +[Import CSV Files](https://docs.pingcap.com/tidbcloud/import-csv-files-serverless/?plan=starter) + +[Import Parquet Files](https://docs.pingcap.com/tidbcloud/import-parquet-files-serverless/?plan=starter) + +[With MySQL CLI](https://docs.pingcap.com/tidbcloud/import-with-mysql-cli-serverless/?plan=starter) + + + + + +[Status and Metrics](https://docs.pingcap.com/tidbcloud/monitor-tidb-cluster/?plan=starter) + +[Built-in Monitoring](https://docs.pingcap.com/tidbcloud/built-in-monitoring/?plan=starter) + + + + + +[Tuning Overview](https://docs.pingcap.com/tidbcloud/tidb-cloud-tune-performance-overview/?plan=starter) + +[Analyze Performance](https://docs.pingcap.com/tidbcloud/tune-performance/?plan=starter) + +[Tune SQL Performance](https://docs.pingcap.com/tidbcloud/tidb-cloud-sql-tuning-overview/?plan=starter) + +[Tune TiFlash Performance](https://docs.pingcap.com/tidbcloud/tune-tiflash-performance/?plan=starter) + + + + + +[Password Authentication](https://docs.pingcap.com/tidbcloud/tidb-cloud-password-authentication/?plan=starter) + +[User Roles](https://docs.pingcap.com/tidbcloud/manage-user-access#user-roles/?plan=starter) + +[Manage User Profiles](https://docs.pingcap.com/tidbcloud/manage-user-access#manage-user-profiles/?plan=starter) + +[Manage Organization Access](https://docs.pingcap.com/tidbcloud/manage-user-access/?plan=starter#manage-organization-access) + +[Manage 
Project Access](https://docs.pingcap.com/tidbcloud/manage-user-access/?plan=starter#manage-project-access) + +[Configure Firewall Rules for Public Endpoints](https://docs.pingcap.com/tidbcloud/configure-serverless-firewall-rules-for-public-endpoints/?plan=starter) + + + + + +[Pricing](https://www.pingcap.com/tidb-cloud-starter-pricing-details/) + +[Invoices](https://docs.pingcap.com/tidbcloud/tidb-cloud-billing#invoices/?plan=starter) + +[Credits](https://docs.pingcap.com/tidbcloud/tidb-cloud-billing#credits/?plan=starter) + + + + + +[Airbyte](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-airbyte/?plan=starter) + +[Zapier](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-zapier/?plan=starter) + +[Vercel](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-vercel/?plan=starter) + +[Terraform](https://docs.pingcap.com/tidbcloud/terraform-tidbcloud-provider-overview/?plan=starter) + +[Amazon AppFlow](https://docs.pingcap.com/tidbcloud/dev-guide-aws-appflow-integration/?plan=starter) + + + + + +[SQL Reference](https://docs.pingcap.com/tidbcloud/basic-sql-operations/?plan=starter) + +[System Variables](https://docs.pingcap.com/tidbcloud/system-variables/?plan=starter) + + + + diff --git a/tidb-cloud/terraform-get-tidbcloud-provider.md b/tidb-cloud/terraform-get-tidbcloud-provider.md index b695d0c443372..519569464ea0c 100644 --- a/tidb-cloud/terraform-get-tidbcloud-provider.md +++ b/tidb-cloud/terraform-get-tidbcloud-provider.md @@ -126,4 +126,4 @@ Setting `sync` to `true` is recommended, but note that `sync` currently only wor ## Next step -Get started by managing a cluster with the [cluster resource](/tidb-cloud/terraform-use-cluster-resource.md). +Get started by managing a cluster with the [`tidbcloud_serverless_cluster`](/tidb-cloud/terraform-use-serverless-cluster-resource.md) or [`tidbcloud_dedicated_cluster`](/tidb-cloud/terraform-use-dedicated-cluster-resource.md) resource. 
diff --git a/tidb-cloud/terraform-migrate-cluster-resource.md b/tidb-cloud/terraform-migrate-cluster-resource.md index 4d2c95b44a826..e75ed715e1a4e 100644 --- a/tidb-cloud/terraform-migrate-cluster-resource.md +++ b/tidb-cloud/terraform-migrate-cluster-resource.md @@ -41,12 +41,12 @@ terraform state rm ${your_target_cluster_resource} In your `.tf` file, find the configuration of your target cluster resource and delete the corresponding code. -## Step 4. Add an import block for the new serverless or dedicated cluster resource +## Step 4. Add an import block for the new cluster resource -- If your target cluster is TiDB Cloud Serverless, add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the cluster ID you get from [Step 1](#step-1-identify-the-tidbcloud_cluster-resource-to-migrate): +- If your target cluster is {{{ .starter }}}, add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the cluster ID you get from [Step 1](#step-1-identify-the-tidbcloud_cluster-resource-to-migrate): ``` - # TiDB Cloud Serverless + # {{{ .starter }}} import { to = tidbcloud_serverless_cluster.example id = "${id}" diff --git a/tidb-cloud/terraform-tidbcloud-provider-overview.md b/tidb-cloud/terraform-tidbcloud-provider-overview.md index b248bf10ac2d8..3c6128d285529 100644 --- a/tidb-cloud/terraform-tidbcloud-provider-overview.md +++ b/tidb-cloud/terraform-tidbcloud-provider-overview.md @@ -50,6 +50,7 @@ To get all the available configurations for the resources and data sources, see - [Learn more about Terraform](https://www.terraform.io/docs) - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) -- [Use Cluster Resource](/tidb-cloud/terraform-use-cluster-resource.md) -- [Use Backup Resource](/tidb-cloud/terraform-use-backup-resource.md) -- [Use Restore Resource](/tidb-cloud/terraform-use-restore-resource.md) \ No 
newline at end of file +- [Use the `tidbcloud_serverless_cluster` Resource](/tidb-cloud/terraform-use-serverless-cluster-resource.md) +- [Use the `tidbcloud_dedicated_cluster` Resource](/tidb-cloud/terraform-use-dedicated-cluster-resource.md) +- [Use the `tidbcloud_backup` Resource](/tidb-cloud/terraform-use-backup-resource.md) +- [Use the `tidbcloud_restore` Resource](/tidb-cloud/terraform-use-restore-resource.md) \ No newline at end of file diff --git a/tidb-cloud/terraform-use-backup-resource.md b/tidb-cloud/terraform-use-backup-resource.md index e8f843e7853d0..5ba7c3c9d9539 100644 --- a/tidb-cloud/terraform-use-backup-resource.md +++ b/tidb-cloud/terraform-use-backup-resource.md @@ -1,9 +1,9 @@ --- -title: Use Backup Resource -summary: Learn how to create a backup of a TiDB Cloud cluster using the backup resource. +title: Use the `tidbcloud_backup` Resource +summary: Learn how to create a backup of a TiDB Cloud cluster using the `tidbcloud_backup` resource. --- -# Use Backup Resource +# Use the `tidbcloud_backup` Resource You can learn how to create a backup of a TiDB Cloud cluster with the `tidbcloud_backup` resource in this document. @@ -15,9 +15,9 @@ The features of the `tidbcloud_backup` resource include the following: ## Prerequisites - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md). -- The backup and restore feature is unavailable to TiDB Cloud Serverless clusters. To use backup resources, make sure that you have created a TiDB Cloud Dedicated cluster. +- The backup and restore feature introduced in this document is unavailable to {{{ .starter }}} or {{{ .essential }}} clusters. To use `tidbcloud_backup` resources, make sure that you have created a TiDB Cloud Dedicated cluster. -## Create a backup with the backup resource +## Create a backup with the `tidbcloud_backup` resource 1. Create a directory for the backup and enter it. 
@@ -48,7 +48,7 @@ The features of the `tidbcloud_backup` resource include the following: You need to replace resource values (such as project ID and cluster ID) in the file with your own. - If you have maintained a cluster resource (for example, `example_cluster`) using Terraform, you can also configure the backup resource as follows, without specifying the actual project ID and cluster ID. + If you have maintained a cluster resource (for example, `example_cluster`) using Terraform, you can also configure the `tidbcloud_backup` resource as follows, without specifying the actual project ID and cluster ID. ``` resource "tidbcloud_backup" "example_backup" { @@ -147,7 +147,7 @@ The features of the `tidbcloud_backup` resource include the following: When the status turns to `SUCCESS`, it indicates that you have created a backup for your cluster. Pay attention that the backup cannot be updated after the creation. -Now, you have created a backup for the cluster. If you want to use the backup to restore the cluster, you can [use the restore resources](/tidb-cloud/terraform-use-restore-resource.md). +Now, you have created a backup for the cluster. If you want to use the backup to restore the cluster, you can [use the `tidbcloud_restore` resource](/tidb-cloud/terraform-use-restore-resource.md). ## Update a backup @@ -155,7 +155,7 @@ Backups cannot be updated. ## Delete a backup -To delete a backup, go to the backup directory where the corresponding `backup.tf` file is located, and then run the `terraform destroy` command to destroy the backup resource. +To delete a backup, go to the backup directory where the corresponding `backup.tf` file is located, and then run the `terraform destroy` command to destroy the `tidbcloud_backup` resource. 
``` $ terraform destroy diff --git a/tidb-cloud/terraform-use-cluster-resource.md b/tidb-cloud/terraform-use-cluster-resource.md index b129a90032f54..dda82b82dba80 100644 --- a/tidb-cloud/terraform-use-cluster-resource.md +++ b/tidb-cloud/terraform-use-cluster-resource.md @@ -1,13 +1,13 @@ --- -title: Use Cluster Resource (Deprecated) +title: Use the `tidbcloud_cluster` Resource (Deprecated) summary: Learn how to use the cluster resource to create and modify a TiDB Cloud cluster. --- -# Use Cluster Resource (Deprecated) +# Use the `tidbcloud_cluster` Resource (Deprecated) > **Warning:** > -> Starting from [TiDB Cloud Terraform Provider](https://registry.terraform.io/providers/tidbcloud/tidbcloud) v0.4.0, the `tidbcloud_cluster` resource is deprecated. It is recommended to use the `tidbcloud_dedicated_cluster` or `tidbcloud_serverless_cluster` resource instead. For more information, see [Use TiDB Cloud Dedicated Cluster Resource](/tidb-cloud/terraform-use-dedicated-cluster-resource.md) or [Use TiDB Cloud Serverless Cluster Resource](/tidb-cloud/terraform-use-serverless-cluster-resource.md). +> Starting from [TiDB Cloud Terraform Provider](https://registry.terraform.io/providers/tidbcloud/tidbcloud) v0.4.0, the `tidbcloud_cluster` resource is deprecated. It is recommended to use the `tidbcloud_dedicated_cluster` or `tidbcloud_serverless_cluster` resource instead. For more information, see [Use the `tidbcloud_dedicated_cluster` Resource](/tidb-cloud/terraform-use-dedicated-cluster-resource.md) or [Use the `tidbcloud_serverless_cluster` Resource](/tidb-cloud/terraform-use-serverless-cluster-resource.md). You can learn how to manage a TiDB Cloud cluster with the `tidbcloud_cluster` resource in this document. @@ -15,9 +15,9 @@ In addition, you will also learn how to get the necessary information with the ` The features of the `tidbcloud_cluster` resource include the following: -- Create TiDB Cloud Serverless and TiDB Cloud Dedicated clusters. 
+- Create {{{ .starter }}} and TiDB Cloud Dedicated clusters. - Modify TiDB Cloud Dedicated clusters. -- Delete TiDB Cloud Serverless and TiDB Cloud Dedicated clusters. +- Delete {{{ .starter }}} and TiDB Cloud Dedicated clusters. ## Prerequisites @@ -834,13 +834,13 @@ You can pause a cluster when its status is `AVAILABLE` or resume a cluster when 6. Wait for a moment, then use the `terraform refersh` command to update the state. The status will be changed to `AVAILABLE` finally. -Now, you have created and managed a TiDB Cloud Dedicated cluster with Terraform. Next, you can try creating a backup of the cluster by our [backup resource](/tidb-cloud/terraform-use-backup-resource.md). +Now, you have created and managed a TiDB Cloud Dedicated cluster with Terraform. Next, you can try creating a backup of the cluster by the [`tidbcloud_backup`](/tidb-cloud/terraform-use-backup-resource.md) resource. ## Import a cluster For a TiDB cluster that is not managed by Terraform, you can use Terraform to manage it just by importing it. -For example, you can import a cluster that is not created by Terraform or import a cluster that is [created with the restore resource](/tidb-cloud/terraform-use-restore-resource.md#create-a-restore-task). +For example, you can import a cluster that is not created by Terraform or import a cluster that is [created with the `tidbcloud_restore` resource](/tidb-cloud/terraform-use-restore-resource.md#create-a-restore-task). 1. Create a `import_cluster.tf` file as follows: diff --git a/tidb-cloud/terraform-use-dedicated-cluster-resource.md b/tidb-cloud/terraform-use-dedicated-cluster-resource.md index fba189d1217c4..952b28729a411 100644 --- a/tidb-cloud/terraform-use-dedicated-cluster-resource.md +++ b/tidb-cloud/terraform-use-dedicated-cluster-resource.md @@ -1,13 +1,13 @@ --- -title: Use TiDB Cloud Dedicated Cluster Resource -summary: Learn how to use the TiDB Cloud Dedicated cluster resource to create and modify a TiDB Cloud Dedicated cluster. 
+title: Use the `tidbcloud_dedicated_cluster` Resource +summary: Learn how to use the `tidbcloud_dedicated_cluster` resource to create and modify a TiDB Cloud Dedicated cluster. --- -# Use TiDB Cloud Dedicated Cluster Resource +# Use the `tidbcloud_dedicated_cluster` Resource This document describes how to manage a [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster with the `tidbcloud_dedicated_cluster` resource. -In addition, you will also learn how to get the necessary information with the `tidbcloud_projects` data source and use the `tidbcloud_dedicated_node_group` resource to manage TiDB node groups of your TiDB Cloud Dedicated cluster. +You will also learn how to get the necessary information with the `tidbcloud_projects` data source and use the `tidbcloud_dedicated_node_group` resource to manage TiDB node groups of your TiDB Cloud Dedicated cluster. The features of the `tidbcloud_dedicated_cluster` resource include the following: @@ -130,7 +130,9 @@ You can create a TiDB Cloud Dedicated cluster using the `tidbcloud_dedicated_clu 1. Create a directory for the cluster and enter it. -2. Create a `cluster.tf` file: +2. Create a `cluster.tf` file. + + The following is an example of the `cluster.tf` file: ``` terraform { @@ -166,10 +168,9 @@ You can create a TiDB Cloud Dedicated cluster using the `tidbcloud_dedicated_clu Use the `resource` block to define the resource of TiDB Cloud, including the resource type, resource name, and resource details. - - To use the TiDB Cloud Dedicated cluster resource, set the resource type as `tidbcloud_dedicated_cluster`. + - To use the `tidbcloud_dedicated_cluster` resource, set the resource type as `tidbcloud_dedicated_cluster`. - For the resource name, you can define it as needed. For example, `example_cluster`. - - For the resource details, you can configure them according to the Project ID and the TiDB Cloud Dedicated cluster specification information. 
- - To get the TiDB Cloud Dedicated cluster specification information, see [tidbcloud_dedicated_cluster (Resource)](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/dedicated_cluster). + - For resource details, you can configure them according to the Project ID and the [`tidbcloud_dedicated_cluster` specification](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/dedicated_cluster). 3. Run the `terraform apply` command. It is not recommended to use `terraform apply --auto-approve` when you apply a resource. @@ -1050,11 +1051,11 @@ $ terraform show ## Import a cluster -For a TiDB cluster that is not managed by Terraform, you can use Terraform to manage it just by importing it. +For a TiDB cluster that is not managed by Terraform, you can bring it under Terraform management by importing it. Import a cluster that is not created by Terraform as follows: -1. Add an import block for the new TiDB Cloud Dedicated cluster resource. +1. Add an import block for the new `tidbcloud_dedicated_cluster` resource. Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the cluster ID: @@ -1067,7 +1068,7 @@ Import a cluster that is not created by Terraform as follows: 2. Generate the new configuration file. 
- Generate the new configuration file for the new TiDB Cloud Dedicated cluster resource according to the import block: + Generate the new configuration file for the new `tidbcloud_dedicated_cluster` resource according to the import block: ```shell terraform plan -generate-config-out=generated.tf diff --git a/tidb-cloud/terraform-use-dedicated-network-container-resource.md b/tidb-cloud/terraform-use-dedicated-network-container-resource.md index 062f55ba35c17..fd83795e8abf9 100644 --- a/tidb-cloud/terraform-use-dedicated-network-container-resource.md +++ b/tidb-cloud/terraform-use-dedicated-network-container-resource.md @@ -1,9 +1,9 @@ --- -title: Use TiDB Cloud Dedicated Network Container Resource -summary: Learn how to use the TiDB Cloud Dedicated network container resource to create and modify a TiDB Cloud Dedicated network container. +title: Use the `tidbcloud_dedicated_network_container` Resource +summary: Learn how to use the `tidbcloud_dedicated_network_container` resource to create and modify a TiDB Cloud Dedicated network container. --- -# Use TiDB Cloud Dedicated Network Container Resource +# Use the `tidbcloud_dedicated_network_container` Resource This document describes how to manage a [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) network container using the `tidbcloud_dedicated_network_container` resource. @@ -58,7 +58,7 @@ The following example shows how to create a TiDB Cloud Dedicated network contain Use the `resource` block to define the resource of TiDB Cloud, including the resource type, resource name, and resource details. - - To use the TiDB Cloud Dedicated network container resource, set the resource type as `tidbcloud_dedicated_network_container`. + - To use the `tidbcloud_dedicated_network_container` resource, set the resource type as `tidbcloud_dedicated_network_container`. - For the resource name, you can define it as needed, for example, `example`. 
- If you do not know how to get the values of the required arguments, see [Set a CIDR for a Region](/tidb-cloud/set-up-vpc-peering-connections.md#prerequisite-set-a-cidr-for-a-region). - For more information about the TiDB Cloud Dedicated network container specification, see [tidbcloud_dedicated_network_container (Resource)](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/dedicated_network_container). @@ -138,11 +138,11 @@ The following example shows how to create a TiDB Cloud Dedicated network contain ## Import a TiDB Cloud Dedicated network container -For a TiDB Cloud Dedicated network container that is not managed by Terraform, you can use Terraform to manage it just by importing it. +For a TiDB Cloud Dedicated network container that is not managed by Terraform, you can bring it under Terraform management by importing it. For example, you can import a network container that is not created by Terraform. -1. Add an import block for the new TiDB Cloud Dedicated network container resource. +1. Add an import block for the new `tidbcloud_dedicated_network_container` resource. Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the format of `cluster_id,network_container_id`: @@ -155,7 +155,7 @@ For example, you can import a network container that is not created by Terraform 2. Generate the new configuration file. 
- Generate the new configuration file for the new TiDB Cloud Dedicated network container resource according to the import block: + Generate the new configuration file for the new `tidbcloud_dedicated_network_container` resource according to the import block: ```shell terraform plan -generate-config-out=generated.tf diff --git a/tidb-cloud/terraform-use-dedicated-private-endpoint-connection-resource.md b/tidb-cloud/terraform-use-dedicated-private-endpoint-connection-resource.md index 8e34ffefcb7a9..19fe6c2cca0ff 100644 --- a/tidb-cloud/terraform-use-dedicated-private-endpoint-connection-resource.md +++ b/tidb-cloud/terraform-use-dedicated-private-endpoint-connection-resource.md @@ -1,9 +1,9 @@ --- -title: Use TiDB Cloud Dedicated Private Endpoint Connection Resource -summary: Learn how to use the TiDB Cloud Dedicated private endpoint connection resource to create and modify a TiDB Cloud Dedicated private endpoint connection. +title: Use the `tidbcloud_dedicated_private_endpoint_connection` Resource +summary: Learn how to use the `tidbcloud_dedicated_private_endpoint_connection` resource to create and modify a TiDB Cloud Dedicated private endpoint connection. --- -# Use TiDB Cloud Dedicated Private Endpoint Connection Resource +# Use the `tidbcloud_dedicated_private_endpoint_connection` Resource This document describes how to manage a [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) private endpoint connection using the `tidbcloud_dedicated_private_endpoint_connection` resource. @@ -15,7 +15,7 @@ The features of the `tidbcloud_dedicated_private_endpoint_connection` resource i > **Note:** > -> TiDB Cloud Dedicated private endpoint connection resource cannot be modified. If you want to modify a TiDB Cloud Dedicated private endpoint connection, you need to delete the existing one, and then create a new one. +> The `tidbcloud_dedicated_private_endpoint_connection` resource cannot be modified. 
If you want to modify a TiDB Cloud Dedicated private endpoint connection, you need to delete the existing one, and then create a new one. ## Prerequisites @@ -55,7 +55,7 @@ The following example shows how to create a TiDB Cloud Dedicated private endpoin Use the `resource` block to define the resource of TiDB Cloud, including the resource type, resource name, and resource details. - - To use the TiDB Cloud Dedicated private endpoint connection resource, set the resource type as `tidbcloud_dedicated_private_endpoint_connection`. + - To use the `tidbcloud_dedicated_private_endpoint_connection` resource, set the resource type as `tidbcloud_dedicated_private_endpoint_connection`. - For the resource name, you can define it as needed. For example, `example`. - If you do not know how to get the values of the required arguments, see [Connect to a TiDB Cloud Dedicated Cluster via Private Endpoint with AWS](/tidb-cloud/set-up-private-endpoint-connections.md). - To get the TiDB Cloud Dedicated private endpoint connection specification information, see [tidbcloud_private_endpoint_connection (Resource)](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/dedicated_private_endpoint_connection). @@ -148,7 +148,7 @@ The following example shows how to create a TiDB Cloud Dedicated private endpoin For a TiDB Cloud Dedicated private endpoint connection that is not managed by Terraform, you can start managing it with Terraform by importing it. -1. Add an import block for the new TiDB Cloud Dedicated private endpoint connection resource. +1. Add an import block for the new `tidbcloud_dedicated_private_endpoint_connection` resource. Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the format of `cluster_id,dedicated_private_endpoint_connection_id`: @@ -161,7 +161,7 @@ For a TiDB Cloud Dedicated private endpoint connection that is not managed by Te 2. 
Generate the new configuration file. - Generate the new configuration file for the new TiDB Cloud Dedicated private endpoint connection resource according to the import block: + Generate the new configuration file for the new `tidbcloud_dedicated_private_endpoint_connection` resource according to the import block: ```shell terraform plan -generate-config-out=generated.tf diff --git a/tidb-cloud/terraform-use-dedicated-vpc-peering-resource.md b/tidb-cloud/terraform-use-dedicated-vpc-peering-resource.md index a0c9b5de2e366..be44dfc083c26 100644 --- a/tidb-cloud/terraform-use-dedicated-vpc-peering-resource.md +++ b/tidb-cloud/terraform-use-dedicated-vpc-peering-resource.md @@ -1,9 +1,9 @@ --- -title: Use TiDB Cloud Dedicated VPC Peering Resource -summary: Learn how to use the TiDB Cloud Dedicated VPC peering resource to create and modify a TiDB Cloud Dedicated VPC peering. +title: Use the `tidbcloud_dedicated_vpc_peering` Resource +summary: Learn how to use the `tidbcloud_dedicated_vpc_peering` resource to create and modify a TiDB Cloud Dedicated VPC peering. --- -# Use TiDB Cloud Dedicated VPC Peering Resource +# Use the `tidbcloud_dedicated_vpc_peering` Resource This document describes how to manage a [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) VPC peering with the `tidbcloud_dedicated_vpc_peering` resource. @@ -15,7 +15,7 @@ The features of the `tidbcloud_dedicated_vpc_peering` resource include the follo > **Note:** > -> TiDB Cloud Dedicated VPC peering resource cannot be modified. If you want to change the configuration of a TiDB Cloud Dedicated VPC peering, you need to delete the existing one, and then create a new one. +> The `tidbcloud_dedicated_vpc_peering` resource cannot be modified. If you want to change the configuration of a TiDB Cloud Dedicated VPC peering, you need to delete the existing one, and then create a new one. 
## Prerequisites @@ -56,7 +56,7 @@ The following example shows how to create a TiDB Cloud Dedicated VPC peering. Use the `resource` block to define the resource of TiDB Cloud, including the resource type, resource name, and resource details. - - To use the TiDB Cloud Dedicated VPC peering resource, set the resource type as `tidbcloud_dedicated_vpc_peering`. + - To use the `tidbcloud_dedicated_vpc_peering` resource, set the resource type as `tidbcloud_dedicated_vpc_peering`. - For the resource name, you can define it as needed. For example, `example`. - If you do not know how to get the values of the required arguments, see [Connect to TiDB Cloud Dedicated via VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md). - To get the TiDB Cloud Dedicated VPC peering specification information, see [tidbcloud_dedicated_vpc_peering (Resource)](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/dedicated_vpc_peering). @@ -146,24 +146,24 @@ The following example shows how to create a TiDB Cloud Dedicated VPC peering. ## Import a TiDB Cloud Dedicated VPC peering -For a TiDB Cloud Dedicated VPC peering that is not managed by Terraform, you can use Terraform to manage it just by importing it. +For a TiDB Cloud Dedicated VPC peering that is not managed by Terraform, you can bring it under Terraform management by importing it. For example, you can import a VPC peering that is not created by Terraform. -1. Add an import block for the new TiDB Cloud Dedicated VPC peering resource. +1. Add an import block for the new `tidbcloud_dedicated_vpc_peering` resource. - Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the format of `cluster_id,vpc_peering_id`: + Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${vpc_peering_id}` with the actual VPC peering ID: 
``` import { to = tidbcloud_dedicated_vpc_peering.example - id = "${id}" + id = "${vpc_peering_id}" } ``` 2. Generate the new configuration file. - Generate the new configuration file for the new TiDB Cloud Dedicated VPC peering resource according to the import block: + Generate the new configuration file for the new `tidbcloud_dedicated_vpc_peering` resource according to the import block: ```shell terraform plan -generate-config-out=generated.tf diff --git a/tidb-cloud/terraform-use-import-resource.md b/tidb-cloud/terraform-use-import-resource.md index 416ff55f5dda9..164008b209e49 100644 --- a/tidb-cloud/terraform-use-import-resource.md +++ b/tidb-cloud/terraform-use-import-resource.md @@ -1,32 +1,34 @@ --- -title: Use Import Resource -summary: Learn how to manage the import task using the import resource. +title: Use the `tidbcloud_import` Resource +summary: Learn how to manage the import task using the `tidbcloud_import` resource. --- -# Use Import Resource +# Use the `tidbcloud_import` Resource You can learn how to import data to a TiDB Cloud cluster with the `tidbcloud_import` resource in this document. The features of the `tidbcloud_import` resource include the following: -- Create import tasks for TiDB Cloud Serverless and TiDB Cloud Dedicated clusters. +- Create import tasks for TiDB Cloud clusters. - Import data either from local disks or from Amazon S3 buckets. - Cancel ongoing import tasks. ## Prerequisites - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md). -- [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md) or [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md). +- Refer to one of the following documents to create a TiDB Cloud cluster: + - [Create a {{{ .starter }}} or {{{ .essential }}} cluster](/tidb-cloud/create-tidb-cluster-serverless.md) + - [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md) 
## Create and run an import task -You can manage either a local import task or an Amazon S3 import task using the import resource. +You can manage either a local import task or an Amazon S3 import task using the `tidbcloud_import` resource. ### Create and run a local import task > **Note:** > -> Importing local files is supported only for TiDB Cloud Serverless clusters, not for TiDB Cloud Dedicated clusters. +> Importing local files is supported only for {{{ .starter }}} or {{{ .essential }}} clusters, not for TiDB Cloud Dedicated clusters. 1. Create a CSV file for import. For example: @@ -253,7 +255,7 @@ Import tasks cannot be updated. ## Delete an import task -For Terraform, deleting an import task means canceling the corresponding import resource. +For Terraform, deleting an import task means canceling the corresponding `tidbcloud_import` resource. You cannot cancel a `COMPLETED` import task. Otherwise, you will get a `Delete Error` as in the following example: @@ -298,4 +300,4 @@ Destroy complete! Resources: 1 destroyed. ## Configurations -See [configuration documentation](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/import) to get all the available configurations for the import resource. +See [configuration documentation](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/import) to get all the available configurations for the `tidbcloud_import` resource. diff --git a/tidb-cloud/terraform-use-restore-resource.md b/tidb-cloud/terraform-use-restore-resource.md index 2793dc7e5ad6b..22ae8a74f230a 100644 --- a/tidb-cloud/terraform-use-restore-resource.md +++ b/tidb-cloud/terraform-use-restore-resource.md @@ -1,9 +1,9 @@ --- -title: Use Restore Resource -summary: Learn how to use restore resource. +title: Use the `tidbcloud_restore` Resource +summary: Learn how to use the `tidbcloud_restore` resource to create and modify a restore task. 
--- -# Use Restore Resource +# Use the `tidbcloud_restore` Resource You can learn how to manage a restore task with the `tidbcloud_restore` resource in this document. @@ -14,10 +14,10 @@ The features of the `tidbcloud_restore` resource include the following: ## Prerequisites - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md). -- The backup and restore feature is unavailable for TiDB Cloud Serverless clusters. To use restore resources, make sure that you have created a TiDB Cloud Dedicated cluster. +- The backup and restore feature introduced in this document is unavailable for {{{ .starter }}} and {{{ .essential }}} clusters. To use `tidbcloud_restore` resources, make sure that you have created a TiDB Cloud Dedicated cluster. ## Create a restore task - + After creating a backup of a cluster, you can restore the cluster by creating a restore task with the `tidbcloud_restore` resource. > **Note:** diff --git a/tidb-cloud/terraform-use-serverless-branch-resource.md b/tidb-cloud/terraform-use-serverless-branch-resource.md index b2a63765ced23..4cbfd5e611d72 100644 --- a/tidb-cloud/terraform-use-serverless-branch-resource.md +++ b/tidb-cloud/terraform-use-serverless-branch-resource.md @@ -1,32 +1,32 @@ --- -title: Use TiDB Cloud Serverless Branch Resource -summary: Learn how to use the serverless branch resource to create and modify a TiDB Cloud Serverless branch. +title: Use the `tidbcloud_serverless_branch` Resource +summary: Learn how to use the `tidbcloud_serverless_branch` resource to create and modify a {{{ .starter }}} or {{{ .essential }}} branch. --- -# Use TiDB Cloud Serverless Branch Resource +# Use the `tidbcloud_serverless_branch` Resource -This document describes how to manage a [TiDB Cloud Serverless branch](/tidb-cloud/branch-manage.md) using the `tidbcloud_serverless_branch` resource. 
+This document describes how to manage a [{{{ .starter }}} or {{{ .essential }}} branch](/tidb-cloud/branch-manage.md) using the `tidbcloud_serverless_branch` resource. The features of the `tidbcloud_serverless_branch` resource include the following: -- Create TiDB Cloud Serverless branches. -- Import TiDB Cloud Serverless branches. -- Delete TiDB Cloud Serverless branches. +- Create {{{ .starter }}} or {{{ .essential }}} branches. +- Import {{{ .starter }}} or {{{ .essential }}} branches. +- Delete {{{ .starter }}} or {{{ .essential }}} branches. > **Note:** > -> TiDB Cloud Serverless branch resource cannot be modified. If you want to change the configuration of a serverless branch resource, you need to delete the existing one and create a new one. +> The `tidbcloud_serverless_branch` resource cannot be modified. If you want to change the configuration of a serverless branch resource, you need to delete the existing one and create a new one. ## Prerequisites - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) v0.4.0 or later. -- [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md). +- [Create a {{{ .starter }}} or {{{ .essential }}} cluster](/tidb-cloud/create-tidb-cluster-serverless.md). -## Create a TiDB Cloud Serverless branch +## Create a {{{ .starter }}} or {{{ .essential }}} branch -You can create a TiDB Cloud Serverless branch using the `tidbcloud_serverless_branch` resource. +You can create a {{{ .starter }}} or {{{ .essential }}} branch using the `tidbcloud_serverless_branch` resource. -The following example shows how to create a TiDB Cloud Serverless branch. +The following example shows how to create a {{{ .starter }}} or {{{ .essential }}} branch. 1. Create a directory for the branch and enter it. @@ -158,13 +158,13 @@ The following example shows how to create a TiDB Cloud Serverless branch. 
} ``` -## Import a TiDB Cloud Serverless branch +## Import a {{{ .starter }}} or {{{ .essential }}} branch -For a TiDB Cloud Serverless branch that is not managed by Terraform, you can use Terraform to manage it just by importing it. +For a {{{ .starter }}} or {{{ .essential }}} branch that is not managed by Terraform, you can bring it under Terraform management by importing it. -Import a TiDB Cloud Serverless branch that is not created by Terraform as follows: +Import a {{{ .starter }}} or {{{ .essential }}} branch that is not created by Terraform as follows: -1. Add an import block for the new TiDB Cloud Serverless branch resource. +1. Add an import block for the new `tidbcloud_serverless_branch` resource. Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the format of `cluster_id,branch_id`: @@ -177,7 +177,7 @@ Import a TiDB Cloud Serverless branch that is not created by Terraform as follow 2. Generate the new configuration file. - Generate the new configuration file for the new TiDB Cloud Serverless branch resource according to the import block: + Generate the new configuration file for the new `tidbcloud_serverless_branch` resource according to the import block: ```shell terraform plan -generate-config-out=generated.tf @@ -200,9 +200,9 @@ Import a TiDB Cloud Serverless branch that is not created by Terraform as follow Now you can manage the imported branch with Terraform. 
-## Delete a TiDB Cloud Serverless branch +## Delete a {{{ .starter }}} or {{{ .essential }}} branch -To delete a TiDB Cloud Serverless branch, you can delete the configuration of the `tidbcloud_serverless_branch` resource, then use the `terraform apply` command to destroy the resource: +To delete a {{{ .starter }}} or {{{ .essential }}} branch, you can delete the configuration of the `tidbcloud_serverless_branch` resource, then use the `terraform apply` command to destroy the resource: ```shell $ terraform apply diff --git a/tidb-cloud/terraform-use-serverless-cluster-resource-manage-essential.md b/tidb-cloud/terraform-use-serverless-cluster-resource-manage-essential.md new file mode 100644 index 0000000000000..3772bc598812d --- /dev/null +++ b/tidb-cloud/terraform-use-serverless-cluster-resource-manage-essential.md @@ -0,0 +1,553 @@ +--- +title: Use the `tidbcloud_serverless_cluster` Resource +summary: Learn how to use the `tidbcloud_serverless_cluster` resource to create and modify a {{{ .essential }}} cluster. +--- + +# Use the `tidbcloud_serverless_cluster` Resource + +This document describes how to manage a [{{{ .essential }}}](/tidb-cloud/select-cluster-tier.md#essential) cluster with the `tidbcloud_serverless_cluster` resource. + +You will also learn how to get the necessary information with the `tidbcloud_projects` data source. + +The features of the `tidbcloud_serverless_cluster` resource include the following: + +- Create {{{ .essential }}} clusters. +- Modify {{{ .essential }}} clusters. +- Import {{{ .essential }}} clusters. +- Delete {{{ .essential }}} clusters. + +## Prerequisites + +- [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) v0.4.2 or later. + +## Get project IDs using the `tidbcloud_projects` data source + +Each TiDB cluster belongs to a project. Before creating a {{{ .essential }}} cluster, you need to obtain the ID of the project where you want to create the cluster. 
If no `project_id` is specified, the default project will be used. + +To retrieve the information about all available projects, use the `tidbcloud_projects` data source as follows: + +1. In the `main.tf` file created when you [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md), add the `data` and `output` blocks as follows: + + ``` + terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } + } + + provider "tidbcloud" { + public_key = "your_public_key" + private_key = "your_private_key" + } + + data "tidbcloud_projects" "example_project" { + page = 1 + page_size = 10 + } + + output "projects" { + value = data.tidbcloud_projects.example_project.items + } + ``` + + - Use the `data` block to define the data source of TiDB Cloud, including the data source type and the data source name. + + - To use the projects data source, set the data source type as `tidbcloud_projects`. + - For the data source name, you can define it as needed. For example, `"example_project"`. + - For the `tidbcloud_projects` data source, you can use the `page` and `page_size` attributes to limit the maximum number of projects you want to check. + + - Use the `output` block to define the data source information to be displayed in the output, and expose the information for other Terraform configurations to use. + + The `output` block works similarly to returned values in programming languages. See [Terraform documentation](https://www.terraform.io/language/values/outputs) for more details. + + To get all the available configurations for the resources and data sources, see the [Terraform provider configuration documentation](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs). + +2. Run the `terraform apply` command to apply the configurations. You need to type `yes` at the confirmation prompt to proceed. 
+ + To skip the prompt, use `terraform apply --auto-approve`: + + ```shell + $ terraform apply --auto-approve + + Changes to Outputs: + + projects = [ + + { + + cluster_count = 0 + + create_timestamp = "1649154426" + + id = "1372813089191000000" + + name = "test1" + + org_id = "1372813089189000000" + + user_count = 1 + }, + + { + + cluster_count = 1 + + create_timestamp = "1640602740" + + id = "1372813089189000000" + + name = "default project" + + org_id = "1372813089189000000" + + user_count = 1 + }, + ] + + You can apply this plan to save these new output values to the Terraform state, without changing any real infrastructure. + + Apply complete! Resources: 0 added, 0 changed, 0 destroyed. + + Outputs: + + projects = tolist([ + { + "cluster_count" = 0 + "create_timestamp" = "1649154426" + "id" = "1372813089100000000" + "name" = "test1" + "org_id" = "1372813089100000000" + "user_count" = 1 + }, + { + "cluster_count" = 1 + "create_timestamp" = "1640602740" + "id" = "1372813089100000001" + "name" = "default project" + "org_id" = "1372813089100000000" + "user_count" = 1 + }, + ]) + ``` + +Now, you can get all the available projects from the output. Copy one of the project IDs that you need. + +## Create a {{{ .essential }}} cluster + +You can create a {{{ .essential }}} cluster using the `tidbcloud_serverless_cluster` resource. + +1. Create a directory for the cluster and enter it. + +2. Create a `cluster.tf` file. 
+ + The following is an example of the `cluster.tf` file: + + ``` + terraform { + required_providers { + tidbcloud = { + source = "tidbcloud/tidbcloud" + } + } + } + + provider "tidbcloud" { + public_key = "your_public_key" + private_key = "your_private_key" + } + + resource "tidbcloud_serverless_cluster" "example" { + project_id = "1372813089454000000" + display_name = "test-tf" + auto_scaling = { + min_rcu = 3000 + max_rcu = 4000 + } + region = { + name = "regions/aws-us-east-1" + } + } + ``` + + Use the `resource` block to define the resource of TiDB Cloud, including the resource type, resource name, and resource details. + + - To use the `tidbcloud_serverless_cluster` resource, set the resource type as `tidbcloud_serverless_cluster`. + - For the resource name, you can define it as needed. For example, `example`. + - For resource details, you can configure them according to the Project ID and the [`tidbcloud_serverless_cluster` specification](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/serverless_cluster). To create a {{{ .essential }}} cluster, you must specify the `auto_scaling` attribute in resource details. + +3. Run the `terraform apply` command. It is not recommended to use `terraform apply --auto-approve` when you apply a resource. + + ```shell + $ terraform apply + + Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + + Terraform will perform the following actions: + + # tidbcloud_serverless_cluster.example will be created + + resource "tidbcloud_serverless_cluster" "example" { + + annotations = (known after apply) + + automated_backup_policy = (known after apply) + + cluster_id = (known after apply) + + create_time = (known after apply) + + created_by = (known after apply) + + display_name = "test-tf" + + encryption_config = (known after apply) + + endpoints = (known after apply) + + labels = (known after apply) + + project_id = "1372813089454000000" + + region = { + + cloud_provider = (known after apply) + + display_name = (known after apply) + + name = "regions/aws-us-east-1" + + region_id = (known after apply) + } + + auto_scaling = { + + max_rcu = 4000 + + min_rcu = 3000 + } + + state = (known after apply) + + update_time = (known after apply) + + user_prefix = (known after apply) + + version = (known after apply) + } + + Plan: 1 to add, 0 to change, 0 to destroy. + + Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: + ``` + + In the preceding result, Terraform generates an execution plan for you, which describes the actions that Terraform will take: + + - You can check the differences between the configurations and the states. + - You can also see the results of this `apply`. It will add a new resource, and no resource will be changed or destroyed. + - `known after apply` indicates that you will get the corresponding value after `apply`. + +4. If everything in your plan looks fine, type `yes` to continue: + + ```shell + Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + + tidbcloud_serverless_cluster.example: Creating... + tidbcloud_serverless_cluster.example: Still creating... 
[10s elapsed] + + Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + ``` + +5. Use the `terraform show` or `terraform state show tidbcloud_serverless_cluster.${resource-name}` command to inspect the state of your resource. The former command shows the states of all resources and data sources. + + ```shell + $ terraform state show tidbcloud_serverless_cluster.example + + # tidbcloud_serverless_cluster.example: + resource "tidbcloud_serverless_cluster" "example" { + annotations = { + "tidb.cloud/available-features" = "DISABLE_PUBLIC_LB,DELEGATE_USER" + "tidb.cloud/has-set-password" = "false" + } + automated_backup_policy = { + retention_days = 14 + start_time = "07:00" + } + cluster_id = "10145794214536000000" + create_time = "2025-06-16T07:04:41Z" + created_by = "apikey-S2000000" + display_name = "test-tf" + encryption_config = { + enhanced_encryption_enabled = false + } + endpoints = { + private = { + aws = { + availability_zone = [ + "use1-az6", + ] + service_name = "com.amazonaws.vpce.us-east-1.vpce-svc-0062ecf0683000000" + } + host = "gateway01-privatelink.us-east-1.prod.aws.tidbcloud.com" + port = 4000 + } + public = { + disabled = false + host = "gateway01.us-east-1.prod.aws.tidbcloud.com" + port = 4000 + } + } + labels = { + "tidb.cloud/organization" = "1372813089187000000" + "tidb.cloud/project" = "1372813089454000000" + } + project_id = "1372813089454000000" + region = { + cloud_provider = "aws" + display_name = "N. Virginia (us-east-1)" + name = "regions/aws-us-east-1" + region_id = "us-east-1" + } + auto_scaling = { + min_rcu = 3000 + max_rcu = 4000 + } + state = "ACTIVE" + update_time = "2025-06-16T07:04:48Z" + user_prefix = "KhSDGqQ3P000000" + version = "v7.5.2" + } + ``` + +## Modify a {{{ .essential }}} cluster + +For a {{{ .essential }}} cluster, you can use Terraform to manage resources. The arguments that you can modify include: + +- `display_name`: The display name of the cluster. 
+- `auto_scaling`: The auto scaling configuration of the cluster. +- `endpoints.public.disabled`: Whether to disable the public endpoint. +- `automated_backup_policy.start_time`: The UTC time of day in `HH:mm` format when the automated backup starts. + +To modify a {{{ .essential }}} cluster, you can modify the configuration of the `tidbcloud_serverless_cluster` resource, then use the `terraform apply` command to apply the changes. For example, you can modify the `display_name` and `auto_scaling` as follows: + +``` +resource "tidbcloud_serverless_cluster" "example" { + project_id = "1372813089454000000" + display_name = "test-tf-modified" + auto_scaling = { + min_rcu = 4000 + max_rcu = 5000 + } + region = { + name = "regions/aws-us-east-1" + } +} +``` + +Then, run the `terraform apply` command to apply the changes: + +```shell +$ terraform apply + +tidbcloud_serverless_cluster.example: Refreshing state... + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + ~ update in-place + +Terraform will perform the following actions: + + # tidbcloud_serverless_cluster.example will be updated in-place + ~ resource "tidbcloud_serverless_cluster" "example" { + ~ annotations = { + - "tidb.cloud/available-features" = "DISABLE_PUBLIC_LB,DELEGATE_USER" + - "tidb.cloud/has-set-password" = "false" + } -> (known after apply) + ~ display_name = "test-tf" -> "test-tf-modified" + ~ labels = { + - "tidb.cloud/organization" = "1372813089187041280" + - "tidb.cloud/project" = "1372813089454543324" + } -> (known after apply) + ~ auto_scaling = { + ~ min_rcu = 3000 -> 4000 + ~ max_rcu = 4000 -> 5000 + } + ~ state = "ACTIVE" -> (known after apply) + ~ update_time = "2025-06-16T07:04:57Z" -> (known after apply) + ~ version = "v7.5.2" -> (known after apply) + # (9 unchanged attributes hidden) + } + +Plan: 0 to add, 1 to change, 0 to destroy. + +Do you want to perform these actions? 
+ Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +tidbcloud_serverless_cluster.example: Modifying... +tidbcloud_serverless_cluster.example: Modifications complete after 8s + +Apply complete! Resources: 0 added, 1 changed, 0 destroyed. +``` + +Then, you can use the `terraform show` or `terraform state show tidbcloud_serverless_cluster.${resource-name}` command to inspect the state of your resource. The former command shows the states of all resources and data sources. + +```shell +$ terraform state show tidbcloud_serverless_cluster.example +# tidbcloud_serverless_cluster.example: +resource "tidbcloud_serverless_cluster" "example" { + annotations = { + "tidb.cloud/available-features" = "DISABLE_PUBLIC_LB,DELEGATE_USER" + "tidb.cloud/has-set-password" = "false" + } + automated_backup_policy = { + retention_days = 14 + start_time = "07:00" + } + cluster_id = "10145794214536000000" + create_time = "2025-06-16T07:04:41Z" + created_by = "apikey-S2000000" + display_name = "test-tf-modified" + encryption_config = { + enhanced_encryption_enabled = false + } + endpoints = { + private = { + aws = { + availability_zone = [ + "use1-az6", + ] + service_name = "com.amazonaws.vpce.us-east-1.vpce-svc-0062ecf0683000000" + } + host = "gateway01-privatelink.us-east-1.prod.aws.tidbcloud.com" + port = 4000 + } + public = { + disabled = false + host = "gateway01.us-east-1.prod.aws.tidbcloud.com" + port = 4000 + } + } + labels = { + "tidb.cloud/organization" = "1372813089187000000" + "tidb.cloud/project" = "1372813089454000000" + } + project_id = "1372813089454000000" + region = { + cloud_provider = "aws" + display_name = "N. 
Virginia (us-east-1)" + name = "regions/aws-us-east-1" + region_id = "us-east-1" + } + auto_scaling = { + min_rcu = 4000 + max_rcu = 5000 + } + state = "ACTIVE" + update_time = "2025-06-16T07:04:57Z" + user_prefix = "KhSDGqQ3P000000" + version = "v7.5.2" +} +``` + +## Import a {{{ .essential }}} cluster + +For a {{{ .essential }}} cluster that is not managed by Terraform, you can bring it under Terraform management by importing it. + +1. Add an import block for the new `tidbcloud_serverless_cluster` resource. + + Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the cluster ID: + + ``` + import { + to = tidbcloud_serverless_cluster.example + id = "${id}" + } + ``` + +2. Generate the new configuration file. + + Generate the new configuration file for the new `tidbcloud_serverless_cluster` resource according to the import block: + + ```shell + terraform plan -generate-config-out=generated.tf + ``` + + Do not specify an existing `.tf` filename in the preceding command. Otherwise, Terraform will return an error. + +3. Review and apply the generated configuration. + + Review the generated configuration file to ensure that it meets your needs. Optionally, you can move the contents of this file to your preferred location. + + Then, run `terraform apply` to import your infrastructure. After applying, the example output is as follows: + + ```shell + tidbcloud_serverless_cluster.example: Importing... + tidbcloud_serverless_cluster.example: Import complete + + Apply complete! Resources: 1 imported, 0 added, 0 changed, 0 destroyed. + ``` + +Now you can manage the imported cluster with Terraform. 
+ +## Delete a {{{ .essential }}} cluster + +To delete a {{{ .essential }}} cluster, you can delete the configuration of the `tidbcloud_serverless_cluster` resource, then use the `terraform apply` command to destroy the resource: + +```shell +$ terraform apply +tidbcloud_serverless_cluster.example: Refreshing state... + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + - destroy + +Terraform will perform the following actions: + + # tidbcloud_serverless_cluster.example will be destroyed + # (because tidbcloud_serverless_cluster.example is not in configuration) + - resource "tidbcloud_serverless_cluster" "example" { + - annotations = { + - "tidb.cloud/available-features" = "DISABLE_PUBLIC_LB,DELEGATE_USER" + - "tidb.cloud/has-set-password" = "false" + } -> null + - automated_backup_policy = { + - retention_days = 14 -> null + - start_time = "07:00" -> null + } -> null + - cluster_id = "10145794214536000000" -> null + - create_time = "2025-06-16T07:04:41Z" -> null + - created_by = "apikey-S2000000" -> null + - display_name = "test-tf-modified" -> null + - encryption_config = { + - enhanced_encryption_enabled = false -> null + } -> null + - endpoints = { + - private = { + - aws = { + - availability_zone = [ + - "use1-az6", + ] -> null + - service_name = "com.amazonaws.vpce.us-east-1.vpce-svc-0062ecf0683000000" -> null + } -> null + - host = "gateway01-privatelink.us-east-1.prod.aws.tidbcloud.com" -> null + - port = 4000 -> null + } -> null + - public = { + - disabled = false -> null + - host = "gateway01.us-east-1.prod.aws.tidbcloud.com" -> null + - port = 4000 -> null + } -> null + } -> null + - labels = { + - "tidb.cloud/organization" = "1372813089187000000" + - "tidb.cloud/project" = "1372813089454000000" + } -> null + - project_id = "1372813089454000000" -> null + - region = { + - cloud_provider = "aws" -> null + - display_name = "N. 
Virginia (us-east-1)" -> null + - name = "regions/aws-us-east-1" -> null + - region_id = "us-east-1" -> null + } -> null + - auto_scaling = { + - min_rcu = 4000 -> null + - max_rcu = 5000 -> null + } -> null + - state = "ACTIVE" -> null + - update_time = "2025-06-16T07:04:57Z" -> null + - user_prefix = "KhSDGqQ3P000000" -> null + - version = "v7.5.2" -> null + } + +Plan: 0 to add, 0 to change, 1 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +tidbcloud_serverless_cluster.example: Destroying... +tidbcloud_serverless_cluster.example: Destruction complete after 1s + +Apply complete! Resources: 0 added, 0 changed, 1 destroyed. +``` + +Now, if you run the `terraform show` command, it will show no managed resources because the resource has been destroyed: + +``` +$ terraform show +``` diff --git a/tidb-cloud/terraform-use-serverless-cluster-resource.md b/tidb-cloud/terraform-use-serverless-cluster-resource.md index 9ddfb0a15b592..aa7b992e2a001 100644 --- a/tidb-cloud/terraform-use-serverless-cluster-resource.md +++ b/tidb-cloud/terraform-use-serverless-cluster-resource.md @@ -1,20 +1,20 @@ --- -title: Use TiDB Cloud Serverless Cluster Resource -summary: Learn how to use the TiDB Cloud Serverless cluster resource to create and modify a TiDB Cloud Serverless cluster. +title: Use the `tidbcloud_serverless_cluster` Resource +summary: Learn how to use the `tidbcloud_serverless_cluster` resource to create and modify a {{{ .starter }}} cluster. --- -# Use TiDB Cloud Serverless Cluster Resource +# Use the `tidbcloud_serverless_cluster` Resource -This document describes how to manage a [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) cluster with the `tidbcloud_serverless_cluster` resource. 
+This document describes how to manage a [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) cluster with the `tidbcloud_serverless_cluster` resource. -In addition, you will also learn how to get the necessary information with the `tidbcloud_projects` data source. +You will also learn how to get the necessary information with the `tidbcloud_projects` data source. The features of the `tidbcloud_serverless_cluster` resource include the following: -- Create TiDB Cloud Serverless clusters. -- Modify TiDB Cloud Serverless clusters. -- Import TiDB Cloud Serverless clusters. -- Delete TiDB Cloud Serverless clusters. +- Create {{{ .starter }}} clusters. +- Modify {{{ .starter }}} clusters. +- Import {{{ .starter }}} clusters. +- Delete {{{ .starter }}} clusters. ## Prerequisites @@ -22,7 +22,7 @@ The features of the `tidbcloud_serverless_cluster` resource include the followin ## Get project IDs using the `tidbcloud_projects` data source -Each TiDB cluster belongs to a project. Before creating a TiDB Cloud Serverless cluster, you need to obtain the ID of the project where you want to create the cluster. If no `project_id` is specified, the default project will be used. +Each TiDB cluster belongs to a project. Before creating a {{{ .starter }}} cluster, you need to obtain the ID of the project where you want to create the cluster. If no `project_id` is specified, the default project will be used. To retrieve the information about all available projects, use the `tidbcloud_projects` data source as follows: @@ -119,15 +119,15 @@ To retrieve the information about all available projects, use the `tidbcloud_pro Now, you can get all the available projects from the output. Copy one of the project IDs that you need. -## Create a TiDB Cloud Serverless cluster +## Create a {{{ .starter }}} cluster -You can create a TiDB Cloud Serverless cluster using the `tidbcloud_serverless_cluster` resource. - -The following example shows how to create a TiDB Cloud Serverless cluster. 
+You can create a {{{ .starter }}} cluster using the `tidbcloud_serverless_cluster` resource. 1. Create a directory for the cluster and enter it. -2. Create a `cluster.tf` file: +2. Create a `cluster.tf` file. + + The following is an example of the `cluster.tf` file: ``` terraform { @@ -157,10 +157,9 @@ The following example shows how to create a TiDB Cloud Serverless cluster. Use the `resource` block to define the resource of TiDB Cloud, including the resource type, resource name, and resource details. - - To use the TiDB Cloud Serverless cluster resource, set the resource type as `tidbcloud_serverless_cluster`. + - To use the `tidbcloud_serverless_cluster` resource, set the resource type as `tidbcloud_serverless_cluster`. - For the resource name, you can define it as needed. For example, `example`. - - For the resource details, you can configure them according to the Project ID and the TiDB Cloud Serverless cluster specification information. - - To get the TiDB Cloud Serverless cluster specification information, see [tidbcloud_serverless_cluster (Resource)](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/serverless_cluster). + - For resource details, you can configure them according to the Project ID and the [`tidbcloud_serverless_cluster` specification](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/serverless_cluster). 3. Run the `terraform apply` command. It is not recommended to use `terraform apply --auto-approve` when you apply a resource. @@ -289,16 +288,16 @@ The following example shows how to create a TiDB Cloud Serverless cluster. } ``` -## Modify a TiDB Cloud Serverless cluster +## Modify a {{{ .starter }}} cluster -For a TiDB Cloud Serverless cluster, you can use Terraform to manage resources. The arguments that you can modify include: +For a {{{ .starter }}} cluster, you can use Terraform to manage resources. 
The arguments that you can modify include: - `display_name`: The display name of the cluster. - `spending_limit`: The spending limit of the cluster. - `endpoints.public.disabled`: Whether to disable the public endpoint. - `automated_backup_policy.start_time`: The UTC time of day in `HH:mm` format when the automated backup starts. -To modify a TiDB Cloud Serverless cluster, you can modify the configuration of the `tidbcloud_serverless_cluster` resource, then use the `terraform apply` command to apply the changes. For example, you can modify the `display_name` and `spending_limit` as follows: +To modify a {{{ .starter }}} cluster, you can modify the configuration of the `tidbcloud_serverless_cluster` resource, then use the `terraform apply` command to apply the changes. For example, you can modify the `display_name` and `spending_limit` as follows: ``` resource "tidbcloud_serverless_cluster" "example" { @@ -418,13 +417,11 @@ resource "tidbcloud_serverless_cluster" "example" { } ``` -## Import a TiDB Cloud Serverless cluster - -For a TiDB Cloud Serverless cluster that is not managed by Terraform, you can use Terraform to manage it just by importing it. +## Import a {{{ .starter }}} cluster -Import a TiDB Cloud Serverless cluster that is not created by Terraform as follows: +For a {{{ .starter }}} cluster that is not managed by Terraform, you can bring it under Terraform management by importing it. -1. Add an import block for the new TiDB Cloud Serverless cluster resource. +1. Add an import block for the new `tidbcloud_serverless_cluster` resource. Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the cluster ID: @@ -437,7 +434,7 @@ Import a TiDB Cloud Serverless cluster that is not created by Terraform as follo 2. Generate the new configuration file. 
- Generate the new configuration file for the new TiDB Cloud Serverless cluster resource according to the import block: + Generate the new configuration file for the new `tidbcloud_serverless_cluster` resource according to the import block: ```shell terraform plan -generate-config-out=generated.tf @@ -460,9 +457,9 @@ Import a TiDB Cloud Serverless cluster that is not created by Terraform as follo Now you can manage the imported cluster with Terraform. -## Delete a TiDB Cloud Serverless cluster +## Delete a {{{ .starter }}} cluster -To delete a TiDB Cloud Serverless cluster, you can delete the configuration of the `tidbcloud_serverless_cluster` resource, then use the `terraform apply` command to destroy the resource: +To delete a {{{ .starter }}} cluster, you can delete the configuration of the `tidbcloud_serverless_cluster` resource, then use the `terraform apply` command to destroy the resource: ```shell $ terraform apply @@ -542,7 +539,7 @@ tidbcloud_serverless_cluster.example: Destruction complete after 1s Apply complete! Resources: 0 added, 0 changed, 1 destroyed. ``` -Now, if you run the `terraform show` command, it will show no managed resources because the resource has been cleared: +Now, if you run the `terraform show` command, it will show no managed resources because the resource has been destroyed: ``` $ terraform show diff --git a/tidb-cloud/terraform-use-serverless-export-resource.md b/tidb-cloud/terraform-use-serverless-export-resource.md index 2c2f382a14273..6dc11c96d285f 100644 --- a/tidb-cloud/terraform-use-serverless-export-resource.md +++ b/tidb-cloud/terraform-use-serverless-export-resource.md @@ -1,36 +1,36 @@ --- -title: Use TiDB Cloud Serverless Export Resource -summary: Learn how to use the TiDB Cloud Serverless export resource to create and modify a TiDB Cloud Serverless export task. 
+title: Use `tidbcloud_serverless_export` Resource +summary: Learn how to use the `tidbcloud_serverless_export` resource to create and modify data export tasks for {{{ .starter }}} or {{{ .essential }}} clusters. --- -# Use TiDB Cloud Serverless Export Resource +# Use `tidbcloud_serverless_export` Resource -This document describes how to manage a [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) data export task using the `tidbcloud_serverless_export` resource. +This document describes how to manage data export tasks for {{{ .starter }}} or {{{ .essential }}} clusters using the `tidbcloud_serverless_export` resource. The features of the `tidbcloud_serverless_export` resource include the following: -- Create TiDB Cloud Serverless data export tasks. -- Import TiDB Cloud Serverless data export tasks. -- Delete TiDB Cloud Serverless data export tasks. +- Create data export tasks for {{{ .starter }}} or {{{ .essential }}} clusters. +- Import data export tasks for {{{ .starter }}} or {{{ .essential }}} clusters. +- Delete data export tasks for {{{ .starter }}} or {{{ .essential }}} clusters. > **Note:** > -> TiDB Cloud Serverless export resource cannot be modified. If you want to change the configuration of a TiDB Cloud Serverless export resource, you need to delete the existing one, and then create a new one. +> The `tidbcloud_serverless_export` resource cannot be modified. If you want to change the configuration of the `tidbcloud_serverless_export` resource, you need to delete the existing one, and then create a new one. ## Prerequisites - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) v0.4.0 or later. -- [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md). +- [Create a {{{ .starter }}} or {{{ .essential }}} cluster](/tidb-cloud/create-tidb-cluster-serverless.md). 
-## Create a TiDB Cloud Serverless data export task +## Create a data export task for a {{{ .starter }}} or {{{ .essential }}} cluster -You can create a TiDB Cloud Serverless data export task using the `tidbcloud_serverless_export` resource. - -The following example shows how to create a TiDB Cloud Serverless data export task. +You can create a data export task for a {{{ .starter }}} or {{{ .essential }}} cluster using the `tidbcloud_serverless_export` resource. 1. Create a directory for the export and enter it. -2. Create a `export.tf` file: +2. Create an `export.tf` file for the data export task. + + The following is an example of the `export.tf` file: ``` terraform { @@ -142,13 +142,11 @@ The following example shows how to create a TiDB Cloud Serverless data export ta } ``` -## Import a TiDB Cloud Serverless data export task - -For a TiDB Serverless data export task that is not managed by Terraform, you can use Terraform to manage it just by importing it. +## Import a data export task for a {{{ .starter }}} or {{{ .essential }}} cluster -Import a TiDB Cloud Serverless data export task that is not created by Terraform as follows: +If a data export task for a {{{ .starter }}} or {{{ .essential }}} cluster is not managed by Terraform, you can bring it under Terraform management by importing it. -1. Add an import block for the new TiDB Cloud Serverless export resource. +1. Add an import block for the new `tidbcloud_serverless_export` resource. Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the format of `cluster_id,export_id`: @@ -184,9 +182,9 @@ Import a TiDB Cloud Serverless data export task that is not created by Terraform Now you can manage the imported export with Terraform. 
-## Delete a TiDB Cloud Serverless data export task +## Delete a data export task for a {{{ .starter }}} or {{{ .essential }}} cluster -To delete a TiDB Cloud Serverless data export task, you can delete the configuration of the `tidbcloud_serverless_export` resource, then use the `terraform apply` command to destroy the resource: +To delete a data export task for a {{{ .starter }}} or {{{ .essential }}} cluster, you can delete the configuration of the `tidbcloud_serverless_export` resource, then use the `terraform apply` command to destroy the resource: ```shell $ terraform apply diff --git a/tidb-cloud/terraform-use-sql-user-resource.md b/tidb-cloud/terraform-use-sql-user-resource.md index 8a8103189c7a9..4cfea3afeaba8 100644 --- a/tidb-cloud/terraform-use-sql-user-resource.md +++ b/tidb-cloud/terraform-use-sql-user-resource.md @@ -1,9 +1,9 @@ --- -title: Use SQL User Resource -summary: Learn how to use the SQL user resource to create and modify a TiDB Cloud SQL user. +title: Use the `tidbcloud_sql_user` Resource +summary: Learn how to use the `tidbcloud_sql_user` resource to create and modify a TiDB Cloud SQL user. --- -# Use SQL User Resource +# Use the `tidbcloud_sql_user` Resource This document describes how to manage TiDB Cloud SQL users using the `tidbcloud_sql_user` resource. @@ -17,7 +17,9 @@ The features of the `tidbcloud_sql_user` resource include the following: ## Prerequisites - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) v0.4.0 or later. -- [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md) or [a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md). +- Refer to one of the following documents to create a TiDB Cloud cluster: + - [Create a {{{ .starter }}} or {{{ .essential }}} cluster](/tidb-cloud/create-tidb-cluster-serverless.md) + - [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md) 
## Create a SQL user @@ -53,9 +55,9 @@ The following example shows how to create a TiDB Cloud SQL user. Use the `resource` block to define the resource of TiDB Cloud, including the resource type, resource name, and resource details. - - To use the SQL user resource, set the resource type as `tidbcloud_sql_user`. + - To use the `tidbcloud_sql_user` resource, set the resource type as `tidbcloud_sql_user`. - For the resource name, you can define it as needed. For example, `example`. - - For SQL users in the TiDB Cloud Serverless cluster, the `user_name` and builtin role `role_readonly` and `role_readwrite` must start with the user prefix, you can get the user prefix by running the `tidbcloud_serverless_cluster` data source. + - For SQL users in the {{{ .starter }}} or {{{ .essential }}} cluster, the `user_name` and the built-in roles `role_readonly` and `role_readwrite` must start with the user prefix. You can get the user prefix by running the `tidbcloud_serverless_cluster` data source. - To get the SQL user specification information, see [`tidbcloud_sql_user` (Resource)](https://registry.terraform.io/providers/tidbcloud/tidbcloud/latest/docs/resources/sql_user). 3. Run the `terraform apply` command. It is not recommended to use `terraform apply --auto-approve` when you apply a resource. @@ -201,7 +203,7 @@ For a TiDB Cloud SQL user that is not managed by Terraform, you can use Terrafor For example, you can import a SQL user that is not created by Terraform as follows: -1. Add an import block for the new SQL user resource +1. Add an import block for the new `tidbcloud_sql_user` resource. Add the following import block to your `.tf` file, replace `example` with a desired resource name, and replace `${id}` with the format of `cluster_id,user_name`: @@ -212,9 +214,9 @@ For example, you can import a SQL user that is not created by Terraform as follo } ``` -2. Generate the new configuration file +2. Generate the new configuration file. 
- Generate the new configuration file for the new SQL user resource according to the import block: + Generate the new configuration file for the new `tidbcloud_sql_user` resource according to the import block: ```shell terraform plan -generate-config-out=generated.tf @@ -224,7 +226,7 @@ For example, you can import a SQL user that is not created by Terraform as follo Then the `generated.tf` file is created in the current directory, which contains the configuration of the imported resource. But the provider will throw an error because the required argument `password` is not set. You can replace the value of `password` argument to the `tidbcloud_sql_user` resource in the generated configuration file. -3. Review and apply the generated configuration +3. Review and apply the generated configuration. Review the generated configuration file to ensure that it meets your needs. Optionally, you can move the contents of this file to your preferred location. diff --git a/tidb-cloud/third-party-monitoring-integrations.md b/tidb-cloud/third-party-monitoring-integrations.md index daf7b3eca11d8..e55ed7ed9cca9 100644 --- a/tidb-cloud/third-party-monitoring-integrations.md +++ b/tidb-cloud/third-party-monitoring-integrations.md @@ -1,44 +1,29 @@ --- -title: Third-Party Metrics Integrations(Beta) +title: Third-Party Metrics Integrations summary: Learn how to use third-party metrics integrations. --- -# Third-Party Metrics Integrations(Beta) +# Third-Party Metrics Integrations -You can integrate TiDB Cloud with third-party metrics services to receive TiDB Cloud alerts and view the performance metrics of your TiDB cluster using the metrics services. The third-party metrics integrations are currently in beta. 
+You can integrate TiDB Cloud with the following third-party metrics services to receive TiDB Cloud alerts and view the performance metrics of your TiDB cluster in these services: -## Required access +- [Datadog integration](#datadog-integration) +- [Prometheus and Grafana integration](#prometheus-and-grafana-integration) +- [New Relic integration](#new-relic-integration) -To edit third-party integration settings, you must be in the `Organization Owner` role of your organization or the `Project Owner` role of the target project. - -## View or modify third-party integrations - -1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target project using the combo box in the upper-left corner. -2. In the left navigation pane, click **Project Settings** > **Integrations**. - -The available third-party integrations are displayed. - -## Limitation - -- For [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters, third-party metrics integrations are not supported. - -- Third-party metrics integrations are not available when the cluster status is **CREATING**, **RESTORING**, **PAUSED**, or **RESUMING**. - -## Available integrations - -### Datadog integration (beta) +## Datadog integration With the Datadog integration, you can configure TiDB Cloud to send metric data about your TiDB clusters to [Datadog](https://www.datadoghq.com/) and view these metrics in your Datadog dashboards. For the detailed integration steps and a list of metrics that Datadog tracks, refer to [Integrate TiDB Cloud with Datadog](/tidb-cloud/monitor-datadog-integration.md). -### Prometheus and Grafana integration (beta) +## Prometheus and Grafana integration With the Prometheus and Grafana integration, you can get a `scrape_config` file for Prometheus from TiDB Cloud and use the content from the file to configure Prometheus. You can view these metrics in your Grafana dashboards. 
For the detailed integration steps and a list of metrics that Prometheus tracks, see [Integrate TiDB Cloud with Prometheus and Grafana](/tidb-cloud/monitor-prometheus-and-grafana-integration.md). -### New Relic integration (beta) +## New Relic integration With the New Relic integration, you can configure TiDB Cloud to send metric data about your TiDB clusters to [New Relic](https://newrelic.com/) and view these metrics in your New Relic dashboards. diff --git a/tidb-cloud/ticloud-ai.md b/tidb-cloud/ticloud-ai.md deleted file mode 100644 index 7f2a6b705c498..0000000000000 --- a/tidb-cloud/ticloud-ai.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: ticloud ai -summary: The reference of `ticloud ai`. ---- - -# ticloud ai - -Chat with TiDB Bot: - -```shell -ticloud ai [flags] -``` - -## Examples - -Chat with TiDB Bot in interactive mode: - -```shell -ticloud ai -``` - -Chat with TiDB Bot in non-interactive mode: - -```shell -ticloud ai -q "How to create a cluster?" -``` - -## Flags - -In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. - -| Flag | Description | Required | Note | -|--------------------|-----------------------------------|----------|------------------------------------------------------| -| -q, --query string | Specifies your query to TiDB Bot. | Yes | Only works in non-interactive mode. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | - -## Inherited flags - -| Flag | Description | Required | Note | -|----------------------|--------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| -| --no-color | Disables color in output. | No | Only works in non-interactive mode. 
In interactive mode, disabling color might not work with some UI components. | -| -P, --profile string | Specifies the active [user profile](/tidb-cloud/cli-reference.md#user-profile) used in this command. | No | Works in both non-interactive and interactive modes. | -| -D, --debug | Enables debug mode. | No | Works in both non-interactive and interactive modes. | - -## Feedback - -If you have any questions or suggestions on the TiDB Cloud CLI, feel free to create an [issue](https://github.com/tidbcloud/tidbcloud-cli/issues/new/choose). Also, we welcome any contributions. diff --git a/tidb-cloud/ticloud-auditlog-config.md b/tidb-cloud/ticloud-auditlog-config.md deleted file mode 100644 index dbf6f68ff2c76..0000000000000 --- a/tidb-cloud/ticloud-auditlog-config.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: ticloud serverless audit-log config -summary: The reference of `ticloud serverless audit-log config`. ---- - -# ticloud serverless audit-log config - -Configure the database audit logging for a TiDB Cloud Serverless cluster. - -```shell -ticloud serverless audit-log config [flags] -``` - -## Examples - -Configure the database audit logging in interactive mode: - -```shell -ticloud serverless audit-log config -``` - -Enable the database audit logging in non-interactive mode: - -```shell -ticloud serverless audit-log config -c --enabled -``` - -Disable the database audit logging in non-interactive mode: - -```shell -ticloud serverless audit-log config -c --enabled=false -``` - -Unredact the database audit logging in non-interactive mode: - -```shell -ticloud serverless audit-log config -c --unredacted -``` - -## Flags - -In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. 
- -| Flag | Description | Required | Note | -|-------------------------|-----------------------------------------------------------------------------|----------|------------------------------------------------------| -| -c, --cluster-id string | The ID of the cluster. | Yes | Only works in non-interactive mode. | -| --enabled | Enable or disable the database audit logging. | No | Only works in non-interactive mode. | -| --unredacted | Enable or disable data redaction in audit logs. | No | Only works in non-interactive mode. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | - -## Inherited flags - -| Flag | Description | Required | Note | -|----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| -| --no-color | Disables color in output. | No | Only works in non-interactive mode. In interactive mode, disabling color might not work with some UI components. | -| -P, --profile string | Specifies the active [user profile](/tidb-cloud/cli-reference.md#user-profile) used in this command. | No | Works in both non-interactive and interactive modes. | -| -D, --debug | Enables debug mode. | No | Works in both non-interactive and interactive modes. | - -## Feedback - -If you have any questions or suggestions on the TiDB Cloud CLI, feel free to create an [issue](https://github.com/tidbcloud/tidbcloud-cli/issues/new/choose). Also, we welcome any contributions. diff --git a/tidb-cloud/ticloud-auditlog-download.md b/tidb-cloud/ticloud-auditlog-download.md deleted file mode 100644 index e1eaf4c0e3b59..0000000000000 --- a/tidb-cloud/ticloud-auditlog-download.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: ticloud serverless audit-log download -summary: The reference of `ticloud serverless audit-log download`. 
---- - -# ticloud serverless audit-log download - -Download the database audit logs from a TiDB Cloud Serverless cluster. - -```shell -ticloud serverless audit-log download [flags] -``` - -## Examples - -Download the database audit logs in interactive mode: - -```shell -ticloud serverless audit-log download -``` - -Download the database audit logs in non-interactive mode: - -```shell -ticloud serverless audit-log download -c --start-date --end-date -``` - -## Flags - -In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. - -| Flag | Description | Required | Note | -|-------------------------|---------------------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------| -| -c, --cluster-id string | Cluster ID. | Yes | Only works in non-interactive mode. | -| --start-date string | The start date of the audit log you want to download in the format of `YYYY-MM-DD`, for example `2025-01-01`. | Yes | Only works in non-interactive mode. | -| --end-date string | The end date of the audit log you want to download in the format of `YYYY-MM-DD`, for example `2025-01-01`. | Yes | Only works in non-interactive mode. | -| --output-path string | The path where you want to download the audit logs. If not specified, logs are downloaded to the current directory. | No | Only works in non-interactive mode. | -| --concurrency int | Download concurrency (`3` by default). | No | Works in both non-interactive and interactive modes. | -| --force | Download without confirmation. | No | Works in both non-interactive and interactive modes. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. 
| - -## Inherited flags - -| Flag | Description | Required | Note | -|----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| -| --no-color | Disables color in output. | No | Only works in non-interactive mode. In interactive mode, disabling color might not work with some UI components. | -| -P, --profile string | Specifies the active [user profile](/tidb-cloud/cli-reference.md#user-profile) used in this command. | No | Works in both non-interactive and interactive modes. | -| -D, --debug | Enables debug mode. | No | Works in both non-interactive and interactive modes. | - -## Feedback - -If you have any questions or suggestions on the TiDB Cloud CLI, feel free to create an [issue](https://github.com/tidbcloud/tidbcloud-cli/issues/new/choose). Also, we welcome any contributions. diff --git a/tidb-cloud/ticloud-branch-create.md b/tidb-cloud/ticloud-branch-create.md index 5caca96a9f52d..6c0bf7ca5707e 100644 --- a/tidb-cloud/ticloud-branch-create.md +++ b/tidb-cloud/ticloud-branch-create.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless branch create`. 
# ticloud serverless branch create -Create a [branch](/tidb-cloud/branch-overview.md) for a TiDB Cloud Serverless cluster: +Create a [branch](/tidb-cloud/branch-overview.md) for a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless branch create [flags] @@ -13,13 +13,13 @@ ticloud serverless branch create [flags] ## Examples -Create a branch for a TiDB Cloud Serverless cluster in interactive mode: +Create a branch for a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless branch create ``` -Create a branch for a TiDB Cloud Serverless cluster in non-interactive mode: +Create a branch for a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless branch create --cluster-id --display-name diff --git a/tidb-cloud/ticloud-branch-delete.md b/tidb-cloud/ticloud-branch-delete.md index b11209b7f9293..d1ad6e4b19c8e 100644 --- a/tidb-cloud/ticloud-branch-delete.md +++ b/tidb-cloud/ticloud-branch-delete.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless branch delete`. 
# ticloud serverless branch delete -Delete a branch from your TiDB Cloud Serverless cluster: +Delete a branch from your {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless branch delete [flags] @@ -19,13 +19,13 @@ ticloud serverless branch rm [flags] ## Examples -Delete a TiDB Cloud Serverless branch in interactive mode: +Delete a branch in interactive mode: ```shell ticloud serverless branch delete ``` -Delete a TiDB Cloud Serverless branch in non-interactive mode: +Delete a branch in non-interactive mode: ```shell ticloud branch delete --branch-id --cluster-id diff --git a/tidb-cloud/ticloud-branch-describe.md b/tidb-cloud/ticloud-branch-describe.md index 9074460c63fac..911efdd04c349 100644 --- a/tidb-cloud/ticloud-branch-describe.md +++ b/tidb-cloud/ticloud-branch-describe.md @@ -19,13 +19,13 @@ ticloud serverless branch get [flags] ## Examples -Get branch information of a TiDB Cloud Serverless cluster in interactive mode: +Get branch information of a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless branch describe ``` -Get branch information of a TiDB Cloud Serverless cluster in non-interactive mode: +Get branch information of a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless branch describe --branch-id --cluster-id diff --git a/tidb-cloud/ticloud-branch-list.md b/tidb-cloud/ticloud-branch-list.md index 805ffa5a7d574..fbf7d462a8a0f 100644 --- a/tidb-cloud/ticloud-branch-list.md +++ b/tidb-cloud/ticloud-branch-list.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless branch list`. 
# ticloud serverless branch list -List all branches for a TiDB Cloud Serverless cluster: +List all branches for a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless branch list [flags] @@ -19,19 +19,19 @@ ticloud serverless branch ls [flags] ## Examples -List all branches for a TiDB Cloud Serverless cluster in interactive mode: +List all branches for a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless branch list ``` -List all branches for a specific TiDB Cloud Serverless cluster in non-interactive mode: +List all branches for a specific {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless branch list -c ``` -List all branches for a specific TiDB Cloud Serverless cluster in the JSON format: +List all branches for a specific {{{ .starter }}} or {{{ .essential }}} cluster in the JSON format: ```shell ticloud serverless branch list -o json diff --git a/tidb-cloud/ticloud-branch-shell.md b/tidb-cloud/ticloud-branch-shell.md index 835cd99f440fe..dd1dfb0255c8c 100644 --- a/tidb-cloud/ticloud-branch-shell.md +++ b/tidb-cloud/ticloud-branch-shell.md @@ -6,7 +6,7 @@ aliases: ['/tidbcloud/ticloud-connect'] # ticloud serverless branch shell -Connect to a branch of a TiDB Cloud Serverless cluster: +Connect to a branch of a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless branch shell [flags] @@ -14,25 +14,25 @@ ticloud serverless branch shell [flags] ## Examples -Connect to a TiDB Cloud Serverless branch in interactive mode: +Connect to a branch in interactive mode: ```shell ticloud serverless branch shell ``` -Connect to a TiDB Cloud Serverless branch with the default user in non-interactive mode: +Connect to a branch with the default user in non-interactive mode: ```shell ticloud serverless branch shell -c -b ``` -Connect to a TiDB Cloud Serverless branch with the default user and password in non-interactive mode: +Connect to a 
branch with the default user and password in non-interactive mode: ```shell ticloud serverless branch shell -c -b --password ``` -Connect to a TiDB Cloud Serverless branch with a specific user and password in non-interactive mode: +Connect to a branch with a specific user and password in non-interactive mode: ```shell ticloud serverless branch shell -c -b -u --password diff --git a/tidb-cloud/ticloud-cluster-create.md b/tidb-cloud/ticloud-cluster-create.md index d295836efe330..78c6718cabb4a 100644 --- a/tidb-cloud/ticloud-cluster-create.md +++ b/tidb-cloud/ticloud-cluster-create.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless create`. # ticloud serverless create -Create a TiDB Cloud Serverless cluster: +Create a TiDB Cloud cluster: ```shell ticloud serverless create [flags] @@ -13,23 +13,29 @@ ticloud serverless create [flags] ## Examples -Create a TiDB Cloud Serverless cluster in interactive mode: +Create a TiDB Cloud cluster in interactive mode: ```shell ticloud serverless create ``` -Create a TiDB Cloud Serverless cluster in non-interactive mode: +Create a TiDB Cloud cluster in non-interactive mode: ```shell ticloud serverless create --display-name --region ``` -Create a TiDB Cloud Serverless cluster with a spending limit in non-interactive mode: +Create a {{{ .starter }}} cluster with a spending limit in non-interactive mode: ```shell ticloud serverless create --display-name --region --spending-limit-monthly -``` +``` + +Create a {{{ .essential }}} cluster in non-interactive mode: + +```shell +ticloud serverless create --display-name --region --max-rcu --min-rcu +``` ## Flags @@ -40,9 +46,11 @@ In non-interactive mode, you need to manually enter the required flags. In inter | -n --display-name string | Specifies the name of the cluster to be created. | Yes | Only works in non-interactive mode. | | --spending-limit-monthly int | Specifies the maximum monthly spending limit in USD cents. | No | Only works in non-interactive mode. 
| | -p, --project-id string | Specifies the ID of the project, in which the cluster will be created. The default value is `default project`. | No | Only works in non-interactive mode. | -| -r, --region string | Specifies the name of cloud region. You can use "ticloud serverless region" to see all regions. | Yes | Only works in non-interactive mode. | -| --disable-public-endpoint | Disables the public endpoint. | No | Only works in non-interactive mode. | -| --encryption | Enables enhanced encryption at rest. | No | Only works in non-interactive mode. | +| -r, --region string | Specifies the name of the cloud region. You can view all available regions using the `ticloud serverless region` command. | Yes | Only works in non-interactive mode. | +| --disable-public-endpoint | Disables the public endpoint. Use this option if you want to prevent public access to the cluster. | No | Only works in non-interactive mode. | +| --encryption | Enables dual-layer data encryption. It is enabled by default for {{{ .essential }}} clusters, and disabled by default for {{{ .starter }}} clusters. | No | Only works in non-interactive mode. | +| --max-rcu int32 | Sets the maximum Request Capacity Units (RCUs) for the {{{ .essential }}} cluster, up to 100000. | No | Only works in non-interactive mode. | +| --min-rcu int32 | Sets the minimum Request Capacity Units (RCUs) for the {{{ .essential }}} cluster, at least 2000. | No | Only works in non-interactive mode. | | -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes | ## Inherited flags diff --git a/tidb-cloud/ticloud-cluster-delete.md b/tidb-cloud/ticloud-cluster-delete.md index 3bbb3342cdd77..eea820c386fd1 100644 --- a/tidb-cloud/ticloud-cluster-delete.md +++ b/tidb-cloud/ticloud-cluster-delete.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless delete`. 
# ticloud serverless delete -Delete a TiDB Cloud Serverless cluster from your project: +Delete a {{{ .starter }}} or {{{ .essential }}} cluster from your project: ```shell ticloud serverless delete [flags] @@ -19,13 +19,13 @@ ticloud serverless rm [flags] ## Examples -Delete a TiDB Cloud Serverless cluster in interactive mode: +Delete a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless delete ``` -Delete a TiDB Cloud Serverless cluster in non-interactive mode: +Delete a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless delete --cluster-id diff --git a/tidb-cloud/ticloud-cluster-describe.md b/tidb-cloud/ticloud-cluster-describe.md index 7f23d0efdba06..064d382aaef71 100644 --- a/tidb-cloud/ticloud-cluster-describe.md +++ b/tidb-cloud/ticloud-cluster-describe.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless describe`. # ticloud serverless describe -Get information about a TiDB Cloud Serverless cluster (such as the cluster configurations and cluster status): +Get information about a {{{ .starter }}} or {{{ .essential }}} cluster (such as the cluster configurations and cluster status): ```shell ticloud serverless describe [flags] @@ -19,13 +19,13 @@ ticloud serverless get [flags] ## Examples -Get information about a TiDB Cloud Serverless cluster in interactive mode: +Get information about a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless describe ``` -Get information about a TiDB Cloud Serverless cluster in non-interactive mode: +Get information about a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless describe --cluster-id diff --git a/tidb-cloud/ticloud-cluster-list.md b/tidb-cloud/ticloud-cluster-list.md index 50092432c309c..01369bd28502e 100644 --- a/tidb-cloud/ticloud-cluster-list.md +++ b/tidb-cloud/ticloud-cluster-list.md @@ -5,7 +5,7 @@ summary: The 
reference of `ticloud serverless list`. # ticloud serverless list -List all TiDB Cloud Serverless clusters in a project: +List all {{{ .starter }}} and {{{ .essential }}} clusters in a project: ```shell ticloud serverless list [flags] @@ -19,19 +19,19 @@ ticloud serverless ls [flags] ## Examples -List all TiDB Cloud Serverless clusters in interactive mode: +List all {{{ .starter }}} and {{{ .essential }}} clusters in interactive mode: ```shell ticloud serverless list ``` -List all TiDB Cloud Serverless clusters in a specified project in non-interactive mode: +List all {{{ .starter }}} and {{{ .essential }}} clusters in a specified project in non-interactive mode: ```shell ticloud serverless list -p ``` -List all TiDB Cloud Serverless clusters in a specified project with the JSON format in non-interactive mode: +List all {{{ .starter }}} and {{{ .essential }}} clusters in a specified project with the JSON format in non-interactive mode: ```shell ticloud serverless list -p -o json diff --git a/tidb-cloud/ticloud-auditlog-describe.md b/tidb-cloud/ticloud-serverless-audit-log-config-describe.md similarity index 54% rename from tidb-cloud/ticloud-auditlog-describe.md rename to tidb-cloud/ticloud-serverless-audit-log-config-describe.md index 0ce320858ebf4..5855a29bc45fb 100644 --- a/tidb-cloud/ticloud-auditlog-describe.md +++ b/tidb-cloud/ticloud-serverless-audit-log-config-describe.md @@ -1,20 +1,14 @@ --- -title: ticloud serverless audit-log describe -summary: The reference of `ticloud serverless audit-log describe`. +title: ticloud serverless audit-log config describe +summary: The reference of `ticloud serverless audit-log config describe`. --- -# ticloud serverless audit-log describe +# ticloud serverless audit-log config describe -Describe the database audit logging configuration for a TiDB Cloud Serverless cluster. +Describe the database audit logging configuration for a {{{ .essential }}} cluster. 
```shell -ticloud serverless audit-log describe [flags] -``` - -Or use the following alias command: - -```shell -ticloud serverless audit-log get [flags] +ticloud serverless audit-log config describe [flags] ``` ## Examples @@ -22,31 +16,30 @@ ticloud serverless audit-log get [flags] Get the database audit logging configuration in interactive mode: ```shell -ticloud serverless audit-log describe +ticloud serverless audit-log config describe ``` Get the database audit logging configuration in non-interactive mode: ```shell -ticloud serverless audit-log describe -c +ticloud serverless audit-log config describe -c ``` ## Flags -In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. - | Flag | Description | Required | Note | |-------------------------|----------------------------|----------|------------------------------------------------------| | -c, --cluster-id string | The cluster ID. | Yes | Only works in non-interactive mode. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | +| -h, --help | Shows help information for this command. | No | Works in both interactive and non-interactive modes. | ## Inherited flags | Flag | Description | Required | Note | |----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| -| --no-color | Disables color in output. | No | Only works in non-interactive mode. In interactive mode, disabling color might not work with some UI components. | -| -D, --debug | Enables debug mode. | No | Works in both non-interactive and interactive modes. | +| -D, --debug | Enables debug mode. | No | Works in both interactive and non-interactive modes. | +| --no-color | Disables color output. 
| No | Only works in non-interactive mode. | +| -P, --profile string | Specifies the profile to use from your configuration file. | No | Works in both interactive and non-interactive modes. | ## Feedback -If you have any questions or suggestions on the TiDB Cloud CLI, feel free to create an [issue](https://github.com/tidbcloud/tidbcloud-cli/issues/new/choose). Also, we welcome any contributions. \ No newline at end of file +If you have any questions or suggestions on the TiDB Cloud CLI, feel free to create an [issue](https://github.com/tidbcloud/tidbcloud-cli/issues/new/choose). Also, we welcome any contributions. diff --git a/tidb-cloud/ticloud-serverless-audit-log-config-update.md b/tidb-cloud/ticloud-serverless-audit-log-config-update.md new file mode 100644 index 0000000000000..9725f78d29b1c --- /dev/null +++ b/tidb-cloud/ticloud-serverless-audit-log-config-update.md @@ -0,0 +1,79 @@ +--- +title: ticloud serverless audit-log config update +summary: The reference of `ticloud serverless audit-log config update`. +--- + +# ticloud serverless audit-log config update + +Update the database audit logging configuration for a {{{ .essential }}} cluster. 
+ +```shell +ticloud serverless audit-log config update [flags] +``` + +## Examples + +Configure database audit logging in interactive mode: + +```shell +ticloud serverless audit-log config update +``` + +Unredact the database audit log in non-interactive mode: + +```shell +ticloud serverless audit-log config update -c <cluster-id> --unredacted +``` + +Enable database audit logging with Amazon S3 storage in non-interactive mode: + +```shell +ticloud serverless audit-log config update -c <cluster-id> --enabled --cloud-storage S3 --s3.uri <s3-uri> --s3.access-key-id <access-key-id> --s3.secret-access-key <secret-access-key> +``` + +Configure database audit logging rotation strategy in non-interactive mode: + +```shell +ticloud serverless audit-log config update -c <cluster-id> --rotation-interval-minutes <interval-minutes> --rotation-size-mib <size-mib> +``` + +Disable database audit logging in non-interactive mode: + +```shell +ticloud serverless audit-log config update -c <cluster-id> --enabled=false +``` + +## Flags + +| Flag | Description | Required | Note | +|------|-------------|----------|------| +| --azblob.sas-token string | The SAS token of Azure Blob Storage. | No | Only works in non-interactive mode. | +| --azblob.uri string | The Azure Blob Storage URI in `azure://<account-name>.blob.core.windows.net/<container-name>/<folder-path>` format. | No | Only works in non-interactive mode. | +| --cloud-storage string | The cloud storage provider. Valid options: `"TIDB_CLOUD"`, `"S3"`, `"GCS"`, `"AZURE_BLOB"`, and `"OSS"`. | No | Only works in non-interactive mode. | +| -c, --cluster-id string | The ID of the cluster to be updated. | Yes | Only works in non-interactive mode. | +| --enabled | Enables or disables database audit logging. | No | Only works in non-interactive mode. | +| --gcs.service-account-key string | The Base64-encoded service account key of Google Cloud Storage. | No | Only works in non-interactive mode. | +| --gcs.uri string | The Google Cloud Storage URI in `gs://<bucket-name>/<folder-path>` format. | No | Only works in non-interactive mode. 
| +| --oss.access-key-id string | The access key ID of Alibaba Cloud Object Storage Service (OSS). | No | Only works in non-interactive mode. | +| --oss.access-key-secret string | The access key secret of Alibaba Cloud OSS. | No | Only works in non-interactive mode. | +| --oss.uri string | The Alibaba Cloud OSS URI in `oss://<bucket-name>/<folder-path>` format. | No | Only works in non-interactive mode. | +| --rotation-interval-minutes int32 | The rotation interval in minutes. Valid range: `[10, 1440]`. | No | Only works in non-interactive mode. | +| --rotation-size-mib int32 | The rotation size in MiB. Valid range: `[1, 1024]`. | No | Only works in non-interactive mode. | +| --s3.access-key-id string | The access key ID of Amazon S3. You only need to set either `--s3.role-arn` or both `--s3.access-key-id` and `--s3.secret-access-key`. | No | Only works in non-interactive mode. | +| --s3.role-arn string | The role ARN of Amazon S3. You only need to set either `--s3.role-arn` or both `--s3.access-key-id` and `--s3.secret-access-key`. | No | Only works in non-interactive mode. | +| --s3.secret-access-key string | The secret access key of Amazon S3. You only need to set either `--s3.role-arn` or both `--s3.access-key-id` and `--s3.secret-access-key`. | No | Only works in non-interactive mode. | +| --s3.uri string | The Amazon S3 URI in `s3://<bucket-name>/<folder-path>` format. | No | Only works in non-interactive mode. | +| --unredacted | Unredacts or redacts the database audit log. | No | Only works in non-interactive mode. | +| -h, --help | Shows help information for this command. | No | Works in both interactive and non-interactive modes. | + +## Inherited flags + +| Flag | Description | Required | Note | +|------|-------------|----------|------| +| -D, --debug | Enables debug mode. | No | Works in both interactive and non-interactive modes. | +| --no-color | Disables color output. | No | Only works in non-interactive mode. | +| -P, --profile string | Specifies the profile to use from your configuration file. 
| No | Works in both interactive and non-interactive modes. | + +## Feedback + +If you have any questions or suggestions on the TiDB Cloud CLI, feel free to create an [issue](https://github.com/tidbcloud/tidbcloud-cli/issues/new/choose). Also, we welcome any contributions. diff --git a/tidb-cloud/ticloud-serverless-audit-log-download.md b/tidb-cloud/ticloud-serverless-audit-log-download.md new file mode 100644 index 0000000000000..b278065120691 --- /dev/null +++ b/tidb-cloud/ticloud-serverless-audit-log-download.md @@ -0,0 +1,50 @@ +--- +title: ticloud serverless audit-log download +summary: The reference of `ticloud serverless audit-log download`. +--- + +# ticloud serverless audit-log download + +Download the database audit log files from a {{{ .essential }}} cluster. + +```shell +ticloud serverless audit-log download [flags] +``` + +## Examples + +Download the database audit logs in interactive mode: + +```shell +ticloud serverless audit-log download +``` + +Download the database audit logs in non-interactive mode: + +```shell +ticloud serverless audit-log download -c --start-date --end-date +``` + +## Flags + +| Flag | Description | Required | Note | +|-------------------------|-----------------------------------------------------------------------------------------------|----------|------------------------------------------------------| +| -c, --cluster-id string | The ID of the cluster. | Yes | Only works in non-interactive mode. | +| --start-date string | The start date of the audit log you want to download in the format of `YYYY-MM-DD`, for example, `2025-01-01`. | Yes | Only works in non-interactive mode. | +| --end-date string | The end date of the audit log you want to download in the format of `YYYY-MM-DD`, for example, `2025-01-01`. | Yes | Only works in non-interactive mode. | +| --output-path string | The path to download the audit logs. If not specified, logs are downloaded to the current directory. | No | Only works in non-interactive mode. 
| +| --concurrency int | The number of concurrent downloads. The default value is `3`. | No | Works in both interactive and non-interactive modes. | +| --force | Downloads audit logs without confirmation. | No | Works in both interactive and non-interactive modes. | +| -h, --help | Shows help information for this command. | No | Works in both interactive and non-interactive modes. | + +## Inherited flags + +| Flag | Description | Required | Note | +|----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| +| -D, --debug | Enables debug mode. | No | Works in both interactive and non-interactive modes. | +| --no-color | Disables color output. | No | Only works in non-interactive mode. | +| -P, --profile string | Specifies the profile to use from your configuration file. | No | Works in both interactive and non-interactive modes. | + +## Feedback + +If you have any questions or suggestions on the TiDB Cloud CLI, feel free to create an [issue](https://github.com/tidbcloud/tidbcloud-cli/issues/new/choose). Also, we welcome any contributions. diff --git a/tidb-cloud/ticloud-auditlog-filter-create.md b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-create.md similarity index 53% rename from tidb-cloud/ticloud-auditlog-filter-create.md rename to tidb-cloud/ticloud-serverless-audit-log-filter-rule-create.md index 35b5babaf816c..4a02ad5fc22da 100644 --- a/tidb-cloud/ticloud-auditlog-filter-create.md +++ b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-create.md @@ -5,56 +5,48 @@ summary: The reference of `ticloud serverless audit-log filter-rule create`. # ticloud serverless audit-log filter-rule create -Create an audit log filter rule for a TiDB Cloud Serverless cluster. +Create an audit log filter rule for a {{{ .essential }}} cluster. 
```shell ticloud serverless audit-log filter-rule create [flags] ``` -Or use the following alias command: - -```shell -ticloud serverless audit-log filter create [flags] -``` - ## Examples Create a filter rule in interactive mode: ```shell -ticloud serverless audit-log filter create +ticloud serverless audit-log filter-rule create ``` -Create a filter rule that captures all audit logs in non-interactive mode: +Create a filter rule to capture all audit logs in non-interactive mode: ```shell -ticloud serverless audit-log filter create --cluster-id --name --rule '{"users":["%@%"],"filters":[{}]}' +ticloud serverless audit-log filter-rule create --cluster-id --display-name --rule '{"users":["%@%"],"filters":[{}]}' ``` -Create a filter rule that filters `QUERY` and `EXECUTE` events for the `test.t` table and filters `QUERY` events for all tables in non-interactive mode: +Create a filter rule to capture `QUERY` and `EXECUTE` events for the `test.t` table, and `QUERY` events for all tables in non-interactive mode: ```shell -ticloud serverless audit-log filter create --cluster-id --name --rule '{"users":["%@%"],"filters":[{"classes":["QUERY","EXECUTE"],"tables":["test.t"]},{"classes":["QUERY"]}]}' +ticloud serverless audit-log filter-rule create --cluster-id --display-name --rule '{"users":["%@%"],"filters":[{"classes":["QUERY","EXECUTE"],"tables":["test.t"]},{"classes":["QUERY"]}]}' ``` ## Flags -In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. - | Flag | Description | Required | Note | |-------------------------|-------------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------| | -c, --cluster-id string | The ID of the cluster. | Yes | Only works in non-interactive mode. | -| --name string | The name of the filter rule. | Yes | Only works in non-interactive mode. 
| -| --rule string | Filter rule expressions. Use `ticloud serverless audit-log filter template` to see filter templates. | Yes | Only works in non-interactive mode. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | +| --display-name string | The display name of the filter rule. | Yes | Only works in non-interactive mode. | +| --rule string | Filter rule expressions. Use `ticloud serverless audit-log filter-rule template` to see filter templates. | Yes | Only works in non-interactive mode. | +| -h, --help | Shows help information for this command. | No | Works in both interactive and non-interactive modes. | ## Inherited flags | Flag | Description | Required | Note | |----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| -| --no-color | Disables color in output. | No | Only works in non-interactive mode. In interactive mode, disabling color might not work with some UI components. | -| -P, --profile string | Specifies the active [user profile](/tidb-cloud/cli-reference.md#user-profile) used in this command. | No | Works in both non-interactive and interactive modes. | -| -D, --debug | Enables debug mode. | No | Works in both non-interactive and interactive modes. | +| -D, --debug | Enables debug mode. | No | Works in both interactive and non-interactive modes. | +| --no-color | Disables color output. | No | Only works in non-interactive mode. | +| -P, --profile string | Specifies the profile to use from your configuration file. | No | Works in both interactive and non-interactive modes. 
| ## Feedback diff --git a/tidb-cloud/ticloud-serverless-audit-log-filter-rule-delete.md b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-delete.md new file mode 100644 index 0000000000000..7f05bd9e70ddd --- /dev/null +++ b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-delete.md @@ -0,0 +1,47 @@ +--- +title: ticloud serverless audit-log filter-rule delete +summary: The reference of `ticloud serverless audit-log filter-rule delete`. +--- + +# ticloud serverless audit-log filter-rule delete + +Delete an audit log filter rule for a {{{ .essential }}} cluster. + +```shell +ticloud serverless audit-log filter-rule delete [flags] +``` + +## Examples + +Delete an audit log filter rule in interactive mode: + +```shell +ticloud serverless audit-log filter-rule delete +``` + +Delete an audit log filter rule in non-interactive mode: + +```shell +ticloud serverless audit-log filter-rule delete --cluster-id --filter-rule-id +``` + +## Flags + +| Flag | Description | Required | Note | +|-------------------------|-----------------------------------------------------|----------|------------------------------------------------------| +| -c, --cluster-id string | The ID of the cluster. | Yes | Only works in non-interactive mode. | +| --filter-rule-id string | The ID of the filter rule. | Yes | Only works in non-interactive mode. | +| --force | Deletes without confirmation. | No | Works in both interactive and non-interactive modes. | +| -h, --help | Shows help information for this command. | No | Works in both interactive and non-interactive modes. | + +## Inherited flags + +| Flag | Description | Required | Note | +|----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| +| -D, --debug | Enables debug mode. | No | Works in both interactive and non-interactive modes. 
| +| --no-color | Disables color output. | No | Only works in non-interactive mode. | +| -P, --profile string | Specifies the profile to use from your configuration file. | No | Works in both interactive and non-interactive modes. | + +## Feedback + +If you have any questions or suggestions on the TiDB Cloud CLI, feel free to create an [issue](https://github.com/tidbcloud/tidbcloud-cli/issues/new/choose). Also, we welcome any contributions. diff --git a/tidb-cloud/ticloud-auditlog-filter-describe.md b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-describe.md similarity index 57% rename from tidb-cloud/ticloud-auditlog-filter-describe.md rename to tidb-cloud/ticloud-serverless-audit-log-filter-rule-describe.md index c75c6dd6b1661..e2936f22f1799 100644 --- a/tidb-cloud/ticloud-auditlog-filter-describe.md +++ b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-describe.md @@ -5,49 +5,41 @@ summary: The reference of `ticloud serverless audit-log filter-rule describe`. # ticloud serverless audit-log filter-rule describe -Describe an audit log filter rule for a TiDB Cloud Serverless cluster. +Describe an audit log filter rule for a {{{ .essential }}} cluster. ```shell ticloud serverless audit-log filter-rule describe [flags] ``` -Or use the following alias command: - -```shell -ticloud serverless audit-log filter describe [flags] -``` - ## Examples Describe an audit log filter rule in interactive mode: ```shell -ticloud serverless audit-log filter describe +ticloud serverless audit-log filter-rule describe ``` Describe an audit log filter rule in non-interactive mode: ```shell -ticloud serverless audit-log filter describe --cluster-id --name +ticloud serverless audit-log filter-rule describe --cluster-id --filter-rule-id ``` ## Flags -In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. 
- | Flag | Description | Required | Note | |-------------------------|------------------------------|----------|------------------------------------------------------| | -c, --cluster-id string | The ID of the cluster. | Yes | Only works in non-interactive mode. | -| --name string | The name of the filter rule. | Yes | Only works in non-interactive mode. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | +| --filter-rule-id string | The ID of the filter rule. | Yes | Only works in non-interactive mode. | +| -h, --help | Shows help information for this command. | No | Works in both interactive and non-interactive modes. | ## Inherited flags | Flag | Description | Required | Note | |----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| -| --no-color | Disables color in output. | No | Only works in non-interactive mode. In interactive mode, disabling color might not work with some UI components. | -| -P, --profile string | Specifies the active [user profile](/tidb-cloud/cli-reference.md#user-profile) used in this command. | No | Works in both non-interactive and interactive modes. | -| -D, --debug | Enables debug mode. | No | Works in both non-interactive and interactive modes. | +| -D, --debug | Enables debug mode. | No | Works in both interactive and non-interactive modes. | +| --no-color | Disables color output. | No | Only works in non-interactive mode. | +| -P, --profile string | Specifies the profile to use from your configuration file. | No | Works in both interactive and non-interactive modes. 
| ## Feedback diff --git a/tidb-cloud/ticloud-auditlog-filter-list.md b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-list.md similarity index 54% rename from tidb-cloud/ticloud-auditlog-filter-list.md rename to tidb-cloud/ticloud-serverless-audit-log-filter-rule-list.md index ac11aa930831f..f9e2403255aea 100644 --- a/tidb-cloud/ticloud-auditlog-filter-list.md +++ b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-list.md @@ -5,55 +5,47 @@ summary: The reference of `ticloud serverless audit-log filter-rule list`. # ticloud serverless audit-log filter-rule list -List audit log filter rules for a TiDB Cloud Serverless cluster. +List audit log filter rules for a {{{ .essential }}} cluster. ```shell ticloud serverless audit-log filter-rule list [flags] ``` -Or use the following alias command: - -```shell -ticloud serverless audit-log filter list [flags] -``` - ## Examples List all audit log filter rules in interactive mode: ```shell -ticloud serverless audit-log filter list +ticloud serverless audit-log filter-rule list ``` List all audit log filter rules in non-interactive mode: ```shell -ticloud serverless audit-log filter list -c +ticloud serverless audit-log filter-rule list -c ``` List all audit log filter rules with JSON format in non-interactive mode: ```shell -ticloud serverless audit-log filter list -c -o json +ticloud serverless audit-log filter-rule list -c -o json ``` ## Flags -In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. - | Flag | Description | Required | Note | |-------------------------|---------------------------------------------------------------------------------------------------|----------|------------------------------------------------------| -| -c, --cluster-id string | The ID of the cluster whose audit log filter rules you want to list. | No | Only works in non-interactive mode. 
| -| -o, --output string | Specifies the output format (`human` by default). Valid values are `human` or `json`. To get a complete result, use the `json` format. | No | Works in both non-interactive and interactive modes. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | +| -c, --cluster-id string | The ID of the cluster. | No | Only works in non-interactive mode. | +| -o, --output string | Specifies the output format. Valid values are `human` (default) or `json`. For the complete result, use the `json` format. | No | Works in both interactive and non-interactive modes. | +| -h, --help | Shows help information for this command. | No | Works in both interactive and non-interactive modes. | ## Inherited flags | Flag | Description | Required | Note | |----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| -| --no-color | Disables color in output. | No | Only works in non-interactive mode. In interactive mode, disabling color might not work with some UI components. | -| -P, --profile string | Specifies the active [user profile](/tidb-cloud/cli-reference.md#user-profile) used in this command. | No | Works in both non-interactive and interactive modes. | -| -D, --debug | Enables debug mode. | No | Works in both non-interactive and interactive modes. | +| -D, --debug | Enables debug mode. | No | Works in both interactive and non-interactive modes. | +| --no-color | Disables color output. | No | Only works in non-interactive mode. | +| -P, --profile string | Specifies the profile to use from your configuration file. | No | Works in both interactive and non-interactive modes. 
| ## Feedback diff --git a/tidb-cloud/ticloud-auditlog-filter-template.md b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-template.md similarity index 52% rename from tidb-cloud/ticloud-auditlog-filter-template.md rename to tidb-cloud/ticloud-serverless-audit-log-filter-rule-template.md index ffe67f612dbfa..ade5954bc4c40 100644 --- a/tidb-cloud/ticloud-auditlog-filter-template.md +++ b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-template.md @@ -5,48 +5,40 @@ summary: The reference of `ticloud serverless audit-log filter-rule template`. # ticloud serverless audit-log filter-rule template -Show audit log filter rule templates for a TiDB Cloud Serverless cluster. +Show audit log filter rule templates for a {{{ .essential }}} cluster. ```shell ticloud serverless audit-log filter-rule template [flags] ``` -Or use the following alias command: - -```shell -ticloud serverless audit-log filter template [flags] -``` - ## Examples Show filter templates in interactive mode: ```shell -ticloud serverless audit-log filter template +ticloud serverless audit-log filter-rule template ``` Show filter templates in non-interactive mode: ```shell -ticloud serverless audit-log filter template --cluster-id +ticloud serverless audit-log filter-rule template --cluster-id ``` ## Flags -In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. - | Flag | Description | Required | Note | |-------------------------|------------------------------|----------|------------------------------------------------------| -| -c, --cluster-id string | The ID of the cluster (optional, for context if templates might become cluster-specific). | No | Only works in non-interactive mode. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | +| -c, --cluster-id string | The ID of the cluster. | No | Only works in non-interactive mode. 
| +| -h, --help | Shows help information for this command. | No | Works in both interactive and non-interactive modes. | ## Inherited flags | Flag | Description | Required | Note | |----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| -| --no-color | Disables color in output. | No | Only works in non-interactive mode. In interactive mode, disabling color might not work with some UI components. | -| -P, --profile string | Specifies the active [user profile](/tidb-cloud/cli-reference.md#user-profile) used in this command. | No | Works in both non-interactive and interactive modes. | -| -D, --debug | Enables debug mode. | No | Works in both non-interactive and interactive modes. | +| -D, --debug | Enables debug mode. | No | Works in both interactive and non-interactive modes. | +| --no-color | Disables color output. | No | Only works in non-interactive mode. | +| -P, --profile string | Specifies the profile to use from your configuration file. | No | Works in both interactive and non-interactive modes. | ## Feedback diff --git a/tidb-cloud/ticloud-auditlog-filter-update.md b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-update.md similarity index 53% rename from tidb-cloud/ticloud-auditlog-filter-update.md rename to tidb-cloud/ticloud-serverless-audit-log-filter-rule-update.md index ecf766fd5ada5..9b90de008a1e3 100644 --- a/tidb-cloud/ticloud-auditlog-filter-update.md +++ b/tidb-cloud/ticloud-serverless-audit-log-filter-rule-update.md @@ -5,63 +5,56 @@ summary: The reference of `ticloud serverless audit-log filter-rule update`. # ticloud serverless audit-log filter-rule update -Update an audit log filter rule for a TiDB Cloud Serverless cluster. +Update an audit log filter rule for a {{{ .essential }}} cluster. 
```shell ticloud serverless audit-log filter-rule update [flags] ``` -Or use the following alias command: - -```shell -ticloud serverless audit-log filter update [flags] -``` - ## Examples Update an audit log filter rule in interactive mode: ```shell -ticloud serverless audit-log filter update +ticloud serverless audit-log filter-rule update ``` -Enable an audit log filter rule in non-interactive mode: +Enable an audit log filter rule in non-interactive mode: ```shell -ticloud serverless audit-log filter update --cluster-id --name --enabled +ticloud serverless audit-log filter-rule update --cluster-id <cluster-id> --filter-rule-id <filter-rule-id> --enabled ``` -Disable an audit log filter rule in non-interactive mode: +Disable an audit log filter rule in non-interactive mode: ```shell -ticloud serverless audit-log filter update --cluster-id --name --enabled=false +ticloud serverless audit-log filter-rule update --cluster-id <cluster-id> --filter-rule-id <filter-rule-id> --enabled=false ``` Update filters of an audit log filter rule in non-interactive mode: ```shell -ticloud serverless audit-log filter update --cluster-id --name --rule '{"users":["%@%"],"filters":[{"classes":["QUERY"],"tables":["test.t"]}]}' +ticloud serverless audit-log filter-rule update --cluster-id <cluster-id> --filter-rule-id <filter-rule-id> --rule '{"users":["%@%"],"filters":[{"classes":["QUERY"],"tables":["test.t"]}]}' ``` ## Flags -In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. - | Flag | Description | Required | Note | |-------------------------|-------------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------| | -c, --cluster-id string | The ID of the cluster. | Yes | Only works in non-interactive mode. | -| --name string | The name of the filter rule to update. | Yes | Only works in non-interactive mode. | +| --display-name string | The display name of the filter rule. 
| No | Only works in non-interactive mode. | | --enabled | Enables or disables the filter rule. | No | Only works in non-interactive mode. | -| --rule string | The new, complete filter rule expression. Use `ticloud serverless audit-log filter template` to see filter templates. | No | Only works in non-interactive mode. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | +| --filter-rule-id string | The ID of the filter rule. | Yes | Only works in non-interactive mode. | +| --rule string | Complete filter rule expressions. Use [`ticloud serverless audit-log filter template`](/tidb-cloud/ticloud-serverless-audit-log-filter-rule-template.md) to see filter templates. | No | Only works in non-interactive mode. | +| -h, --help | Shows help information for this command. | No | Works in both interactive and non-interactive modes. | ## Inherited flags | Flag | Description | Required | Note | |----------------------|------------------------------------------------------------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------| -| --no-color | Disables color in output. | No | Only works in non-interactive mode. In interactive mode, disabling color might not work with some UI components. | -| -P, --profile string | Specifies the active [user profile](/tidb-cloud/cli-reference.md#user-profile) used in this command. | No | Works in both non-interactive and interactive modes. | -| -D, --debug | Enables debug mode. | No | Works in both non-interactive and interactive modes. | +| -D, --debug | Enables debug mode. | No | Works in both interactive and non-interactive modes. | +| --no-color | Disables color output. | No | Only works in non-interactive mode. | +| -P, --profile string | Specifies the profile to use from your configuration file. | No | Works in both interactive and non-interactive modes. 
| ## Feedback diff --git a/tidb-cloud/ticloud-auditlog-filter-delete.md b/tidb-cloud/ticloud-serverless-capacity.md similarity index 58% rename from tidb-cloud/ticloud-auditlog-filter-delete.md rename to tidb-cloud/ticloud-serverless-capacity.md index 337ed339a4666..d80f2f9584a6a 100644 --- a/tidb-cloud/ticloud-auditlog-filter-delete.md +++ b/tidb-cloud/ticloud-serverless-capacity.md @@ -1,46 +1,40 @@ --- -title: ticloud serverless audit-log filter-rule delete -summary: The reference of `ticloud serverless audit-log filter-rule delete`. +title: ticloud serverless capacity +summary: The reference of `ticloud serverless capacity`. --- -# ticloud serverless audit-log filter-rule delete +# ticloud serverless capacity -Delete an audit log filter rule for a TiDB Cloud Serverless cluster. +Set the capacity, in terms of maximum and minimum Request Capacity Units (RCUs), for a TiDB Cloud cluster. ```shell -ticloud serverless audit-log filter-rule delete [flags] -``` - -Or use the following alias command: - -```shell -ticloud serverless audit-log filter delete [flags] +ticloud serverless capacity [flags] ``` ## Examples -Delete an audit log filter rule in interactive mode: +Set capacity for a TiDB Cloud cluster in interactive mode: ```shell -ticloud serverless audit-log filter delete + ticloud serverless capacity ``` -Delete an audit log filter rule in non-interactive mode: +Set capacity for a TiDB Cloud cluster in non-interactive mode: ```shell -ticloud serverless audit-log filter delete --cluster-id --name +ticloud serverless capacity -c --max-rcu --min-rcu ``` ## Flags In non-interactive mode, you need to manually enter the required flags. In interactive mode, you can just follow CLI prompts to fill them in. -| Flag | Description | Required | Note | -|-------------------------|-----------------------------------------------------|----------|------------------------------------------------------| -| -c, --cluster-id string | The ID of the cluster. 
| Yes | Only works in non-interactive mode. | -| --name string | The name of the filter rule. | Yes | Only works in non-interactive mode. | -| --force | Deletes the filter rule without confirmation. | No | Works in both non-interactive and interactive modes. | -| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | +| Flag | Description | Required | Note | +|-------------------------|----------------------------------------------|----------|------------------------------------------------------| +| -c, --cluster-id string | Specifies the ID of the cluster. | Yes | Only works in non-interactive mode. | +| --max-rcu int32 | Specifies the maximum Request Capacity Units (RCUs) for the cluster, up to 100000. | No | Only works in non-interactive mode. | +| --min-rcu int32 | Specifies the minimum Request Capacity Units (RCUs) for the cluster, at least 2000. | No | Only works in non-interactive mode. | +| -h, --help | Shows help information for this command. | No | Works in both non-interactive and interactive modes. | ## Inherited flags diff --git a/tidb-cloud/ticloud-serverless-export-create.md b/tidb-cloud/ticloud-serverless-export-create.md index 4fd835e827c4a..33c49a605a31b 100644 --- a/tidb-cloud/ticloud-serverless-export-create.md +++ b/tidb-cloud/ticloud-serverless-export-create.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless export create`. 
# ticloud serverless export create -Export data from a TiDB Cloud Serverless cluster: +Export data from a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless export create [flags] @@ -13,37 +13,37 @@ ticloud serverless export create [flags] ## Examples -Export data from a TiDB Cloud Serverless cluster in interactive mode: +Export data from a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless export create ``` -Export data from a TiDB Cloud Serverless cluster to a local file in non-interactive mode: +Export data from a {{{ .starter }}} or {{{ .essential }}} cluster to a local file in non-interactive mode: ```shell ticloud serverless export create -c --filter ``` -Export data from a TiDB Cloud Serverless cluster to Amazon S3 in non-interactive mode: +Export data from a {{{ .starter }}} or {{{ .essential }}} cluster to Amazon S3 in non-interactive mode: ```shell ticloud serverless export create -c --s3.uri --s3.access-key-id --s3.secret-access-key --filter ``` -Export data from a TiDB Cloud Serverless cluster to Google Cloud Storage in non-interactive mode: +Export data from a {{{ .starter }}} or {{{ .essential }}} cluster to Google Cloud Storage in non-interactive mode: ```shell ticloud serverless export create -c --gcs.uri --gcs.service-account-key --filter ``` -Export data from a TiDB Cloud Serverless cluster to Azure Blob Storage in non-interactive mode: +Export data from a {{{ .starter }}} or {{{ .essential }}} cluster to Azure Blob Storage in non-interactive mode: ```shell ticloud serverless export create -c --azblob.uri --azblob.sas-token --filter ``` -Export data from a TiDB Cloud Serverless cluster to Alibaba Cloud OSS in non-interactive mode: +Export data from a {{{ .starter }}} or {{{ .essential }}} cluster to Alibaba Cloud OSS in non-interactive mode: ```shell ticloud serverless export create -c --oss.uri --oss.access-key-id --oss.access-key-secret --filter diff --git 
a/tidb-cloud/ticloud-serverless-export-describe.md b/tidb-cloud/ticloud-serverless-export-describe.md index ea9fd4ca7311c..f707af6a057fa 100644 --- a/tidb-cloud/ticloud-serverless-export-describe.md +++ b/tidb-cloud/ticloud-serverless-export-describe.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless export describe`. # ticloud serverless export describe -Get the export information of a TiDB Cloud Serverless cluster: +Get the export information of a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless export describe [flags] diff --git a/tidb-cloud/ticloud-serverless-export-download.md b/tidb-cloud/ticloud-serverless-export-download.md index bb1ee9b9dc5de..2b76002af71d9 100644 --- a/tidb-cloud/ticloud-serverless-export-download.md +++ b/tidb-cloud/ticloud-serverless-export-download.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless export download`. # ticloud serverless export download -Download the exported data from a TiDB Cloud Serverless cluster to your local storage: +Download the exported data from a {{{ .starter }}} or {{{ .essential }}} cluster to your local storage: ```shell ticloud serverless export download [flags] diff --git a/tidb-cloud/ticloud-serverless-export-list.md b/tidb-cloud/ticloud-serverless-export-list.md index 03a16afc5884f..c91b1d911ce8e 100644 --- a/tidb-cloud/ticloud-serverless-export-list.md +++ b/tidb-cloud/ticloud-serverless-export-list.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless export list`. 
# ticloud serverless export list -List data export tasks of TiDB Cloud Serverless clusters: +List data export tasks of {{{ .starter }}} and {{{ .essential }}} clusters: ```shell ticloud serverless export list [flags] diff --git a/tidb-cloud/ticloud-serverless-region.md b/tidb-cloud/ticloud-serverless-region.md index 6e78e5d87f524..aa0b26088b792 100644 --- a/tidb-cloud/ticloud-serverless-region.md +++ b/tidb-cloud/ticloud-serverless-region.md @@ -6,7 +6,7 @@ aliases: ['/tidbcloud/ticloud-serverless-regions'] # ticloud serverless region -List all available regions for TiDB Cloud Serverless: +List all available regions for {{{ .starter }}} and {{{ .essential }}}: ```shell ticloud serverless region [flags] @@ -14,13 +14,13 @@ ticloud serverless region [flags] ## Examples -List all available regions for TiDB Cloud Serverless: +List all available regions for {{{ .starter }}} and {{{ .essential }}}: ```shell ticloud serverless region ``` -List all available regions for TiDB Cloud Serverless clusters in the JSON format: +List all available regions for {{{ .starter }}} and {{{ .essential }}} in the JSON format: ```shell ticloud serverless region -o json diff --git a/tidb-cloud/ticloud-serverless-shell.md b/tidb-cloud/ticloud-serverless-shell.md index a3188b1cc84e6..9a4581ca25eda 100644 --- a/tidb-cloud/ticloud-serverless-shell.md +++ b/tidb-cloud/ticloud-serverless-shell.md @@ -6,7 +6,7 @@ aliases: ['/tidbcloud/ticloud-connect'] # ticloud serverless shell -Connect to a TiDB Cloud Serverless cluster: +Connect to a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless shell [flags] @@ -14,25 +14,25 @@ ticloud serverless shell [flags] ## Examples -Connect to a TiDB Cloud Serverless cluster in interactive mode: +Connect to a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless shell ``` -Connect to a TiDB Cloud Serverless cluster with the default user in non-interactive mode: +Connect to a {{{ .starter }}} or {{{ 
.essential }}} cluster with the default user in non-interactive mode: ```shell ticloud serverless shell -c ``` -Connect to a TiDB Cloud Serverless cluster with the default user and password in non-interactive mode: +Connect to a {{{ .starter }}} or {{{ .essential }}} cluster with the default user and password in non-interactive mode: ```shell ticloud serverless shell -c --password ``` -Connect to a TiDB Cloud Serverless cluster with a specific user and password in non-interactive mode: +Connect to a {{{ .starter }}} or {{{ .essential }}} cluster with a specific user and password in non-interactive mode: ```shell ticloud serverless shell -c -u --password diff --git a/tidb-cloud/ticloud-serverless-spending-limit.md b/tidb-cloud/ticloud-serverless-spending-limit.md index 5abca8a06666d..4ca0b362e094e 100644 --- a/tidb-cloud/ticloud-serverless-spending-limit.md +++ b/tidb-cloud/ticloud-serverless-spending-limit.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless spending-limit`. 
# ticloud serverless spending-limit -Set the maximum monthly [spending limit](/tidb-cloud/manage-serverless-spend-limit.md) for a TiDB Cloud Serverless cluster: +Set the maximum monthly [spending limit](/tidb-cloud/manage-serverless-spend-limit.md) for a {{{ .starter }}} cluster: ```shell ticloud serverless spending-limit [flags] @@ -13,13 +13,13 @@ ticloud serverless spending-limit [flags] ## Examples -Set the spending limit for a TiDB Cloud Serverless cluster in interactive mode: +Set the spending limit for a {{{ .starter }}} cluster in interactive mode: ```shell ticloud serverless spending-limit ``` -Set the spending limit for a TiDB Cloud Serverless cluster in non-interactive mode: +Set the spending limit for a {{{ .starter }}} cluster in non-interactive mode: ```shell ticloud serverless spending-limit -c --monthly diff --git a/tidb-cloud/ticloud-serverless-sql-user-create.md b/tidb-cloud/ticloud-serverless-sql-user-create.md index 894360c4ffdda..626b636625194 100644 --- a/tidb-cloud/ticloud-serverless-sql-user-create.md +++ b/tidb-cloud/ticloud-serverless-sql-user-create.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless sql-user create`. 
# ticloud serverless sql-user create -Create a TiDB Cloud Serverless SQL user: +Create a SQL user in a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless sql-user create [flags] @@ -13,13 +13,13 @@ ticloud serverless sql-user create [flags] ## Examples -Create a TiDB Cloud Serverless SQL user in interactive mode: +Create a SQL user in a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless sql-user create ``` -Create a TiDB Cloud Serverless SQL user in non-interactive mode: +Create a SQL user in a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless sql-user create --user --password --role --cluster-id diff --git a/tidb-cloud/ticloud-serverless-sql-user-delete.md b/tidb-cloud/ticloud-serverless-sql-user-delete.md index 9f7cd35ea52d9..02dd114f65a2a 100644 --- a/tidb-cloud/ticloud-serverless-sql-user-delete.md +++ b/tidb-cloud/ticloud-serverless-sql-user-delete.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless sql-user delete`. 
# ticloud serverless sql-user delete -Delete a TiDB Cloud Serverless SQL user: +Delete a SQL user from a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless sql-user delete [flags] @@ -13,13 +13,13 @@ ticloud serverless sql-user delete [flags] ## Examples -Delete a TiDB Cloud Serverless SQL user in interactive mode: +Delete a SQL user from a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless sql-user delete ``` -Delete a TiDB Cloud Serverless SQL user in non-interactive mode: +Delete a SQL user from a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless sql-user delete -c --user diff --git a/tidb-cloud/ticloud-serverless-sql-user-list.md b/tidb-cloud/ticloud-serverless-sql-user-list.md index 152b0362e0701..e93312072d1fe 100644 --- a/tidb-cloud/ticloud-serverless-sql-user-list.md +++ b/tidb-cloud/ticloud-serverless-sql-user-list.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless sql-user list`. 
# ticloud serverless sql-user list -List TiDB Cloud Serverless SQL users: +List SQL users in a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless sql-user list [flags] @@ -13,13 +13,13 @@ ticloud serverless sql-user list [flags] ## Examples -List TiDB Cloud Serverless SQL users in interactive mode: +List SQL users in a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless sql-user list ``` -List TiDB Cloud Serverless SQL users in non-interactive mode: +List SQL users in a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless sql-user list -c diff --git a/tidb-cloud/ticloud-serverless-sql-user-update.md b/tidb-cloud/ticloud-serverless-sql-user-update.md index 1925b3280fc57..f38fd1a35808f 100644 --- a/tidb-cloud/ticloud-serverless-sql-user-update.md +++ b/tidb-cloud/ticloud-serverless-sql-user-update.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless sql-user update`. 
# ticloud serverless sql-user update -Update a TiDB Cloud Serverless SQL user: +Update a SQL user in a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless sql-user update [flags] @@ -13,13 +13,13 @@ ticloud serverless sql-user update [flags] ## Examples -Update a TiDB Cloud Serverless SQL user in interactive mode: +Update a SQL user in a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless sql-user update ``` -Update a TiDB Cloud Serverless SQL user in non-interactive mode: +Update a SQL user in a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless sql-user update -c --user --password --role diff --git a/tidb-cloud/ticloud-serverless-update.md b/tidb-cloud/ticloud-serverless-update.md index a8c00ce24feb1..6c32109f6f806 100644 --- a/tidb-cloud/ticloud-serverless-update.md +++ b/tidb-cloud/ticloud-serverless-update.md @@ -5,7 +5,7 @@ summary: The reference of `ticloud serverless update`. 
# ticloud serverless update -Update a TiDB Cloud Serverless cluster: +Update a {{{ .starter }}} or {{{ .essential }}} cluster: ```shell ticloud serverless update [flags] @@ -13,19 +13,19 @@ ticloud serverless update [flags] ## Examples -Update a TiDB Cloud Serverless cluster in interactive mode: +Update a {{{ .starter }}} or {{{ .essential }}} cluster in interactive mode: ```shell ticloud serverless update ``` -Update the name of a TiDB Cloud Serverless cluster in non-interactive mode: +Update the name of a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode: ```shell ticloud serverless update -c --display-name ``` -Update labels of a TiDB Cloud Serverless cluster in non-interactive mode +Update labels of a {{{ .starter }}} or {{{ .essential }}} cluster in non-interactive mode ```shell ticloud serverless update -c --labels "{\"label1\":\"value1\"}" diff --git a/tidb-cloud/tidb-cloud-auditing.md b/tidb-cloud/tidb-cloud-auditing.md index 01b3d8441cfdc..48d57f029083a 100644 --- a/tidb-cloud/tidb-cloud-auditing.md +++ b/tidb-cloud/tidb-cloud-auditing.md @@ -5,15 +5,15 @@ summary: Learn about how to audit a cluster in TiDB Cloud. # TiDB Cloud Dedicated Database Audit Logging -TiDB Cloud provides you with a database audit logging feature to record a history of user access details (such as any SQL statements executed) in logs. +TiDB Cloud provides an audit logging feature that records user access activities of your database, such as executed SQL statements. > **Note:** > -> Currently, the database audit logging feature is only available upon request. To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com) and click **Request Support**. Then, fill in "Apply for database audit logging" in the **Description** field and click **Submit**. +> Currently, the database audit logging feature is only available upon request. 
To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply for database audit logging" in the **Description** field, and then click **Submit**. -To assess the effectiveness of user access policies and other information security measures of your organization, it is a security best practice to conduct a periodic analysis of the database audit logs. +To evaluate the effectiveness of user access policies and other information security measures of your organization, it is a security best practice to periodically analyze database audit logs. -The audit logging feature is disabled by default. To audit a cluster, you need to enable the audit logging first, and then specify the auditing filter rules. +The audit logging feature is **disabled by default**. To audit a cluster, you must first enable the audit logging, and then specify the auditing filter rules. > **Note:** > @@ -21,7 +21,13 @@ The audit logging feature is disabled by default. To audit a cluster, you need t ## Prerequisites -- You are using a TiDB Cloud Dedicated cluster. Audit logging is not available for TiDB Cloud Serverless clusters. +- You are using a TiDB Cloud Dedicated cluster. + + > **Note:** + > + > - Database audit logging is not available for {{{ .starter }}}. + > - For {{{ .essential }}}, see [Database Audit Logging (Beta) for {{{ .essential }}}](/tidb-cloud/essential-database-audit-logging.md). + - You are in the `Organization Owner` or `Project Owner` role of your organization. Otherwise, you cannot see the database audit-related options in the TiDB Cloud console. For more information, see [User roles](/tidb-cloud/manage-user-access.md#user-roles). 
## Enable audit logging @@ -30,7 +36,7 @@ TiDB Cloud supports recording the audit logs of a TiDB Cloud Dedicated cluster t > **Note:** > -> For TiDB clusters deployed on AWS, you can choose to store audit log files in TiDB Cloud when enabling database audit logging. Currently, this feature is only available upon request. To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com) and click **Request Support**. Then, fill in "Apply to store audit log files in TiDB Cloud" in the **Description** field and click **Submit**. +> For TiDB clusters deployed on AWS, you can choose to store audit log files in TiDB Cloud when enabling database audit logging. Currently, this feature is only available upon request. To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com), and then click **Support Tickets** to go to the [Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals). Create a ticket, fill in "Apply to store audit log files in TiDB Cloud" in the **Description** field, and then click **Submit**. ### Enable audit logging for AWS @@ -38,9 +44,9 @@ To enable audit logging for AWS, take the following steps: #### Step 1. Create an Amazon S3 bucket -Specify an Amazon S3 bucket in your corporate-owned AWS account as a destination to which TiDB Cloud writes the audit logs. +Specify an Amazon S3 bucket in your organization-owned AWS account as a destination to which TiDB Cloud writes the audit logs. -> Note: +> **Note:** > > Do not enable object lock on the AWS S3 bucket. Enabling object lock will prevent TiDB Cloud from pushing audit log files to S3. @@ -99,7 +105,7 @@ In the TiDB Cloud console, go back to the **Enable Database Audit Logging** dial 3. In the **Role ARN** field, fill in the Role ARN value that you copied in [Step 2. Configure Amazon S3 access](#step-2-configure-amazon-s3-access). 4. 
Click **Test Connection** to verify whether TiDB Cloud can access and write to the bucket. - If it is successful, **The connection is successfully** is displayed. Otherwise, check your access configuration. + If it is successful, **The connection is successful** is displayed. Otherwise, check your access configuration. 5. Click **Enable** to enable audit logging for the cluster. @@ -116,7 +122,7 @@ To enable audit logging for Google Cloud, take the following steps: #### Step 1. Create a GCS bucket -Specify a Google Cloud Storage (GCS) bucket in your corporate-owned Google Cloud account as a destination to which TiDB Cloud writes audit logs. +Specify a Google Cloud Storage (GCS) bucket in your organization-owned Google Cloud account as a destination to which TiDB Cloud writes audit logs. For more information, see [Creating storage buckets](https://cloud.google.com/storage/docs/creating-buckets) in the Google Cloud Storage documentation. @@ -163,7 +169,7 @@ In the TiDB Cloud console, go back to the **Enable Database Audit Logging** dial 2. In the **Bucket Region** field, select the GCS region where the bucket locates. 3. Click **Test Connection** to verify whether TiDB Cloud can access and write to the bucket. - If it is successful, **The connection is successfully** is displayed. Otherwise, check your access configuration. + If it is successful, **The connection is successful** is displayed. Otherwise, check your access configuration. 4. Click **Enable** to enable audit logging for the cluster. @@ -256,7 +262,7 @@ To specify auditing filter rules for a cluster, take the following steps: You can add one audit rule at a time. Each rule specifies a user expression, database expression, table expression, and access type. You can add multiple audit rules to meet your auditing requirements. -2.In the **Log Filter Rules** section, click **>** to expand and view the list of audit rules you have added. +2. 
In the **Log Filter Rules** section, click **>** to expand and view the list of audit rules you have added. > **Note:** > @@ -271,9 +277,9 @@ By default, TiDB Cloud stores database audit log files in your storage service, > > If you have requested and chosen to store audit log files in TiDB Cloud, you can download them from the **Audit Log Access** section on the **Database Audit Logging** page. -TiDB Cloud audit logs are readable text files with the cluster ID, Pod ID, and log creation date incorporated into the fully qualified filenames. +TiDB Cloud audit logs are readable text files with the cluster ID, node ID, and log creation date incorporated into the fully qualified filenames. -For example, `13796619446086334065/tidb-0/tidb-audit-2022-04-21T18-16-29.529.log`. In this example, `13796619446086334065` indicates the cluster ID and `tidb-0` indicates the Pod ID. +For example, `13796619446086334065/tidb-0/tidb-audit-2022-04-21T18-16-29.529.log`. In this example, `13796619446086334065` indicates the cluster ID and `tidb-0` indicates the node ID. ## Disable audit logging diff --git a/tidb-cloud/tidb-cloud-billing-dm.md b/tidb-cloud/tidb-cloud-billing-dm.md index 5ab94cd92e871..03450fa0c274b 100644 --- a/tidb-cloud/tidb-cloud-billing-dm.md +++ b/tidb-cloud/tidb-cloud-billing-dm.md @@ -33,7 +33,7 @@ The Data Migration job measures incremental data migration performance in rows/s ## Price -To learn about the supported regions and the price of TiDB Cloud for each Data Migration RCU, see [Data Migration Cost](https://www.pingcap.com/tidb-cloud-pricing-details/#dm-cost). +To learn about the supported regions and the price of TiDB Cloud for each Data Migration RCU, see [Data Migration Cost](https://www.pingcap.com/tidb-dedicated-pricing-details/#dm-cost). The Data Migration job is in the same region as the target TiDB node. 
@@ -51,7 +51,7 @@ Note that if you are using AWS PrivateLink or VPC peering connections, and if th ![Cross-region and cross-AZ traffic charges](/media/tidb-cloud/dm-billing-cross-region-and-az-fees.png) -The cross-region and cross-AZ traffic prices are the same as those for TiDB Cloud. For more information, see [TiDB Cloud Pricing Details](https://www.pingcap.com/tidb-dedicated-pricing-details/). +The cross-region and cross-AZ traffic prices are the same as those for TiDB Cloud. For more information, see [TiDB Cloud Dedicated Pricing Details](https://www.pingcap.com/tidb-dedicated-pricing-details/). ## See also diff --git a/tidb-cloud/tidb-cloud-billing-recovery-group.md b/tidb-cloud/tidb-cloud-billing-recovery-group.md index 4197b257b90c9..dfc8b0a765746 100644 --- a/tidb-cloud/tidb-cloud-billing-recovery-group.md +++ b/tidb-cloud/tidb-cloud-billing-recovery-group.md @@ -11,4 +11,4 @@ TiDB Cloud also bills for data processing per GiB basis. The data processing pri ## Pricing -To learn about the supported regions and the pricing for TiDB Cloud recovery groups, see [Recovery Group Cost](https://www.pingcap.com/tidb-cloud-pricing-details/#recovery-group-cost). +To learn about the supported regions and the pricing for TiDB Cloud recovery groups, see [Recovery Group Cost](https://www.pingcap.com/tidb-dedicated-pricing-details/#recovery-group-cost). diff --git a/tidb-cloud/tidb-cloud-billing-ticdc-rcu.md b/tidb-cloud/tidb-cloud-billing-ticdc-rcu.md index ee5d21a3c55d2..01a4a40c4b8ea 100644 --- a/tidb-cloud/tidb-cloud-billing-ticdc-rcu.md +++ b/tidb-cloud/tidb-cloud-billing-ticdc-rcu.md @@ -1,14 +1,16 @@ --- -title: Changefeed Billing +title: Changefeed Billing for TiDB Cloud Dedicated summary: Learn about billing for changefeeds in TiDB Cloud. aliases: ['/tidbcloud/tidb-cloud-billing-tcu'] --- -# Changefeed Billing +# Changefeed Billing for TiDB Cloud Dedicated + +This document describes the billing details for changefeeds in TiDB Cloud Dedicated. 
## RCU cost -TiDB Cloud measures the capacity of [changefeeds](/tidb-cloud/changefeed-overview.md) in TiCDC Replication Capacity Units (RCUs). When you [create a changefeed](/tidb-cloud/changefeed-overview.md#create-a-changefeed) for a cluster, you can select an appropriate specification. The higher the RCU, the better the replication performance. You will be charged for these TiCDC changefeed RCUs. +TiDB Cloud Dedicated measures the capacity of [changefeeds](/tidb-cloud/changefeed-overview.md) in TiCDC Replication Capacity Units (RCUs). When you [create a changefeed](/tidb-cloud/changefeed-overview.md#create-a-changefeed) for a cluster, you can select an appropriate specification. The higher the RCU, the better the replication performance. You will be charged for these TiCDC changefeed RCUs. ### Number of TiCDC RCUs @@ -23,6 +25,13 @@ The following table lists the specifications and corresponding replication perfo | 24 RCUs | 60,000 rows/s | | 32 RCUs | 80,000 rows/s | | 40 RCUs | 100,000 rows/s | +| 64 RCUs | 160,000 rows/s | +| 96 RCUs | 240,000 rows/s | +| 128 RCUs | 320,000 rows/s | +| 192 RCUs | 480,000 rows/s | +| 256 RCUs | 640,000 rows/s | +| 320 RCUs | 800,000 rows/s | +| 384 RCUs | 960,000 rows/s | > **Note:** > @@ -30,7 +39,7 @@ The following table lists the specifications and corresponding replication perfo ### Price -To learn about the supported regions and the price of TiDB Cloud for each TiCDC RCU, see [Changefeed Cost](https://www.pingcap.com/tidb-cloud-pricing-details/#changefeed-cost). +To learn about the supported regions and the price of TiDB Cloud for each TiCDC RCU, see [Changefeed Cost](https://www.pingcap.com/tidb-dedicated-pricing-details/#changefeed-cost). ## Private Data Link cost diff --git a/tidb-cloud/tidb-cloud-billing.md b/tidb-cloud/tidb-cloud-billing.md index 4c7819d8434cf..0b438f26b8b19 100644 --- a/tidb-cloud/tidb-cloud-billing.md +++ b/tidb-cloud/tidb-cloud-billing.md @@ -5,14 +5,29 @@ summary: Learn about TiDB Cloud billing. 
# TiDB Cloud Billing -> **Note:** -> -> [TiDB Cloud Serverless clusters](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) are free until May 31, 2023, with a 100% discount off. After that, usage beyond the [free quota](/tidb-cloud/select-cluster-tier.md#usage-quota) will be charged. +TiDB Cloud charges according to the resources that you consume. + +## Pricing + +### Pricing for TiDB Cloud Dedicated + +See [TiDB Cloud Dedicated Pricing Details](https://www.pingcap.com/tidb-dedicated-pricing-details/). + +### Pricing for {{{ .starter }}} {#pricing-for-starter} + +See [{{{ .starter }}} Pricing Details](https://www.pingcap.com/tidb-cloud-starter-pricing-details/). -TiDB Cloud charges according to the resources that you consume. You can visit the following pages to get more information about the pricing. +### Pricing for {{{ .essential }}} {#pricing-for-essential} -- [TiDB Cloud Serverless Pricing Details](https://www.pingcap.com/tidb-serverless-pricing-details/) -- [TiDB Cloud Dedicated Pricing Details](https://www.pingcap.com/tidb-dedicated-pricing-details/) +For {{{ .essential }}}, you are charged based on the number of provisioned Request Capacity Units (RCUs), **not** on the actual usage by your application. See [{{{ .essential }}} Pricing Details](https://www.pingcap.com/tidb-cloud-essential-pricing-details/). + + + +### Pricing for {{{ .premium }}} {#pricing-for-premium} + +For {{{ .premium }}}, you are billed based on the number of provisioned Request Capacity Units (RCUs) and the storage you actually use, rather than the underlying backend nodes or provisioned disk size. As {{{ .premium }}} is currently in private preview, you can [contact our sales](https://www.pingcap.com/contact-us/) for pricing details. + + ## Invoices @@ -20,10 +35,22 @@ If you are in the `Organization Owner` or `Organization Billing Manager` role of After you set up the payment method, TiDB Cloud will generate an invoice once your cost reaches a quota, which is $500 by default. 
If you want to raise the quota or receive one invoice per month, you can [contact our sales](https://www.pingcap.com/contact-us/). + + +> **Note:** +> +> If you sign up for TiDB Cloud through [AWS Marketplace](https://aws.amazon.com/marketplace), [Azure Marketplace](https://azuremarketplace.microsoft.com/), [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), or [Alibaba Cloud Marketplace](https://marketplace.alibabacloud.com/), you can pay through your AWS account, Azure account, Google Cloud account, or Alibaba Cloud account directly but cannot add payment methods or download invoices in the TiDB Cloud console. + + + + + > **Note:** > > If you sign up for TiDB Cloud through [AWS Marketplace](https://aws.amazon.com/marketplace), [Azure Marketplace](https://azuremarketplace.microsoft.com/), or [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), you can pay through your AWS account, Azure account, or Google Cloud account directly but cannot add payment methods or download invoices in the TiDB Cloud console. + + After you contact our sales for receiving an invoice on a monthly basis, TiDB Cloud will generate the invoice for the previous month at the beginning of each month. Invoice costs include TiDB cluster usage consumption, discounts, backup storage costs, support service cost, credit consumption, and data transmission costs in your organization. @@ -159,10 +186,22 @@ To view the discount information, perform the following steps: If you are in the `Organization Owner` or `Organization Billing Manager` role of your organization, you can manage the payment information of TiDB Cloud. Otherwise, skip this section. 
+ + +> **Note:** +> +> If you sign up for TiDB Cloud through [AWS Marketplace](https://aws.amazon.com/marketplace), [Azure Marketplace](https://azuremarketplace.microsoft.com/), [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), or [Alibaba Cloud Marketplace](https://marketplace.alibabacloud.com/), you can pay through your AWS account, Azure account, Google Cloud account, or Alibaba Cloud account directly but cannot add payment methods or download invoices in the TiDB Cloud console. + + + + + > **Note:** > > If you sign up for TiDB Cloud through [AWS Marketplace](https://aws.amazon.com/marketplace), [Azure Marketplace](https://azuremarketplace.microsoft.com/), or [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), you can pay through your AWS account, Azure account, or Google Cloud account directly but cannot add payment methods or download invoices in the TiDB Cloud console. + + The fee is deducted from a bound credit card according to your cluster usage. To add a valid credit card, you can use either of the following methods: - When you are creating a TiDB Cloud Dedicated cluster: @@ -206,17 +245,42 @@ If you have agreed with our sales on a contract and received an email to review To learn more about contracts, feel free to [contact our sales](https://www.pingcap.com/contact-us/). -## Billing from AWS Marketplace, Azure Marketplace, or Google Cloud Marketplace +## Billing from cloud provider marketplace + + -If you are in the `Organization Owner` or `Organization Billing Manager` role of your organization, you can link your TiDB Cloud account to an AWS billing account, an Azure billing account, or a Google Cloud billing account. Otherwise, skip this section. +If you are in the `Organization Owner` or `Organization Billing Manager` role of your organization, you can link your TiDB Cloud account to the billing account of your cloud provider (AWS, Azure, Google Cloud, or Alibaba Cloud). Otherwise, skip this section. 
-If you are new to TiDB Cloud and do not have a TiDB Cloud account, you can sign up for a TiDB Cloud account through [AWS Marketplace](https://aws.amazon.com/marketplace), [Azure Marketplace](https://azuremarketplace.microsoft.com/), or [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), and pay for the usage via the AWS, Azure, or Google Cloud billing account. + -- To sign up through AWS Marketplace, search for `TiDB Cloud` in [AWS Marketplace](https://aws.amazon.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. -- To sign up through Azure Marketplace, search for `TiDB Cloud` in [Azure Marketplace](https://azuremarketplace.microsoft.com), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. -- To sign up through Google Cloud Marketplace, search for `TiDB Cloud` in [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. + -If you already have a TiDB Cloud account and you want to pay for the usage via your AWS or Google Cloud billing account, you can link your TiDB Cloud account to your AWS or Google Cloud billing account. +If you are in the `Organization Owner` or `Organization Billing Manager` role of your organization, you can link your TiDB Cloud account to the billing account of your cloud provider (AWS, Azure, or Google Cloud). Otherwise, skip this section. + + + +If you are new to TiDB Cloud and do not have a TiDB Cloud account, you can sign up for a TiDB Cloud account through the marketplace of your cloud provider, and pay for the usage via the billing account of your cloud provider. 
+ + + +- To sign up through [AWS Marketplace](https://aws.amazon.com/marketplace), search for `TiDB Cloud` in [AWS Marketplace](https://aws.amazon.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- To sign up through [Azure Marketplace](https://azuremarketplace.microsoft.com), search for `TiDB Cloud` in [Azure Marketplace](https://azuremarketplace.microsoft.com), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- To sign up through [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), search for `TiDB Cloud` in [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- To sign up through [Alibaba Cloud Marketplace](https://marketplace.alibabacloud.com/), search for `TiDB Cloud` in [Alibaba Cloud Marketplace](https://marketplace.alibabacloud.com/), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. + +If you already have a TiDB Cloud account and you want to pay for the usage via your AWS, Azure, Google Cloud, or Alibaba Cloud billing account, you can link your TiDB Cloud account to your AWS, Azure, Google Cloud, or Alibaba Cloud billing account. + + + + + +- To sign up through [AWS Marketplace](https://aws.amazon.com/marketplace), search for `TiDB Cloud` in [AWS Marketplace](https://aws.amazon.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. +- To sign up through [Azure Marketplace](https://azuremarketplace.microsoft.com), search for `TiDB Cloud` in [Azure Marketplace](https://azuremarketplace.microsoft.com), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. 
+- To sign up through [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), search for `TiDB Cloud` in [Google Cloud Marketplace](https://console.cloud.google.com/marketplace), subscribe to TiDB Cloud, and then follow the onscreen instructions to set up your TiDB Cloud account. + +If you already have a TiDB Cloud account and you want to pay for the usage via your AWS, Azure, or Google Cloud billing account, you can link your TiDB Cloud account to your AWS, Azure, or Google Cloud billing account. + +
@@ -290,4 +354,29 @@ To link your TiDB Cloud account to a Google Cloud billing account, take the foll > If your organization already has a payment method in TiDB Cloud, the existing payment method for this organization will be replaced by the newly added Google Cloud billing account.
+ + + +
+ +To link your TiDB Cloud account to an Alibaba Cloud billing account, take the following steps: + +1. Open the [Alibaba Cloud Marketplace page](https://marketplace.alibabacloud.com/), search for `TiDB Cloud` and select **TiDB Cloud** in the search results. The TiDB Cloud product page is displayed. + +2. On the TiDB Cloud product page, click **Activate Now**, and then follow the onscreen instructions to confirm the pay-as-you-go mode and view the activation application. + +3. On the subscription page, locate your subscription of TiDB Cloud, and then click **Auto Login**. You are directed to the TiDB Cloud sign-up page. + +4. Check the notification in the upper part of the sign-up page and click **Sign in**. + +5. Sign in with your TiDB Cloud account. The page for linking to your Alibaba Cloud billing account is displayed. + +6. On the page, select the target organization and click **Link** to link to your Alibaba Cloud billing account. + + > **Note:** + > + > If your organization already has a payment method in TiDB Cloud, the existing payment method for this organization will be replaced by the newly added Alibaba Cloud billing account. + +
+
diff --git a/tidb-cloud/tidb-cloud-budget.md b/tidb-cloud/tidb-cloud-budget.md index 9ed4b3a00678f..4439e7c024b65 100644 --- a/tidb-cloud/tidb-cloud-budget.md +++ b/tidb-cloud/tidb-cloud-budget.md @@ -11,7 +11,7 @@ When your monthly actual costs exceed the percentage thresholds of your specifie TiDB Cloud provides two types of budgets to help you track your spending: -- **Serverless Spending Limit** budget: for each TiDB Cloud Serverless scalable cluster, TiDB Cloud automatically creates a **Serverless Spending Limit** budget. This budget helps you track the actual cost against the [spending limit](/tidb-cloud/manage-serverless-spend-limit.md) configured on that cluster. It includes three threshold rules: 75%, 90%, and 100% of the budget, which are not editable. +- **Starter Spending Limit** budget: for each {{{ .starter }}} cluster with the spending limit > 0, TiDB Cloud automatically creates a **Starter Spending Limit** budget. This budget helps you track the actual cost against the [spending limit](/tidb-cloud/manage-serverless-spend-limit.md) configured on that cluster. It includes three threshold rules: 75%, 90%, and 100% of the budget, which are not editable. - **Custom** budget: you can create custom budgets to track actual costs for an entire organization or specific projects. For each budget, you can specify a budget scope, set a target spending amount, and configure alert thresholds. After creating a custom budget, you can compare your monthly actual costs with your planned costs to ensure you stay within budget. @@ -60,7 +60,7 @@ To create a custom budget to monitor the spending of your organization or specif > **Note:** > -> The **Serverless Spending Limit** budget cannot be edited because it is automatically created by TiDB Cloud to help you track the cost of a TiDB Cloud Serverless scalable cluster against its [spending limit](/tidb-cloud/manage-serverless-spend-limit.md). 
+> The **Starter Spending Limit** budget cannot be edited because it is automatically created by TiDB Cloud to help you track the cost of a {{{ .starter }}} cluster against its [spending limit](/tidb-cloud/manage-serverless-spend-limit.md). To edit a custom budget, take the following steps: @@ -81,7 +81,7 @@ To edit a custom budget, take the following steps: > **Note:** > > - Once a custom budget is deleted, you will no longer receive any alert emails related to it. -> - The **Serverless Spending Limit** budget cannot be deleted because it is automatically created by TiDB Cloud to help you track the cost of a TiDB Cloud Serverless scalable cluster against its [spending limit](/tidb-cloud/manage-serverless-spend-limit.md). +> - The **Starter Spending Limit** budget cannot be deleted because it is automatically created by TiDB Cloud to help you track the cost of a {{{ .starter }}} cluster against its [spending limit](/tidb-cloud/manage-serverless-spend-limit.md). To delete a custom budget, take the following steps: diff --git a/tidb-cloud/tidb-cloud-clinic.md b/tidb-cloud/tidb-cloud-clinic.md index f1f3511bf36e5..12885991e8381 100644 --- a/tidb-cloud/tidb-cloud-clinic.md +++ b/tidb-cloud/tidb-cloud-clinic.md @@ -56,6 +56,7 @@ The dashboards and their content are subject to change. Currently, the following - TiDB-Resource-Control - TiFlash-Summary - TiKV-Details +- TiProxy-Summary - User-Node-Info ## Analyze top slow queries diff --git a/tidb-cloud/tidb-cloud-connect-aws-dms.md b/tidb-cloud/tidb-cloud-connect-aws-dms.md index 6ad1a127fd72c..5cca979f8c3cc 100644 --- a/tidb-cloud/tidb-cloud-connect-aws-dms.md +++ b/tidb-cloud/tidb-cloud-connect-aws-dms.md @@ -18,9 +18,9 @@ You are expected to have an AWS account with enough access to manage DMS-related ### A TiDB Cloud account and a TiDB cluster -You are expected to have a TiDB Cloud account and a TiDB Cloud Serverless or TiDB Cloud Dedicated cluster. 
If not, refer to the following documents to create one: +You are expected to have a TiDB Cloud account and a {{{ .starter }}}, {{{ .essential }}}, or TiDB Cloud Dedicated cluster. If not, refer to the following documents to create one: -- [Create a TiDB Cloud Serverless cluster](/tidb-cloud/create-tidb-cluster-serverless.md) +- [Create a {{{ .starter }}} or Essential cluster](/tidb-cloud/create-tidb-cluster-serverless.md) - [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md) ## Configure network @@ -29,17 +29,36 @@ Before creating DMS resources, you need to configure network properly to ensure -
+
-For TiDB Cloud Serverless, your clients can connect to clusters via public endpoint or private endpoint. +For {{{ .starter }}} or {{{ .essential }}}, your clients can connect to clusters via public endpoint or private endpoint. -- To [connect to a TiDB Cloud Serverless cluster via public endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md), do one of the following to make sure that the DMS replication instance can access the internet. + + +- To [connect to a {{{ .starter }}} or Essential cluster via public endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md), do one of the following to make sure that the DMS replication instance can access the internet. - Deploy the replication instance in public subnets and enable **Public accessible**. For more information, see [Configuration for internet access](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Internet_Gateway.html#vpc-igw-internet-access). - Deploy the replication instance in private subnets and route traffic in the private subnets to public subnets. In this case, you need at least three subnets, two private subnets, and one public subnet. The two private subnets form a subnet group where the replication instance lives. Then you need to create a NAT gateway in the public subnet and route traffic of the two private subnets to the NAT gateway. For more information, see [Access the internet from a private subnet](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-scenarios.html#public-nat-internet-access). -- To connect to a TiDB Cloud Serverless cluster via private endpoint, [set up a private endpoint](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) first and deploy the replication instance in private subnets. +- To connect to a {{{ .starter }}} or {{{ .essential }}} cluster via private endpoint, refer to the following documents to set up a private endpoint first and deploy the replication instance in private subnets. 
+ + - [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) + - [Connect to {{{ .starter }}} or Essential via Alibaba Cloud Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-on-alibaba-cloud.md) + + + + + +- To [connect to a {{{ .starter }}} or Essential cluster via public endpoint](/tidb-cloud/connect-via-standard-connection-serverless.md), do one of the following to make sure that the DMS replication instance can access the internet. + + - Deploy the replication instance in public subnets and enable **Public accessible**. For more information, see [Configuration for internet access](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Internet_Gateway.html#vpc-igw-internet-access). + + - Deploy the replication instance in private subnets and route traffic in the private subnets to public subnets. In this case, you need at least three subnets, two private subnets, and one public subnet. The two private subnets form a subnet group where the replication instance lives. Then you need to create a NAT gateway in the public subnet and route traffic of the two private subnets to the NAT gateway. For more information, see [Access the internet from a private subnet](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-scenarios.html#public-nat-internet-access). + +- To connect to a {{{ .starter }}} or {{{ .essential }}} cluster via private endpoint, refer to [Connect to {{{ .starter }}} or Essential via AWS PrivateLink](/tidb-cloud/set-up-private-endpoint-connections-serverless.md) to set up a private endpoint first and deploy the replication instance in private subnets. + +
@@ -110,14 +129,14 @@ For connectivity, the steps for using TiDB Cloud clusters as a source or as a ta -
+
- - **Server name**: `HOST` of TiDB Cloud Serverless cluster. - - **Port**: `PORT` of TiDB Cloud Serverless cluster. - - **User name**: User of TiDB Cloud Serverless cluster for migration. Make sure it meets DMS requirements. - - **Password**: Password of the TiDB Cloud Serverless cluster user. + - **Server name**: `HOST` of the cluster. + - **Port**: `PORT` of the cluster. + - **User name**: User of the cluster for migration. Make sure it meets DMS requirements. + - **Password**: Password of the cluster user. - **Secure Socket Layer (SSL) mode**: If you are connecting via public endpoint, it is highly recommended to set the mode to **verify-full** to ensure transport security. If you are connecting via private endpoint, you can set the mode to **none**. - - (Optional) **CA certificate**: Use the [ISRG Root X1 certificate](https://letsencrypt.org/certs/isrgrootx1.pem). For more information, see [TLS Connections to TiDB Cloud Serverless](/tidb-cloud/secure-connections-to-serverless-clusters.md). + - (Optional) **CA certificate**: Use the [ISRG Root X1 certificate](https://letsencrypt.org/certs/isrgrootx1.pem). For more information, see [TLS Connections to {{{ .starter }}} or Essential](/tidb-cloud/secure-connections-to-serverless-clusters.md).
diff --git a/tidb-cloud/tidb-cloud-console-auditing.md b/tidb-cloud/tidb-cloud-console-auditing.md index b7fb23ea74466..58bbaa9c9cd95 100644 --- a/tidb-cloud/tidb-cloud-console-auditing.md +++ b/tidb-cloud/tidb-cloud-console-auditing.md @@ -131,6 +131,7 @@ The console audit logs record various user activities on the TiDB Cloud console | UpdateIPAccessList | Update the IP access list of a TiDB cluster | | SetAutoBackup | Set the automatic backup mechanism of a TiDB cluster | | DoManualBackup | Perform a manual backup of TiDB cluster | +| BackupCompleted | A backup task completes | | DeleteBackupTask | Delete a backup task | | DeleteBackup | Delete a backup file | | RestoreFromBackup | Restore to a TiDB cluster based on the backup files | @@ -151,14 +152,15 @@ The console audit logs record various user activities on the TiDB Cloud console | BindSupportPlan | Bind a support plan | | CancelSupportPlan | Cancel a support plan | | UpdateOrganizationName | Update the organization name | -| SetSpendLimit | Edit the spending limit of a TiDB Cloud Serverless scalable cluster | +| SetSpendLimit | Edit the spending limit of a {{{ .starter }}} cluster | | UpdateMaintenanceWindow | Modify maintenance window start time | | DeferMaintenanceTask | Defer a maintenance task | -| CreateBranch | Create a TiDB Cloud Serverless branch | -| DeleteBranch | Delete a TiDB Cloud Serverless branch | -| SetBranchRootPassword | Set root password for a TiDB Cloud Serverless branch | +| CreateBranch | Create a branch for {{{ .starter }}} or {{{ .essential }}} cluster | +| DeleteBranch | Delete a branch for {{{ .starter }}} or {{{ .essential }}} cluster | +| SetBranchRootPassword | Set root password for a branch of your {{{ .starter }}} or {{{ .essential }}} cluster | | ConnectBranchGitHub | Connect the cluster with a GitHub repository to enable branching integration | | DisconnectBranchGitHub | Disconnect the cluster from a GitHub repository to disable branching integration | +| 
UpdateAuthenticationMethod | Update the authentication method for Cloud Organization SSO | ## Console audit log fields diff --git a/tidb-cloud/tidb-cloud-encrypt-cmek.md b/tidb-cloud/tidb-cloud-encrypt-cmek-aws.md similarity index 83% rename from tidb-cloud/tidb-cloud-encrypt-cmek.md rename to tidb-cloud/tidb-cloud-encrypt-cmek-aws.md index 6120ad9758711..c043de7fd1247 100644 --- a/tidb-cloud/tidb-cloud-encrypt-cmek.md +++ b/tidb-cloud/tidb-cloud-encrypt-cmek-aws.md @@ -1,26 +1,26 @@ --- -title: Encryption at Rest Using Customer-Managed Encryption Keys -summary: Learn about how to use Customer-Managed Encryption Key (CMEK) in TiDB Cloud. +title: Encryption at Rest Using Customer-Managed Encryption Keys on AWS +summary: Learn how to use Customer-Managed Encryption Key (CMEK) to encrypt data in TiDB Cloud clusters hosted on AWS. +aliases: ['/tidbcloud/tidb-cloud-encrypt-cmek'] --- -# Encryption at Rest Using Customer-Managed Encryption Keys +# Encryption at Rest Using Customer-Managed Encryption Keys on AWS -Customer-Managed Encryption Key (CMEK) allows you to secure your static data in a TiDB Cloud Dedicated cluster by utilizing a symmetric encryption key that is under your complete control. This key is referred to as the CMEK key. +Customer-Managed Encryption Key (CMEK) enables you to secure your static data in a TiDB Cloud Dedicated cluster by utilizing a symmetric encryption key that is under your complete control. This key is referred to as the CMEK key. Once CMEK is enabled for a project, all clusters created within that project encrypt their static data using the CMEK key. Additionally, any backup data generated by these clusters is encrypted using the same key. If CMEK is not enabled, TiDB Cloud employs an escrow key to encrypt all data in your cluster when it is at rest. > **Note:** > -> - CMEK is similar to Bring Your Own Key (BYOK). With BYOK, you typically generate the key locally and upload it. 
However, TiDB Cloud only supports keys generated within [AWS KMS](https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html). -> - Currently, this feature is only available upon request. If you need to try out this feature, contact [support](/tidb-cloud/tidb-cloud-support.md). +> CMEK is similar to Bring Your Own Key (BYOK). With BYOK, you typically generate the key locally and upload it. However, TiDB Cloud supports keys generated within [AWS KMS](https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html). ## Restrictions -- Currently, TiDB Cloud only supports using AWS KMS to provide CMEK. +- Currently, TiDB Cloud only supports using AWS KMS and Azure Key Vault to provide CMEK. - To use CMEK, you need to enable CMEK when creating a project and complete CMEK-related configurations before creating a cluster. You cannot enable CMEK for existing projects. -- Currently, in CMEK-enabled projects, you can only create [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on AWS. TiDB Cloud Dedicated clusters hosted on other cloud providers and [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters are not supported. +- Currently, in CMEK-enabled projects, you can only create [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on AWS and Azure. - Currently, in CMEK-enabled projects, [dual region backup](/tidb-cloud/backup-and-restore-concepts.md#dual-region-backup) is not supported. -- Currently, for a specific project, you can only enable CMEK for one AWS region. Once you have configured it, you cannot create clusters in other regions within the same project. +- Currently, in CMEK-enabled projects, you can enable CMEK on AWS and Azure. For each cloud provider, you can configure one unique encryption key per region. 
You can only create clusters in regions where you have configured an encryption key for the chosen cloud provider. ## Enable CMEK @@ -143,7 +143,7 @@ To complete the CMEK configuration of the project, take the following steps: ### Step 3. Create a cluster -Under the project created in [Step 1](#step-1-create-a-cmek-enabled-project), create a TiDB Cloud Dedicated cluster hosted on AWS. For detailed steps, refer to [this document](/tidb-cloud/create-tidb-cluster.md). Ensure that the region where the cluster is located is the same as that in [Step 2](/tidb-cloud/tidb-cloud-encrypt-cmek.md#step-2-complete-the-cmek-configuration-of-the-project). +Under the project created in [Step 1](#step-1-create-a-cmek-enabled-project), create a TiDB Cloud Dedicated cluster hosted on AWS. For detailed steps, refer to [Create a TiDB Cloud Dedicated Cluster](/tidb-cloud/create-tidb-cluster.md). Ensure that the region where the cluster is located is the same as that in [Step 2](#step-2-complete-the-cmek-configuration-of-the-project). > **Note:** > @@ -167,4 +167,4 @@ If you need to temporarily revoke TiDB Cloud's access to CMEK, follow these step After revoking TiDB Cloud's access to CMEK, if you need to restore the access, follow these steps: 1. On the AWS KMS console, restore the CMEK access policy. -2. On the TiDB Cloud console, restore all clusters in the project. +2. In the TiDB Cloud console, restore all clusters in the project. diff --git a/tidb-cloud/tidb-cloud-encrypt-cmek-azure.md b/tidb-cloud/tidb-cloud-encrypt-cmek-azure.md new file mode 100644 index 0000000000000..b6b7cf72eb68e --- /dev/null +++ b/tidb-cloud/tidb-cloud-encrypt-cmek-azure.md @@ -0,0 +1,159 @@ +--- +title: Encryption at Rest Using Customer-Managed Encryption Keys on Azure +summary: Learn how to use Customer-Managed Encryption Key (CMEK) to encrypt data in TiDB Cloud clusters hosted on Azure. 
+--- + +# Encryption at Rest Using Customer-Managed Encryption Keys on Azure + +Customer-Managed Encryption Key (CMEK) enables you to secure your static data in a TiDB Cloud Dedicated cluster by utilizing a symmetric encryption key that is under your complete control. This key is referred to as the CMEK key. + +Once CMEK is enabled for a project, all clusters created within that project encrypt their static data using the CMEK key. Additionally, any backup data generated by these clusters is encrypted using the same key. If CMEK is not enabled, TiDB Cloud employs an escrow key to encrypt all data in your cluster when it is at rest. + +## Restrictions + +- Currently, TiDB Cloud only supports using AWS KMS and Azure Key Vault to provide CMEK. +- To use CMEK, you need to enable CMEK when creating a project and complete CMEK-related configurations before creating a cluster. You cannot enable CMEK for existing projects. +- Currently, in CMEK-enabled projects, you can only create [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on AWS and Azure. +- Currently, in CMEK-enabled projects, [dual region backup](/tidb-cloud/backup-and-restore-concepts.md#dual-region-backup) is not supported. +- Currently, in CMEK-enabled projects, you can enable CMEK on AWS and Azure. For each cloud provider, you can configure one unique encryption key per region. You can only create clusters in regions where you have configured an encryption key for the chosen cloud provider. + +## Enable CMEK + +If you want to encrypt your data using the encryption keys owned by your account, take the following steps. + +### Step 1. Create a CMEK-enabled project + +If you are in the `Organization Owner` role of your organization, you can create a CMEK-enabled project by performing the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target organization using the combo box in the upper-left corner. +2. 
In the left navigation pane, click **Projects**. +3. On the **Projects** page, click **Create New Project** in the upper-right corner. +4. Fill in a project name. +5. Choose to enable the CMEK capability of the project. +6. Click **Confirm** to complete the project creation. + +### Step 2. Complete the CMEK configuration of the project + +You can complete the CMEK configuration for a project by using the TiDB Cloud console with either the Azure portal or Azure Resource Manager. + +> **Note:** +> +> - Make sure that the policy of the key meets the requirements and is free from errors such as insufficient permissions or account issues. These errors can cause clusters to be incorrectly created using this key. +> - The cross-tenant customer-managed key (CMK) feature for Azure managed disks is currently in preview and available only in select Azure regions. Currently, only availability regions are supported. For more information, see [Encrypt managed disks with cross-tenant customer-managed keys](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-cross-tenant-customer-managed-keys?tabs=azure-portal#preview-regional-availability). + + +
+ +To configure CMEK using the TiDB Cloud console and Azure portal, take the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com/), switch to your target project using the combo box in the upper-left corner. + +2. In the left navigation pane, click **Project Settings** > **Encryption Access**. + +3. On the **Encryption Access** page, click **Create Encryption Key**. + +4. Under **Key Management Service**, select **Azure Key Vault**, and choose the region where the encryption key will be used. + +5. If a Service Principal for the TiDB-provided enterprise application does not already exist in your tenant, create one. The TiDB Cloud console displays the **Microsoft Entra Application Name** and **ID**, which you need for this process and later steps. To create the Service Principal, run the following command from the **Create Service Principal** section: + + ```shell + az ad sp create --id {Microsoft_Entra_Application_ID} + ``` + + For more information, see [Application and service principal objects in Microsoft Entra ID](https://learn.microsoft.com/en-us/azure/active-directory/develop/app-objects-and-service-principals). + +6. Create a Key Vault in your Azure account, or select an existing one. Ensure that: + + * **Purge protection** is enabled. + * The **region** matches your cluster's region. + +7. In the TiDB Cloud console, enter the Key Vault name and Key name. TiDB Cloud adds a unique suffix to the key name for enhanced security. Copy the full key name and create the encryption key in the Azure portal. For more information, see [Create your encryption key](https://learn.microsoft.com/en-us/azure/key-vault/keys/quick-create-portal). + +8. Assign the **Key Vault Crypto Officer** role to your current user: + + 1. In the [Azure portal](https://portal.azure.com/), navigate to your Key Vault. + 2. Click **Access control (IAM)**, and then click **Add** > **Add role assignment**. + 3. 
Search for and select the **Key Vault Crypto Officer** role, then click **Next**. + 4. On the **Members** tab, set **Assign access to** as **User, group, or service principal**. + 5. Click **+ Select members**, search for and select your current user as the member. Then, click **Select**. + 6. Review the settings and click **Review + assign**. + +9. Assign the **Key Vault Crypto Service Encryption User** role to the TiDB-provided enterprise application for the encryption key: + + 1. In your Key Vault, go to the encryption key object you created. + 2. Click **Add** > **Add role assignment**. + 3. Search for and select the **Key Vault Crypto Service Encryption User** role, then click **Next**. + 4. On the **Members** tab, set **Assign access to** as **User, group, or service principal**. + 5. Click **+ Select members**, enter the TiDB-provided **Enterprise Application Name**, and select it as the member. Then, click **Select**. + 6. Review the configuration and click **Review + assign**. + +10. In the TiDB Cloud console, click **Test Encryption Key and Create** to validate the configuration and create the encryption key. + +
+
+ +To configure CMEK using the TiDB Cloud console and Azure Resource Manager, take the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com/), use the combo box in the upper-left corner to switch to your target project. + +2. In the left navigation pane, go to **Project Settings** > **Encryption Access**. + +3. On the **Encryption Access** page, click **Create Encryption Key**. + +4. Under **Key Management Service**, select **Azure Key Vault**, and specify the region where the encryption key will be available. + +5. If a Service Principal for the TiDB-provided enterprise application does not already exist in your tenant, create one. To create the Service Principal, run the following command from the **Create Service Principal** section: + + ```shell + az ad sp create --id {Microsoft_Entra_Application_ID} + ``` + + For more information, see [Application and service principal objects in Microsoft Entra ID](https://learn.microsoft.com/en-us/azure/active-directory/develop/app-objects-and-service-principals). + +6. Open the [TiDB custom deployment template for Azure Resource Manager](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Ftcidm.blob.core.windows.net%2Fcmek%2Fazure_cmek_rmt.json%3Fsv%3D2015-04-05%26ss%3Db%26srt%3Dco%26sp%3Drl%26se%3D2029-03-01T00%3A00%3A01.0000000Z%26sig%3DIA02CymcFpYCwoTsqCSJVD%2F8Khh%2F0UAPrkKDeLMIIFc%3D) in the Azure portal. Select your **Subscription** and **Resource Group**, then fill in the **Instance Details** section as follows: + + - **Region**: select the location where you want to create the Key Vault. This must match your cluster's region. + - **Key Vault Name**: enter the name of your Azure Key Vault. + - **Key Name**: provide the full key name to be created in the Key Vault. In the TiDB Cloud console, enter the key name prefix and click **Copy** to get the full key name. + - **Enterprise App Service Principal ID**: enter the Service Principal ID for the TiDB-provided enterprise application. 
To retrieve the **Service Principal ID**, run the following command (replace `{microsoft_enterprise_app_id}` with the actual ID shown in the TiDB Cloud console): + + ```shell + az ad sp show --id {microsoft_enterprise_app_id} --query id -o tsv + ``` + +
+
+ +> **Note:** +> +> This feature will be further enhanced in the future, and upcoming features might require additional permissions. Therefore, this policy requirement is subject to change. + +### Step 3. Create a cluster + +Under the project created in [Step 1](#step-1-create-a-cmek-enabled-project), create a TiDB Cloud Dedicated cluster hosted on Azure. For detailed steps, refer to [Create a TiDB Cloud Dedicated Cluster](/tidb-cloud/create-tidb-cluster.md). + +When you select a cloud provider and region, the appropriate encryption key is automatically matched. If no key is available for your provider and region, the console displays a tip to help you create one. + +> **Note:** +> +> When CMEK is enabled, TiDB Cloud encrypts both the Premium SSD v2 used by cluster nodes and the storage blob for cluster backups with CMEK. + +## Rotate CMEK + +You can configure [cryptographic key auto-rotation](https://learn.microsoft.com/en-us/azure/key-vault/keys/how-to-configure-key-rotation) in Azure Key Vault. With this rotation enabled, you do not need to update **Encryption Access** in project settings in TiDB Cloud. + +## Disable and re-enable CMEK + +If you need to temporarily revoke TiDB Cloud's access to CMEK, follow these steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com/), pause the corresponding cluster in the project. +2. In the Azure Key Vault console, right-click the encryption key and select **Disable**. + +> **Note:** +> +> After you disable CMEK in Azure Key Vault, your running clusters will become unavailable within a few minutes because they can no longer access the CMEK. + +After disabling TiDB Cloud's access to CMEK, if you need to restore the access, follow these steps: + +1. In the Azure Key Vault console, select the encryption key and click **Enable**. +2. In the TiDB Cloud console, restore the corresponding cluster in the project. 
diff --git a/tidb-cloud/tidb-cloud-events.md b/tidb-cloud/tidb-cloud-events.md index 935df3bc94d2c..614e2bc44a0dc 100644 --- a/tidb-cloud/tidb-cloud-events.md +++ b/tidb-cloud/tidb-cloud-events.md @@ -42,8 +42,8 @@ TiDB Cloud logs the following types of cluster events: | ScaleChangefeed | Scale the specification of a changefeed | | FailedChangefeed | Changefeed failures | | ImportData | Import data to a cluster | -| UpdateSpendingLimit | Update spending limit of a TiDB Cloud Serverless scalable cluster | -| ResourceLimitation | Update resource limitation of a TiDB Cloud Serverless cluster | +| UpdateSpendingLimit | Update spending limit of a {{{ .starter }}} cluster | +| ResourceLimitation | Update resource limitation of a {{{ .starter }}} or {{{ .essential }}} cluster | For each event, the following information is logged: diff --git a/tidb-cloud/tidb-cloud-faq.md b/tidb-cloud/tidb-cloud-faq.md index f524bed54b64a..e763553c06519 100644 --- a/tidb-cloud/tidb-cloud-faq.md +++ b/tidb-cloud/tidb-cloud-faq.md @@ -13,7 +13,7 @@ This document lists the most frequently asked questions about TiDB Cloud. ### What is TiDB Cloud? -TiDB Cloud makes deploying, managing, and maintaining your TiDB clusters even simpler with a fully managed cloud instance that you control through an intuitive console. You are able to easily deploy on Amazon Web Services (AWS), Google Cloud, or Microsoft Azure to quickly build mission-critical applications. +TiDB Cloud makes deploying, managing, and maintaining your TiDB clusters even simpler with a fully managed cloud instance that you control through an intuitive console. You are able to easily deploy on Amazon Web Services (AWS), Google Cloud, Microsoft Azure, or Alibaba Cloud to quickly build mission-critical applications. 
TiDB Cloud allows developers and DBAs with little or no training to handle once-complex tasks such as infrastructure management and cluster deployment with ease, to focus on your applications, not the complexities of your database. And by scaling TiDB clusters in or out with a simple click of a button, you no longer waste costly resources because you are able to provision your databases for exactly how much and how long you need them. @@ -33,7 +33,7 @@ You can use any language supported by the MySQL client or driver. ### Where can I run TiDB Cloud? -TiDB Cloud is currently available on Amazon Web Services (AWS), Google Cloud, and Microsoft Azure. +TiDB Cloud is currently available on Amazon Web Services (AWS), Google Cloud, Microsoft Azure, and Alibaba Cloud. ### Does TiDB Cloud support VPC peering between different cloud service providers? @@ -41,10 +41,11 @@ No. ### What versions of TiDB are supported on TiDB Cloud? -- Starting from January 2, 2025, the default TiDB version for new TiDB Cloud Dedicated clusters is [v8.1.2](https://docs.pingcap.com/tidb/v8.1/release-8.1.2). -- Starting from February 21, 2024, the TiDB version for TiDB Cloud Serverless clusters is [v7.1.3](https://docs.pingcap.com/tidb/v7.1/release-7.1.3). +- For new TiDB Cloud Dedicated clusters, the default TiDB version is [v8.5.5](https://docs.pingcap.com/tidb/v8.5/release-8.5.5) starting from January 15, 2026. +- For {{{ .starter }}} clusters, the TiDB version is [v8.5.3](https://docs.pingcap.com/tidb/stable/release-8.5.3) starting from February 10, 2026. +- For {{{ .essential }}} clusters, the TiDB version is [v7.5.2](https://docs.pingcap.com/tidb/stable/release-7.5.2) starting from April 22, 2025. -For more information, see [TiDB Cloud Release Notes](/tidb-cloud/tidb-cloud-release-notes.md). +For more information, see [TiDB Cloud Release Notes](/tidb-cloud/releases/tidb-cloud-release-notes.md). 
### What companies are using TiDB or TiDB Cloud in production? @@ -64,7 +65,7 @@ The best way to learn about TiDB Cloud is to follow our step-by-step tutorial. C - [TiDB Cloud Introduction](/tidb-cloud/tidb-cloud-intro.md) - [Get Started](/tidb-cloud/tidb-cloud-quickstart.md) -- [Create a TiDB Cloud Serverless Cluster](/tidb-cloud/create-tidb-cluster-serverless.md) +- [Create a {{{ .starter }}} or Essential Cluster](/tidb-cloud/create-tidb-cluster-serverless.md) ### What does `XXX's Org/default project/Cluster0` refer to when deleting a cluster? @@ -108,7 +109,7 @@ TiDB is highly compatible with MySQL. You can migrate data from any MySQL-compat ### Does TiDB Cloud support incremental backups? -No. If you need to restore data to any point in time within the cluster's backup retention, you can use PITR (Point-in-time Recovery). For more information, see [Use PITR in a TiDB Cloud Dedicated cluster](/tidb-cloud/backup-and-restore.md#turn-on-auto-backup) or [Use PITR in a TiDB Cloud Serverless cluster](/tidb-cloud/backup-and-restore-serverless.md#restore). +No. If you need to restore data to any point in time within the cluster's backup retention, you can use PITR (Point-in-time Recovery). For more information, see [Use PITR in a TiDB Cloud Dedicated cluster](/tidb-cloud/backup-and-restore.md#turn-on-auto-backup) or [Use PITR in a {{{ .essential }}} cluster](/tidb-cloud/backup-and-restore-serverless.md#restore). ## HTAP FAQs @@ -153,7 +154,7 @@ No. TiDB Cloud is Database-as-a-Service (DBaaS) and runs only in the TiDB Cloud ### Is my TiDB cluster secure? -In TiDB Cloud, you can use either a TiDB Cloud Dedicated cluster or a TiDB Cloud Serverless cluster according to your needs. +In TiDB Cloud, you can use a TiDB Cloud Dedicated cluster, a {{{ .starter }}} cluster, or a {{{ .essential }}} cluster according to your needs. 
For TiDB Cloud Dedicated clusters, TiDB Cloud ensures cluster security with the following measures: @@ -162,7 +163,7 @@ For TiDB Cloud Dedicated clusters, TiDB Cloud ensures cluster security with the - Creates server-side TLS certificates and component-level TLS certificates for each cluster to encrypt cluster data in transit. - Provide IP access rules for each cluster to ensure that only allowed source IP addresses can access your cluster. -For TiDB Cloud Serverless clusters, TiDB Cloud ensures cluster security with the following measures: +For {{{ .starter }}} and {{{ .essential }}} clusters, TiDB Cloud ensures cluster security with the following measures: - Creates independent sub-accounts for each cluster. - Sets up firewall rules to isolate external connections. @@ -184,14 +185,14 @@ For more information, see [Connect to Your TiDB Cloud Dedicated Cluster](/tidb-c
-
+
-For a TiDB Cloud Serverless cluster, the steps to connect to your cluster are simplified as follows: +For a {{{ .starter }}} or Essential cluster, the steps to connect to your cluster are simplified as follows: 1. Set a database user and login credential. 2. Choose a SQL client, get an auto-generated connection string displayed on the TiDB Cloud UI, and then connect to your cluster through the SQL client using the string. -For more information, see [Connect to Your TiDB Cloud Serverless Cluster](/tidb-cloud/connect-to-tidb-cluster-serverless.md). +For more information, see [Connect to Your TiDB Cloud Cluster](/tidb-cloud/connect-to-tidb-cluster-serverless.md).
diff --git a/tidb-cloud/tidb-cloud-glossary.md b/tidb-cloud/tidb-cloud-glossary.md index e8ea8cfb631c8..83280669d6bc8 100644 --- a/tidb-cloud/tidb-cloud-glossary.md +++ b/tidb-cloud/tidb-cloud-glossary.md @@ -27,7 +27,7 @@ ACID refers to the four key properties of a transaction: atomicity, consistency, Chat2Query is an AI-powered feature integrated into SQL Editor that assists users in generating, debugging, or rewriting SQL queries using natural language instructions. For more information, see [Explore your data with AI-assisted SQL Editor](/tidb-cloud/explore-data-with-chat2query.md). -In addition, TiDB Cloud provides a Chat2Query API for TiDB Cloud Serverless clusters. After it is enabled, TiDB Cloud will automatically create a system Data App called **Chat2Query** and a Chat2Data endpoint in Data Service. You can call this endpoint to let AI generate and execute SQL statements by providing instructions. For more information, see [Get started with Chat2Query API](/tidb-cloud/use-chat2query-api.md). +In addition, TiDB Cloud provides a Chat2Query API for {{{ .starter }}} clusters hosted on AWS. After it is enabled, TiDB Cloud will automatically create a system Data App called **Chat2Query** and a Chat2Data endpoint in Data Service. You can call this endpoint to let AI generate and execute SQL statements by providing instructions. For more information, see [Get started with Chat2Query API](/tidb-cloud/use-chat2query-api.md). ### Credit @@ -63,9 +63,9 @@ For more information, see [Manage an endpoint](/tidb-cloud/data-service-manage-e ### Full-text search -Unlike [Vector Search](/vector-search/vector-search-overview.md), which focuses on semantic similarity, full-text search lets you retrieve documents for exact keywords. In Retrieval-Augmented Generation (RAG) scenarios, you can use full-text search together with vector search to improve the retrieval quality. 
+Unlike [Vector Search](/ai/concepts/vector-search-overview.md), which focuses on semantic similarity, full-text search lets you retrieve documents for exact keywords. In Retrieval-Augmented Generation (RAG) scenarios, you can use full-text search together with vector search to improve the retrieval quality. -For more information, see [Full-Text Search with SQL](/tidb-cloud/vector-search-full-text-search-sql.md) and [Full-Text Search with Python](/tidb-cloud/vector-search-full-text-search-python.md). +For more information, see [Full-Text Search with SQL](https://docs.pingcap.com/developer/vector-search-full-text-search-sql) and [Full-Text Search with Python](https://docs.pingcap.com/developer/vector-search-full-text-search-python). ## M @@ -135,19 +135,28 @@ The place where the data of deleted clusters with valid backups is stored. Once A separate database that can be located in the same or different region and contains the same data. A replica is often used for disaster recovery purposes or to improve performance. -### Replication Capacity Unit +### Replication Capacity Unit (RCU) -The replication of changefeed is charged according to the computing resources, which is the TiCDC replication capacity unit. +TiDB Cloud measures the capacity of [changefeeds](/tidb-cloud/changefeed-overview.md) in TiCDC Replication Capacity Units (RCUs). When you create a changefeed for a cluster, you can select an appropriate specification. The higher the RCU, the better the replication performance. You will be charged for these TiCDC changefeed RCUs. For more information, see [Changefeed Cost](https://www.pingcap.com/tidb-dedicated-pricing-details/#changefeed-cost). -### Request Unit +### Request Capacity Unit (RCU) -A Request Unit (RU) is a unit of measure used to represent the amount of resources consumed by a single request to the database. 
The amount of RUs consumed by a request depends on various factors, such as the operation type or the amount of data being retrieved or modified. For more information, see [TiDB Cloud Serverless Pricing Details](https://www.pingcap.com/tidb-cloud-serverless-pricing-details). +A Request Capacity Unit (RCU) is a unit of measure used to represent the provisioned compute capacity for your {{{ .essential }}} cluster. One RCU provides a fixed amount of compute resources that can process a certain number of RUs per second. The number of RCUs you provision determines your cluster's baseline performance and throughput capacity. For more information, see [{{{ .essential }}} Pricing Details](https://www.pingcap.com/tidb-cloud-essential-pricing-details/). + +### Request Unit (RU) + +For {{{ .starter }}} and Essential, a Request Unit (RU) is a unit of measure used to represent the amount of resources consumed by a single request to the database. The amount of RUs consumed by a request depends on various factors, such as the operation type or the amount of data being retrieved or modified. However, the billing models for {{{ .starter }}} and Essential are different: + +- {{{ .starter }}} is billed based on the total number of RUs consumed. For more information, see [{{{ .starter }}} Pricing Details](https://www.pingcap.com/tidb-cloud-starter-pricing-details/). +- {{{ .essential }}} is billed based on the number of provisioned [Request Capacity Units (RCUs)](#request-capacity-unit-rcu). One RCU provides a fixed amount of compute resources that can process a certain number of RUs-per-second. For more information, see [{{{ .essential }}} Pricing Details](https://www.pingcap.com/tidb-cloud-essential-pricing-details/). + +For TiDB Cloud Dedicated and TiDB Self-Managed, a Request Unit (RU) is a resource abstraction unit that represents system resource consumption, which currently includes CPU, IOPS, and IO bandwidth metrics. 
It is used by the resource control feature to limit, isolate, and manage resources consumed by database requests, **not for billing purposes**. For more information, see [Use Resource Control to Achieve Resource Group Limitation and Flow Control](/tidb-resource-control-ru-groups.md). ## S ### Spending limit -Spending limit refers to the maximum amount of money that you are willing to spend on a particular workload in a month. It is a cost-control mechanism that enables you to set a budget for your TiDB Cloud Serverless clusters. For [scalable clusters](/tidb-cloud/select-cluster-tier.md#scalable-cluster-plan), the spending limit must be set to a minimum of $0.01. Also, the scalable cluster can have a free quota if it meets the qualifications. The scalable cluster with a free quota will consume the free quota first. +[Spending limit](/tidb-cloud/manage-serverless-spend-limit.md) refers to the maximum amount of money that you are willing to spend on a particular workload in a month. It is a cost-control mechanism that enables you to set a budget for your {{{ .starter }}} clusters. If the spending limit is set to 0, the cluster remains free. If the spending limit is greater than 0, you need to add a credit card. ## T @@ -159,6 +168,12 @@ The collection of [TiDB](https://docs.pingcap.com/tidb/stable/tidb-computing), [ ### TiDB node The computing node that aggregates data from queries returned from transactional or analytical stores. Increasing the number of TiDB nodes will increase the number of concurrent queries that the cluster can handle. +### TiDB X + +A new distributed SQL architecture that makes cloud-native object storage the backbone of TiDB. By decoupling compute and storage, TiDB X enables TiDB to scale intelligently, adapting in real time to workload patterns, business cycles, and data characteristics. + +The TiDB X architecture is now available in {{{ .starter }}}, Essential, and Premium. 
For more information, see [Introducing TiDB X: A New Foundation for Distributed SQL in the Era of AI](https://www.pingcap.com/blog/introducing-tidb-x-a-new-foundation-distributed-sql-ai-era/) and [PingCAP Launches TiDB X and New AI Capabilities at SCaiLE Summit 2025](https://www.pingcap.com/press-release/pingcap-launches-tidb-x-new-ai-capabilities/). + ### TiFlash node The analytical storage node that replicates data from TiKV in real time and supports real-time analytical workloads. @@ -175,7 +190,7 @@ A list of IP addresses and Classless Inter-Domain Routing (CIDR) addresses that ### Vector search -[Vector search](/vector-search/vector-search-overview.md) is a search method that prioritizes the meaning of your data to deliver relevant results. Unlike traditional full-text search, which relies on exact keyword matching and word frequency, vector search converts various data types (such as text, images, or audio) into high-dimensional vectors and queries based on the similarity between these vectors. This search method captures the semantic meaning and contextual information of the data, leading to a more precise understanding of user intent. Even when the search terms do not exactly match the content in the database, vector search can still provide results that align with the user's intent by analyzing the semantics of the data. +[Vector search](/ai/concepts/vector-search-overview.md) is a search method that prioritizes the meaning of your data to deliver relevant results. Unlike traditional full-text search, which relies on exact keyword matching and word frequency, vector search converts various data types (such as text, images, or audio) into high-dimensional vectors and queries based on the similarity between these vectors. This search method captures the semantic meaning and contextual information of the data, leading to a more precise understanding of user intent. 
Even when the search terms do not exactly match the content in the database, vector search can still provide results that align with the user's intent by analyzing the semantics of the data. ### Virtual Private Cloud diff --git a/tidb-cloud/tidb-cloud-htap-quickstart.md b/tidb-cloud/tidb-cloud-htap-quickstart.md index e7628aba99b57..b66a712612f0b 100644 --- a/tidb-cloud/tidb-cloud-htap-quickstart.md +++ b/tidb-cloud/tidb-cloud-htap-quickstart.md @@ -12,7 +12,7 @@ This tutorial guides you through an easy way to experience the Hybrid Transactio ## Before you begin -Before experiencing the HTAP feature, follow [TiDB Cloud Quick Start](/tidb-cloud/tidb-cloud-quickstart.md) to create a TiDB Cloud Serverless cluster and import the **Steam Game Stats** sample dataset to the cluster. +Before experiencing the HTAP feature, follow [TiDB Cloud Quick Start](/tidb-cloud/tidb-cloud-quickstart.md) to create a {{{ .starter }}} cluster and import the **Steam Game Stats** sample dataset to the cluster. ## Steps diff --git a/tidb-cloud/tidb-cloud-import-local-files.md b/tidb-cloud/tidb-cloud-import-local-files.md index 950e86b60623a..20c0d9fb7129e 100644 --- a/tidb-cloud/tidb-cloud-import-local-files.md +++ b/tidb-cloud/tidb-cloud-import-local-files.md @@ -1,18 +1,18 @@ --- -title: Import Local Files to TiDB Cloud Serverless -summary: Learn how to import local files to TiDB Cloud Serverless. +title: Import Local Files to {{{ .starter }}} or Essential +summary: Learn how to import local files to {{{ .starter }}} or {{{ .essential }}}. --- -# Import Local Files to TiDB Cloud Serverless +# Import Local Files to {{{ .starter }}} or Essential -You can import local files to TiDB Cloud Serverless directly. It only takes a few clicks to complete the task configuration, and then your local CSV data will be quickly imported to your TiDB cluster. Using this method, you do not need to provide the cloud storage and credentials. The whole importing process is quick and smooth. 
+You can import local files to {{{ .starter }}} or {{{ .essential }}} directly. It only takes a few clicks to complete the task configuration, and then your local CSV data will be quickly imported to your TiDB cluster. Using this method, you do not need to provide the cloud storage and credentials. The whole importing process is quick and smooth. Currently, this method supports importing one CSV file for one task into either an existing empty table or a new table. ## Limitations - Currently, TiDB Cloud only supports importing a local file in CSV format within 250 MiB for one task. -- Importing local files is supported only for TiDB Cloud Serverless clusters, not for TiDB Cloud Dedicated clusters. +- Importing local files is supported only for {{{ .starter }}} and {{{ .essential }}} clusters, not for TiDB Cloud Dedicated clusters. - You cannot run more than one import task at the same time. ## Import local files diff --git a/tidb-cloud/tidb-cloud-intro.md b/tidb-cloud/tidb-cloud-intro.md index b406d4500dbda..938f80c9eae09 100644 --- a/tidb-cloud/tidb-cloud-intro.md +++ b/tidb-cloud/tidb-cloud-intro.md @@ -6,7 +6,7 @@ category: intro # What is TiDB Cloud -[TiDB Cloud](https://www.pingcap.com/tidb-cloud/) is a fully-managed Database-as-a-Service (DBaaS) that brings [TiDB](https://docs.pingcap.com/tidb/stable/overview), an open-source Hybrid Transactional and Analytical Processing (HTAP) database, to your cloud. TiDB Cloud offers an easy way to deploy and manage databases to let you focus on your applications, not the complexities of the databases. You can create TiDB Cloud clusters to quickly build mission-critical applications on Amazon Web Services (AWS), Google Cloud, and Microsoft Azure. +[TiDB Cloud](https://www.pingcap.com/tidb-cloud/) is a fully-managed Database-as-a-Service (DBaaS) that brings [TiDB](https://docs.pingcap.com/tidb/stable/overview), an open-source Hybrid Transactional and Analytical Processing (HTAP) database, to your cloud. 
TiDB Cloud offers an easy way to deploy and manage databases to let you focus on your applications, not the complexities of the databases. You can create TiDB Cloud clusters to quickly build mission-critical applications on Amazon Web Services (AWS), Google Cloud, Microsoft Azure, and Alibaba Cloud. ![TiDB Cloud Overview](/media/tidb-cloud/tidb-cloud-overview.png) @@ -50,8 +50,18 @@ With TiDB Cloud, you can get the following key features: - **Multi-Cloud Support** + + + Stay flexible without cloud vendor lock-in. TiDB Cloud is currently available on AWS, Azure, Google Cloud, and Alibaba Cloud. + + + + + Stay flexible without cloud vendor lock-in. TiDB Cloud is currently available on AWS, Azure, and Google Cloud. + + - **Simple Pricing Plans** Pay only for what you use, with transparent and upfront pricing with no hidden fees. @@ -62,17 +72,51 @@ With TiDB Cloud, you can get the following key features: ## Deployment options -TiDB Cloud provides the following two deployment options: +TiDB Cloud provides the following deployment options: + +- TiDB Cloud Starter + + {{{ .starter }}} is a fully managed, multi-tenant TiDB offering. It delivers an instant, autoscaling MySQL-compatible database and offers a generous free quota and consumption based billing once free limits are exceeded. + + + + Currently, {{{ .starter }}} is generally available on AWS and in public preview on Alibaba Cloud. + + + +- {{{ .essential }}} + + For applications experiencing growing workloads and needing scalability in real time, {{{ .essential }}} provides the flexibility and performance to keep pace with your business growth. + + + + Currently, {{{ .essential }}} is in public preview on AWS and Alibaba Cloud. 
+ + For feature comparison between {{{ .starter }}} and {{{ .essential }}} on Alibaba Cloud, see [TiDB on Alibaba Cloud](https://www.pingcap.com/partners/alibaba-cloud/). + + + + + + Currently, {{{ .essential }}} is in public preview on AWS. + + + + + +- {{{ .premium }}} + + {{{ .premium }}} is designed for mission-critical businesses that demand unlimited real-time scalability. It delivers workload-aware auto-scaling and comprehensive enterprise capabilities. -- [TiDB Cloud Serverless](https://www.pingcap.com/tidb-cloud-serverless) + Currently, {{{ .premium }}} is in private preview on AWS and Alibaba Cloud. - TiDB Cloud Serverless is a fully managed, multi-tenant TiDB offering. It delivers an instant, autoscaling MySQL-compatible database and offers a generous free tier and consumption based billing once free limits are exceeded. + -- [TiDB Cloud Dedicated](https://www.pingcap.com/tidb-cloud-dedicated) +- TiDB Cloud Dedicated - TiDB Cloud Dedicated is for production use with the benefits of cross-zone high availability, horizontal scaling, and [HTAP](https://en.wikipedia.org/wiki/Hybrid_transactional/analytical_processing). + TiDB Cloud Dedicated is designed for mission-critical businesses, offering high availability across multiple availability zones, horizontal scaling, and full [HTAP](https://en.wikipedia.org/wiki/Hybrid_transactional/analytical_processing) capabilities. -For feature comparison between TiDB Cloud Serverless and TiDB Cloud Dedicated, see [TiDB: An advanced, open source, distributed SQL database](https://www.pingcap.com/get-started-tidb). + Currently, TiDB Cloud Dedicated is generally available on AWS and Google Cloud, and in public preview on Azure. For more information, see [TiDB Cloud Dedicated](https://www.pingcap.com/tidb-cloud-dedicated). 
## Architecture diff --git a/tidb-cloud/tidb-cloud-log-redaction.md b/tidb-cloud/tidb-cloud-log-redaction.md index bf439dac6d3f7..4df13d28eb090 100644 --- a/tidb-cloud/tidb-cloud-log-redaction.md +++ b/tidb-cloud/tidb-cloud-log-redaction.md @@ -1,23 +1,45 @@ --- title: User-Controlled Log Redaction -summary: Learn how to enable or disable user-controlled log redaction for TiDB Cloud Dedicated clusters to manage sensitive data visibility in execution logs. +summary: Learn how to enable or disable user-controlled log redaction in TiDB Cloud to manage the visibility of sensitive data in execution logs. --- # User-Controlled Log Redaction -User-controlled log redaction lets you manage the visibility of sensitive data in your [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster logs. By toggling this redaction feature, you can protect your information, balance operational needs with security, and control what appears in your cluster logs. +User-controlled log redaction lets you manage the visibility of sensitive data in your [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster{{{ .premium }}} instance logs. By toggling this redaction feature, you can protect your information, balance operational needs with security, and control what appears in your clusterinstance logs. -Log redaction is enabled by default, ensuring that sensitive information in running logs and execution plans is concealed. If you need more detailed log information for cluster maintenance or SQL tuning, you can disable this feature at any time. +Log redaction is enabled by default, ensuring that sensitive information in running logs and execution plans is concealed. If you need more detailed log information for clusterinstance maintenance or SQL tuning, you can disable this feature at any time. + + > **Note:** > > The log redaction feature is only supported for TiDB Cloud Dedicated clusters. 
+ + + + +> **Note:** +> +> The log redaction feature is supported for TiDB Cloud Dedicated clusters and {{{ .premium }}} instances. + + + ## Prerequisites + + * You must be in the **Organization Owner** or **Project Owner** role of your organization in TiDB Cloud. * Log redaction cannot be enabled or disabled when the cluster is in the `paused` state. + + + + +* You must be in the **Organization Owner** role of your organization in TiDB Cloud. + + + ## Disable log redaction > **Warning:** @@ -27,15 +49,27 @@ Log redaction is enabled by default, ensuring that sensitive information in runn To disable log redaction, do the following: 1. Log in to the [TiDB Cloud console](https://tidbcloud.com/). -2. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters)[**TiDB Instances**](https://tidbcloud.com/tidbs) page, and then click the name of your target clusterinstance to go to its overview page. + + > **Tip:** > > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + + + + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations and instances. + + + 3. In the left navigation pane, click **Settings** > **Security**. 4. In the **Execution Log Redaction** section, you can see that the redaction feature is **Enabled** by default. -5. Click **Disable**. A warning appears, explaining the risks of disabling log redaction. +5. Click **Disable**. A warning appears, explaining the risks of disabling log redaction. 6. Confirm the disabling. After disabling log redaction, note the following: @@ -62,12 +96,24 @@ To check the updated logs after log redaction is disabled, do the following: To maintain data security, **enable log redaction** as soon as you complete your diagnostic or maintenance task as follows. 1. 
Log in to the [TiDB Cloud console](https://tidbcloud.com/). -2. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page, and then click the name of your target cluster to go to its overview page. +2. Navigate to the [**Clusters**](https://tidbcloud.com/project/clusters)[**TiDB Instances**](https://tidbcloud.com/tidbs) page, and then click the name of your target clusterinstance to go to its overview page. + + > **Tip:** > > You can use the combo box in the upper-left corner to switch between organizations, projects, and clusters. + + + + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations and instances. + + + 3. In the left navigation pane, click **Settings** > **Security**. 4. In the **Execution Log Redaction** section, you can see that the redaction feature is **Disabled**. 5. Click **Enable** to enable it. diff --git a/tidb-cloud/tidb-cloud-migration-overview.md b/tidb-cloud/tidb-cloud-migration-overview.md index d71ea31500ec4..10ccd0125bd36 100644 --- a/tidb-cloud/tidb-cloud-migration-overview.md +++ b/tidb-cloud/tidb-cloud-migration-overview.md @@ -41,21 +41,21 @@ If you have data files in SQL, CSV, Parquet, or Aurora Snapshot formats, you can - Import sample data (SQL file) to TiDB Cloud - You can import sample data (SQL file) to TiDB Cloud to quickly get familiar with the TiDB Cloud interface and the import process. For more information, see [Import Sample Data to TiDB Cloud Serverless](/tidb-cloud/import-sample-data-serverless.md) and [Import Sample Data to TiDB Cloud Dedicated](/tidb-cloud/import-sample-data.md). + You can import sample data (SQL file) to TiDB Cloud to quickly get familiar with the TiDB Cloud interface and the import process. For more information, see [Import Sample Data to {{{ .starter }}} or Essential](/tidb-cloud/import-sample-data-serverless.md) and [Import Sample Data to TiDB Cloud Dedicated](/tidb-cloud/import-sample-data.md). 
-- Import CSV files from Amazon S3, Google Cloud Storage (GCS), or Azure Blob Storage into TiDB Cloud +- Import CSV files from Amazon S3, Google Cloud Storage (GCS), Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud - You can import CSV files from Amazon S3, Google Cloud Storage (GCS), or Azure Blob Storage into TiDB Cloud. For more information, see [Import CSV Files from Cloud Storage into TiDB Cloud Serverless](/tidb-cloud/import-csv-files-serverless.md) and [Import CSV Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-csv-files.md). + You can import CSV files from Amazon S3, Google Cloud Storage (GCS), Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud. For more information, see [Import CSV Files from Cloud Storage into {{{ .starter }}} or Essential](/tidb-cloud/import-csv-files-serverless.md) and [Import CSV Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-csv-files.md). -- Import Apache Parquet files from Amazon S3, Google Cloud Storage (GCS), or Azure Blob Storage into TiDB Cloud +- Import Apache Parquet files from Amazon S3, Google Cloud Storage (GCS), Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud - You can import Parquet files from Amazon S3, Google Cloud Storage (GCS), or Azure Blob Storage into TiDB Cloud. For more information, see [Import Apache Parquet Files from Cloud Storage into TiDB Cloud Serverless](/tidb-cloud/import-parquet-files-serverless.md) and [Import Apache Parquet Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-parquet-files.md). + You can import Parquet files from Amazon S3, Google Cloud Storage (GCS), Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud. For more information, see [Import Apache Parquet Files from Cloud Storage into {{{ .starter }}} or Essential](/tidb-cloud/import-parquet-files-serverless.md) and [Import Apache Parquet Files from Cloud Storage into TiDB Cloud Dedicated](/tidb-cloud/import-parquet-files.md). 
## Reference ### Configure cloud storage access -If your source data is stored in Amazon S3, Google Cloud Storage (GCS) buckets, or Azure Blob Storage containers, before importing or migrating the data to TiDB Cloud, you need to configure access to the storage. For more information, see [Configure External Storage Access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md) and [Configure External Storage Access for TiDB Cloud Dedicated](/tidb-cloud/dedicated-external-storage.md). +If your source data is stored in Amazon S3, Google Cloud Storage (GCS) buckets, Azure Blob Storage containers, or Alibaba Cloud OSS buckets, before importing or migrating the data to TiDB Cloud, you need to configure access to the storage. For more information, see [Configure External Storage Access for {{{ .starter }}} or Essential](/tidb-cloud/configure-external-storage-access.md) and [Configure External Storage Access for TiDB Cloud Dedicated](/tidb-cloud/dedicated-external-storage.md). ### Naming conventions for data import diff --git a/tidb-cloud/tidb-cloud-org-sso-authentication.md b/tidb-cloud/tidb-cloud-org-sso-authentication.md index 227e9d58eaea2..dc25790b6d6e9 100644 --- a/tidb-cloud/tidb-cloud-org-sso-authentication.md +++ b/tidb-cloud/tidb-cloud-org-sso-authentication.md @@ -58,7 +58,7 @@ All the enabled authentication methods will be displayed on your custom TiDB Clo Auto-provision is a feature that allows members to automatically join an organization without requiring an invitation from the `Organization Owner` or `Project Owner`. In TiDB Cloud, it is disabled by default for all the supported authentication methods. - When auto-provision is disabled for an authentication method, only users who have been invited by an `Organization Owner` or `Project Owner` can log in to your custom URL. -- When auto-provision is enabled for an authentication method, any users using this authentication method can log in to your custom URL. 
After login, they are automatically assigned the default **Member** role within the organization. +- When auto-provision is enabled for an authentication method, any users using this authentication method can log in to your custom URL. After login, they are automatically assigned the default `Organization Viewer` role within the organization. For security considerations, if you choose to enable auto-provision, it is recommended to limit the allowed email domains for authentication when you [configure the authentication method details](#step-2-configure-authentication-methods). diff --git a/tidb-cloud/tidb-cloud-poc.md b/tidb-cloud/tidb-cloud-poc.md index 9d489ccd2352a..a58bd22627146 100644 --- a/tidb-cloud/tidb-cloud-poc.md +++ b/tidb-cloud/tidb-cloud-poc.md @@ -5,7 +5,7 @@ summary: Learn about how to perform a Proof of Concept (PoC) with TiDB Cloud. # Perform a Proof of Concept (PoC) with TiDB Cloud -TiDB Cloud is a Database-as-a-Service (DBaaS) product that delivers everything great about TiDB in a fully managed cloud database. It helps you focus on your applications, instead of the complexities of your database. TiDB Cloud is currently available on Amazon Web Services (AWS), Google Cloud, and Microsoft Azure. +TiDB Cloud is a Database-as-a-Service (DBaaS) product that delivers everything great about TiDB in a fully managed cloud database. It helps you focus on your applications, instead of the complexities of your database. TiDB Cloud is currently available on Amazon Web Services (AWS), Google Cloud, Microsoft Azure, and Alibaba Cloud.TiDB Cloud is currently available on Amazon Web Services (AWS), Google Cloud, and Microsoft Azure. Initiating a proof of concept (PoC) is the best way to determine whether TiDB Cloud is the best fit for your business needs. It will also get you familiar with the key features of TiDB Cloud in a short time. By running performance tests, you can see whether your workload can run efficiently on TiDB Cloud. 
You can also evaluate the efforts required to migrate your data and adapt configurations. @@ -13,7 +13,7 @@ This document describes the typical PoC procedures and aims to help you quickly If you are interested in doing a PoC, feel free to contact PingCAP before you get started. The support team can help you create a test plan and walk you through the PoC procedures smoothly. -Alternatively, you can [create a TiDB Cloud Serverless](/tidb-cloud/tidb-cloud-quickstart.md#step-1-create-a-tidb-cluster) to get familiar with TiDB Cloud for a quick evaluation. Note that the TiDB Cloud Serverless has some [special terms and conditions](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless-special-terms-and-conditions). +Alternatively, you can [create a {{{ .starter }}}](/tidb-cloud/tidb-cloud-quickstart.md#step-1-create-a-tidb-cluster) to get familiar with TiDB Cloud for a quick evaluation. Note that the {{{ .starter }}} has some [special terms and conditions](/tidb-cloud/serverless-limitations.md). ## Overview of the PoC procedures @@ -60,15 +60,23 @@ You might also be interested in using [TiFlash](https://docs.pingcap.com/tidb/st To create a [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster for the PoC, take the following steps: -1. Fill in the PoC application form by doing one of the following: +1. Fill in the PoC application form. - - On the PingCAP website, go to the [Apply for PoC](https://pingcap.com/apply-for-poc/) page to fill in the application form. - - In the [TiDB Cloud console](https://tidbcloud.com/), click **?** in the lower-right corner, click **Contact Sales**, and then select **Apply for PoC** to fill in the application form. + In the [TiDB Cloud console](https://tidbcloud.com/), click **?** in the lower-right corner, click **Contact Sales**, and then select **Apply for PoC** to fill in the application form. 
Once you submit the form, the TiDB Cloud Support team will review your application, contact you, and transfer credits to your account once the application is approved. You can also contact a PingCAP support engineer to assist with your PoC procedures to ensure the PoC runs as smoothly as possible. 2. Refer to [Create a TiDB Cloud Dedicated Cluster](/tidb-cloud/create-tidb-cluster.md) to create a TiDB Cloud Dedicated cluster for the PoC. + > **Note:** + > + > Before creating a TiDB Cloud Dedicated cluster, you must add one of the following payment methods: + > - Add a credit card by following the on-screen instructions on the cluster creation page. + > - Contact the TiDB Cloud Support team to pay by wire transfer. + > - Sign up for TiDB Cloud through a cloud marketplace (AWS, Azure, or Google Cloud) to pay using your cloud provider account. + > + > Your PoC credits will automatically be used to offset eligible expenses incurred during the PoC period. + Capacity planning is recommended for cluster sizing before you create a cluster. You can start with estimated numbers of TiDB, TiKV, or TiFlash nodes, and scale out the cluster later to meet performance requirements. You can find more details in the following documents or consult our support team. - For more information about estimation practice, see [Size Your TiDB](/tidb-cloud/size-your-cluster.md). @@ -87,7 +95,7 @@ For a newly created cluster, note the following configurations: Next, you can load your database schemas to the TiDB cluster, including tables and indexes. -Because the amount of PoC credits is limited, to maximize the value of credits, it is recommended that you create a [TiDB Cloud Serverless cluster](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) for compatibility tests and preliminary analysis on TiDB Cloud. 
+Because the amount of PoC credits is limited, to maximize the value of credits, it is recommended that you create a [{{{ .starter }}} cluster](/tidb-cloud/select-cluster-tier.md#starter) for compatibility tests and preliminary analysis on TiDB Cloud. TiDB Cloud is highly compatible with MySQL 8.0. You can directly import your data into TiDB if it is MySQL-compatible or can be adapted to be compatible with MySQL. @@ -175,7 +183,7 @@ Now the workload testing is finished, you can explore more features, for example - Backup - To avoid vendor lock-in, you can use daily full backup to migrate data to a new cluster and use [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview) to export data. For more information, see [Back Up and Restore TiDB Cloud Dedicated Data](/tidb-cloud/backup-and-restore.md#turn-on-auto-backup) and [Back Up and Restore TiDB Cloud Serverless Data](/tidb-cloud/backup-and-restore-serverless.md). + To avoid vendor lock-in, you can use daily full backup to migrate data to a new cluster and use [Dumpling](https://docs.pingcap.com/tidb/stable/dumpling-overview) to export data. For more information, see [Back Up and Restore TiDB Cloud Dedicated Data](/tidb-cloud/backup-and-restore.md#turn-on-auto-backup) and [Back Up and Restore Data on {{{ .starter }}} or Essential](/tidb-cloud/backup-and-restore-serverless.md). ## Step 8. Clean up the environment and finish the PoC @@ -187,8 +195,6 @@ If your credits are running out and you want to continue with the PoC, contact t You can end the PoC and remove the test environment anytime. For more information, see [Delete a TiDB Cluster](/tidb-cloud/delete-tidb-cluster.md). -Any feedback to our support team is highly appreciated by filling in the [TiDB Cloud Feedback form](https://www.surveymonkey.com/r/L3VVW8R), such as the PoC process, the feature requests, and how we can improve the products. - ## FAQ ### 1. How long does it take to back up and restore my data? 
@@ -212,11 +218,7 @@ You can scale out clusters on the console by yourself. If you need to scale in a Once your application for the PoC is approved, you will receive credits in your account. Generally, the credits are sufficient for a 14-day PoC. The credits are charged by the type of nodes and the number of nodes, on an hourly basis. For more information, see [TiDB Cloud Billing](/tidb-cloud/tidb-cloud-billing.md#credits). -To check the credits left for your PoC, go to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your target project, as shown in the following screenshot. - -![TiDB Cloud PoC Credits](/media/tidb-cloud/poc-points.png) - -Alternatively, you can also switch to your target organization using the combo box in the upper-left corner of the TiDB Cloud console, click **Billing** in the left navigation pane, and then click the **Credits** tab to view the credit information. +To check the total credits, available credits, and current credit usage for your PoC, switch to your target organization using the combo box in the upper-left corner of the TiDB Cloud console, click **Billing** in the left navigation pane, and then click the **Credits** tab. To save credits, remove the cluster that you are not using. Currently, you cannot stop a cluster. You need to ensure that your backups are up to date before removing a cluster, so you can restore the cluster later when you want to resume your PoC. diff --git a/tidb-cloud/tidb-cloud-quickstart.md b/tidb-cloud/tidb-cloud-quickstart.md index 1612839485d94..60b4f5cf1d3b3 100644 --- a/tidb-cloud/tidb-cloud-quickstart.md +++ b/tidb-cloud/tidb-cloud-quickstart.md @@ -8,13 +8,13 @@ category: quick start *Estimated completion time: 20 minutes* -This tutorial guides you through an easy way to get started with TiDB Cloud. You can also follow the step-by-step tutorials on the [**Getting Started**](https://tidbcloud.com/getting-started) page in the TiDB Cloud console. 
+This tutorial guides you through an easy way to get started with TiDB Cloud. Additionally, you can try out TiDB features on [TiDB Playground](https://play.tidbcloud.com/?utm_source=docs&utm_medium=tidb_cloud_quick_start). ## Step 1: Create a TiDB cluster -[TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) is the best way to get started with TiDB Cloud. To create a TiDB Cloud Serverless cluster, follow these steps: +[TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) is the best way to get started with TiDB Cloud. To create a {{{ .starter }}} cluster, follow these steps: 1. If you do not have a TiDB Cloud account, click [here](https://tidbcloud.com/free-trial) to sign up. @@ -24,26 +24,42 @@ Additionally, you can try out TiDB features on [TiDB Playground](https://play.ti The [**Clusters**](https://tidbcloud.com/project/clusters) page is displayed by default. -3. For new sign-up users, TiDB Cloud automatically creates a default TiDB Cloud Serverless cluster named `Cluster0` for you. +3. For new sign-up users, TiDB Cloud automatically creates a default {{{ .starter }}} cluster named `Cluster0` for you. - To instantly try out TiDB Cloud features with this default cluster, proceed to [Step 2: Try AI-assisted SQL Editor](#step-2-try-ai-assisted-sql-editor). - - To create a new TiDB Cloud Serverless cluster on your own, follow these steps: + - To create a new {{{ .starter }}} cluster on your own, follow these steps: 1. Click **Create Cluster**. - 2. On the **Create Cluster** page, **Serverless** is selected by default. Select the target region for your cluster, update the default cluster name if necessary, select your [cluster plan](/tidb-cloud/select-cluster-tier.md#cluster-plans), and then click **Create**. Your TiDB Cloud Serverless cluster will be created in approximately 30 seconds. + 2. On the **Create Cluster** page, **Starter** is selected by default. 
Select the cloud provider and target region for your cluster, update the default cluster name if necessary, and then click **Create**. Your {{{ .starter }}} cluster will be created in approximately 30 seconds. + + + + > **Note** + > + > Currently, {{{ .starter }}} is generally available on AWS and in public preview on Alibaba Cloud. The subsequent steps in this document use AWS as an example. + + + + + + > **Note** + > + > Currently, {{{ .starter }}} is generally available on AWS. The subsequent steps in this document use AWS as an example. + + ## Step 2: Try AI-assisted SQL Editor -You can use the built-in AI-assisted SQL Editor in the TiDB Cloud console to maximize your data value. This enables you to run SQL queries against databases without a local SQL client. You can intuitively view the query results in tables or charts and easily check the query logs. +For {{{ .starter }}} clusters hosted on AWS, you can use the built-in AI-assisted SQL Editor in the TiDB Cloud console to maximize your data value. This enables you to run SQL queries against databases without a local SQL client. You can intuitively view the query results in tables or charts and easily check the query logs. -1. On the [**Clusters**](https://tidbcloud.com/project/clusters) page, click on a cluster name to go to its overview page, and then click **SQL Editor** in the left navigation pane. +1. On the [**Clusters**](https://tidbcloud.com/project/clusters) page, click a cluster name to go to its overview page, and then click **SQL Editor** in the left navigation pane. 2. To try the AI capacity of TiDB Cloud, follow the on-screen instructions to allow PingCAP and AWS Bedrock to use your code snippets for research and service improvement, and then click **Save and Get Started**. 3. In SQL Editor, press + I on macOS (or Control + I on Windows or Linux) to instruct [Chat2Query (beta)](/tidb-cloud/tidb-cloud-glossary.md#chat2query) to generate SQL queries automatically. 
- For example, to create a new table `test.t` with two columns (column `id` and column `name`), you can type `use test;` to specify the database, press + I, type `create a new table t with id and name` as the instruction, and then press **Enter** to let AI generate a SQL statement accordingly. - + For example, to create a new table `test.t` with two columns (column `id` and column `name`), you can type `use test;` to specify the database, press + I, type `create a new table t with id and name` as the instruction, and then press **Enter** to let AI generate a SQL statement accordingly. + For the generated statement, you can accept it by clicking **Accept** and then further edit it if needed, or reject it by clicking **Discard**. > **Note:** @@ -105,18 +121,17 @@ FROM `t`; ``` -## Step 3: Try interactive tutorials +## Step 3: Try guided tour on the console -TiDB Cloud offers interactive tutorials with carefully crafted sample datasets to help you quickly get started with TiDB Cloud. You can try these tutorials to learn how to use TiDB Cloud for high-performance data analytics. +TiDB Cloud offers an interactive tutorial with carefully crafted sample datasets to help you quickly get started with TiDB Cloud. For {{{ .starter }}} clusters hosted on AWS, you can try this tutorial to learn how to use TiDB Cloud for high-performance data analytics. -1. Click on the **?** icon in the lower-right corner of the console and select **Interactive Tutorials**. -2. In the tutorials list, select a tutorial card to start, such as **Steam Game Stats**. -3. Choose a TiDB Cloud Serverless cluster that you want to use for the tutorial, and click **Import Dataset**. The import process might take approximately one minute. -4. Once the sample data is imported, follow the on-screen instructions to complete the tutorial. +1. Click the **?** icon in the lower-right corner of the console and select **Guided tour of SQL Editor**. +2. 
Choose a {{{ .starter }}} cluster that you want to use for the tour, and click **Import Dataset**. The import process might take approximately one minute. +3. Once the sample data is imported, follow the on-screen instructions to complete the tour. ## What's next -- To learn how to connect to your cluster using different methods, see [Connect to a TiDB Cloud Serverless cluster](/tidb-cloud/connect-to-tidb-cluster-serverless.md). +- To learn how to connect to your cluster using different methods, see [Connect to a {{{ .starter }}} or Essential cluster](/tidb-cloud/connect-to-tidb-cluster-serverless.md). - For more information about how to use SQL Editor and Chat2Query to explore your data, see [Explore your data with AI-assisted SQL Editor](/tidb-cloud/explore-data-with-chat2query.md). - For TiDB SQL usage, see [Explore SQL with TiDB](/basic-sql-operations.md). - For production use with the benefits of cross-zone high availability, horizontal scaling, and [HTAP](https://en.wikipedia.org/wiki/Hybrid_transactional/analytical_processing), see [Create a TiDB Cloud Dedicated cluster](/tidb-cloud/create-tidb-cluster.md). diff --git a/tidb-cloud/tidb-cloud-release-notes.md b/tidb-cloud/tidb-cloud-release-notes.md deleted file mode 100644 index e8c7e4ec8da50..0000000000000 --- a/tidb-cloud/tidb-cloud-release-notes.md +++ /dev/null @@ -1,327 +0,0 @@ ---- -title: TiDB Cloud Release Notes in 2025 -summary: Learn about the release notes of TiDB Cloud in 2025. -aliases: ['/tidbcloud/supported-tidb-versions','/tidbcloud/release-notes'] ---- - -# TiDB Cloud Release Notes in 2025 - -This page lists the release notes of [TiDB Cloud](https://www.pingcap.com/tidb-cloud/) in 2025. - -## June 24, 2025 - -**General changes** - -- [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) database audit logging (beta) is now available upon request. This feature lets you record a history of user access details (such as any SQL statements executed) in logs. 
- - To request this feature, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com) and click **Request Support**. Then, fill in "Apply for TiDB Cloud Serverless database audit logging" in the Description field and click **Submit**. - - For more information, see [TiDB Cloud Serverless Database Audit Logging](/tidb-cloud/serverless-audit-logging.md). - -- [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) supports user-controlled log redaction. - - You can now enable or disable log redaction for your TiDB Cloud Dedicated clusters to manage the redaction status of cluster logs by yourself. - - For more information, see [User-Controlled Log Redaction](/tidb-cloud/tidb-cloud-log-redaction.md). - -- Encryption at Rest with Customer-Managed Encryption Keys (CMEK) is now generally available (GA) for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on AWS. - - This feature enables you to secure your data at rest by leveraging a symmetric encryption key that you manage through Key Management Service (KMS). - - For more information, see [Encryption at Rest Using Customer-Managed Encryption Keys](/tidb-cloud/tidb-cloud-encrypt-cmek.md). - -## June 17, 2025 - -**General changes** - -- For [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, the maximum storage size of TiKV nodes with 16 vCPU and 32 vCPU is changed from **6144 GiB** to **4096 GiB**. - - For more information, see [TiKV node storage size](/tidb-cloud/size-your-cluster.md#tikv-node-storage-size). - -**Console changes** - -- Revamp the left navigation pane to improve the overall navigation experience. - - - A new icon is now available in the upper-left corner, letting you easily hide or show the left navigation pane whenever you need. 
- - A combo box is now available in the upper-left corner, letting you quickly switch between organizations, projects, and clusters, all from one central location. - - - - - The entries shown on the left navigation pane now dynamically adapt to your current selection in the combo box, helping you focus on the most relevant functionalities. - - For your quick access, **Support**, **Notification**, and your account entries are now consistently displayed at the bottom of the left navigation pane on all console pages. - -## June 4, 2025 - -**General changes** - -- [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) on Microsoft Azure is now available in public preview. - - With this launch, TiDB Cloud now supports all three major public cloud platforms — AWS, Google Cloud, and Azure, which enables you to deploy TiDB Cloud Dedicated clusters wherever best fits your business needs and cloud strategy. - - - All core features available on AWS and Google Cloud are fully supported on Azure. - - Azure support is currently available in three regions: East US 2, Japan East, and Southeast Asia, with more regions coming soon. - - TiDB Cloud Dedicated clusters on Azure require TiDB version v7.5.3 or later. - - To quickly get started with TiDB Cloud Dedicated on Azure, see the following documentation: - - - [Create a TiDB Cloud Dedicated Cluster on Azure](/tidb-cloud/create-tidb-cluster.md) - - [Connect a TiDB Cloud Dedicated Cluster via Azure Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections-on-azure.md) - - [Import Data into TiDB Cloud Dedicated Cluster on Azure](/tidb-cloud/import-csv-files.md) - -- The Prometheus integration provides more metrics to enhance monitoring capabilities of [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. 
- - Now you can integrate additional metrics, such as `tidbcloud_disk_read_latency` and `tidbcloud_kv_request_duration`, into Prometheus to track more aspects of your TiDB Cloud Dedicated performance. - - For more information on available metrics and how to enable them for both existing and new users, see [Integrate TiDB Cloud with Prometheus and Grafana (Beta)](/tidb-cloud/monitor-prometheus-and-grafana-integration.md#metrics-available-to-prometheus). - -- TiKV [Standard](/tidb-cloud/size-your-cluster.md#standard-storage) and [Performance](/tidb-cloud/size-your-cluster.md#performance-and-plus-storage) storage pricing is officially released. - - The discount period ends at **00:00 UTC on June 5, 2025**. After that, the price returns to the standard rate. For more information about TiDB Cloud Dedicated prices, see [TiDB Cloud Dedicated Pricing Details](https://www.pingcap.com/tidb-dedicated-pricing-details/#node-cost). - -**Console changes** - -- Enhance the interactive experience when configuring the size of TiFlash nodes of [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. - - You can now use a toggle switch to control the TiFlash configuration when creating a TiDB Cloud Dedicated cluster, which makes the configuration experience more intuitive and seamless. - -## May 27, 2025 - -**General changes** - -- Support streaming data to [Apache Pulsar](https://pulsar.apache.org) with changefeeds for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. - - This feature enables you to integrate your TiDB Cloud Dedicated cluster with a wider range of downstream systems, and accommodates additional data integration requirements. To use this feature, make sure that your TiDB Cloud Dedicated cluster version is v7.5.1 or later. - - For more information, see [Sink to Apache Pulsar](/tidb-cloud/changefeed-sink-to-apache-pulsar.md). 
- -## May 13, 2025 - -**General changes** - -- Full-text search (beta) now available in [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) for AI applications. - - TiDB Cloud Serverless now supports full-text search (beta), enabling AI and Retrieval-Augmented Generation (RAG) applications to retrieve content by exact keywords. This complements vector search, which retrieves content by semantic similarity. Combining both methods significantly improves retrieval accuracy and answer quality in RAG workflows. Key features include: - - - Direct text search: query string columns directly without the need for embeddings. - - Multilingual support: automatically detects and analyzes text in multiple languages, even within the same table, without requiring language specification. - - Relevance-based ranking: results are ranked using the industry-standard BM25 algorithm for optimal relevance. - - Native SQL compatibility: seamlessly use SQL features such as filtering, grouping, and joining with full-text search. - - To get started, see [Full Text Search with SQL](/tidb-cloud/vector-search-full-text-search-sql.md) or [Full Text Search with Python](/tidb-cloud/vector-search-full-text-search-python.md). - -- Increase the maximum TiFlash node storage for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) cluster: - - - For 8 vCPU TiFlash, from 2048 GiB to 4096 GiB - - For 32 vCPU TiFlash, from 4096 GiB to 8192 GiB - - This enhancement increases the analytics data storage capacity of your TiDB Cloud Dedicated cluster, improves workload scaling efficiency, and accommodates growing data requirements. - - For more information, see [TiFlash node storage](/tidb-cloud/size-your-cluster.md#tiflash-node-storage). - -- Enhance the maintenance window configuration experience by providing intuitive options to configure and reschedule maintenance tasks. 
- - For more information, see [Configure maintenance window](/tidb-cloud/configure-maintenance-window.md). - -- Extend the discount period for TiKV [Standard](/tidb-cloud/size-your-cluster.md#standard-storage) and [Performance](/tidb-cloud/size-your-cluster.md#performance-and-plus-storage) storage types. The promotion now ends on June 5, 2025. After this date, pricing will return to the standard rate. - -**Console changes** - -- Refine the **Backup Setting** page layout to improve the backup configuration experience in [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters. - - For more information, see [Back Up and Restore TiDB Cloud Dedicated Data](/tidb-cloud/backup-and-restore.md). - -## April 22, 2025 - -**General changes** - -- Data export to Alibaba Cloud OSS is now supported. - - [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters now support exporting data to [Alibaba Cloud Object Storage Service (OSS)](https://www.alibabacloud.com/en/product/object-storage-service) using an [AccessKey pair](https://www.alibabacloud.com/help/en/ram/user-guide/create-an-accesskey-pair). - - For more information, see [Export Data from TiDB Cloud Serverless](/tidb-cloud/serverless-export.md#alibaba-cloud-oss). - -## April 15, 2025 - -**General changes** - -- Support importing data from [Alibaba Cloud Object Storage Service (OSS)](https://www.alibabacloud.com/en/product/object-storage-service) into [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. - - This feature simplifies data migration to TiDB Cloud Serverless. You can use an AccessKey pair to authenticate. 
- - For more information, see the following documentation: - - - [Import CSV Files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud Serverless](/tidb-cloud/import-csv-files-serverless.md) - - [Import Apache Parquet Files from Amazon S3, GCS, Azure Blob Storage, or Alibaba Cloud OSS into TiDB Cloud Serverless](/tidb-cloud/import-parquet-files-serverless.md) - -## April 1, 2025 - -**General changes** - -- The [TiDB Node Groups](/tidb-cloud/tidb-node-group-overview.md) feature is now generally available (GA) for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on AWS and Google Cloud. - - This feature enables **fine-grained computing resource isolation** within a single cluster, helping you optimize performance and resource allocation for multi-tenant or multi-workload scenarios. - - **Key benefits:** - - - **Resource isolation**: - - - Group TiDB nodes into logically isolated units, ensuring workloads in one group do not affect other groups. - - Prevent resource contention between applications or business units. - - - **Simplified management**: - - - Manage all node groups within a single cluster, reducing operational overhead. - - Scale groups independently based on demand. - - For more information about the benefits, see [the technical blog](https://www.pingcap.com/blog/tidb-cloud-node-groups-scaling-workloads-predictable-performance/). To get started, see [Manage TiDB Node Groups](/tidb-cloud/tidb-node-group-management.md). - -- Introduce the [Standard storage](/tidb-cloud/size-your-cluster.md#standard-storage) type for TiKV nodes in [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters hosted on AWS. - - The Standard storage type is ideal for most workloads, providing a balance between performance and cost efficiency. 
- - **Key benefits:** - - - **Improved performance**: Reserves sufficient disk resources for Raft logs, reducing I/O contention between Raft and data storage, thereby improving both the read and write performance of TiKV. - - **Enhanced stability**: Isolates critical Raft operations from data workloads, ensuring more predictable performance. - - **Cost efficiency**: Delivers higher performance at a competitive price compared with the previous storage type. - - **Availability:** - - The Standard storage type is automatically applied to new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters created on or after April 1, 2025, hosted on AWS, with supported versions (versions >= 7.5.5, 8.1.2, or 8.5.0). Existing clusters still use the previous [Basic storage](/tidb-cloud/size-your-cluster.md#basic-storage) type, and no migration is needed. - - The price of the Standard storage differs from that of the Basic storage. For more information, see [Pricing](https://www.pingcap.com/tidb-dedicated-pricing-details/). - -## March 25, 2025 - -**Console changes** - -- Support firewall rules for public endpoints in [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. - - You can now configure firewall rules for TiDB Cloud Serverless clusters to control access via public endpoints. Specify allowed IP addresses or ranges directly in the [TiDB Cloud console](https://tidbcloud.com/) to enhance security. - - For more information, see [Configure TiDB Cloud Serverless Firewall Rules for Public Endpoints](/tidb-cloud/configure-serverless-firewall-rules-for-public-endpoints.md). - -## March 18, 2025 - -**General changes** - -- Support creating TiDB node groups for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters deployed on Google Cloud to enhance resource management flexibility. 
- - For more information, see [Overview of TiDB Node Group](/tidb-cloud/tidb-node-group-overview.md). - -- Support storing database audit log files in TiDB Cloud for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters deployed on AWS. - - You can download these audit log files directly from TiDB Cloud. Note that this feature is only available upon request. - - For more information, see [Database Audit Logging](/tidb-cloud/tidb-cloud-auditing.md). - -- Enhance TiDB Cloud account security by improving the management of multi-factor authentication (MFA). This feature applies to password-based logins for TiDB Cloud. - - For more information, see [Password Authentication](/tidb-cloud/tidb-cloud-password-authentication.md). - -## February 18, 2025 - -**Console changes** - -- Introduce Connected Care, the new support services for TiDB Cloud. - - The Connected Care services are designed to strengthen your connection with TiDB Cloud through modern communication tools, proactive support, and advanced AI capabilities, delivering a seamless and customer-centric experience. - - The Connected Care services introduce the following features: - - - **Clinic service**: Advanced monitoring and diagnostics to optimize performance. - - **AI chat in IM**: Get immediate AI assistance through an instant message (IM) tool. - - **IM subscription for alerts and ticket updates**: Stay informed with alerts and ticket progress via IM. - - **IM interaction for support tickets**: Create and interact with support tickets through an IM tool. - - For more information, see [Connected Care Overview](/tidb-cloud/connected-care-overview.md). - -- Support importing data from GCS and Azure Blob Storage into [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. - - TiDB Cloud Serverless now supports importing data from Google Cloud Storage (GCS) and Azure Blob Storage. 
You can use a Google Cloud service account key or an Azure shared access signature (SAS) token to authenticate. This feature simplifies data migration to TiDB Cloud Serverless. - - For more information, see [Import CSV Files from Amazon S3, GCS, or Azure Blob Storage into TiDB Cloud Serverless](/tidb-cloud/import-csv-files-serverless.md) and [Import Apache Parquet Files from Amazon S3, GCS, or Azure Blob Storage into TiDB Cloud Serverless](/tidb-cloud/import-parquet-files-serverless.md). - -## January 21, 2025 - -**Console changes** - -- Support importing a single local CSV file of up to 250 MiB per task to [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters, increased from the previous limit of 50 MiB. - - For more information, see [Import Local Files to TiDB Cloud](/tidb-cloud/tidb-cloud-import-local-files.md). - -## January 14, 2025 - -**General changes** - -- Support a new AWS region for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters: `Jakarta (ap-southeast-3)`. - -- Introduce the Notification feature, which enables you to stay informed instantly with TiDB Cloud updates and alerts through the [TiDB Cloud console](https://tidbcloud.com/). - - For more information, see [Notifications](/tidb-cloud/notifications.md). - -## January 2, 2025 - -**General changes** - -- Support creating TiDB node groups for [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters to enhance resource management flexibility. - - For more information, see [Overview of TiDB Node Group](/tidb-cloud/tidb-node-group-overview.md). - -- Support connecting [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters to generic Kafka in AWS and Google Cloud through Private Connect (beta). 
- - Private Connect leverages Private Link or Private Service Connect technologies from cloud providers to enable changefeeds in the TiDB Cloud VPC to connect to Kafka in customers' VPCs using private IP addresses, as if those Kafkas were hosted directly within the TiDB Cloud VPC. This feature helps prevent VPC CIDR conflicts and meets security compliance requirements. - - - For Apache Kafka in AWS, follow the instructions in [Set Up Self-Hosted Kafka Private Link Service in AWS](/tidb-cloud/setup-aws-self-hosted-kafka-private-link-service.md) to configure the network connection. - - - For Apache Kafka in Google Cloud, follow the instructions in [Set Up Self-Hosted Kafka Private Service Connect in Google Cloud](/tidb-cloud/setup-self-hosted-kafka-private-service-connect.md) to configure the network connection. - - Note that using this feature incurs additional [Private Data Link costs](/tidb-cloud/tidb-cloud-billing-ticdc-rcu.md#private-data-link-cost). - - For more information, see [Changefeed Sink to Apache Kafka](/tidb-cloud/changefeed-sink-to-apache-kafka.md#network). - -- Introduce additional configurable options for Kafka changefeeds: - - - Support using the Debezium protocol. Debezium is a tool for capturing database changes. It converts each captured database change into a message called an event, and sends these events to Kafka. For more information, see [TiCDC Debezium Protocol](https://docs.pingcap.com/tidb/v8.1/ticdc-debezium). - - - Support defining a single partition dispatcher for all tables, or different partition dispatchers for different tables. - - - Introduce two new dispatcher types for the partition distribution of Kafka messages: timestamp and column value. - - For more information, see [Sink to Apache Kafka](/tidb-cloud/changefeed-sink-to-apache-kafka.md). - -- Enhance roles in TiDB Cloud: - - - Introduce the `Project Viewer` and `Organization Billing Viewer` roles to enhance granular access control on TiDB Cloud. 
- - - Rename the following roles: - - - `Organization Member` to `Organization Viewer` - - `Organization Billing Admin` to `Organization Billing Manager` - - `Organization Console Audit Admin` to `Organization Console Audit Manager` - - For more information, see [Identity Access Management](/tidb-cloud/manage-user-access.md#organization-roles). - -- Regional high availability (beta) for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. - - This feature is designed for workloads that require maximum infrastructure redundancy and business continuity. Key functions include: - - - Nodes are distributed across multiple availability zones to ensure high availability in the event of a zone failure. - - Critical OLTP (Online Transactional Processing) components, such as PD and TiKV, are replicated across availability zones for redundancy. - - Automatic failover minimizes service disruption during a primary zone failure. - - This feature is currently available only in the AWS Tokyo (ap-northeast-1) region and can be enabled only during cluster creation. - - For more information, see [High Availability in TiDB Cloud Serverless](/tidb-cloud/serverless-high-availability.md). - -- Upgrade the default TiDB version of new [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters from [v8.1.1](https://docs.pingcap.com/tidb/v8.1/release-8.1.1) to [v8.1.2](https://docs.pingcap.com/tidb/v8.1/release-8.1.2). - -**Console changes** - -- Strengthen the data export service: - - - Support exporting data from [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) to Google Cloud Storage and Azure Blob Storage through the [TiDB Cloud console](https://tidbcloud.com/). - - - Support exporting data in Parquet files through the [TiDB Cloud console](https://tidbcloud.com/). 
- - For more information, see [Export Data from TiDB Cloud Serverless](/tidb-cloud/serverless-export.md) and [Configure External Storage Access for TiDB Cloud Serverless](/tidb-cloud/serverless-external-storage.md). diff --git a/tidb-cloud/tidb-cloud-roadmap.md b/tidb-cloud/tidb-cloud-roadmap.md new file mode 100644 index 0000000000000..202280b0c1204 --- /dev/null +++ b/tidb-cloud/tidb-cloud-roadmap.md @@ -0,0 +1,205 @@ +--- +title: TiDB Cloud Roadmap +summary: Learn about TiDB Cloud's roadmap for the next few months. See the new features or improvements in advance, follow the progress, learn about the key milestones on the way. +--- + +# TiDB Cloud Roadmap + +> **Warning:** +> +> This roadmap might contain outdated information. We are working on updating it to reflect the latest product plans and development priorities. + +The TiDB Cloud roadmap brings you what's coming in the near future, so you can see the new features or improvements in advance, follow the progress, and learn about the key milestones on the way. In the course of development, this roadmap is subject to change based on user needs, feedback, and our assessment. + +✅: The feature or improvement is already available in TiDB Cloud. + +> **Safe harbor statement:** +> +> Any unreleased features discussed or referenced in our documents, roadmaps, blogs, websites, press releases, or public statements that are not currently available ("unreleased features") are subject to change at our discretion and may not be delivered as planned or at all. Customers acknowledge that purchase decisions are solely based on features and functions that are currently available, and that PingCAP is not obliged to deliver aforementioned unreleased features as part of the contractual agreement unless otherwise stated. + +## Developer experience and enterprise-grade features + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DomainFeatureDescription
Developer experience✅ Load sample datasets manually.Support loading sample datasets into a cluster. You can use this data to quickly get started with testing the features of TiDB Cloud.
✅ Add Chat2Query (an AI-powered SQL editor).In Chat2Query, you can either let AI generate SQL queries automatically or write SQL queries manually, and run SQL queries against databases without a terminal.
✅ Support Data Service.With Data Service (beta), you can read or write TiDB Cloud data via an HTTPS request using a custom API endpoint.
Cloud provider marketplace✅ Improve the user experience from AWS Marketplace and Google Cloud Marketplace.Improve the user journey and experience of users who sign up from AWS Marketplace and Google Cloud Marketplace.
Enterprise-grade features✅ Manage users in multiple organizations.Allow a user to join multiple organizations by accepting the invitations.
✅ Support hierarchical user roles and permissions.Support role-based access control (RBAC) for the TiDB Cloud console. You can manage user permissions in a fine-grained manner, such as by cluster, billing, and member.
UI experience✅ Provide a more convenient feedback channel.Users can quickly get help with and give feedback on the product.
✅ Add left navigation.Present the TiDB Cloud console in the structure of organizations, projects, and users to simplify the layout logic and improve user experience.
Optimize Playground.Deliver context-driven tutorials to facilitate a deeper understanding of TiDB and TiDB Cloud for our users.
+ +## TiDB kernel + +For the roadmap of TiDB kernel, refer to [TiDB Roadmap](https://docs.pingcap.com/tidb/dev/tidb-roadmap). + +## Diagnosis and maintenance + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DomainFeatureDescription
Self-service cluster analysis and diagnosis using reports✅ Cluster health report.Provide diagnosis and analysis reports for several different usage scenarios.
✅ Cluster status comparison report.Locate cluster failures for some scenarios and provide recommended solutions.
✅ Cluster system check report.Provide cluster key status summary for some scenarios.
SQL tuning for HTAP workloadsProvide suggestions on optimizing SQL for TiFlash and TiKV in HTAP workloads.Provide a dashboard that displays a SQL execution overview from the perspective of applications in HTAP workloads.
Provide SQL execution information from the perspective of applications.For one or several HTAP scenarios, provide suggestions on SQL optimization.
Cluster diagnosis data accessibility ✅ Access diagnosis data online in real time.Integrate with various monitoring and diagnosis systems to improve the real-time data access capability.
✅ Access diagnosis data offline.Provide offline data access for large-scale diagnosis, analysis, and tuning.
Build logic for data reconstruction.Improve data stability and build logic for data reconstruction.
TiDB Cloud service tracingBuild the monitoring links for each component of TiDB Cloud service. +
  • Build the tracing links for each component of TiDB Cloud service in user scenarios.
  • +
  • Provide assessment on service availability from the perspective of users.
+
+ +## Data backup and migration + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DomainFeatureDescription
Data replication to Kafka/MySQL✅ TiDB Cloud supports replicating data to Kafka/MySQL.TiDB Cloud supports TiCDC-based data replication to Kafka and MySQL compatible databases.
Backup and Restore✅ Support EBS snapshot-based backup and restore.BR service on TiDB Cloud uses EBS snapshot-based backup and restore.
Backup and restoreBackup and restore service based on AWS EBS or Google Cloud persistent disk snapshots.Provide backup and restore service on the cloud based on AWS EBS or Google Cloud persistent disk snapshots.
Online data migration✅ Support full data migration from Amazon Relational Database Service (RDS).Full data migration from RDS to TiDB Cloud.
Support incremental data migration from RDS.Full and incremental data migration from MySQL services such as Amazon RDS and Aurora to TiDB Cloud.
+ +## Security + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DomainFeatureDescription
TLS rotationSupport TLS rotation for TiDB clusters.Support internal TLS rotation settings and automatic updates in TiDB clusters.
Data EncryptionEnablement of customer-managed encryption keys.Allow customers to use their own KMS encryption keys on TiDB Cloud.
Database audit logging✅ Enhance the database audit logging.Enhance the ability of database audit logging.
Console audit logging✅ Support auditing TiDB Cloud console operations.Support reliable auditing capabilities for various operations in the TiDB Cloud console.
diff --git a/tidb-cloud/tidb-cloud-support.md b/tidb-cloud/tidb-cloud-support.md index cfac2666c559f..cbc1255db1db1 100644 --- a/tidb-cloud/tidb-cloud-support.md +++ b/tidb-cloud/tidb-cloud-support.md @@ -5,11 +5,33 @@ summary: Learn how to contact the support team of TiDB Cloud. # TiDB Cloud Support -TiDB Cloud offers tiered support plan offerings tailored to meet customers' needs. For more information, see [Connected Care Details](/tidb-cloud/connected-care-detail.md). +TiDB Cloud offers tiered support plan offerings tailored to meet customers' needs. For more information about our support offerings, see [Connected Care Details](/tidb-cloud/connected-care-detail.md). -> **Note:** -> -> To request a [Proof of Concept (PoC)](/tidb-cloud/tidb-cloud-poc.md), a demo, or free trial credits, click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com/), and click **Contact Sales**. +## Support channels + +TiDB Cloud provides multiple support channels. The available options depend on the type of issue and your [support plan](/tidb-cloud/connected-care-detail.md). + +- Support tickets ([Help Center](#access-pingcap-help-center)) + + Use this ticket-based channel for issues that require direct assistance from the TiDB Cloud support team. + + - [Billing and account tickets](/tidb-cloud/tidb-cloud-support.md#create-an-account-or-billing-support-ticket) are available to all TiDB Cloud users. + - [Technical support tickets](/tidb-cloud/tidb-cloud-support.md#create-a-technical-support-ticket) with guaranteed response times are available for paid support plans. If you do not have a paid support plan, use community channels for technical questions. + + The **Enterprise** and **Premium** support plans include the following enhanced capabilities. For more information, see [Connected Care Details](/tidb-cloud/connected-care-detail.md). 
+ + - Faster response times with defined SLAs + - Real-time communication through IM-based support + - Proactive support programs, such as [Clinic](/tidb-cloud/tidb-cloud-clinic.md) + - Dedicated or named support roles, such as Technical Account Managers (TAMs) + +- Community ([Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap) and [Discord](https://discord.com/invite/KVRZBR2DrG)) + + Use these open discussion channels to ask questions, share experiences, and get guidance from other users and PingCAP engineers. These channels are suitable for general questions, usage discussions, and non-urgent technical issues. + +- [TiDB.AI](https://tidb.ai/) + + TiDB.AI is an AI-powered assistant that answers common technical and documentation-related questions. It is suitable for quick and self-service help. ## Access PingCAP Help Center @@ -17,14 +39,16 @@ The [PingCAP Help Center](https://tidb.support.pingcap.com/servicedesk/customer/ You can access the PingCAP Help Center directly or through the [TiDB Cloud console](https://tidbcloud.com/) in the following ways: -- Click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com/), and then click **Request Support**. -- Click **Support** in the lower-left corner of [TiDB Cloud console](https://tidbcloud.com/), and then click **Create Ticket**. +- Click **?** in the lower-right corner of the [TiDB Cloud console](https://tidbcloud.com/), and then click **Support Tickets**. +- Click **Support** in the lower-left corner of the [TiDB Cloud console](https://tidbcloud.com/), and then do one of the following depending on your support plan: + - **Basic**: in the **Account & Billing** area, click **Account/Billing issues**. + - **Developer**, **Enterprise**, or **Premium**: in the **Talk to an expert** area, click **PingCAP Help Center**. 
- On the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, click **...** in the row of your cluster, and then select **Get Support**. - On your cluster overview page, click **...** in the upper-right corner, and then select **Get Support**. ## Create an account or billing support ticket -To create a support ticket about account or billing issues, take the following steps: +All TiDB Cloud users can create billing and account-related tickets. To create a support ticket about account or billing issues, take the following steps: 1. Log in to the [PingCAP Help Center](https://tidb.support.pingcap.com/servicedesk/customer/portals), and then click [TiDB Cloud Account/Billing Support](https://tidb.support.pingcap.com/servicedesk/customer/portal/16). 2. Click **Submit a request**. @@ -46,7 +70,7 @@ To create a support ticket about technical issues, take the following steps: > **Note:** > - > The [TiDB Cloud Technical Support](https://tidb.support.pingcap.com/servicedesk/customer/portal/6) entry is only available for customers subscribed to **Developer**, **Enterprise**, or **Premium** [support plans](/tidb-cloud/connected-care-detail.md). + > The [TiDB Cloud Technical Support](https://tidb.support.pingcap.com/servicedesk/customer/portal/6) entry is only available for **Developer**, **Enterprise**, or **Premium** [support plans](/tidb-cloud/connected-care-detail.md). If you are on the **Basic** plan, you can ask technical questions through the community channels on [Slack](https://slack.tidb.io/invite?team=tidb-community&channel=everyone&ref=pingcap) or [Discord](https://discord.com/invite/KVRZBR2DrG), where PingCAP engineers and community members provide guidance. 2. Click **Submit a request**. @@ -118,4 +142,4 @@ To check or upgrade your support plan, perform the following steps: To downgrade your support plan, perform the following steps: 1. In the [TiDB Cloud console](https://tidbcloud.com/), click **Support** in the lower-left corner. -2. 
Choose the support plan you want to switch to, and then click **Downgrade**. \ No newline at end of file +2. Choose the support plan you want to switch to, and then click **Downgrade**. diff --git a/tidb-cloud/tidb-cloud-tls-connect-to-dedicated.md b/tidb-cloud/tidb-cloud-tls-connect-to-dedicated.md index 074ea23d5e5ea..22d409dfcbccb 100644 --- a/tidb-cloud/tidb-cloud-tls-connect-to-dedicated.md +++ b/tidb-cloud/tidb-cloud-tls-connect-to-dedicated.md @@ -8,7 +8,7 @@ aliases: ['/tidbcloud/tidb-cloud-tls-connect-to-dedicated-tier'] On TiDB Cloud, establishing TLS connections is one of the basic security practices for connecting to TiDB Cloud Dedicated clusters. You can configure multiple TLS connections from your client, application, and development tools to your TiDB Cloud Dedicated cluster to protect data transmission security. For security reasons, TiDB Cloud Dedicated only supports TLS 1.2 and TLS 1.3, and does not support TLS 1.0 and TLS 1.1 versions. -To ensure data security, TiDB cluster CA for your TiDB Cloud Dedicated cluster is hosted on [AWS Certificate Manager (ACM)](https://aws.amazon.com/certificate-manager/), and TiDB cluster private keys are stored in AWS-managed hardware security modules (HSMs) that meet [FIPS 140-2 Level 3](https://csrc.nist.gov/projects/cryptographic-module-validation-program/Certificate/3139) security standards. +To ensure data security, the CA certificate for your TiDB Cloud Dedicated cluster is hosted on [AWS Private Certificate Authority](https://aws.amazon.com/private-ca/). The private key of the CA certificate is stored in AWS-managed hardware security modules (HSMs) that meet [FIPS 140-2 Level 3](https://csrc.nist.gov/projects/cryptographic-module-validation-program/Certificate/3139) security standards. 
## Prerequisites @@ -284,7 +284,7 @@ Parameter descriptions: ## Manage root certificates for TiDB Cloud Dedicated -TiDB Cloud Dedicated uses certificates from [AWS Certificate Manager (ACM)](https://aws.amazon.com/certificate-manager/) as a Certificate Authority (CA) for TLS connections between clients and TiDB Cloud Dedicated clusters. Usually, the root certificates of ACM are stored securely in AWS-managed hardware security modules (HSMs) that meet [FIPS 140-2 Level 3](https://csrc.nist.gov/projects/cryptographic-module-validation-program/Certificate/3139) security standards. +TiDB Cloud Dedicated uses certificates from [AWS Private Certificate Authority](https://aws.amazon.com/private-ca/) as a Certificate Authority (CA) for TLS connections between clients and TiDB Cloud Dedicated clusters. Usually, the private key of the CA certificate is stored securely in AWS-managed hardware security modules (HSMs) that meet [FIPS 140-2 Level 3](https://csrc.nist.gov/projects/cryptographic-module-validation-program/Certificate/3139) security standards. ## FAQs diff --git a/tidb-cloud/tidb-node-group-management.md b/tidb-cloud/tidb-node-group-management.md index cda09ce02e25d..2582c25d8695d 100644 --- a/tidb-cloud/tidb-node-group-management.md +++ b/tidb-cloud/tidb-node-group-management.md @@ -9,7 +9,7 @@ This document describes how to manage TiDB node groups and their endpoints to is > **Note**: > -> The TiDB Node Group feature is **NOT** available for TiDB Cloud Serverless clusters. +> The TiDB Node Group feature is **NOT** available for {{{ .starter }}} or {{{ .essential }}} clusters. 
## Terms diff --git a/tidb-cloud/tidb-node-group-overview.md b/tidb-cloud/tidb-node-group-overview.md index e9de8fd866bfa..8d66d90287b26 100644 --- a/tidb-cloud/tidb-node-group-overview.md +++ b/tidb-cloud/tidb-node-group-overview.md @@ -11,7 +11,7 @@ With TiDB node groups, you can divide computing nodes into multiple TiDB node gr > **Note**: > -> The TiDB Node Group feature is **NOT** available for TiDB Cloud Serverless clusters. +> The TiDB Node Group feature is **NOT** available for {{{ .starter }}} and {{{ .essential }}} clusters. ## Implementation diff --git a/tidb-cloud/tidb-x-architecture.md b/tidb-cloud/tidb-x-architecture.md index 8aa29945f9e92..5f11af0dba3fd 100644 --- a/tidb-cloud/tidb-x-architecture.md +++ b/tidb-cloud/tidb-x-architecture.md @@ -23,19 +23,19 @@ The classic TiDB architecture provides the following foundational capabilities: - **Horizontal scalability**: It supports linear scaling for both read and write performance. Clusters can scale to handle millions of queries per second (QPS) and manage over 1 PiB of data across tens of millions of tables. - **Hybrid Transactional and Analytical Processing (HTAP)**: It unifies transactional and analytical workloads. By pushing down heavy aggregation and join operations to TiFlash (the columnar storage engine), it provides predictable, real-time analytics on fresh transactional data without complex ETL pipelines. -- **Non-blocking schema changes**: It utilizes a fully online DDL implementation. Schema changes do not block reads or writes, allowing data models to evolve with minimal impact on application latency or availability. +- **Non-blocking schema changes**: It uses a fully online DDL implementation. Schema changes do not block reads or writes, allowing data models to evolve with minimal impact on application latency or availability. - **High availability**: It supports seamless cluster upgrades and scaling operations. 
This ensures that critical services remain accessible during maintenance or resource adjustment. - **Multi-cloud support**: It operates as an open-source solution with support for Amazon Web Services (AWS), Google Cloud, Microsoft Azure, and Alibaba Cloud. This provides cloud neutrality without vendor lock-in. ### Challenges of classic TiDB -While the shared-nothing architecture of classic TiDB provides high resilience, the tight coupling of storage and compute on local nodes introduces limitations in extreme large-scale environments. As data volumes grow and cloud-native requirements evolve, several structural challenges emerge. +While the shared-nothing architecture of classic TiDB provides high resilience, the tight coupling of storage and compute on local nodes introduces limitations in extremely large-scale environments. As data volumes grow and cloud-native requirements evolve, several structural challenges emerge. - **Scalability limitations** - Data movement overhead: In classic TiDB, scaling out (adding nodes) or scaling in (removing nodes) operations require physical movement of SST files between nodes. For large datasets, this process is time-consuming and can degrade online traffic performance due to heavy CPU and I/O consumption during data movement. - - Storage engine bottleneck: The underlying RocksDB storage engine in classic TiDB uses a single LSM-tree protected by a global mutex. 
This design creates a scalability ceiling where the system struggles to handle large datasets (for example, more than 6 TiB of data or over 300,000 SST files per TiKV node), preventing the system from fully utilizing the hardware capacity. - **Stability and performance interference** @@ -77,7 +77,7 @@ The TiDB X architecture is as follows: ### Object storage support -TiDB X utilizes object storage, such as Amazon S3, as the single source of truth for all data. Unlike the classic architecture where data is stored on local disks, TiDB X stores the persistent copy of all data in a **shared object storage layer**. The upper **shared cache layer** (row engine and columnar engine) serves as a high-performance cache to ensure low latency. +TiDB X uses object storage, such as Amazon S3, as the single source of truth for all data. Unlike the classic architecture where data is stored on local disks, TiDB X stores the persistent copy of all data in a **shared object storage layer**. The upper **shared cache layer** (row engine and columnar engine) serves as a high-performance cache to ensure low latency. Because the authoritative data is already stored in object storage, backups simply rely on incremental Raft logs and metadata stored in S3, allowing backup operations to complete in seconds regardless of total data volume. During scale-out operations, new TiKV nodes do not need to copy large volumes of data from existing nodes. Instead, they connect to object storage and load the required data on demand, significantly accelerating scale-out operations. @@ -89,7 +89,7 @@ This technical elasticity enables a consumption-based, pay-as-you-go pricing mod ### Microservice and workload isolation -TiDB X implements a sophisticated separation of duties to ensure that diverse workloads do not interfere with each other. 
The **isolated SQL layer** consists of separate groups of compute nodes, enabling workload isolation or multi-tenancy scenarios in which different applications can use dedicated compute resources while sharing the same underlying data. +TiDB X implements a sophisticated separation of duties to ensure that diverse workloads do not interfere with each other. The **isolated SQL layer** consists of separate groups of compute nodes, enabling workload isolation or multi-tenancy scenarios where different applications can use dedicated compute resources while sharing the same underlying data. The **shared services layer** decomposes heavy database operations into independent microservices, including compaction, statistics collection, and DDL execution. By offloading resource-intensive background operations—such as index creation or large-scale data imports—to this layer, TiDB X ensures that these operations do not compete for CPU or memory resources with compute nodes serving online user traffic. This design provides more predictable performance for critical applications and allows each component—gateway, SQL compute, cache, and background services—to scale independently based on its own resource demands. @@ -99,7 +99,7 @@ The following diagram provides a side-by-side comparison of classic TiDB and TiD ![Classic TiDB vs TiDB X architecture](/media/tidb-x/tidb-classic-vs-tidb-x-1.png) -- **Engine evolution**: In classic TiDB, the Raft engine manages the multi-raft log, while RocksDB handles physical data storage on local disks. In TiDB X, these components are replaced by a **new RF engine** (Raft engine) and a **redesigned KV engine**. The KV engine is an LSM-tree storage engine that replaces RocksDB. Both new engines are specifically optimized for high performance and seamless integration with object storage. +- **Engine evolution**: In classic TiDB, the Raft engine manages the multi-raft log, while RocksDB handles physical data storage on local disks. 
In TiDB X, these components are replaced by a **new RF engine** (Raft engine) and a **redesigned KV engine**. The KV engine is an LSM tree storage engine that replaces RocksDB. Both new engines are specifically optimized for high performance and seamless integration with object storage. - **Compute workload separation**: The dotted lines in the diagram represent background read and write operations to the object storage layer. In TiDB X, these interactions between the RF/KV engines and object storage are decoupled from foreground processes, ensuring that background operations do not affect online traffic latency. @@ -129,11 +129,11 @@ The move to object storage does not degrade foreground read and write performanc In classic TiDB, clusters are often over-provisioned to handle peak traffic and background tasks simultaneously. TiDB X enables **auto-scaling**, allowing users to pay only for the resources consumed (pay-as-you-go). Background resources for heavy tasks are provisioned on demand and released when no longer needed, eliminating wasted costs. -TiDB X uses the [Request Capacity Unit](https://docs.pingcap.com/tidbcloud/tidb-cloud-glossary/#request-capacity-unit-rcu) (RCU) to measure provisioned compute capacity. One RCU provides a fixed amount of compute resources that can process a certain number of SQL requests. The number of RCUs you provision determines your cluster's baseline performance and throughput capacity. You can set an upper limit to control costs while still benefiting from elastic scaling. +TiDB X uses the [Request Capacity Unit](/tidb-cloud/tidb-cloud-glossary.md#request-capacity-unit-rcu) (RCU) to measure provisioned compute capacity. One RCU provides a fixed amount of compute resources that can process a certain number of SQL requests. The number of RCUs you provision determines your cluster's baseline performance and throughput capacity. You can set an upper limit to control costs while still benefiting from elastic scaling. 
### From LSM tree to LSM forest -In classic TiDB, each TiKV node runs a single RocksDB instance that stores data for all Regions in one large LSM tree. Because data from thousands of Regions is mixed together, operations such as moving a Region, scaling out, or scaling in, can trigger extensive compaction. This can consume significant CPU and I/O resources and potentially impact online traffic. The single LSM-tree is protected by a global mutex. As data size grows, at scale (for example, more than 6 TiB of data or over 300,000 SST files per TiKV node), increased contention on the global mutex lock can impact both read and write performance. +In classic TiDB, each TiKV node runs a single RocksDB instance that stores data for all Regions in one large LSM tree. Because data from thousands of Regions is mixed together, operations such as moving a Region, scaling out, or scaling in, can trigger extensive compaction. This can consume significant CPU and I/O resources and potentially impact online traffic. The single LSM tree is protected by a global mutex. As data size grows, at scale (for example, more than 6 TiB of data or over 300,000 SST files per TiKV node), increased contention on the global mutex lock can impact both read and write performance. TiDB X redesigns the storage engine by moving from a single LSM tree to an **LSM forest**. While retaining the logical Region abstraction, TiDB X assigns each Region its own independent LSM tree. This physical isolation eliminates cross-Region compaction overhead during operations such as scaling, Region movement, and data loading. Operations on one Region are confined to its own tree, and there is no global mutex contention. 
@@ -151,7 +151,7 @@ The following table summarizes the architectural transitions from classic TiDB t | --- | --- | --- | --- | | Architecture | Shared-nothing (data stored on local disks) | Shared-storage (object storage as authoritative persistent storage) | Object storage enables cloud-native elasticity | | Stability | Foreground and background tasks share the same resources | Separation of compute and compute (elastic compute pools for heavy tasks) | Protects OLTP workloads under write-intensive or maintenance workloads | -| Performance | OLTP and tasks jobs contend for CPU and I/O | Dedicated elastic pools for heavy tasks | Lower OLTP latency while heavy tasks complete faster | +| Performance | OLTP and background tasks contend for CPU and I/O | Dedicated elastic pools for heavy tasks | Lower OLTP latency while heavy tasks complete faster | | Scaling mechanism | Physical data migration (SST file copying between TiKV nodes) | TiKV nodes only read or write SST files via object storage | 5×–10× faster scale-out and scale-in | | Storage engine | Single LSM tree per TiKV node (RocksDB) | LSM forest (one independent LSM tree per Region) | Eliminates global mutex contention and reduces compaction interference | | DDL execution | DDL competes with user traffic for local CPU and I/O | DDL offloaded to elastic compute resources | Faster schema changes with more predictable latency | diff --git a/tidb-cloud/tiproxy-management.md b/tidb-cloud/tiproxy-management.md new file mode 100644 index 0000000000000..b91fe71e813d2 --- /dev/null +++ b/tidb-cloud/tiproxy-management.md @@ -0,0 +1,128 @@ +--- +title: Manage TiProxy +summary: Learn about how to enable, disable, view, and modify TiProxy. +--- + +# Manage TiProxy + +This document describes how to enable, disable, view, and modify TiProxy. + +> **Note:** +> +> TiProxy is in beta and is currently available only for TiDB Cloud Dedicated clusters deployed on AWS. 
+ +## Enable TiProxy + +You can enable TiProxy for either a new cluster or an existing cluster in any TiDB node group. + +### Decide the size and number of TiProxy nodes + +The size and number of TiProxy nodes depend on both the QPS and network bandwidth of your cluster. Network bandwidth is the sum of the client request and TiDB response bandwidth. + +The following table shows the maximum QPS and network bandwidth of each TiProxy size. + +| Size | Maximum QPS | Maximum network bandwidth | +| :---- | :---------- | :------------------------ | +| Small | 30K | 93 MiB/s | +| Large | 120K | 312 MiB/s | + +The available TiProxy sizes are `Small` and `Large`. The available TiProxy node numbers are 2, 3, 6, 9, 12, 15, 18, 21, and 24. The default two small-sized TiProxy nodes can provide 60K QPS and 186 MiB/s network bandwidth. It is recommended that you reserve 20% of the QPS capacity to prevent high latency. + +For example, if your cluster's maximum QPS is 100K and the maximum network bandwidth is 100 MiB/s, the size and number of TiProxy nodes mainly depend on the QPS. In this case, you can select 6 small-sized TiProxy nodes. + +### Enable TiProxy for a new cluster + +To enable TiProxy when creating a new cluster, click the TiProxy toggle and choose the TiProxy size and number. + +![Enable TiProxy](/media/tidb-cloud/tiproxy-enable-tiproxy.png) + +### Enable TiProxy for an existing cluster + +> **Note:** +> +> Enabling TiProxy will cause a rolling restart of TiDB nodes in the corresponding TiDB node group, which disconnects existing connections during the restart. In addition, creating new connections might hang for up to 30 seconds. Make sure that you enable TiProxy in the maintenance window. + +To enable TiProxy for an existing cluster, perform the following steps: + +1. 
In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target cluster to go to its overview page. +2. Click **...** in the upper-right corner, and click **Modify** in the drop-down menu. The **Modify Cluster** page is displayed. +3. On the **Modify Cluster** page, click the TiProxy toggle and choose the TiProxy size and number. + +![Enable TiProxy](/media/tidb-cloud/tiproxy-enable-tiproxy.png) + +### Limitations and quotas + +- There must be at least two TiDB nodes in a TiDB node group. +- The TiDB node size must be at least 4 vCPUs. +- The default maximum number of TiProxy nodes in an organization is `10`. For more information, see [Limitations and Quotas](/tidb-cloud/limitations-and-quotas.md). +- The version of the TiDB cluster must be v6.5.0 or later. + +## Disable TiProxy + +> **Note:** +> +> Disabling TiProxy will cause connections to disconnect. In addition, creating new connections might hang for up to 10 seconds. Make sure that you disable TiProxy in the maintenance window. + +To disable TiProxy, perform the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target cluster to go to its overview page. +2. Click **...** in the upper-right corner, and click **Modify** in the drop-down menu. The **Modify Cluster** page is displayed. +3. On the **Modify Cluster** page, click the TiProxy toggle to disable TiProxy. + +![Disable TiProxy](/media/tidb-cloud/tiproxy-disable-tiproxy.png) + +## View TiProxy + +### View TiProxy topology + +To view the TiProxy topology, perform the following steps: + +1. 
In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target cluster to go to its overview page. +2. In the left navigation pane, click **Monitoring > Nodes**. The **Node Map** page is displayed. +3. On the **Node Map** page, the TiProxy topology is displayed in the **TiDB** pane. + +![TiProxy Topology](/media/tidb-cloud/tiproxy-topology.png) + +### View TiProxy metrics + +To view TiProxy metrics, perform the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target cluster to go to its overview page. +2. In the left navigation pane, click **Monitoring > Metrics**. The **Metrics** page is displayed. +3. On the **Metrics** page, click **Server** and scroll down to the TiProxy-related metrics. To view TiProxy metrics for a specific TiDB node group, click **TiDB Node Group View**, select your TiDB node group, and then scroll down to the TiProxy-related metrics. + +The metrics include: + +- **TiProxy CPU Usage**: the CPU usage statistics of each TiProxy node. The upper limit is 100%. If the maximum CPU usage exceeds 80%, it is recommended that you scale out TiProxy. +- **TiProxy Connections**: the number of connections on each TiProxy node. +- **TiProxy Throughput**: the bytes transferred per second on each TiProxy node. If the maximum throughput reaches the maximum network bandwidth, it is recommended that you scale out TiProxy. For more information about the maximum network bandwidth, see [Decide the size and number of TiProxy nodes](#decide-the-size-and-number-of-tiproxy-nodes). +- **TiProxy Sessions Migration Reasons**: the number of session migrations that happen every minute and the reason for them. 
For example, when TiDB scales in and TiProxy migrates sessions to other TiDB nodes, the reason is `status`. For more migration reasons, see [TiProxy Monitoring Metrics](https://docs.pingcap.com/tidb/stable/tiproxy-grafana#balance). + +### View TiProxy bills + +To view TiProxy bills, perform the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com), switch to your target organization using the combo box in the upper-left corner. +2. In the left navigation pane, click **Billing**. On the **Billing** page, the **Bills** tab is displayed by default. +3. In the **Summary by Service** section, TiProxy node cost is displayed under **TiDB Dedicated**, while TiProxy data transfer cost is included in **Data Transfer > Same Region**. + +![TiProxy Billing](/media/tidb-cloud/tiproxy-billing.png) + +## Modify TiProxy + +> **Note:** +> +> - Modifying the TiProxy size directly is not supported. It is recommended that you modify the number of TiProxy nodes instead. If you have to modify the TiProxy size, you need to disable TiProxy in all the TiDB node groups and then enable it again to choose a different size. +> - Scaling in TiProxy will cause connections to disconnect. Make sure that you scale in TiProxy in the maintenance window. + +To scale in or scale out TiProxy, perform the following steps: + +1. In the [TiDB Cloud console](https://tidbcloud.com/), navigate to the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, and then click the name of your target cluster to go to its overview page. +2. Click **...** in the upper-right corner, and click **Modify** in the drop-down menu. The **Modify Cluster** page is displayed. +3. On the **Modify Cluster** page, modify the number of the TiProxy nodes. + +![Modify TiProxy](/media/tidb-cloud/tiproxy-enable-tiproxy.png) + +## Manage TiProxy in multiple TiDB node groups + +When you have multiple TiDB node groups, each TiDB node group has its dedicated TiProxy group. 
TiProxy routes traffic to the TiDB nodes in the same TiDB node group to isolate computing resources. You can enable, disable, or modify TiProxy in each TiDB node group. However, the TiProxy size in all the TiDB node groups must be the same. diff --git a/tidb-cloud/tiproxy-overview-for-cloud.md b/tidb-cloud/tiproxy-overview-for-cloud.md new file mode 100644 index 0000000000000..ad4a0cddaac64 --- /dev/null +++ b/tidb-cloud/tiproxy-overview-for-cloud.md @@ -0,0 +1,47 @@ +--- +title: Overview of TiProxy for TiDB Cloud +summary: Learn about the usage scenarios of TiProxy for TiDB Cloud. +--- + +# Overview of TiProxy for TiDB Cloud + +TiProxy is the official proxy component of PingCAP. It is placed between the client and the TiDB server to provide load balancing, connection persistence, and other features for TiDB. + +For more information, see [TiProxy Overview](https://docs.pingcap.com/tidb/stable/tiproxy-overview). + +> **Note:** +> +> TiProxy is in beta and is currently available only for TiDB Cloud Dedicated clusters deployed on AWS. + +## Scenarios + +TiProxy is suitable for the following scenarios: + +- Connection persistence: When a TiDB server performs scaling in, rolling upgrade, or rolling restart, the client connection breaks, resulting in an error. If the client does not have an idempotent error retry mechanism, you need to manually check and fix the error, which greatly increases the operational overhead. TiProxy can keep the client connection, so that the client does not report an error. +- Frequent scaling in and scaling out: The workload of an application might change periodically. To save costs, you can deploy TiDB on the cloud and automatically scale in and scale out TiDB servers according to the workload. However, scaling in might cause the client to disconnect, and scaling out might result in an unbalanced load. TiProxy can keep the client connection and achieve load balancing. 
+- CPU load imbalance: When background tasks consume a significant amount of CPU resources, or workloads across connections vary significantly, leading to an imbalanced CPU load, TiProxy can migrate connections based on CPU usage to achieve load balancing. For more details, see [CPU-based load balancing](https://docs.pingcap.com/tidb/stable/tiproxy-load-balance#cpu-based-load-balancing). + +For more scenarios, see [TiProxy User Scenarios](https://docs.pingcap.com/tidb/stable/tiproxy-overview#user-scenarios). + +## Limitations + +TiProxy cannot preserve client connections in the following scenarios: + +- Upgrading AWS EKS, Azure AKS, Google Cloud GKE, or Alibaba Cloud ACK. +- Disabling, scaling in, upgrading, or restarting TiProxy. +- A single statement or transaction that runs for more than 20 seconds. If your application needs a longer timeout, contact [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md). + +For more scenarios, see [TiProxy Limitations](https://docs.pingcap.com/tidb/stable/tiproxy-overview#limitations). + +## Billing + +TiProxy introduces two types of costs: + +- Node costs. For more information, see [Node Cost](https://www.pingcap.com/tidb-dedicated-pricing-details/#node-cost). +- Data transfer costs. For more information, see [Data Transfer Cost](https://www.pingcap.com/tidb-dedicated-pricing-details/#data-transfer-cost). TiProxy prioritizes routing traffic to the TiDB nodes in the same availability zone (AZ). However, if the TiDB workloads are uneven, it also routes traffic to other AZs, which can incur additional data transfer costs. + +You can view the TiProxy bill on the **Billing** page. For more information, see [View TiProxy bills](/tidb-cloud/tiproxy-management.md#view-tiproxy-bills). + +## SLA impact + +TiProxy has no impact on SLA. 
diff --git a/tidb-cloud/tune-performance.md b/tidb-cloud/tune-performance.md index c87a30c473b65..3a26b51d0ce65 100644 --- a/tidb-cloud/tune-performance.md +++ b/tidb-cloud/tune-performance.md @@ -1,25 +1,41 @@ --- title: Analyze and Tune Performance -summary: Learn how to analyze and tune performance of your TiDB Cloud cluster. +summary: Learn how to analyze and tune performance in TiDB Cloud. aliases: ['/tidbcloud/index-insight'] --- # Analyze and Tune Performance + + TiDB Cloud provides [Slow Query](#slow-query), [Statement Analysis](#statement-analysis), and [Key Visualizer](#key-visualizer) to analyze performance. -- Slow Query lets you search and view all slow queries in your TiDB cluster, and explore the bottlenecks of each slow query by viewing its execution plan, SQL execution information, and other details. + + + + +TiDB Cloud provides [Slow Query](#slow-query) and [SQL Statement](#sql-statement) to analyze performance. + + + +- Slow Query lets you search and view all slow queries in your TiDB clusterinstance, and explore the bottlenecks of each slow query by viewing its execution plan, SQL execution information, and other details. + +- Statement AnalysisSQL Statement enables you to directly observe the SQL execution on the page, and easily locate performance problems without querying the system tables. -- Statement Analysis enables you to directly observe the SQL execution on the page, and easily locate performance problems without querying the system tables. + - Key Visualizer helps you observe TiDB's data access patterns and data hotspots. > **Note:** > -> Currently, **Key Visualizer** is unavailable for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. +> Currently, **Key Visualizer** is only available on TiDB Cloud Dedicated clusters. + + ## View the Diagnosis page + + 1. 
On the [**Clusters**](https://tidbcloud.com/project/clusters) page of your project, click the name of your target cluster to go to its overview page. > **Tip:** @@ -28,11 +44,27 @@ TiDB Cloud provides [Slow Query](#slow-query), [Statement Analysis](#statement-a 2. In the left navigation pane, click **Monitoring** > **Diagnosis**. + + + + +1. On the [**TiDB Instances**](https://tidbcloud.com/tidbs) page of your organization, click the name of your target instance to go to its overview page. + + > **Tip:** + > + > You can use the combo box in the upper-left corner to switch between organizations and instances. + +2. In the left navigation pane, click **Monitoring**. + + + ## Slow Query By default, SQL queries that take more than 300 milliseconds are considered as slow queries. -To view slow queries in a cluster, perform the following steps: +To view slow queries in a TiDB clusterinstance, perform the following steps: + + 1. Navigate to the [**Diagnosis**](#view-the-diagnosis-page) page of a cluster. @@ -42,10 +74,35 @@ To view slow queries in a cluster, perform the following steps: 4. (Optional) You can filter slow queries based on the target time range, the related databases, and SQL keywords. You can also limit the number of slow queries to be displayed. + + + + +1. Navigate to the overview page of the TiDB instance, and then click **Monitoring** > **Slow Query** in the left navigation pane. + +2. Select a slow query from the list to view its detailed execution information. + +3. (Optional) You can filter slow queries based on the target time range and SQL keywords. You can also limit the number of slow queries to be displayed. + + + The results are displayed in the form of a table, and you can sort the results by different columns. + + +> **Note:** +> +> To improve traffic visibility, {{{ .starter }}} and {{{ .essential }}} now display the real client IP address for connections via AWS PrivateLink in detailed execution information. 
Currently, this feature is in beta and is available only in the AWS region `Frankfurt (eu-central-1)`. + + + + For more information, see [Slow Queries in TiDB Dashboard](https://docs.pingcap.com/tidb/stable/dashboard-slow-query). + + + + ## Statement Analysis To use the statement analysis, perform the following steps: @@ -58,8 +115,28 @@ To use the statement analysis, perform the following steps: 4. (Optional) If you only care about certain databases, you can select the corresponding schema(s) in the next box to filter the results. + + + + +## SQL Statement + +To use the **SQL Statement** page, perform the following steps: + +1. Navigate to the overview page of the TiDB instance, and then click **Monitoring** > **SQL Statement** in the left navigation pane. + +2. Click a SQL statement in the list to view its detailed execution information. + +3. In the time interval box, select the time period to be analyzed. Then you can get the execution statistics for SQL statements across all databases in this period. + +4. (Optional) If you only care about certain databases, you can select the corresponding schema(s) in the next box to filter the results. + + + The results are displayed in the form of a table, and you can sort the results by different columns. + + For more information, see [Statement Execution Details in TiDB Dashboard](https://docs.pingcap.com/tidb/stable/dashboard-statement-details). ## Key Visualizer @@ -74,6 +151,8 @@ To view the key analytics, perform the following steps: 2. Click the **Key Visualizer** tab. -On the **Key Visualizer** page, a large heat map shows changes on access traffic over time. The average values ​​along each axis of the heat map are shown below and on the right side. The left side is the table name, index name and other information. +On the **Key Visualizer** page, a large heat map shows how access traffic changes over time. The average values along each axis of the heat map are shown below and on the right side. 
The left side displays the table name, index name, and other related information. For more information, see [Key Visualizer](https://docs.pingcap.com/tidb/stable/dashboard-key-visualizer). + + diff --git a/tidb-cloud/upgrade-tidb-cluster.md b/tidb-cloud/upgrade-tidb-cluster.md index a1a600eb8f4d9..65e0e2b753cb5 100644 --- a/tidb-cloud/upgrade-tidb-cluster.md +++ b/tidb-cloud/upgrade-tidb-cluster.md @@ -15,7 +15,18 @@ For the TiDB version that is too low, TiDB Cloud will regularly upgrade it unifo To submit an upgrade request, perform the steps in [TiDB Cloud Support](/tidb-cloud/tidb-cloud-support.md) to contact our support team. Note to provide the following information in the **Description** box: + + +- Cloud Provider: AWS, Azure, Google Cloud, or Alibaba Cloud +- Cluster Name: xxx + + + + + - Cloud Provider: AWS, Azure, or Google Cloud - Cluster Name: xxx + + TiDB Cloud technical support will confirm with you the time period for the upgrade. After you have confirmed the upgrade time, TiDB Cloud technical support will do the upgrade in the confirmed time period. diff --git a/tidb-cloud/use-chat2query-api.md b/tidb-cloud/use-chat2query-api.md index 5d6c2088f2dc8..db50657f60c54 100644 --- a/tidb-cloud/use-chat2query-api.md +++ b/tidb-cloud/use-chat2query-api.md @@ -11,7 +11,7 @@ Chat2Query API can only be accessed through HTTPS, ensuring that all data transm > **Note:** > -> Chat2Query API is available for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters. To use the Chat2Query API on [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, contact [TiDB Cloud support](/tidb-cloud/tidb-cloud-support.md). +> Chat2Query API is only available for [TiDB Cloud Starter](/tidb-cloud/select-cluster-tier.md#starter) clusters hosted on AWS. 
To use the Chat2Query API on [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, contact [TiDB Cloud support](/tidb-cloud/tidb-cloud-support.md). ## Before you begin diff --git a/tidb-cloud/use-chat2query-knowledge.md b/tidb-cloud/use-chat2query-knowledge.md index 99c0d614b5c29..dd76573b3f73d 100644 --- a/tidb-cloud/use-chat2query-knowledge.md +++ b/tidb-cloud/use-chat2query-knowledge.md @@ -11,7 +11,7 @@ Starting from v3, the Chat2Query API enables you to add or modify knowledge base > **Note:** > -> Knowledge base related endpoints are available for [TiDB Cloud Serverless](/tidb-cloud/select-cluster-tier.md#tidb-cloud-serverless) clusters by default. To use knowledge base related endpoints on [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, contact [TiDB Cloud support](/tidb-cloud/tidb-cloud-support.md). +> Knowledge base related endpoints are only available for [{{{ .starter }}}](/tidb-cloud/select-cluster-tier.md#starter) clusters hosted on AWS. To use knowledge base related endpoints on [TiDB Cloud Dedicated](/tidb-cloud/select-cluster-tier.md#tidb-cloud-dedicated) clusters, contact [TiDB Cloud support](/tidb-cloud/tidb-cloud-support.md). ## Before you begin diff --git a/tidb-cloud/use-tidb-cloud-with-ai-tools.md b/tidb-cloud/use-tidb-cloud-with-ai-tools.md new file mode 100644 index 0000000000000..e60899cefda3d --- /dev/null +++ b/tidb-cloud/use-tidb-cloud-with-ai-tools.md @@ -0,0 +1,163 @@ +--- +title: Use {{{ .starter }}} with AI Tools +summary: Learn how to connect your {{{ .starter }}} cluster to AI-powered development tools that support the Model Context Protocol (MCP), such as Cursor, Claude Code, VS Code, and Windsurf. 
+--- + +# Use {{{ .starter }}} with AI Tools + +This document describes how to connect your {{{ .starter }}} cluster to AI-powered development tools that support the Model Context Protocol (MCP), such as Cursor, Claude Code, Visual Studio Code (VS Code), and Windsurf. + +By configuring your {{{ .starter }}} cluster as an MCP server, you can enable AI assistants in your development tools to query your database schema, understand your data model, and generate context-aware code suggestions. + +## Before you begin + +To complete this guide, you need the following: + +- A {{{ .starter }}} cluster. If you don't have any, you can [create a {{{ .starter }}} cluster](/develop/dev-guide-build-cluster-in-cloud.md). +- [Python 3.11 or higher](https://www.python.org/downloads/) installed. +- [uv](https://docs.astral.sh/uv/getting-started/installation/) installed. +- An AI development tool that supports MCP, such as: + + - [Cursor](https://cursor.com) + - [Claude Code](https://claude.com/product/claude-code) + - [Visual Studio Code](https://code.visualstudio.com) + - [Windsurf](https://windsurf.com) + +## Connect to AI tools + +After you create a {{{ .starter }}} cluster in TiDB Cloud, perform the following steps to connect it to your AI tool. + +1. On the [**Clusters**](https://tidbcloud.com/project/clusters) page, click the name of your target cluster to go to its overview page. Then, click **Use with AI Tools** in the upper-right corner. +2. In the **Access `your_cluster_name` with AI tools** dialog, select the **Branch** and **Database** that you want the AI tool to access. +3. Verify that you meet all the **Prerequisites** listed. If not, follow the on-screen instructions to install the required dependencies. +4. Configure the password: + + - If you have not set a password yet, click **Generate Password** to generate a random password. + + The generated password will not show again, so save your password in a secure location. 
+ + - If you have already set a password, enter your password in the **Enter the password for easy setup** field. + - If you forget the password, click **Reset password** in the **Prerequisites** section to generate a new one. + + Note that resetting your password disconnects all existing root user sessions. + +5. Select the tab for your AI tool: **Cursor**, **Claude Code**, **VS Code**, or **Windsurf**. +6. Complete the setup steps for the selected tool. + + For more information, see [Tool-specific setup](#tool-specific-setup). + +## Tool-specific setup + +### Cursor + +To configure Cursor as an MCP client for TiDB, you can use one of the following methods: + +- **Method 1**: in the **Access `your_cluster_name` with AI tools** dialog of the [TiDB Cloud console](https://tidbcloud.com), click **Add to Cursor** to launch Cursor, and then click **Install**. +- **Method 2**: manually add the following configuration to your `.cursor/mcp.json` file: + + ```json + { + "mcpServers": { + "TiDB": { + "command": "uvx --from pytidb[mcp] tidb-mcp-server", + "env": { + "TIDB_HOST": "", + "TIDB_PORT": "", + "TIDB_USERNAME": "", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "" + } + } + } + } + ``` + +### Claude Code + +To configure Claude Code as an MCP client for TiDB, you can use one of the following methods: + +- **Method 1**: copy the setup command from the **Access `your_cluster_name` with AI tools** dialog of the [TiDB Cloud console](https://tidbcloud.com/), and then run it in your terminal: + + ```bash + claude mcp add --transport stdio TiDB \ + --env TIDB_HOST='' \ + --env TIDB_PORT= \ + --env TIDB_USERNAME='' \ + --env TIDB_PASSWORD='' \ + --env TIDB_DATABASE='' \ + -- uvx --from 'pytidb[mcp]' 'tidb-mcp-server' + ``` + +- **Method 2**: add the following configuration to your project-level `.mcp.json` file. For more information, see the [Claude Code documentation](https://code.claude.com/docs/en/mcp#project-scope). 
+ + ```json + { + "mcpServers": { + "TiDB": { + "type": "stdio", + "command": "uvx", + "args": ["--from", "pytidb[mcp]", "tidb-mcp-server"], + "env": { + "TIDB_HOST": "", + "TIDB_PORT": "", + "TIDB_USERNAME": "", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "" + } + } + } + } + ``` + +### VS Code + +To configure VS Code as an MCP client for TiDB, you can use one of the following methods: + +- **Method 1**: in the **Access `your_cluster_name` with AI tools** dialog of the [TiDB Cloud console](https://tidbcloud.com/), click **Add to VS Code** to launch VS Code, and then click **Install**. +- **Method 2**: add the following configuration to your `.vscode/mcp.json` file: + + ```json + { + "mcpServers": { + "TiDB": { + "type": "stdio", + "command": "uvx", + "args": ["--from", "pytidb[mcp]", "tidb-mcp-server"], + "env": { + "TIDB_HOST": "", + "TIDB_PORT": "", + "TIDB_USERNAME": "", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "" + } + } + } + } + ``` + +### Windsurf + +To add the TiDB MCP plugin to Windsurf, update your `mcp_config.json` file as follows. For more information, see the [Windsurf documentation](https://docs.windsurf.com/windsurf/cascade/mcp#adding-a-new-mcp-plugin). 
+ +```json +{ + "mcpServers": { + "TiDB": { + "command": "uvx", + "args": ["--from", "pytidb[mcp]", "tidb-mcp-server"], + "env": { + "TIDB_HOST": "", + "TIDB_PORT": "", + "TIDB_USERNAME": "", + "TIDB_PASSWORD": "", + "TIDB_DATABASE": "" + } + } + } +} +``` + +## See also + +- [Try Out TiDB + Vector Search](/ai/quickstart-via-python.md) +- [Developer Guide Overview](https://docs.pingcap.com/developer/) diff --git a/tidb-cloud/v8.5-performance-highlights.md b/tidb-cloud/v8.5-performance-highlights.md index 9deb7dab0a52f..9698ecf664163 100644 --- a/tidb-cloud/v8.5-performance-highlights.md +++ b/tidb-cloud/v8.5-performance-highlights.md @@ -5,7 +5,7 @@ summary: Introduce the performance improvements for TiDB Cloud Dedicated cluster # TiDB Cloud Performance Highlights for TiDB v8.5.0 -[TiDB v8.5.0](https://docs.pingcap.com/tidb/v8.5/release-8.5.0) is an important Long-Term Support (LTS) release, which delivers notable improvements in performance, scalability, and operational efficiency. +[TiDB v8.5.0](https://docs.pingcap.com/tidb/stable/release-8.5.0) is an important Long-Term Support (LTS) release, which delivers notable improvements in performance, scalability, and operational efficiency. 
This document outlines the performance improvements in v8.5.0 across the following areas: diff --git a/tidb-distributed-execution-framework.md b/tidb-distributed-execution-framework.md index 2637cf6f0283b..13966615eb748 100644 --- a/tidb-distributed-execution-framework.md +++ b/tidb-distributed-execution-framework.md @@ -121,7 +121,7 @@ As shown in the preceding diagram, the execution of tasks in the DXF is mainly h -* [Execution Principles and Best Practices of DDL Statements](/ddl-introduction.md) +* [Execution Principles and Best Practices of DDL Statements](/best-practices/ddl-introduction.md) diff --git a/tidb-lightning/data-import-best-practices.md b/tidb-lightning/data-import-best-practices.md index fedfa093ad805..9b49044c53716 100644 --- a/tidb-lightning/data-import-best-practices.md +++ b/tidb-lightning/data-import-best-practices.md @@ -146,7 +146,7 @@ If the PD Scatter Region latency during the import process exceeds 30 minutes, c ### Disable the analyze operation -In the case of a large single table (for example, with over 1 billion rows and more than 50 columns), it is recommended to disable the `analyze` operation (`analyze="off"`) during the import process, and manually execute the [`ANALYZE TABLE`](/sql-statements//sql-statement-analyze-table.md) statement after the import is completed. +For a large single table (for example, with over 1 billion rows and more than 50 columns), it is recommended to disable the `analyze` operation (`analyze="off"`) during the import process, and manually execute the [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) statement after the import is completed. For more information about the configuration of `analyze`, see [TiDB Lightning task configuration](/tidb-lightning/tidb-lightning-configuration.md#tidb-lightning-task). 
diff --git a/tiflash-upgrade-guide.md b/tiflash-upgrade-guide.md index ac36b69460260..d962590ac4953 100644 --- a/tiflash-upgrade-guide.md +++ b/tiflash-upgrade-guide.md @@ -137,7 +137,7 @@ Starting from v7.4, to reduce the read and write amplification generated during ## From v7.x to v8.4 or a later version -Starting from v8.4, the underlying storage format of TiFlash is updated to support [vector search](/vector-search/vector-search-overview.md). Therefore, after TiFlash is upgraded to v8.4 or a later version, in-place downgrading to the original version is not supported. +Starting from v8.4, the underlying storage format of TiFlash is updated to support [vector search](/ai/concepts/vector-search-overview.md). Therefore, after TiFlash is upgraded to v8.4 or a later version, in-place downgrading to the original version is not supported. ## From v8.x to v9.0 or a later version diff --git a/tiflash/tiflash-supported-pushdown-calculations.md b/tiflash/tiflash-supported-pushdown-calculations.md index c06eb805140da..f1af09f7516f6 100644 --- a/tiflash/tiflash-supported-pushdown-calculations.md +++ b/tiflash/tiflash-supported-pushdown-calculations.md @@ -42,7 +42,7 @@ TiFlash supports the following push-down expressions: | [Regular expression functions and operators](/functions-and-operators/string-functions.md) | `REGEXP`, `REGEXP_LIKE()`, `REGEXP_INSTR()`, `REGEXP_SUBSTR()`, `REGEXP_REPLACE()`, `RLIKE` | | [Date functions](/functions-and-operators/date-and-time-functions.md) | `DATE_FORMAT()`, `TIMESTAMPDIFF()`, `FROM_UNIXTIME()`, `UNIX_TIMESTAMP(int)`, `UNIX_TIMESTAMP(decimal)`, `STR_TO_DATE(date)`, `STR_TO_DATE(datetime)`, `DATEDIFF()`, `YEAR()`, `MONTH()`, `DAY()`, `EXTRACT(datetime)`, `DATE()`, `HOUR()`, `MICROSECOND()`, `MINUTE()`, `SECOND()`, `SYSDATE()`, `DATE_ADD/ADDDATE(datetime, int)`, `DATE_ADD/ADDDATE(string, int/real)`, `DATE_SUB/SUBDATE(datetime, int)`, `DATE_SUB/SUBDATE(string, int/real)`, `QUARTER()`, `DAYNAME()`, `DAYOFMONTH()`, `DAYOFWEEK()`, 
`DAYOFYEAR()`, `LAST_DAY()`, `MONTHNAME()`, `TO_SECONDS()`, `TO_DAYS()`, `FROM_DAYS()`, `WEEKOFYEAR()` | | [JSON function](/functions-and-operators/json-functions.md) | `JSON_LENGTH()`, `->`, `->>`, `JSON_EXTRACT()`, `JSON_ARRAY()`, `JSON_DEPTH()`, `JSON_VALID()`, `JSON_KEYS()`, `JSON_CONTAINS_PATH()`, `JSON_UNQUOTE()` | -| [Vector function](/vector-search/vector-search-functions-and-operators.md) | `VEC_L2_DISTANCE`, `VEC_COSINE_DISTANCE`, `VEC_NEGATIVE_INNER_PRODUCT`, `VEC_L1_DISTANCE`, `VEC_DIMS`, `VEC_L2_NORM`, `VEC_AS_TEXT` | +| [Vector function](/ai/reference/vector-search-functions-and-operators.md) | `VEC_L2_DISTANCE`, `VEC_COSINE_DISTANCE`, `VEC_NEGATIVE_INNER_PRODUCT`, `VEC_L1_DISTANCE`, `VEC_DIMS`, `VEC_L2_NORM`, `VEC_AS_TEXT` | | [Conversion functions](/functions-and-operators/cast-functions-and-operators.md) | `CAST(int AS DOUBLE), CAST(int AS DECIMAL)`, `CAST(int AS STRING)`, `CAST(int AS TIME)`, `CAST(double AS INT)`, `CAST(double AS DECIMAL)`, `CAST(double AS STRING)`, `CAST(double AS TIME)`, `CAST(string AS INT)`, `CAST(string AS DOUBLE), CAST(string AS DECIMAL)`, `CAST(string AS TIME)`, `CAST(decimal AS INT)`, `CAST(decimal AS STRING)`, `CAST(decimal AS TIME)`, `CAST(decimal AS DOUBLE)`, `CAST(time AS INT)`, `CAST(time AS DECIMAL)`, `CAST(time AS STRING)`, `CAST(time AS REAL)`, `CAST(json AS JSON)`, `CAST(json AS STRING)`, `CAST(int AS JSON)`, `CAST(real AS JSON)`, `CAST(decimal AS JSON)`, `CAST(string AS JSON)`, `CAST(time AS JSON)`, `CAST(duration AS JSON)` | | [Aggregate functions](/functions-and-operators/aggregate-group-by-functions.md) | `MIN()`, `MAX()`, `SUM()`, `COUNT()`, `AVG()`, `APPROX_COUNT_DISTINCT()`, `GROUP_CONCAT()` | | [Miscellaneous functions](/functions-and-operators/miscellaneous-functions.md) | `INET_NTOA()`, `INET_ATON()`, `INET6_NTOA()`, `INET6_ATON()` | diff --git a/upgrade-tidb-using-tiup.md b/upgrade-tidb-using-tiup.md index a6b61e2467058..cbc193b5511af 100644 --- a/upgrade-tidb-using-tiup.md +++ 
b/upgrade-tidb-using-tiup.md @@ -52,7 +52,7 @@ This document applies to upgrading to TiDB v8.5.0 from the following versions: v - TiDB currently does not support version downgrade or rolling back to an earlier version after the upgrade. - Support upgrading the versions of TiCDC, TiFlash, and other components. - When upgrading TiFlash from versions earlier than v6.3.0 to v6.3.0 and later versions, note that the CPU must support the AVX2 instruction set under the Linux AMD64 architecture and the ARMv8 instruction set architecture under the Linux ARM64 architecture. For details, see the description in [v6.3.0 Release Notes](/releases/release-6.3.0.md#others). -- For detailed compatibility changes of different versions, see the [Release Notes](/releases/release-notes.md) of each version. Modify your cluster configuration according to the "Compatibility Changes" section of the corresponding release notes. +- For detailed compatibility changes of different versions, see the [Release Notes](/releases/_index.md) of each version. Modify your cluster configuration according to the "Compatibility Changes" section of the corresponding release notes. - When updating clusters from versions earlier than v5.3 to v5.3 or later versions, note that there is a time format change in the alerts generated by the default deployed Prometheus. This format change is introduced starting from Prometheus v2.27.1. For more information, see [Prometheus commit](https://github.com/prometheus/prometheus/commit/7646cbca328278585be15fa615e22f2a50b47d06). ## Preparations @@ -307,7 +307,7 @@ Re-execute the `tiup cluster upgrade` command to resume the upgrade. The upgrade ### How to fix the issue that the upgrade gets stuck when upgrading to v6.2.0 or later versions? -Starting from v6.2.0, TiDB enables the [concurrent DDL framework](/ddl-introduction.md#how-the-online-ddl-asynchronous-change-works-in-tidb) by default to execute concurrent DDLs. 
This framework changes the DDL job storage from a KV queue to a table queue. This change might cause the upgrade to get stuck in some scenarios. The following are some scenarios that might trigger this issue and the corresponding solutions: +Starting from v6.2.0, TiDB enables the [concurrent DDL framework](/best-practices/ddl-introduction.md#how-the-online-ddl-asynchronous-change-works-in-tidb) by default to execute concurrent DDLs. This framework changes the DDL job storage from a KV queue to a table queue. This change might cause the upgrade to get stuck in some scenarios. The following are some scenarios that might trigger this issue and the corresponding solutions: - Upgrade gets stuck due to plugin loading diff --git a/variables.json b/variables.json index 23df8e63a812b..664803713d44a 100644 --- a/variables.json +++ b/variables.json @@ -1,6 +1,7 @@ { "tidb": "TiDB", "tidb-version": "v8.5.0", + "tidb-operator-version": "v1.6.4", "self-managed": "TiDB Self-Managed", "starter": "TiDB Cloud Starter", "essential": "TiDB Cloud Essential", diff --git a/vector-search/vector-search-limitations.md b/vector-search/vector-search-limitations.md deleted file mode 100644 index 531b5c962d45b..0000000000000 --- a/vector-search/vector-search-limitations.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Vector Search Limitations -summary: Learn the limitations of the TiDB vector search. ---- - -# Vector Search Limitations - -This document describes the known limitations of TiDB vector search. - - - -> **Warning:** -> -> The vector search feature is experimental. It is not recommended that you use it in the production environment. This feature might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. - - - - - -> **Note:** -> -> The vector search feature is in beta. It might be changed without prior notice. If you find a bug, you can report an [issue](https://github.com/pingcap/tidb/issues) on GitHub. 
- - - -> **Note:** -> -> The vector search feature is available on TiDB Self-Managed, [{{{ .starter }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#starter), [{{{ .essential }}}](https://docs.pingcap.com/tidbcloud/select-cluster-tier#essential), and [TiDB Cloud Dedicated](https://docs.pingcap.com/tidbcloud/select-cluster-tier#tidb-cloud-dedicated). For TiDB Self-Managed and TiDB Cloud Dedicated, the TiDB version must be v8.4.0 or later (v8.5.0 or later is recommended). - -## Vector data type limitations - -- Each [vector](/vector-search/vector-search-data-types.md) supports up to 16383 dimensions. -- Vector data types cannot store `NaN`, `Infinity`, or `-Infinity` values. -- Vector data types cannot store double-precision floating-point numbers. If you insert or store double-precision floating-point numbers in vector columns, TiDB converts them to single-precision floating-point numbers. -- Vector columns cannot be used as primary keys or as part of a primary key. -- Vector columns cannot be used as unique indexes or as part of a unique index. -- Vector columns cannot be used as partition keys or as part of a partition key. -- Currently, TiDB does not support modifying a vector column to other data types (such as `JSON` and `VARCHAR`). - -## Vector index limitations - -See [Vector search restrictions](/vector-search/vector-search-index.md#restrictions). - -## Compatibility with TiDB tools - - - -- Make sure that you are using v8.4.0 or a later version of BR to back up and restore data. Restoring tables with vector data types to TiDB clusters earlier than v8.4.0 is not supported. -- TiDB Data Migration (DM) does not support migrating or replicating MySQL vector data types to TiDB. -- When TiCDC replicates vector data to a downstream that does not support vector data types, it will change the vector data types to another type. 
For more information, see [Compatibility with vector data types](/ticdc/ticdc-compatibility.md#compatibility-with-vector-data-types). - - - - - -- The Data Migration feature in the TiDB Cloud console does not support migrating or replicating MySQL vector data types to TiDB Cloud. - - - -## Feedback - -We value your feedback and are always here to help: - - - -- [Join our Discord](https://discord.gg/zcqexutz2R) - - - - - -- [Join our Discord](https://discord.gg/zcqexutz2R) -- [Visit our Support Portal](https://tidb.support.pingcap.com/) - -