diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..43c6a002 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,10 @@ +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 + +# tab_size = 4 spaces +[*.go] +indent_style = tab +indent_size = 4 +trim_trailing_whitespace = true diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 00000000..0346f4eb --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,3 @@ +Ask questions at [Gitter](https://gitter.im/xiaomi-dba/soar). + +[Open an issue](https://github.com/xiaomi/soar/issues/new) to discuss your plans before doing any work on SOAR. diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..6f9b70a5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,23 @@ +--- +name: Bug Report +about: You're experiencing an issue with SOAR that is different than the documented behavior. + +--- + +Please answer these questions before submitting your issue. Thanks! + +1. What did you do? +If possible, provide a recipe for reproducing the error. + + +2. What did you expect to see? + + + +3. What did you see instead? + + + +4. What version of SOAR are you using (`soar -version`)? + + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..fa3f698d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,15 @@ +--- +name: Feature Request +about: If you have something you think SOAR could improve or add support for. + +--- + +Please search the existing issues for relevant feature requests, add upvotes to pre-existing requests. + +#### Feature Description + +A written overview of the feature. + +#### Use Case(s) + +Any relevant use-cases that you see. 
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 00000000..8212e47f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,10 @@ +--- +name: Question +about: If you have a question, please check out our other community resources instead of opening an issue. + +--- + +Issues on GitHub are intended to be related to bugs or feature requests, so we recommend using our other community resources instead of asking here. + +- [SOAR Doc](http://github.com/XiaoMi/soar/blob/master/README.md) +- Any other questions can be asked in the community [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/xiaomi-dba/soar) diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..8a726920 --- /dev/null +++ b/.gitignore @@ -0,0 +1,21 @@ +soar +soar.darwin-386 +soar.darwin-amd64 +soar.linux-386 +soar.linux-amd64 +soar.windows-386 +soar.windows-amd64 +common/version.go +doc/blueprint/ +*.iml +*.swp +*.log +coverage.* +y.output + +.DS_Store +.vscode/ +.idea +_tools/ + +TestMarkdown2Html.html diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..d03d6d57 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: false +go: + - "1.10" + +before_install: + - go get -u gopkg.in/alecthomas/gometalinter.v1 + - gometalinter.v1 --install + +script: + - gometalinter.v1 --config doc/example/metalinter.json ./... 
+ +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/CHANGES.md b/CHANGES.md new file mode 100644 index 00000000..a3cd8d16 --- /dev/null +++ b/CHANGES.md @@ -0,0 +1,69 @@ +# 更新日志 + +## 2018-10 +- 2018-10-20 开源先锋日(OSCAR)对外正式开源发布代码 + +## 2018-09 +- 修复多个启发式建议不准确BUG,优化部分建议文案使得建议更清晰 +- 基于TiDB Parser完善多个DDL类型语句的建议 +- 新增lint report-type类型,支持Vim Plugin优化建议输出 +- 更新整理项目文档,开源准备 +- 2018-09-21 Gdevops SOAR首次对外进行技术分享宣传 + +## 2018-08 +- 利用docker临时容器进行daily测试 +- 添加main_test全功能回归测试 +- 修复在测试中发现的问题 +- mymysql合并MySQL8.0相关PR,修改vendor依赖 +- 改善HeuristicRule中的文案 +- 持续集成Vitess Parser的改进 +- NewQuery4Audit结构体中引入TiDB Parser +- 通过TiAST完成大量与 DDL 相关的TODO +- 修改heuristic rules检查的返回值,提升拓展性 +- 建议中引入Position,用于表示建议产生于SQL的位置 +- 新增多个HeuristicRule +- Makefile中添加依赖检查,优化Makefile中逻辑,添加新功能 +- 优化gometalinter性能,引入新的代码质量检测工具,提升代码质量 +- 引入 retool 用于管理依赖的工具 +- 优化 doc 文档 + +## 2018-07 +- 补充文档,添加项目LOGO +- 改善代码质量提升测试覆盖度 +- mymysql升级,支持MySQL 8.0 +- 提供remove-comment小工具 +- 提供索引重复检查小工具 +- HeuristicRule新增RuleSpaceAfterDot +- 支持字符集和Collation不相同时的隐式数据类型转换的检查 + +## 2018-06 +- 支持更多的SQL Rewrite规则 +- 添加SQL执行超时限制 +- 索引优化建议支持对约束的检查 +- 修复数据采样中null值处理不正确的问题 +- Explain支持last_query_cost + +## 2018-05 +- 添加数据采样功能 +- 添加语句执行安全检查 +- 支持DDL语法检查 +- 支持DDL在测试环境的执行 +- 支持隐式数据类型转换检查 +- 支持索引去重 +- 索引优化建议支持前缀索引 +- 支持SQL Pretty输出 + +## 2018-04 +- 支持语法检查 +- 支持测试环境 +- 支持MySQL原数据的获取 +- 支持基于数据库环境信息给予索引优化建议 +- 支持不依赖数据库原信息的简单索引优化建议 +- 添加日志模块 +- 引入配置文件 + +## 2018-03 +- 基本架构设计 +- 添加大量底层函数用于处理AST +- 添加Insert、Delete、Update转写成Select的基本函数 +- 支持MySQL Explain信息输出 diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..b4c41545 --- /dev/null +++ b/Makefile @@ -0,0 +1,206 @@ +# This is how we want to name the binary output +# +# use checkmake linter https://github.com/mrtazz/checkmake +# $ checkmake Makefile +# +BINARY=soar +PATH := ${GOPATH}/bin:$(PATH) + +# These are the values we want to pass for VERSION and BUILD +BUILD_TIME=`date +%Y%m%d%H%M` +COMMIT_VERSION=`git rev-parse HEAD` +GO_VERSION_MIN=1.10 + +# Add mysql version for testing `MYSQL_VERSION=5.7 make docker` +# use mysql:latest as default +MYSQL_VERSION := $(or ${MYSQL_VERSION}, ${MYSQL_VERSION}, latest) + +.PHONY: all
all: | fmt build + +# Dependency check +.PHONY: deps +deps: + @echo "\033[92mDependency check\033[0m" + @bash ./deps.sh + # The retool tools.json is setup from retool-install.sh + retool sync + retool do gometalinter.v2 install + +# Code format +.PHONY: fmt +fmt: + @echo "\033[92mRun gofmt on all source files ...\033[0m" + @echo "gofmt -l -s -w ..." + @ret=0 && for d in $$(go list -f '{{.Dir}}' ./... | grep -v /vendor/); do \ + gofmt -l -s -w $$d/*.go || ret=$$? ; \ + done ; exit $$ret + +# Run golang test cases +.PHONY: test +test: + @echo "\033[92mRun all test cases ...\033[0m" + go test ./... + @echo "test Success!" + +# Code Coverage +# colorful coverage numerical >=90% GREEN, <80% RED, Other YELLOW +.PHONY: cover +cover: test + @echo "\033[92mRun test cover check ...\033[0m" + go test -coverpkg=./... -coverprofile=coverage.data ./... 
| column -t + go tool cover -html=coverage.data -o coverage.html + go tool cover -func=coverage.data -o coverage.txt + @tail -n 1 coverage.txt | awk '{sub(/%/, "", $$NF); \ + if($$NF < 80) \ + {print "\033[91m"$$0"%\033[0m"} \ + else if ($$NF >= 90) \ + {print "\033[92m"$$0"%\033[0m"} \ + else \ + {print "\033[93m"$$0"%\033[0m"}}' + +# Builds the project +build: fmt tidb-parser + @echo "\033[92mBuilding ...\033[0m" + @bash ./genver.sh $(GO_VERSION_MIN) + @ret=0 && for d in $$(go list -f '{{if (eq .Name "main")}}{{.ImportPath}}{{end}}' ./...); do \ + go build $$d || ret=$$? ; \ + done ; exit $$ret + @echo "build Success!" + +.PHONY: fast +fast: + @echo "\033[92mBuilding ...\033[0m" + @bash ./genver.sh $(GO_VERSION_MIN) + @ret=0 && for d in $$(go list -f '{{if (eq .Name "main")}}{{.ImportPath}}{{end}}' ./...); do \ + go build $$d || ret=$$? ; \ + done ; exit $$ret + @echo "build Success!" + +# Installs our project: copies binaries +install: build + @echo "\033[92mInstall ...\033[0m" + go install ./... + @echo "install Success!" + +# Generate doc use -list* command +.PHONY: doc +doc: fast + @echo "\033[92mAuto generate doc ...\033[0m" + ./soar -list-heuristic-rules > doc/heuristic.md + ./soar -list-rewrite-rules > doc/rewrite.md + ./soar -list-report-types > doc/report_type.md + +# Add or change a heuristic rule +.PHONY: heuristic +heuristic: doc docker + @echo "\033[92mUpdate Heuristic rule golden files ...\033[0m" + go test github.com/XiaoMi/soar/advisor -v -update -run TestListHeuristicRules + go test github.com/XiaoMi/soar/advisor -v -update -run TestMergeConflictHeuristicRules + docker stop soar-mysql 2>/dev/null || true + +# Update vitess vendor +.PHONY: vitess +vitess: + @echo "\033[92mUpdate vitess deps ...\033[0m" + govendor fetch -v vitess.io/vitess/... 
+ +# Update tidb vendor +.PHONY: tidb +tidb: + @echo "\033[92mUpdate tidb deps ...\033[0m" + @echo -n "Current TiDB commit hash: " + @(cd ${GOPATH}/src/github.com/pingcap/tidb/ 2>/dev/null && git checkout master && git rev-parse HEAD) || echo "(init)" + go get -v -u github.com/pingcap/tidb/store/tikv + @echo -n "TiDB update to: " + @cd ${GOPATH}/src/github.com/pingcap/tidb/ && git rev-parse HEAD + +# Update all vendor +.PHONY: vendor +vendor: vitess tidb + +# make tidb parser +.PHONY: tidb-parser +tidb-parser: tidb + @echo "\033[92mimporting tidb sql parser ...\033[0m" + @cd ${GOPATH}/src/github.com/pingcap/tidb && git checkout -b soar ec9672cea6612481b1da845dbab620b7a5581ca4 && make parser + +# gometalinter +# 如果有不想改的lint问题可以使用metalinter.sh加黑名单 +#@bash doc/example/metalinter.sh +.PHONY: lint +lint: build + @echo "\033[92mRun linter check ...\033[0m" + CGO_ENABLED=0 retool do gometalinter.v2 -j 1 --config doc/example/metalinter.json ./... + retool do revive -formatter friendly --exclude vendor/... -config doc/example/revive.toml ./... 
+ retool do golangci-lint --tests=false run + @echo "gometalinter check your code is pretty good" + +.PHONY: release +release: deps build + @echo "\033[92mCross platform building for release ...\033[0m" + @for GOOS in darwin linux windows; do \ + for GOARCH in 386 amd64; do \ + for d in $$(go list -f '{{if (eq .Name "main")}}{{.ImportPath}}{{end}}' ./...); do \ + b=$$(basename $${d}) ; \ + echo "Building $${b}.$${GOOS}-$${GOARCH} ..."; \ + GOOS=$${GOOS} GOARCH=$${GOARCH} go build -ldflags="-s -w" -v -o $${b}.$${GOOS}-$${GOARCH} $$d 2>/dev/null ; \ + done ; \ + done ;\ + done + +.PHONY: docker +docker: + @echo "\033[92mBuild mysql test environment\033[0m" + @docker stop soar-mysql 2>/dev/null || true + @echo "docker run --name soar-mysql mysql:$(MYSQL_VERSION)" + @docker run --name soar-mysql --rm -d \ + -e MYSQL_ROOT_PASSWORD=1tIsB1g3rt \ + -e MYSQL_DATABASE=sakila \ + -p 3306:3306 \ + -v `pwd`/doc/example/sakila.sql.gz:/docker-entrypoint-initdb.d/sakila.sql.gz \ + mysql:$(MYSQL_VERSION) + + @echo -n "waiting for sakila database initializing " + @while ! mysql -h 127.0.0.1 -u root sakila -p1tIsB1g3rt -NBe "do 1;" 2>/dev/null; do \ + printf '.' ; \ + sleep 1 ; \ + done ; \ + echo '.' + @echo "mysql test environment is ready!" + +.PHONY: connect +connect: + mysql -h 127.0.0.1 -u root -p1tIsB1g3rt + +.PHONY: main_test +main_test: install + @echo "\033[92mrunning main_test\033[0m" + @echo "soar -list-test-sqls | soar" + @./doc/example/main_test.sh + @echo "main_test Success!" 
+ +.PHONY: daily +daily: | deps fmt vendor tidb-parser docker cover doc lint release install main_test clean logo + @echo "\033[92mdaily build finished\033[0m" + +.PHONY: logo +logo: + @echo "\033[93m" + @cat doc/images/logo.ascii + @echo "\033[m" + +# Cleans our projects: deletes binaries +.PHONY: clean +clean: + @echo "\033[92mCleanup ...\033[0m" + go clean + @for GOOS in darwin linux windows; do \ + for GOARCH in 386 amd64; do \ + rm -f ${BINARY}.$${GOOS}-$${GOARCH} ;\ + done ;\ + done + rm -f ${BINARY} coverage.* + find . -name "*.log" -delete + git clean -fi + docker stop soar-mysql 2>/dev/null || true diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 00000000..84fbfb3a --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,8 @@ + +Copyright 2018 Xiaomi, Inc. All Rights Reserved. +This product includes software developed by Xiaomi, Inc. +(http://www.mi.com/). +This product is licensed to you under the Apache License, Version 2.0 +(the "License"). You may not use this product except in compliance with +the License. 
+ diff --git a/README.md b/README.md new file mode 100644 index 00000000..d630b80b --- /dev/null +++ b/README.md @@ -0,0 +1,37 @@ +![SOAR](http://github.com/XiaoMi/soar/raw/master/doc/images/logo.png) + +[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/xiaomi-dba/soar) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](http://github.com/XiaoMi/soar/blob/master/LICENSE) + +[文档](http://github.com/XiaoMi/soar/tree/master/doc) | [FAQ](http://github.com/XiaoMi/soar/blob/master/doc/FAQ.md) | [变更记录](http://github.com/XiaoMi/soar/blob/master/CHANGES.md) | [路线图](http://github.com/XiaoMi/soar/blob/master/doc/roadmap.md) | [English](http://github.com/XiaoMi/soar/blob/master/README_EN.md) + +## SOAR + +SOAR(SQL Optimizer And Rewriter)是一个对SQL进行优化和改写的自动化工具。 由小米人工智能与云平台的数据库团队开发与维护。 + +## 功能特点 +* 跨平台支持(支持Linux, Mac环境,Windows环境理论上也支持,不过未全面测试) +* 支持基于启发式算法的语句优化 +* 支持复杂查询的多列索引优化(UPDATE, INSERT, DELETE, SELECT) +* 支持EXPLAIN信息丰富解读 +* 支持SQL指纹、压缩和美化 +* 支持同一张表多条ALTER请求合并 +* 支持自定义规则的SQL改写 + +## 快速入门 + +* [安装使用](http://github.com/XiaoMi/soar/blob/master/doc/install.md) +* [体系架构](http://github.com/XiaoMi/soar/blob/master/doc/structure.md) +* [配置文件](http://github.com/XiaoMi/soar/blob/master/doc/config.md) +* [常用命令](http://github.com/XiaoMi/soar/blob/master/doc/cheatsheet.md) +* [产品对比](http://github.com/XiaoMi/soar/blob/master/doc/comparison.md) +* [路线图](http://github.com/XiaoMi/soar/blob/master/doc/roadmap.md) + +## 交流与反馈 + +* 欢迎通过Github Issues提交问题报告与建议 +* QQ群: 779359816 +* [Gitter](https://gitter.im/xiaomi-dba/soar) + +## License + +[Apache License 2.0](http://github.com/XiaoMi/soar/blob/master/LICENSE). 
diff --git a/README_EN.md b/README_EN.md new file mode 100644 index 00000000..31c11daf --- /dev/null +++ b/README_EN.md @@ -0,0 +1,36 @@ +![SOAR](http://github.com/XiaoMi/soar/raw/master/doc/images/logo.png) + +[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/xiaomi-dba/soar) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](http://github.com/XiaoMi/soar/blob/master/LICENSE) + +[Docs](http://github.com/XiaoMi/soar/tree/master/doc) | [FAQ](http://github.com/XiaoMi/soar/blob/master/doc/FAQ_en.md) | [中文](http://github.com/XiaoMi/soar/blob/master/README.md) + +## SOAR + +SOAR (SQL Optimizer And Rewriter) is a tool, which can help SQL optimization and rewrite. It's developed and maintained by the DBA Team of Xiaomi AI&Cloud. + +## Features + +* Cross-platform support, such as Linux, Mac, and Windows +* Support Heuristic Rules Suggestion +* Support Complicate SQL Indexing Optimize +* Support EXPLAIN analyze for query plan +* Support SQL fingerprint, compress and built-in pretty print +* Support merge multi ALTER query into one SQL +* Support self-config rewrite rules from SQL Rewrite +* Suggestions were written in Chinese. But SOAR also gives many tools, which can be used without understanding Chinese. + +## QuickStart + +* [Install](http://github.com/XiaoMi/soar/blob/master/doc/install_en.md) +* [CheatSheet](http://github.com/XiaoMi/soar/blob/master/doc/cheatsheet_en.md) +* [Related works](http://github.com/XiaoMi/soar/blob/master/doc/comparison_en.md) + +## Communication + +* GitHub issues: bug reports, usage issues, feature requests +* [Gitter](https://gitter.im/xiaomi-dba/soar) +* IM QQ Group: 779359816 + +## License + +[Apache License 2.0](http://github.com/XiaoMi/soar/blob/master/LICENSE). 
diff --git a/VERSION b/VERSION new file mode 100644 index 00000000..a3df0a69 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.8.0 diff --git a/advisor/doc.go b/advisor/doc.go new file mode 100644 index 00000000..e435db2b --- /dev/null +++ b/advisor/doc.go @@ -0,0 +1,18 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package advisor contain heuristic rules, index rules and explain translator. +package advisor diff --git a/advisor/explainer.go b/advisor/explainer.go new file mode 100644 index 00000000..62c01b79 --- /dev/null +++ b/advisor/explainer.go @@ -0,0 +1,285 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package advisor + +import ( + "fmt" + "strings" + + "github.com/XiaoMi/soar/common" + "github.com/XiaoMi/soar/database" +) + +var explainRuleID int + +// [EXP.XXX]Rule +var explainRules map[string]Rule + +// [table_name]"suggest text" +var tablesSuggests map[string][]string + +/* +var explainIgnoreTables = []string{ + "dual", + "", +} +*/ + +// explain建议的形式 +// Item: EXP.XXX +// Severity: L[0-8] +// Summary: full table scan, not use index, full index scan... +// Content: XX TABLE xxx + +// +func checkExplainSelectType(exp *database.ExplainInfo) { + // 判断是否跳过不检查 + if len(common.Config.ExplainWarnSelectType) == 1 { + if common.Config.ExplainWarnSelectType[0] == "" { + return + } + } else if len(common.Config.ExplainWarnSelectType) < 1 { + return + } + + if exp.ExplainFormat == database.JSONFormatExplain { + // TODO + // JSON形式遍历分析不方便,转成Row格式也没有SelectType暂不处理 + return + } + for _, v := range common.Config.ExplainWarnSelectType { + for _, row := range exp.ExplainRows { + if row.SelectType == v && v != "" { + tablesSuggests[row.TableName] = append(tablesSuggests[row.TableName], fmt.Sprintf("SelectType:%s", row.SelectType)) + } + } + } +} + +// 用户可以设置AccessType的建议级别,匹配到的查询会给出建议 +func checkExplainAccessType(exp *database.ExplainInfo) { + // 判断是否跳过不检查 + if len(common.Config.ExplainWarnAccessType) == 1 { + if common.Config.ExplainWarnAccessType[0] == "" { + return + } + } else if len(common.Config.ExplainWarnAccessType) < 1 { + return + } + + rows := exp.ExplainRows + if exp.ExplainFormat == database.JSONFormatExplain { + // JSON形式遍历分析不方便,转成Row格式统一处理 + rows = database.ConvertExplainJSON2Row(exp.ExplainJSON) + } + for _, v := range common.Config.ExplainWarnAccessType { + for _, row := range rows { + if row.AccessType == v && v != "" { + tablesSuggests[row.TableName] = append(tablesSuggests[row.TableName], fmt.Sprintf("Scalability:%s", row.Scalability)) + } + } + } +} + +// TODO: +/* +func checkExplainPossibleKeys(exp *database.ExplainInfo) { + // 判断是否跳过不检查 + if 
common.Config.ExplainMinPossibleKeys == 0 { + return + } + + rows := exp.ExplainRows + if exp.ExplainFormat == database.JSONFormatExplain { + // JSON形式遍历分析不方便,转成Row格式统一处理 + rows = database.ConvertExplainJSON2Row(exp.ExplainJSON) + } + for _, row := range rows { + if len(row.PossibleKeys) < common.Config.ExplainMinPossibleKeys { + tablesSuggests[row.TableName] = append(tablesSuggests[row.TableName], fmt.Sprintf("PossibleKeys:%d < %d", + len(row.PossibleKeys), common.Config.ExplainMinPossibleKeys)) + } + } +} +*/ + +// TODO: +/* +func checkExplainKeyLen(exp *database.ExplainInfo) { +} +*/ + +// TODO: +/* +func checkExplainKey(exp *database.ExplainInfo) { + // 小于最小使用试用key数量 + //return intval($explainResult) < intval($userCond); + //explain-min-keys int +} +*/ + +func checkExplainRef(exp *database.ExplainInfo) { + rows := exp.ExplainRows + if exp.ExplainFormat == database.JSONFormatExplain { + // JSON形式遍历分析不方便,转成Row格式统一处理 + rows = database.ConvertExplainJSON2Row(exp.ExplainJSON) + } + for i, row := range rows { + if strings.Join(row.Ref, "") == "NULL" || strings.Join(row.Ref, "") == "" { + if i == 0 && len(rows) > 1 { + continue + } + tablesSuggests[row.TableName] = append(tablesSuggests[row.TableName], fmt.Sprintf("Ref:null")) + } + } +} + +func checkExplainRows(exp *database.ExplainInfo) { + // 判断是否跳过不检查 + if common.Config.ExplainMaxRows <= 0 { + return + } + + rows := exp.ExplainRows + if exp.ExplainFormat == database.JSONFormatExplain { + // JSON形式遍历分析不方便,转成Row格式统一处理 + rows = database.ConvertExplainJSON2Row(exp.ExplainJSON) + } + + for _, row := range rows { + if row.Rows >= common.Config.ExplainMaxRows { + tablesSuggests[row.TableName] = append(tablesSuggests[row.TableName], fmt.Sprintf("Rows:%d", row.Rows)) + } + } +} + +// TODO: +/* +func checkExplainExtra(exp *database.ExplainInfo) { + // 包含用户配置的逗号分隔关键词之一则提醒 + // return self::contains($explainResult, $userCond); + // explain-warn-extra []string +} +*/ + +func checkExplainFiltered(exp *database.ExplainInfo) { + 
// 判断是否跳过不检查 + if common.Config.ExplainMaxFiltered <= 0.001 { + return + } + + rows := exp.ExplainRows + if exp.ExplainFormat == database.JSONFormatExplain { + // JSON形式遍历分析不方便,转成Row格式统一处理 + rows = database.ConvertExplainJSON2Row(exp.ExplainJSON) + } + for i, row := range rows { + if i == 0 && len(rows) > 1 { + continue + } + if row.Filtered >= common.Config.ExplainMaxFiltered { + tablesSuggests[row.TableName] = append(tablesSuggests[row.TableName], fmt.Sprintf("Filtered:%.2f%s", row.Filtered, "%")) + } + } +} + +// ExplainAdvisor 基于explain信息给出建议 +func ExplainAdvisor(exp *database.ExplainInfo) map[string]Rule { + common.Log.Debug("ExplainAdvisor SQL: %v", exp.SQL) + explainRuleID = 0 + explainRules = make(map[string]Rule) + tablesSuggests = make(map[string][]string) + + checkExplainSelectType(exp) + checkExplainAccessType(exp) + checkExplainFiltered(exp) + checkExplainRef(exp) + checkExplainRows(exp) + + // 打印explain table + content := database.PrintMarkdownExplainTable(exp) + + if common.Config.ShowWarnings { + content += "\n" + database.MySQLExplainWarnings(exp) + } + + // 对explain table中各项难于理解的值做解释 + cases := database.ExplainInfoTranslator(exp) + + // 添加last_query_cost + if common.Config.ShowLastQueryCost { + content += "\n" + database.MySQLExplainQueryCost(exp) + } + + if content != "" { + explainRules["EXP.000"] = Rule{ + Item: "EXP.000", + Severity: "L0", + Summary: "Explain信息", + Content: content, + Case: cases, + Func: (*Query4Audit).RuleOK, + } + } + /* + for t, s := range tablesSuggests { + // 检查explain对应的表是否需要跳过,如dual,空表等 + ig := false + for _, ti := range explainIgnoreTables { + if ti == t { + ig = true + } + } + if ig { + continue + } + ruleId := fmt.Sprintf("EXP.%03d", explainRuleId+1) + explainRuleId = explainRuleId + 1 + explainRules[ruleId] = Rule{ + Item: ruleId, + Severity: "L0", + Summary: fmt.Sprintf("表 `%s` 查询效率不高", t), + Content: fmt.Sprint("原因:", strings.Join(s, ",")), + Case: "", + Func: (*Query4Audit).RuleOK, + } + } + */ + return 
explainRules +} + +// DigestExplainText 分析用户输入的EXPLAIN信息 +func DigestExplainText(text string) { + // explain信息就不要显示完美了,美不美自己看吧。 + common.Config.IgnoreRules = append(common.Config.IgnoreRules, "OK") + + if !IsIgnoreRule("EXP.") { + explainInfo, err := database.ParseExplainText(text) + if err != nil { + common.Log.Error("main ParseExplainText Error: %v", err) + return + } + expSuggest := ExplainAdvisor(explainInfo) + _, output := FormatSuggest("", common.Config.ReportType, expSuggest) + if common.Config.ReportType == "html" { + fmt.Println(common.MarkdownHTMLHeader()) + fmt.Println(common.Markdown2HTML(output)) + } else { + fmt.Println(output) + } + } +} diff --git a/advisor/explainer_test.go b/advisor/explainer_test.go new file mode 100644 index 00000000..0fc8a86a --- /dev/null +++ b/advisor/explainer_test.go @@ -0,0 +1,37 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package advisor + +import ( + "testing" + + "github.com/XiaoMi/soar/common" +) + +func TestDigestExplainText(t *testing.T) { + var text = `+----+-------------+---------+-------+---------------------------------------------------------+-------------------+---------+---------------------------+------+-------------+ +| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra | ++----+-------------+---------+-------+---------------------------------------------------------+-------------------+---------+---------------------------+------+-------------+ +| 1 | SIMPLE | country | index | PRIMARY,country_id | country | 152 | NULL | 109 | Using index | +| 1 | SIMPLE | city | ref | idx_fk_country_id,idx_country_id_city,idx_all,idx_other | idx_fk_country_id | 2 | sakila.country.country_id | 2 | Using index | ++----+-------------+---------+-------+---------------------------------------------------------+-------------------+---------+---------------------------+------+-------------+` + common.Config.ReportType = "explain-digest" + err := common.GoldenDiff(func() { DigestExplainText(text) }, t.Name(), update) + if nil != err { + t.Fatal(err) + } +} diff --git a/advisor/heuristic.go b/advisor/heuristic.go new file mode 100644 index 00000000..b95cfe83 --- /dev/null +++ b/advisor/heuristic.go @@ -0,0 +1,3252 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package advisor + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/XiaoMi/soar/ast" + "github.com/XiaoMi/soar/common" + "github.com/XiaoMi/soar/database" + + "github.com/gedex/inflector" + "github.com/percona/go-mysql/query" + tidb "github.com/pingcap/tidb/ast" + "github.com/pingcap/tidb/mysql" + "github.com/pingcap/tidb/types" + "vitess.io/vitess/go/vt/sqlparser" +) + +// RuleOK OK +func (q *Query4Audit) RuleOK() Rule { + return HeuristicRules["OK"] +} + +// RuleImplicitAlias ALI.001 +func (q *Query4Audit) RuleImplicitAlias() Rule { + var rule = q.RuleOK() + tkns := ast.Tokenizer(q.Query) + if tkns[0].Type != sqlparser.SELECT { + return rule + } + for i, tkn := range tkns { + if tkn.Type == sqlparser.ID && i+1 < len(tkns) && tkn.Type == tkns[i+1].Type { + rule = HeuristicRules["ALI.001"] + break + } + } + return rule +} + +// RuleStarAlias ALI.002 +func (q *Query4Audit) RuleStarAlias() Rule { + var rule = q.RuleOK() + re := regexp.MustCompile(`(?i)(\*\s+as\b)`) + if re.FindString(q.Query) != "" { + rule = HeuristicRules["ALI.002"] + } + return rule +} + +// RuleSameAlias ALI.003 +func (q *Query4Audit) RuleSameAlias() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case *sqlparser.AliasedExpr: + switch n := expr.Expr.(type) { + case *sqlparser.ColName: + if n.Name.String() == expr.As.String() { + rule = HeuristicRules["ALI.003"] + return false, nil + } + } + case *sqlparser.AliasedTableExpr: + switch n := expr.Expr.(type) { + case sqlparser.TableName: + if n.Name.String() == expr.As.String() { + rule = HeuristicRules["ALI.003"] + return false, nil + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RulePrefixLike ARG.001 +func (q *Query4Audit) RulePrefixLike() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := 
node.(type) { + case *sqlparser.ComparisonExpr: + if expr.Operator == "like" { + switch sqlval := expr.Right.(type) { + case *sqlparser.SQLVal: + // prefix like with '%', '_' + if sqlval.Type == 0 && (sqlval.Val[0] == 0x25 || sqlval.Val[0] == 0x5f) { + rule = HeuristicRules["ARG.001"] + return false, nil + } + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleEqualLike ARG.002 +func (q *Query4Audit) RuleEqualLike() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case *sqlparser.ComparisonExpr: + if expr.Operator == "like" { + switch sqlval := expr.Right.(type) { + case *sqlparser.SQLVal: + // not start with '%', '_' && not end with '%', '_' + if sqlval.Type == 0 { + if sqlval.Val[0] != 0x25 && + sqlval.Val[0] != 0x5f && + sqlval.Val[len(sqlval.Val)-1] != 0x5f && + sqlval.Val[len(sqlval.Val)-1] != 0x25 { + rule = HeuristicRules["ARG.002"] + return false, nil + } + } else { + rule = HeuristicRules["ARG.002"] + return false, nil + } + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleImplicitConversion ARG.003 +// 隐式类型转换检查:该项检查一定是在开启测试环境或线上环境情境下下进行的 +func (idxAdv *IndexAdvisor) RuleImplicitConversion() Rule { + /* + * 两个参数至少有一个是 NULL 时,比较的结果也是 NULL,例外是使用 <=> 对两个 NULL 做比较时会返回 1,这两种情况都不需要做类型转换 + * 两个参数都是字符串,会按照字符串来比较,不做类型转换 + * 两个参数都是整数,按照整数来比较,不做类型转换 + * 十六进制的值和非数字做比较时,会被当做二进制串 + * 有一个参数是 TIMESTAMP 或 DATETIME,并且另外一个参数是常量,常量会被转换为 timestamp + * 有一个参数是 decimal 类型,如果另外一个参数是 decimal 或者整数,会将整数转换为 decimal 后进行比较,如果另外一个参数是浮点数,则会把 decimal 转换为浮点数进行比较 + * 所有其他情况下,两个参数都会被转换为浮点数再进行比较 + */ + rule := HeuristicRules["OK"] + // 未开启测试环境不进行检查 + if common.Config.TestDSN.Disable { + return rule + } + + var content string + conditions := ast.FindAllCondition(idxAdv.Ast) + for _, cond := range conditions { + var colList []*common.Column + var values []*sqlparser.SQLVal + + // condition 左右两侧有且只有如下几种可能: 
+ // 1. 左列 & 右列 + // 2. 左列 & 右值(含函数) (或相反) + // 3. 左值(含函数) & 右值(含函数) (无需关注) + switch node := cond.(type) { + case *sqlparser.ComparisonExpr: + // 获取condition左侧的信息 + switch nLeft := node.Left.(type) { + case *sqlparser.SQLVal, *sqlparser.ValTuple: + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch val := node.(type) { + case *sqlparser.SQLVal: + values = append(values, val) + } + return true, nil + }, nLeft) + common.LogIfError(err, "") + + case *sqlparser.ColName: + left := &common.Column{Name: nLeft.Name.String()} + if !nLeft.Qualifier.Name.IsEmpty() { + left.Table = nLeft.Qualifier.Name.String() + } + colList = append(colList, left) + } + + // 获取condition右侧的信息 + switch nRight := node.Right.(type) { + case *sqlparser.SQLVal, *sqlparser.ValTuple: + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch val := node.(type) { + case *sqlparser.SQLVal: + values = append(values, val) + } + return true, nil + }, nRight) + common.LogIfError(err, "") + + case *sqlparser.ColName: + right := &common.Column{Name: nRight.Name.String()} + if !nRight.Qualifier.Name.IsEmpty() { + right.Table = nRight.Qualifier.Name.String() + } + colList = append(colList, right) + } + + if len(colList) == 0 { + continue + } + + // 补全列信息 + colList = CompleteColumnsInfo(idxAdv.Ast, colList, idxAdv.vEnv) + + // 列与列比较 + if len(colList) == 2 { + // 列信息补全后如果依然没有表信息,说明在该数据库中不存在该列 + // 如果列信息获取异常,可能会存在无法获取到数据类型的情况,对于这种情况将不会给予建议。 + needBreak := false + for _, col := range colList { + if col.Table == "" { + common.Log.Warning("Column %s not exists", col.Name) + needBreak = true + } + + if col.DataType == "" { + common.Log.Warning("Can't get column %s data type", col.Name) + needBreak = true + } + + } + + if needBreak { + break + } + + // 检查数据类型不一致导致的隐式数据转换 + type1 := common.GetDataTypeBase(colList[0].DataType) + type2 := common.GetDataTypeBase(colList[1].DataType) + common.Log.Debug("DataType: `%s`.`%s` (%s) VS `%s`.`%s` (%s)", + 
colList[0].Table, colList[0].Name, type1, + colList[1].Table, colList[1].Name, type2) + if strings.ToLower(type1) != strings.ToLower(type2) { + content += fmt.Sprintf("`%s`.`%s` (%s) VS `%s`.`%s` (%s) datatype not match", + colList[0].Table, colList[0].Name, type1, + colList[1].Table, colList[1].Name, type2) + continue + } + + // 检查字符集不一致导致的隐式数据转换 + common.Log.Debug("Charset: `%s`.`%s` (%s) VS `%s`.`%s` (%s)", + colList[0].Table, colList[0].Name, colList[0].Character, + colList[1].Table, colList[1].Name, colList[1].Character) + if colList[0].Character != colList[1].Character { + content += fmt.Sprintf("`%s`.`%s` (%s) VS `%s`.`%s` (%s) charset not match", + colList[0].Table, colList[0].Name, colList[0].Character, + colList[1].Table, colList[1].Name, colList[1].Character) + continue + } + + // 检查排序排序不一致导致的隐式数据转换 + common.Log.Debug("Collation: `%s`.`%s` (%s) VS `%s`.`%s` (%s)", + colList[0].Table, colList[0].Name, colList[0].Collation, + colList[1].Table, colList[1].Name, colList[1].Collation) + if colList[0].Collation != colList[1].Collation { + content += fmt.Sprintf("`%s`.`%s` (%s) VS `%s`.`%s` (%s) collation not match", + colList[0].Table, colList[0].Name, colList[0].Collation, + colList[1].Table, colList[1].Name, colList[1].Collation) + continue + } + } + + typMap := map[sqlparser.ValType][]string{ + // date, time, datetime, timestamp, year + sqlparser.StrVal: { + "char", "varchar", "tinytext", "text", "mediumtext", "longtext", + "date", "time", "datetime", "timestamp", "year", + }, + sqlparser.IntVal: { + "tinyint", "smallint", "mediumint", "int", "integer", "bigint", "timestamp", "year", + }, + sqlparser.FloatVal: { + "float", "double", "real", "decimal", + }, + } + + typNameMap := map[sqlparser.ValType]string{ + sqlparser.StrVal: "string", + sqlparser.IntVal: "int", + sqlparser.FloatVal: "float", + } + + // 列与值比较 + for _, val := range values { + if colList[0].DataType == "" { + common.Log.Debug("Can't get %s datatype", colList[0].Name) + break + } + + 
isCovered := true + if types, ok := typMap[val.Type]; ok { + for _, t := range types { + if strings.HasPrefix(colList[0].DataType, t) { + isCovered = false + } + } + } + + if isCovered { + if colList[0].Table == "" { + common.Log.Warning("Column %s not exists", colList[0].Name) + continue + } + + c := fmt.Sprintf("%s.%s definition is %s not %s", + colList[0].Table, colList[0].Name, colList[0].DataType, typNameMap[val.Type]) + + common.Log.Debug("Implicit data type conversion: %s", c) + content += c + } + } + + case *sqlparser.RangeCond: + // TODO + case *sqlparser.IsExpr: + // TODO + } + } + if content != "" { + rule = HeuristicRules["ARG.003"] + rule.Content = content + } + return rule +} + +// RuleNoWhere CLA.001 & CLA.014 & CLA.015 +func (q *Query4Audit) RuleNoWhere() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.Select: + if n.Where == nil && sqlparser.String(n.From) != "dual" { + rule = HeuristicRules["CLA.001"] + return false, nil + } + case *sqlparser.Delete: + if n.Where == nil { + rule = HeuristicRules["CLA.014"] + return false, nil + } + case *sqlparser.Update: + if n.Where == nil { + rule = HeuristicRules["CLA.015"] + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleOrderByRand CLA.002 +func (q *Query4Audit) RuleOrderByRand() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case sqlparser.OrderBy: + for _, order := range n { + switch expr := order.Expr.(type) { + case *sqlparser.FuncExpr: + if expr.Name.String() == "rand" { + rule = HeuristicRules["CLA.002"] + return false, nil + } + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleOffsetLimit CLA.003 +func (q *Query4Audit) RuleOffsetLimit() Rule { + var rule = q.RuleOK() + err := 
sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.Limit: + if n != nil && n.Offset != nil { + switch v := n.Offset.(type) { + case *sqlparser.SQLVal: + offset, err := strconv.Atoi(string(v.Val)) + // 检查一下Offset阈值,太小了给这个建议也没什么用,阈值写死了没加配置 + if err == nil && offset > 1000 { + rule = HeuristicRules["CLA.003"] + return false, nil + } + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleGroupByConst CLA.004 +func (q *Query4Audit) RuleGroupByConst() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case sqlparser.GroupBy: + for _, group := range n { + switch group.(type) { + case *sqlparser.SQLVal: + rule = HeuristicRules["CLA.004"] + return false, nil + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleGroupByConst GRP.001 +func (idxAdv *IndexAdvisor) RuleGroupByConst() Rule { + rule := HeuristicRules["OK"] + + // 非GroupBy语句 + if len(idxAdv.groupBy) == 0 || len(idxAdv.whereEQ) == 0 { + return rule + } + + for _, groupByCols := range idxAdv.groupBy { + for _, whereEQCols := range idxAdv.whereEQ { + if (groupByCols.Name == whereEQCols.Name) && + (groupByCols.DB == whereEQCols.DB) && + (groupByCols.Table == whereEQCols.Table) { + rule = HeuristicRules["GRP.001"] + break + } + } + } + return rule +} + +// RuleOrderByConst CLA.005 +func (q *Query4Audit) RuleOrderByConst() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case sqlparser.OrderBy: + for _, order := range n { + switch order.Expr.(type) { + case *sqlparser.SQLVal: + rule = HeuristicRules["CLA.005"] + return false, nil + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleOrderByConst CLA.005 +// TODO: SELECT col FROM tbl 
WHERE col IN('NEWS') ORDER BY col; +func (idxAdv *IndexAdvisor) RuleOrderByConst() Rule { + rule := HeuristicRules["OK"] + + // 非GroupBy语句 + if len(idxAdv.orderBy) == 0 || len(idxAdv.whereEQ) == 0 { + return rule + } + + for _, groupbyCols := range idxAdv.orderBy { + for _, whereEQCols := range idxAdv.whereEQ { + if (groupbyCols.Name == whereEQCols.Name) && + (groupbyCols.DB == whereEQCols.DB) && + (groupbyCols.Table == whereEQCols.Table) { + rule = HeuristicRules["CLA.005"] + break + } + } + } + return rule +} + +// RuleDiffGroupByOrderBy CLA.006 +func (q *Query4Audit) RuleDiffGroupByOrderBy() Rule { + var rule = q.RuleOK() + var groupbyTbls []sqlparser.TableIdent + var orderbyTbls []sqlparser.TableIdent + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case sqlparser.GroupBy: + // 检查group by涉及到表的个数 + for _, group := range n { + switch g := group.(type) { + case *sqlparser.ColName: + tblExist := false + for _, t := range groupbyTbls { + if t.String() == g.Qualifier.Name.String() { + tblExist = true + } + } + if !tblExist { + groupbyTbls = append(groupbyTbls, g.Qualifier.Name) + if len(groupbyTbls) > 1 { + rule = HeuristicRules["CLA.006"] + + return false, nil + } + } + } + } + case sqlparser.OrderBy: + // 检查order by涉及到表的个数 + for _, order := range n { + switch o := order.Expr.(type) { + case *sqlparser.ColName: + tblExist := false + for _, t := range orderbyTbls { + if t.String() == o.Qualifier.Name.String() { + tblExist = true + } + } + if !tblExist { + orderbyTbls = append(orderbyTbls, o.Qualifier.Name) + if len(orderbyTbls) > 1 { + rule = HeuristicRules["CLA.006"] + + return false, nil + } + } + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + + if rule.Item == "OK" { + // 检查group by, order by涉及到表的个数 + for _, g := range groupbyTbls { + tblExist := false + for _, o := range orderbyTbls { + if g.String() == o.String() { + tblExist = true + } + } + if !tblExist && len(orderbyTbls) 
> 0 { + rule = HeuristicRules["CLA.006"] + + return rule + } + } + } + + return rule +} + +// RuleMixOrderBy CLA.007 +func (q *Query4Audit) RuleMixOrderBy() Rule { + var rule = q.RuleOK() + var direction string + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case sqlparser.OrderBy: + for _, order := range n { + // 比较相邻两个order by列的方向 + if direction != "" && order.Direction != direction { + rule = HeuristicRules["CLA.007"] + + return false, nil + } + direction = order.Direction + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleExplicitOrderBy CLA.008 +func (q *Query4Audit) RuleExplicitOrderBy() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.Select: + // 有group by,但没有order by + if n.GroupBy != nil && n.OrderBy == nil { + rule = HeuristicRules["CLA.008"] + + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleOrderByExpr CLA.009 +func (q *Query4Audit) RuleOrderByExpr() Rule { + var rule = q.RuleOK() + var orderByCols []string + var selectCols []string + funcExp := regexp.MustCompile(`[a-z0-9]\(`) + allowExp := regexp.MustCompile("[a-z0-9_,.` ()]") + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case sqlparser.OrderBy: + orderBy := sqlparser.String(n) + // 函数名方式,如:from_unixtime(col) + if funcExp.MatchString(orderBy) { + rule = HeuristicRules["CLA.009"] + + return false, nil + } + + // 运算符方式,如:colA - colB + trim := allowExp.ReplaceAllFunc([]byte(orderBy), func(s []byte) []byte { + return []byte("") + }) + if string(trim) != "" { + rule = HeuristicRules["CLA.009"] + + return false, nil + } + + for _, o := range strings.Split(strings.TrimPrefix(orderBy, " order by "), ",") { + orderByCols = append(orderByCols, 
strings.TrimSpace(strings.Split(o, " ")[0])) + } + case *sqlparser.Select: + for _, s := range n.SelectExprs { + selectCols = append(selectCols, sqlparser.String(s)) + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + + // AS情况,如:SELECT colA-colB a FROM tbl ORDER BY a; + for _, o := range orderByCols { + if o == "" { + continue + } + for _, s := range selectCols { + if strings.HasSuffix(s, " as "+o) { + buf := strings.TrimSuffix(s, " as "+o) + // 运算符 + trim := allowExp.ReplaceAllFunc([]byte(buf), func(s []byte) []byte { + return []byte("") + }) + if string(trim) != "" { + rule = HeuristicRules["CLA.009"] + + } + // 函数 + if funcExp.MatchString(s) { + rule = HeuristicRules["CLA.009"] + + } + } + } + } + return rule +} + +// RuleGroupByExpr CLA.010 +func (q *Query4Audit) RuleGroupByExpr() Rule { + var rule = q.RuleOK() + var groupByCols []string + var selectCols []string + funcExp := regexp.MustCompile(`(?i)[a-z0-9]\(`) + allowExp := regexp.MustCompile("(?i)[a-z0-9_,.` ()]") + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case sqlparser.GroupBy: + groupBy := sqlparser.String(n) + // 函数名方式,如:from_unixtime(col) + if funcExp.MatchString(groupBy) { + rule = HeuristicRules["CLA.010"] + + return false, nil + } + + // 运算符方式,如:colA - colB + trim := allowExp.ReplaceAllFunc([]byte(groupBy), func(s []byte) []byte { + return []byte("") + }) + if string(trim) != "" { + rule = HeuristicRules["CLA.010"] + + return false, nil + } + + for _, o := range strings.Split(strings.TrimPrefix(groupBy, " group by "), ",") { + groupByCols = append(groupByCols, strings.TrimSpace(strings.Split(o, " ")[0])) + } + case *sqlparser.Select: + for _, s := range n.SelectExprs { + selectCols = append(selectCols, sqlparser.String(s)) + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + + // AS情况,如:SELECT colA-colB a FROM tbl GROUP BY a; + for _, g := range groupByCols { + if g == "" { + continue + } + for 
_, s := range selectCols { + if strings.HasSuffix(s, " as "+g) { + buf := strings.TrimSuffix(s, " as "+g) + // 运算符 + trim := allowExp.ReplaceAllFunc([]byte(buf), func(s []byte) []byte { + return []byte("") + }) + if string(trim) != "" { + rule = HeuristicRules["CLA.010"] + + } + // 函数 + if funcExp.MatchString(s) { + rule = HeuristicRules["CLA.010"] + + } + } + } + } + return rule +} + +// RuleTblCommentCheck CLA.011 +func (q *Query4Audit) RuleTblCommentCheck() Rule { + var rule = q.RuleOK() + switch node := q.Stmt.(type) { + case *sqlparser.DDL: + if node.Action != "create" { + return rule + } + if node.TableSpec == nil { + return rule + } + if options := node.TableSpec.Options; options == "" { + rule = HeuristicRules["CLA.011"] + + } else { + reg := regexp.MustCompile("(?i)comment") + if !reg.MatchString(options) { + rule = HeuristicRules["CLA.011"] + } + } + } + return rule +} + +// RuleSelectStar COL.001 +func (q *Query4Audit) RuleSelectStar() Rule { + var rule = q.RuleOK() + // 先把count(*)替换为count(1) + re := regexp.MustCompile(`(?i)count\s*\(\s*\*\s*\)`) + sql := re.ReplaceAllString(q.Query, "count(1)") + stmt, err := sqlparser.Parse(sql) + if err != nil { + common.Log.Debug("RuleSelectStar sqlparser.Parse Error: %v", err) + return rule + } + err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.StarExpr: + rule = HeuristicRules["COL.001"] + return false, nil + } + return true, nil + }, stmt) + common.LogIfError(err, "") + return rule +} + +// RuleInsertColDef COL.002 +func (q *Query4Audit) RuleInsertColDef() Rule { + var rule = q.RuleOK() + switch node := q.Stmt.(type) { + case *sqlparser.Insert: + if node.Columns == nil { + rule = HeuristicRules["COL.002"] + return rule + } + } + return rule +} + +// RuleAddDefaultValue COL.004 +func (q *Query4Audit) RuleAddDefaultValue() Rule { + var rule = q.RuleOK() + for _, node := range q.TiStmt { + switch n := node.(type) { + case *tidb.CreateTableStmt: + 
for _, c := range n.Cols { + colDefault := false + for _, o := range c.Options { + // 忽略AutoIncrement类型的默认值检查 + if o.Tp == tidb.ColumnOptionDefaultValue || o.Tp == tidb.ColumnOptionAutoIncrement { + colDefault = true + } + } + if !colDefault { + rule = HeuristicRules["COL.004"] + break + } + } + case *tidb.AlterTableStmt: + for _, s := range n.Specs { + switch s.Tp { + case tidb.AlterTableAddColumns, tidb.AlterTableChangeColumn, tidb.AlterTableModifyColumn: + for _, c := range s.NewColumns { + colDefault := false + for _, o := range c.Options { + // 忽略AutoIncrement类型的默认值检查 + if o.Tp == tidb.ColumnOptionDefaultValue || o.Tp == tidb.ColumnOptionAutoIncrement { + colDefault = true + } + } + if !colDefault { + rule = HeuristicRules["COL.004"] + break + } + } + } + } + } + } + return rule +} + +// RuleColCommentCheck COL.005 +func (q *Query4Audit) RuleColCommentCheck() Rule { + var rule = q.RuleOK() + for _, node := range q.TiStmt { + switch n := node.(type) { + case *tidb.CreateTableStmt: + for _, c := range n.Cols { + colComment := false + for _, o := range c.Options { + if o.Tp == tidb.ColumnOptionComment { + colComment = true + } + } + if !colComment { + rule = HeuristicRules["COL.005"] + break + } + } + case *tidb.AlterTableStmt: + for _, s := range n.Specs { + switch s.Tp { + case tidb.AlterTableAddColumns, tidb.AlterTableChangeColumn, tidb.AlterTableModifyColumn: + for _, c := range s.NewColumns { + colComment := false + for _, o := range c.Options { + if o.Tp == tidb.ColumnOptionComment { + colComment = true + } + } + if !colComment { + rule = HeuristicRules["COL.005"] + break + } + } + } + } + } + } + return rule +} + +// RuleIPString LIT.001 +func (q *Query4Audit) RuleIPString() Rule { + var rule = q.RuleOK() + re := regexp.MustCompile(`['"]\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}`) + if re.FindString(q.Query) != "" { + rule = HeuristicRules["LIT.001"] + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + } + return 
rule +} + +// RuleDataNotQuote LIT.002 +func (q *Query4Audit) RuleDataNotQuote() Rule { + var rule = q.RuleOK() + // 2010-01-01 + re := regexp.MustCompile(`.\d{4}\s*-\s*\d{1,2}\s*-\s*\d{1,2}\b`) + sqls := re.FindAllString(q.Query, -1) + for _, sql := range sqls { + re = regexp.MustCompile(`^['"\w-].*`) + if re.FindString(sql) == "" { + rule = HeuristicRules["LIT.002"] + } + } + + // 10-01-01 + re = regexp.MustCompile(`.\d{2}\s*-\s*\d{1,2}\s*-\s*\d{1,2}\b`) + sqls = re.FindAllString(q.Query, -1) + for _, sql := range sqls { + re = regexp.MustCompile(`^['"\w-].*`) + if re.FindString(sql) == "" { + rule = HeuristicRules["LIT.002"] + } + } + + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + return rule +} + +// RuleSQLCalcFoundRows KWR.001 +func (q *Query4Audit) RuleSQLCalcFoundRows() Rule { + var rule = q.RuleOK() + tkns := ast.Tokenizer(q.Query) + for _, tkn := range tkns { + if tkn.Val == "sql_calc_found_rows" { + rule = HeuristicRules["KWR.001"] + break + } + } + return rule +} + +// RuleCommaAnsiJoin JOI.001 +func (q *Query4Audit) RuleCommaAnsiJoin() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.Select: + ansiJoin := false + commaJoin := false + for _, f := range n.From { + switch f.(type) { + case *sqlparser.JoinTableExpr: + ansiJoin = true + case *sqlparser.AliasedTableExpr: + commaJoin = true + } + } + if ansiJoin && commaJoin { + rule = HeuristicRules["JOI.001"] + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleDupJoin JOI.002 +func (q *Query4Audit) RuleDupJoin() Rule { + var rule = q.RuleOK() + var tables []string + switch q.Stmt.(type) { + // TODO: 这里未检查UNION SELECT + case *sqlparser.Union: + return rule + default: + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case 
*sqlparser.AliasedTableExpr: + switch table := n.Expr.(type) { + case sqlparser.TableName: + for _, t := range tables { + if t == table.Name.String() { + rule = HeuristicRules["JOI.002"] + return false, nil + } + } + tables = append(tables, table.Name.String()) + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + } + return rule +} + +// RuleImpossibleOuterJoin JOI.003 +// TODO: 未实现完 +func (idxAdv *IndexAdvisor) RuleImpossibleOuterJoin() Rule { + rule := HeuristicRules["OK"] + + var joinTables []string // JOIN相关表名 + var whereEQTables []string // WHERE等值判断条件表名 + var joinNotWhereTables []string // 是JOIN相关表,但未出现在WHERE等值判断条件中的表名 + + // 非JOIN语句 + if len(idxAdv.joinCond) == 0 || len(idxAdv.whereEQ) == 0 { + return rule + } + + for _, l1 := range idxAdv.joinCond { + for _, l2 := range l1 { + if l2.Table != "" && l2.Table != "dual" { + joinTables = append(joinTables, l2.Table) + } + } + } + + for _, w := range idxAdv.whereEQ { + whereEQTables = append(whereEQTables, w.Table) + } + + for _, j := range joinTables { + found := false + for _, w := range whereEQTables { + if j == w { + found = true + } + } + if !found { + joinNotWhereTables = append(joinNotWhereTables, j) + } + } + + // TODO: + fmt.Println(joinNotWhereTables) + /* + if len(joinNotWhereTables) == 0 { + rule = HeuristicRules["JOI.003"] + } + */ + rule = HeuristicRules["JOI.003"] + return rule +} + +// TODO: JOI.004 + +// RuleNoDeterministicGroupby RES.001 +func (q *Query4Audit) RuleNoDeterministicGroupby() Rule { + var rule = q.RuleOK() + var groupbyCols []*common.Column + var selectCols []*common.Column + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.Select: + // 过滤select列 + selectCols = ast.FindColumn(n.SelectExprs) + // 过滤group by列 + groupbyCols = ast.FindColumn(n.GroupBy) + // `select *`, but not `select count(*)` + if strings.Contains(sqlparser.String(n), " * ") && len(groupbyCols) > 0 { + rule = 
HeuristicRules["RES.001"] + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + + // TODO:暂时只检查了列名,未对库表名进行检查,也未处理AS + for _, s := range selectCols { + // 无group by退出 + if len(groupbyCols) == 0 { + break + } + found := false + for _, g := range groupbyCols { + if g.Name == s.Name { + found = true + } + } + if !found { + rule = HeuristicRules["RES.001"] + break + } + } + return rule +} + +// RuleNoDeterministicLimit RES.002 +func (q *Query4Audit) RuleNoDeterministicLimit() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.Select: + if n.Limit != nil && n.OrderBy == nil { + rule = HeuristicRules["RES.002"] + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleUpdateDeleteWithLimit RES.003 +func (q *Query4Audit) RuleUpdateDeleteWithLimit() Rule { + var rule = q.RuleOK() + switch s := q.Stmt.(type) { + case *sqlparser.Update: + if s.Limit != nil { + rule = HeuristicRules["RES.003"] + } + } + return rule +} + +// RuleUpdateDeleteWithOrderby RES.004 +func (q *Query4Audit) RuleUpdateDeleteWithOrderby() Rule { + var rule = q.RuleOK() + switch s := q.Stmt.(type) { + case *sqlparser.Update: + if s.OrderBy != nil { + rule = HeuristicRules["RES.004"] + } + } + return rule +} + +// RuleUpdateSetAnd RES.005 +func (q *Query4Audit) RuleUpdateSetAnd() Rule { + var rule = q.RuleOK() + switch s := q.Stmt.(type) { + case *sqlparser.Update: + if strings.Contains(sqlparser.String(s.Exprs), " and ") { + rule = HeuristicRules["RES.005"] + } + } + return rule +} + +// RuleImpossibleWhere RES.006 +func (q *Query4Audit) RuleImpossibleWhere() Rule { + var rule = q.RuleOK() + // BETWEEN 10 AND 5 + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.RangeCond: + if n.Operator == "between" { + from := 0 + to := 0 + 
switch s := n.From.(type) { + case *sqlparser.SQLVal: + from, _ = strconv.Atoi(string(s.Val)) + } + switch s := n.To.(type) { + case *sqlparser.SQLVal: + to, _ = strconv.Atoi(string(s.Val)) + } + if from > to { + rule = HeuristicRules["RES.006"] + return false, nil + } + } + case *sqlparser.ComparisonExpr: + factor := false + switch n.Operator { + case "!=", "<>": + case "=", "<=>": + factor = true + default: + return true, nil + } + + var left []byte + var right []byte + + // left + switch l := n.Left.(type) { + case *sqlparser.SQLVal: + left = l.Val + default: + return true, nil + } + + // right + switch r := n.Right.(type) { + case *sqlparser.SQLVal: + right = r.Val + default: + return true, nil + } + + // compare + if (!bytes.Equal(left, right) && factor) || (bytes.Equal(left, right) && !factor) { + rule = HeuristicRules["RES.006"] + } + return false, nil + } + + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleMeaninglessWhere RES.007 +func (q *Query4Audit) RuleMeaninglessWhere() Rule { + var rule = q.RuleOK() + // 1=1, 0=0 + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.ComparisonExpr: + factor := false + switch n.Operator { + case "!=", "<>": + factor = true + case "=", "<=>": + default: + return true, nil + } + + var left []byte + var right []byte + + // left + switch l := n.Left.(type) { + case *sqlparser.SQLVal: + left = l.Val + default: + return true, nil + } + + // right + switch r := n.Right.(type) { + case *sqlparser.SQLVal: + right = r.Val + default: + return true, nil + } + + // compare + if (bytes.Equal(left, right) && !factor) || (!bytes.Equal(left, right) && factor) { + rule = HeuristicRules["RES.007"] + } + return false, nil + } + + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleLoadFile RES.008 +func (q *Query4Audit) RuleLoadFile() Rule { + var rule = q.RuleOK() + // 去除注释 + sql := 
string(database.RemoveSQLComments([]byte(q.Query))) + // 去除多余的空格和回车 + sql = strings.Join(strings.Fields(sql), " ") + tks := ast.Tokenize(sql) + for i, tk := range tks { + // 注意:每个关键字token的结尾是带空格的,这里偷懒没trimspace直接加空格比较 + // LOAD DATA... + if strings.ToLower(tk.Val) == "load " && i+1 < len(tks) && + strings.ToLower(tks[i+1].Val) == "data " { + rule = HeuristicRules["RES.008"] + break + } + + // SELECT ... INTO OUTFILE + if strings.ToLower(tk.Val) == "into " && i+1 < len(tks) && + (strings.ToLower(tks[i+1].Val) == "outfile " || strings.ToLower(tks[i+1].Val) == "dumpfile ") { + rule = HeuristicRules["RES.008"] + break + } + } + return rule +} + +// RuleStandardINEQ STA.001 +func (q *Query4Audit) RuleStandardINEQ() Rule { + var rule = q.RuleOK() + re := regexp.MustCompile(`(!=)`) + if re.FindString(q.Query) != "" { + rule = HeuristicRules["STA.001"] + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + } + return rule +} + +// RuleUseKeyWord KWR.002 +func (q *Query4Audit) RuleUseKeyWord() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + if q.TiStmt == nil { + common.Log.Error("TiStmt is nil, SQL: %s", q.Query) + return rule + } + + for _, tiStmtNode := range q.TiStmt { + switch stmt := tiStmtNode.(type) { + case *tidb.AlterTableStmt: + // alter + for _, spec := range stmt.Specs { + for _, column := range spec.NewColumns { + if ast.IsMysqlKeyword(column.Name.String()) { + return HeuristicRules["KWR.002"] + } + } + } + + case *tidb.CreateTableStmt: + // create + if ast.IsMysqlKeyword(stmt.Table.Name.String()) { + return HeuristicRules["KWR.002"] + } + + for _, col := range stmt.Cols { + if ast.IsMysqlKeyword(col.Name.String()) { + return HeuristicRules["KWR.002"] + } + } + } + + } + } + + return rule +} + +// RulePluralWord KWR.003 +// Reference: https://en.wikipedia.org/wiki/English_plurals +func (q *Query4Audit) RulePluralWord() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + 
case *sqlparser.DDL: + if q.TiStmt == nil { + common.Log.Error("TiStmt is nil, SQL: %s", q.Query) + return rule + } + + for _, tiStmtNode := range q.TiStmt { + switch stmt := tiStmtNode.(type) { + case *tidb.AlterTableStmt: + // alter + for _, spec := range stmt.Specs { + for _, column := range spec.NewColumns { + if inflector.Singularize(column.Name.String()) != column.Name.String() { + return HeuristicRules["KWR.003"] + } + } + } + + case *tidb.CreateTableStmt: + // create + if inflector.Singularize(stmt.Table.Name.String()) != stmt.Table.Name.String() { + return HeuristicRules["KWR.003"] + } + + for _, col := range stmt.Cols { + if inflector.Singularize(col.Name.String()) != col.Name.String() { + return HeuristicRules["KWR.003"] + } + } + } + + } + + } + return rule +} + +// RuleInsertSelect LCK.001 +func (q *Query4Audit) RuleInsertSelect() Rule { + var rule = q.RuleOK() + switch n := q.Stmt.(type) { + case *sqlparser.Insert: + switch n.Rows.(type) { + case *sqlparser.Select: + rule = HeuristicRules["LCK.001"] + } + } + return rule +} + +// RuleInsertOnDup LCK.002 +func (q *Query4Audit) RuleInsertOnDup() Rule { + var rule = q.RuleOK() + switch n := q.Stmt.(type) { + case *sqlparser.Insert: + if n.OnDup != nil { + rule = HeuristicRules["LCK.002"] + return rule + } + } + return rule +} + +// RuleInSubquery SUB.001 +func (q *Query4Audit) RuleInSubquery() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.Subquery: + rule = HeuristicRules["SUB.001"] + return false, nil + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleSubqueryDepth SUB.004 +func (q *Query4Audit) RuleSubqueryDepth() Rule { + var rule = q.RuleOK() + if depth := ast.GetSubqueryDepth(q.Stmt); depth > common.Config.MaxSubqueryDepth { + rule = HeuristicRules["SUB.004"] + } + return rule +} + +// RuleSubQueryLimit SUB.005 +// 
只有IN的SUBQUERY限制了LIMIT,FROM子句中的SUBQUERY并未限制LIMIT +func (q *Query4Audit) RuleSubQueryLimit() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.ComparisonExpr: + if n.Operator == "in" { + switch r := n.Right.(type) { + case *sqlparser.Subquery: + switch s := r.Select.(type) { + case *sqlparser.Select: + if s.Limit != nil { + rule = HeuristicRules["SUB.005"] + return false, nil + } + } + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleSubQueryFunctions SUB.006 +func (q *Query4Audit) RuleSubQueryFunctions() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.Subquery: + err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.FuncExpr: + rule = HeuristicRules["SUB.006"] + return false, nil + } + return true, nil + }, node) + common.LogIfError(err, "") + } + + if rule.Item == "OK" { + return true, nil + } + return false, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleMultiValueAttribute LIT.003 +func (q *Query4Audit) RuleMultiValueAttribute() Rule { + var rule = q.RuleOK() + re := regexp.MustCompile(`(?i)(id\s+varchar)|(id\s+text)|(id\s+regexp)`) + if re.FindString(q.Query) != "" { + rule = HeuristicRules["LIT.003"] + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + } + return rule +} + +// RuleAddDelimiter LIT.004 +func (q *Query4Audit) RuleAddDelimiter() Rule { + var rule = q.RuleOK() + re := regexp.MustCompile(`(?i)(^use\s+[0-9a-z_-]*)|(^show\s+databases)`) + if re.FindString(q.Query) != "" && !strings.HasSuffix(q.Query, common.Config.Delimiter) { + rule = HeuristicRules["LIT.004"] + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = 
position[0] + } + } + return rule +} + +// RuleRecursiveDependency KEY.003 +func (q *Query4Audit) RuleRecursiveDependency() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + // create statement + for _, ref := range node.Constraints { + if ref != nil && ref.Tp == tidb.ConstraintForeignKey { + rule = HeuristicRules["KEY.003"] + } + } + + case *tidb.AlterTableStmt: + // alter table statement + for _, spec := range node.Specs { + if spec.Constraint != nil && spec.Constraint.Tp == tidb.ConstraintForeignKey { + rule = HeuristicRules["KEY.003"] + } + } + } + } + } + + if rule.Item == "KEY.003" { + re := regexp.MustCompile(`(?i)(\s+references\s+)`) + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + } + + return rule +} + +// RuleImpreciseDataType COL.009 +func (q *Query4Audit) RuleImpreciseDataType() Rule { + var rule = q.RuleOK() + if q.TiStmt != nil { + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + // Create table statement + for _, col := range node.Cols { + switch col.Tp.Tp { + case mysql.TypeFloat, mysql.TypeDouble, mysql.TypeDecimal, mysql.TypeNewDecimal: + rule = HeuristicRules["COL.009"] + } + } + + case *tidb.AlterTableStmt: + // Alter table statement + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableAddColumns, tidb.AlterTableChangeColumn, tidb.AlterTableModifyColumn: + for _, col := range spec.NewColumns { + switch col.Tp.Tp { + case mysql.TypeFloat, mysql.TypeDouble, + mysql.TypeDecimal, mysql.TypeNewDecimal: + rule = HeuristicRules["COL.009"] + } + } + } + } + + case *tidb.InsertStmt: + // Insert statement + for _, values := range node.Lists { + for _, value := range values { + switch value.GetDatum().Kind() { + case types.KindFloat32, types.KindFloat64, types.KindMysqlDecimal: + rule = 
HeuristicRules["COL.009"] + } + } + } + + case *tidb.SelectStmt: + // Select statement + switch where := node.Where.(type) { + case *tidb.BinaryOperationExpr: + switch where.R.GetDatum().Kind() { + case types.KindFloat32, types.KindFloat64, types.KindMysqlDecimal: + rule = HeuristicRules["COL.009"] + } + } + } + } + } + + return rule +} + +// RuleValuesInDefinition COL.010 +func (q *Query4Audit) RuleValuesInDefinition() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + switch col.Tp.Tp { + case mysql.TypeSet, mysql.TypeEnum, mysql.TypeBit: + rule = HeuristicRules["COL.010"] + } + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableAddColumns, tidb.AlterTableChangeColumn, tidb.AlterTableModifyColumn: + for _, col := range spec.NewColumns { + switch col.Tp.Tp { + case mysql.TypeSet, mysql.TypeEnum, mysql.TypeBit: + rule = HeuristicRules["COL.010"] + } + } + } + } + } + } + } + return rule +} + +// RuleIndexAttributeOrder KEY.004 +func (q *Query4Audit) RuleIndexAttributeOrder() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateIndexStmt: + if len(node.IndexColNames) > 1 { + rule = HeuristicRules["KEY.004"] + break + } + case *tidb.CreateTableStmt: + for _, constraint := range node.Constraints { + // 当一条索引中包含多个列的时候给予建议 + if len(constraint.Keys) > 1 { + rule = HeuristicRules["KEY.004"] + break + } + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + if spec.Tp == tidb.AlterTableAddConstraint && len(spec.Constraint.Keys) > 1 { + rule = HeuristicRules["KEY.004"] + break + } + } + } + } + } + return rule +} + +// RuleNullUsage COL.011 +func (q *Query4Audit) RuleNullUsage() Rule { + var rule = q.RuleOK() + re := 
regexp.MustCompile(`(?i)(\s+null\s+)`) + if re.FindString(q.Query) != "" { + rule = HeuristicRules["COL.011"] + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + } + return rule +} + +// RuleStringConcatenation FUN.003 +func (q *Query4Audit) RuleStringConcatenation() Rule { + var rule = q.RuleOK() + re := regexp.MustCompile(`(?i)(\|\|)`) + if re.FindString(q.Query) != "" { + rule = HeuristicRules["FUN.003"] + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + } + return rule +} + +// RuleSysdate FUN.004 +func (q *Query4Audit) RuleSysdate() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.FuncExpr: + if n.Name.String() == "sysdate" { + rule = HeuristicRules["FUN.004"] + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleCountConst FUN.005 +func (q *Query4Audit) RuleCountConst() Rule { + var rule = q.RuleOK() + fingerprint := query.Fingerprint(q.Query) + countReg := regexp.MustCompile(`(?i)count\(\s*[0-9a-z?]*\s*\)`) + if countReg.MatchString(fingerprint) { + rule = HeuristicRules["FUN.005"] + if position := countReg.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + } + return rule +} + +// RuleSumNPE FUN.006 +func (q *Query4Audit) RuleSumNPE() Rule { + var rule = q.RuleOK() + fingerprint := query.Fingerprint(q.Query) + sumReg := regexp.MustCompile(`(?i)sum\(\s*[0-9a-z?]*\s*\)`) + isnullReg := regexp.MustCompile(`(?i)isnull\(sum\(\s*[0-9a-z?]*\s*\)\)`) + if sumReg.MatchString(fingerprint) && !isnullReg.MatchString(fingerprint) { + rule = HeuristicRules["FUN.006"] + if position := sumReg.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + } + return rule +} + +// RulePatternMatchingUsage ARG.007 +func (q *Query4Audit) 
RulePatternMatchingUsage() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.Select: + re := regexp.MustCompile(`(?i)(\bregexp\b)|(\bsimilar to\b)`) + if re.FindString(q.Query) != "" { + rule = HeuristicRules["ARG.007"] + } + } + return rule +} + +// RuleSpaghettiQueryAlert CLA.012 +func (q *Query4Audit) RuleSpaghettiQueryAlert() Rule { + var rule = q.RuleOK() + if len(query.Fingerprint(q.Query)) > common.Config.SpaghettiQueryLength { + rule = HeuristicRules["CLA.012"] + } + return rule +} + +// RuleReduceNumberOfJoin JOI.005 +func (q *Query4Audit) RuleReduceNumberOfJoin() Rule { + var rule = q.RuleOK() + var tables []string + switch q.Stmt.(type) { + // TODO: UNION有可能有多张表,这里未检查UNION SELECT + case *sqlparser.Union: + return rule + default: + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.AliasedTableExpr: + switch table := n.Expr.(type) { + case sqlparser.TableName: + exist := false + for _, t := range tables { + if t == table.Name.String() { + exist = true + break + } + } + if !exist { + tables = append(tables, table.Name.String()) + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + } + if len(tables) > common.Config.MaxJoinTableCount { + rule = HeuristicRules["JOI.005"] + } + return rule +} + +// RuleDistinctUsage DIS.001 +func (q *Query4Audit) RuleDistinctUsage() Rule { + // Distinct + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.Select: + re := regexp.MustCompile(`(?i)(\bdistinct\b)`) + if len(re.FindAllString(q.Query, -1)) > common.Config.MaxDistinctCount { + rule = HeuristicRules["DIS.001"] + } + } + return rule +} + +// RuleCountDistinctMultiCol DIS.002 +func (q *Query4Audit) RuleCountDistinctMultiCol() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.FuncExpr: + str := strings.ToLower(sqlparser.String(n)) 
+ if strings.HasPrefix(str, "count") && strings.Contains(str, ",") { + rule = HeuristicRules["DIS.002"] + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleDistinctStar DIS.003 +func (q *Query4Audit) RuleDistinctStar() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.Select: + meta := ast.GetMeta(q.Stmt, nil) + for _, m := range meta { + if len(m.Table) == 1 { + // distinct tbl.* from tbl和 distinct * + re := regexp.MustCompile(`(?i)((\s+distinct\s*\*)|(\s+distinct\s+[0-9a-z_` + "`" + `]*\.\*))`) + if re.MatchString(q.Query) { + rule = HeuristicRules["DIS.003"] + } + } + break + } + } + return rule +} + +// RuleHavingClause CLA.013 +func (q *Query4Audit) RuleHavingClause() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case *sqlparser.Select: + if expr.Having != nil { + rule = HeuristicRules["CLA.013"] + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleUpdatePrimaryKey CLA.016 +func (idxAdv *IndexAdvisor) RuleUpdatePrimaryKey() Rule { + rule := HeuristicRules["OK"] + switch node := idxAdv.Ast.(type) { + case *sqlparser.Update: + var setColumns []*common.Column + + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.UpdateExpr: + // 获取set操作的全部column + setColumns = append(setColumns, ast.FindAllCols(node)...) 
+ } + return true, nil + }, node) + common.LogIfError(err, "") + setColumns = idxAdv.calcCardinality(CompleteColumnsInfo(idxAdv.Ast, setColumns, idxAdv.vEnv)) + for _, col := range setColumns { + idxMeta := idxAdv.IndexMeta[idxAdv.vEnv.DBHash(col.DB)][col.Table] + if idxMeta == nil { + return rule + } + for _, idx := range idxMeta.IdxRows { + if idx.KeyName == "PRIMARY" { + if col.Name == idx.ColumnName { + rule = HeuristicRules["CLA.016"] + return rule + } + continue + } + } + } + } + + return rule +} + +// RuleForbiddenSyntax CLA.017 +func (q *Query4Audit) RuleForbiddenSyntax() Rule { + var rule = q.RuleOK() + + // 由于vitess对某些语法的支持不完善,使得如创建临时表等语句无法通过语法检查 + // 所以这里使用正则对触发器、临时表、存储过程等进行匹配 + // 但是目前支持的也不是非常全面,有待完善匹配规则 + // TODO TiDB 目前还不支持触发器、存储过程、自定义函数、外键 + + forbidden := []*regexp.Regexp{ + regexp.MustCompile(`(?i)CREATE\s+TRIGGER\s+`), + + regexp.MustCompile(`(?i)CREATE\s+TEMPORARY\s+TABLE\s+`), + + regexp.MustCompile(`(?i)CREATE\s+VIEW\s+`), + regexp.MustCompile(`(?i)REPLACE\s+VIEW\s+`), + + regexp.MustCompile(`(?i)CREATE\s+PROCEDURE\s+`), + regexp.MustCompile(`(?i)CREATE\s+FUNCTION\s+`), + } + + for _, reg := range forbidden { + if reg.MatchString(q.Query) { + rule = HeuristicRules["CLA.017"] + if position := reg.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + break + } + } + return rule +} + +// RuleNestedSubQueries JOI.006 +func (q *Query4Audit) RuleNestedSubQueries() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.Subquery: + rule = HeuristicRules["JOI.006"] + return false, nil + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleMultiDeleteUpdate JOI.007 +func (q *Query4Audit) RuleMultiDeleteUpdate() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.Delete, *sqlparser.Update: + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { 
+ switch node.(type) { + case *sqlparser.JoinTableExpr: + rule = HeuristicRules["JOI.007"] + return false, nil + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + } + return rule +} + +// RuleMultiDBJoin JOI.008 +func (q *Query4Audit) RuleMultiDBJoin() Rule { + var rule = q.RuleOK() + meta := ast.GetMeta(q.Stmt, nil) + dbCount := 0 + for range meta { + dbCount++ + } + + if dbCount > 1 { + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.JoinTableExpr: + rule = HeuristicRules["JOI.008"] + return false, nil + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + } + return rule +} + +// RuleORUsage ARG.008 +func (q *Query4Audit) RuleORUsage() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.Select: + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.OrExpr: + rule = HeuristicRules["ARG.008"] + return false, nil + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + } + return rule +} + +// RuleSpaceWithQuote ARG.009 +func (q *Query4Audit) RuleSpaceWithQuote() Rule { + var rule = q.RuleOK() + for _, tk := range ast.Tokenize(q.Query) { + if tk.Type == ast.TokenTypeQuote { + // The serialized Val keeps its surrounding quotes, so check the 2nd and the 2nd-to-last characters; this also guarantees len(tk.Val) >= 2. + switch tk.Val[1] { + case ' ': + rule = HeuristicRules["ARG.009"] + } + switch tk.Val[len(tk.Val)-2] { + case ' ': + rule = HeuristicRules["ARG.009"] + } + } + } + return rule +} + +// RuleHint ARG.010 +func (q *Query4Audit) RuleHint() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.IndexHints: + if n != nil { + rule = HeuristicRules["ARG.010"] + } + return false, nil + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleNot ARG.011 +func (q *Query4Audit) RuleNot() Rule { + var rule = q.RuleOK() + err := 
sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.ComparisonExpr: + if strings.HasPrefix(n.Operator, "not") { + rule = HeuristicRules["ARG.011"] + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleUNIONUsage SUB.002 +func (q *Query4Audit) RuleUNIONUsage() Rule { + var rule = q.RuleOK() + switch s := q.Stmt.(type) { + case *sqlparser.Union: + if s.Type == "union" { + rule = HeuristicRules["SUB.002"] + } + } + return rule +} + +// RuleDistinctJoinUsage SUB.003 +func (q *Query4Audit) RuleDistinctJoinUsage() Rule { + var rule = q.RuleOK() + switch expr := q.Stmt.(type) { + case *sqlparser.Select: + if expr.Distinct != "" { + if expr.From != nil { + if len(expr.From) > 1 { + rule = HeuristicRules["SUB.003"] + } + } + } + } + return rule +} + +// RuleReadablePasswords SEC.002 +func (q *Query4Audit) RuleReadablePasswords() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + re := regexp.MustCompile(`(?i)(password)|(passwd)|(pwd)`) + for _, tiStmt := range q.TiStmt { + // create table stmt + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + switch col.Tp.Tp { + case mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob: + if re.FindString(q.Query) != "" { + return HeuristicRules["SEC.002"] + } + } + } + + case *tidb.AlterTableStmt: + // alter table stmt + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableModifyColumn, tidb.AlterTableChangeColumn, tidb.AlterTableAddColumns: + for _, col := range spec.NewColumns { + switch col.Tp.Tp { + case mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString, + mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob: + if re.FindString(q.Query) != "" { + return HeuristicRules["SEC.002"] + } + } + } + } + } + } + } + } + return rule +} + +// 
RuleDataDrop SEC.003 +func (q *Query4Audit) RuleDataDrop() Rule { + var rule = q.RuleOK() + switch s := q.Stmt.(type) { + case *sqlparser.DBDDL: + if s.Action == "drop" { + rule = HeuristicRules["SEC.003"] + } + case *sqlparser.DDL: + if s.Action == "drop" || s.Action == "truncate" { + rule = HeuristicRules["SEC.003"] + } + case *sqlparser.Delete: + rule = HeuristicRules["SEC.003"] + } + return rule +} + +// RuleCompareWithFunction FUN.001 +func (q *Query4Audit) RuleCompareWithFunction() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.ComparisonExpr: + if strings.HasSuffix(sqlparser.String(n.Left), ")") { + rule = HeuristicRules["FUN.001"] + return false, nil + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleCountStar FUN.002 +func (q *Query4Audit) RuleCountStar() Rule { + var rule = q.RuleOK() + switch n := q.Stmt.(type) { + case *sqlparser.Select: + // count(N), count(col), count(*) + re := regexp.MustCompile(`(?i)(count\(\s*[*0-9a-z_` + "`" + `]*\s*\))`) + if re.FindString(q.Query) != "" && n.Where != nil { + rule = HeuristicRules["FUN.002"] + } + } + return rule +} + +// RuleTruncateTable SEC.001 +func (q *Query4Audit) RuleTruncateTable() Rule { + var rule = q.RuleOK() + switch s := q.Stmt.(type) { + case *sqlparser.DDL: + if s.Action == "truncate" { + rule = HeuristicRules["SEC.001"] + } + } + return rule +} + +// RuleIn ARG.005 && ARG.004 +func (q *Query4Audit) RuleIn() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case *sqlparser.ComparisonExpr: + switch n.Operator { + case "in": + switch r := n.Right.(type) { + case sqlparser.ValTuple: + // IN (NULL) + for _, v := range r { + switch v.(type) { + case *sqlparser.NullVal: + rule = HeuristicRules["ARG.004"] + return false, nil + } + } + if len(r) > 
common.Config.MaxInCount { + rule = HeuristicRules["ARG.005"] + return false, nil + } + } + case "not in": + switch r := n.Right.(type) { + case sqlparser.ValTuple: + // NOT IN (NULL) + for _, v := range r { + switch v.(type) { + case *sqlparser.NullVal: + rule = HeuristicRules["ARG.004"] + return false, nil + } + } + } + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleIsNullIsNotNull ARG.006 +func (q *Query4Audit) RuleIsNullIsNotNull() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.Select: + re := regexp.MustCompile(`(?i)is\s*(not)?\s+null\b`) + if re.FindString(q.Query) != "" { + rule = HeuristicRules["ARG.006"] + } + } + return rule +} + +// RuleVarcharVSChar COL.008 +func (q *Query4Audit) RuleVarcharVSChar() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + switch col.Tp.Tp { + // 在 TiDB 的 AST 中,char 和 binary 的 type 都是 mysql.TypeString + // 只是 binary 数据类型的 character 和 collate 是 binary + case mysql.TypeString: + rule = HeuristicRules["COL.008"] + } + } + + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableAddColumns, tidb.AlterTableChangeColumn, tidb.AlterTableModifyColumn: + for _, col := range spec.NewColumns { + switch col.Tp.Tp { + case mysql.TypeString: + rule = HeuristicRules["COL.008"] + } + } + } + } + } + } + } + return rule +} + +// RuleCreateDualTable TBL.003 +func (q *Query4Audit) RuleCreateDualTable() Rule { + var rule = q.RuleOK() + switch s := q.Stmt.(type) { + case *sqlparser.DDL: + if s.NewName.Name.String() == "dual" { + rule = HeuristicRules["TBL.003"] + + } + } + return rule +} + +// RuleAlterCharset ALT.001 +func (q *Query4Audit) RuleAlterCharset() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range 
q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableOption: + for _, option := range spec.Options { + if option.Tp == tidb.TableOptionCharset || + option.Tp == tidb.TableOptionCollate { + rule = HeuristicRules["ALT.001"] + break + } + } + } + + if rule.Item == "ALT.001" { + break + } + } + } + } + } + return rule +} + +// RuleAlterDropColumn ALT.003 +func (q *Query4Audit) RuleAlterDropColumn() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableDropColumn: + rule = HeuristicRules["ALT.003"] + } + } + } + } + + if rule.Item == "ALT.003" { + re := regexp.MustCompile(`(?i)(drop\s+column)`) + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + } + } + return rule +} + +// RuleAlterDropKey ALT.004 +func (q *Query4Audit) RuleAlterDropKey() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableDropPrimaryKey, + tidb.AlterTableDropIndex, + tidb.AlterTableDropForeignKey: + rule = HeuristicRules["ALT.004"] + } + } + } + } + } + return rule +} + +// RuleCantBeNull COL.012 +func (q *Query4Audit) RuleCantBeNull() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + switch col.Tp.Tp { + case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + if !mysql.HasNotNullFlag(col.Tp.Flag) { + rule = HeuristicRules["COL.012"] + } + } + } + + case 
*tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableAddColumns, tidb.AlterTableModifyColumn, tidb.AlterTableChangeColumn: + for _, col := range spec.NewColumns { + switch col.Tp.Tp { + case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + if !mysql.HasNotNullFlag(col.Tp.Flag) { + rule = HeuristicRules["COL.012"] + } + } + } + } + } + } + } + } + + return rule +} + +// RuleTooManyKeys KEY.005 +func (q *Query4Audit) RuleTooManyKeys() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + if len(node.Constraints) > common.Config.MaxIdxCount { + rule = HeuristicRules["KEY.005"] + } + } + } + } + return rule +} + +// RuleTooManyKeyParts KEY.006 +func (q *Query4Audit) RuleTooManyKeyParts() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, constraint := range node.Constraints { + if len(constraint.Keys) > common.Config.MaxIdxColsCount { + return HeuristicRules["KEY.006"] + } + + if constraint.Refer != nil && len(constraint.Refer.IndexColNames) > common.Config.MaxIdxColsCount { + return HeuristicRules["KEY.006"] + } + } + + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableAddConstraint: + if spec.Constraint != nil { + if len(spec.Constraint.Keys) > common.Config.MaxIdxColsCount { + return HeuristicRules["KEY.006"] + } + + if spec.Constraint.Refer != nil { + if len(spec.Constraint.Refer.IndexColNames) > common.Config.MaxIdxColsCount { + return HeuristicRules["KEY.006"] + } + } + } + } + } + } + } + } + + return rule +} + +// RulePKNotInt KEY.007 && KEY.001 +func (q *Query4Audit) RulePKNotInt() Rule { + var rule = q.RuleOK() + var pk sqlparser.ColIdent + switch s := q.Stmt.(type) { + 
case *sqlparser.DDL: + if s.Action == "create" { + if s.TableSpec == nil { + return rule + } + for _, idx := range s.TableSpec.Indexes { + if idx.Info.Type == "primary key" { + if len(idx.Columns) == 1 { + pk = idx.Columns[0].Column + break + } + } + } + + // 未指定主键 + if pk.String() == "" { + rule = HeuristicRules["KEY.007"] + return rule + } + + // 主键非int, bigint类型 + for _, col := range s.TableSpec.Columns { + if pk.String() == col.Name.String() { + switch col.Type.Type { + case "int", "bigint", "integer": + if !col.Type.Unsigned { + rule = HeuristicRules["KEY.007"] + } + if !col.Type.Autoincrement { + rule = HeuristicRules["KEY.001"] + } + default: + rule = HeuristicRules["KEY.007"] + } + } + } + } + } + return rule +} + +// RuleOrderByMultiDirection KEY.008 +func (q *Query4Audit) RuleOrderByMultiDirection() Rule { + var rule = q.RuleOK() + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch n := node.(type) { + case sqlparser.OrderBy: + order := "" + for _, col := range strings.Split(sqlparser.String(n), ",") { + orders := strings.Split(col, " ") + if order != "" && order != orders[len(orders)-1] { + rule = HeuristicRules["KEY.008"] + return false, nil + } + order = orders[len(orders)-1] + } + } + return true, nil + }, q.Stmt) + common.LogIfError(err, "") + return rule +} + +// RuleUniqueKeyDup KEY.009 +// TODO: 目前只是给建议,期望能够实现自动检查 +func (q *Query4Audit) RuleUniqueKeyDup() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateIndexStmt: + // create index + if node.Unique { + re := regexp.MustCompile(`(?i)(create\s+(unique)\s)`) + rule = HeuristicRules["KEY.009"] + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + return rule + } + + case *tidb.AlterTableStmt: + // alter table add constraint + for _, spec := range node.Specs { + switch spec.Tp { + case 
tidb.AlterTableAddConstraint: + if spec.Constraint == nil { + continue + } + switch spec.Constraint.Tp { + case tidb.ConstraintPrimaryKey, tidb.ConstraintUniq, tidb.ConstraintUniqKey, tidb.ConstraintUniqIndex: + re := regexp.MustCompile(`(?i)(add\s+(unique)\s)`) + rule = HeuristicRules["KEY.009"] + if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { + rule.Position = position[0] + } + return rule + } + } + } + } + } + } + return rule +} + +// RuleTimestampDefault COL.013 +func (q *Query4Audit) RuleTimestampDefault() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + if col.Tp.Tp == mysql.TypeTimestamp { + hasDefault := false + for _, option := range col.Options { + if option.Tp == tidb.ColumnOptionDefaultValue { + hasDefault = true + } + } + if !hasDefault { + rule = HeuristicRules["COL.013"] + break + } + } + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableAddColumns, + tidb.AlterTableModifyColumn, + tidb.AlterTableChangeColumn, + tidb.AlterTableAlterColumn: + for _, col := range spec.NewColumns { + if col.Tp.Tp == mysql.TypeTimestamp { + hasDefault := false + for _, option := range col.Options { + if option.Tp == tidb.ColumnOptionDefaultValue { + hasDefault = true + } + } + if !hasDefault { + rule = HeuristicRules["COL.013"] + break + } + } + } + } + } + } + } + } + return rule +} + +// RuleAutoIncrementInitNotZero TBL.004 +func (q *Query4Audit) RuleAutoIncrementInitNotZero() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, opt := range node.Options { + if opt.Tp == tidb.TableOptionAutoIncrement && opt.UintValue > 1 { + rule = HeuristicRules["TBL.004"] + } + } + + } + } + } + return rule +} + 
+// RuleColumnWithCharset COL.014 +func (q *Query4Audit) RuleColumnWithCharset() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + if col.Tp.Charset != "" || col.Tp.Collate != "" { + rule = HeuristicRules["COL.014"] + break + } + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableAlterColumn, tidb.AlterTableChangeColumn, + tidb.AlterTableModifyColumn, tidb.AlterTableAddColumns: + for _, col := range spec.NewColumns { + if col.Tp.Charset != "" || col.Tp.Collate != "" { + rule = HeuristicRules["COL.014"] + break + } + } + } + + } + } + } + } + return rule +} + +// RuleTableCharsetCheck TBL.005 +func (q *Query4Audit) RuleTableCharsetCheck() Rule { + var rule = q.RuleOK() + + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + var allow bool + var hasCharset bool + for _, opt := range node.Options { + if opt.Tp == tidb.TableOptionCharset { + hasCharset = true + for _, ch := range common.Config.TableAllowCharsets { + if strings.TrimSpace(strings.ToLower(ch)) == strings.TrimSpace(strings.ToLower(opt.StrValue)) { + allow = true + break + } + } + } + } + + // 未指定字符集使用MySQL默认配置字符集,我们认为MySQL的配置是被优化过的。 + if hasCharset && !allow { + rule = HeuristicRules["TBL.005"] + break + } + + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + var allow bool + var hasCharset bool + switch spec.Tp { + case tidb.AlterTableOption: + for _, opt := range spec.Options { + if opt.Tp == tidb.TableOptionCharset { + hasCharset = true + for _, ch := range common.Config.TableAllowCharsets { + if strings.TrimSpace(strings.ToLower(ch)) == strings.TrimSpace(strings.ToLower(opt.StrValue)) { + allow = true + break + } + } + } + } + // 未指定字符集使用MySQL默认配置字符集,我们认为MySQL的配置是被优化过的。 + if 
hasCharset && !allow { + rule = HeuristicRules["TBL.005"] + break + } + } + } + } + } + } + return rule +} + +// RuleBlobDefaultValue COL.015 +func (q *Query4Audit) RuleBlobDefaultValue() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + switch col.Tp.Tp { + case mysql.TypeBlob, mysql.TypeMediumBlob, mysql.TypeTinyBlob, mysql.TypeLongBlob: + for _, opt := range col.Options { + if opt.Tp == tidb.ColumnOptionDefaultValue && opt.Expr.GetType().Tp != mysql.TypeNull { + rule = HeuristicRules["COL.015"] + break + } + } + } + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableModifyColumn, tidb.AlterTableAlterColumn, + tidb.AlterTableChangeColumn, tidb.AlterTableAddColumns: + for _, col := range spec.NewColumns { + switch col.Tp.Tp { + case mysql.TypeBlob, mysql.TypeMediumBlob, mysql.TypeTinyBlob, mysql.TypeLongBlob: + for _, opt := range col.Options { + if opt.Tp == tidb.ColumnOptionDefaultValue && opt.Expr.GetType().Tp != mysql.TypeNull { + rule = HeuristicRules["COL.015"] + break + } + } + } + } + } + } + } + } + } + return rule +} + +// RuleIntPrecision COL.016 +func (q *Query4Audit) RuleIntPrecision() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + switch col.Tp.Tp { + case mysql.TypeLong: + if (col.Tp.Flen < 10 || col.Tp.Flen > 11) && col.Tp.Flen > 0 { + // 有些语言ORM框架会生成int(11),有些语言的框架生成int(10) + rule = HeuristicRules["COL.016"] + break + } + case mysql.TypeLonglong: + if (col.Tp.Flen != 20) && col.Tp.Flen > 0 { + rule = HeuristicRules["COL.016"] + break + } + } + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableAddColumns, 
tidb.AlterTableChangeColumn, + tidb.AlterTableAlterColumn, tidb.AlterTableModifyColumn: + for _, col := range spec.NewColumns { + switch col.Tp.Tp { + case mysql.TypeLong: + if (col.Tp.Flen < 10 || col.Tp.Flen > 11) && col.Tp.Flen > 0 { + // 有些语言ORM框架会生成int(11),有些语言的框架生成int(10) + rule = HeuristicRules["COL.016"] + break + } + case mysql.TypeLonglong: + if col.Tp.Flen != 20 && col.Tp.Flen > 0 { + rule = HeuristicRules["COL.016"] + break + } + } + } + } + } + } + } + } + return rule +} + +// RuleVarcharLength COL.017 +func (q *Query4Audit) RuleVarcharLength() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + switch col.Tp.Tp { + case mysql.TypeVarchar, mysql.TypeVarString: + if col.Tp.Flen > common.Config.MaxVarcharLength { + rule = HeuristicRules["COL.017"] + break + } + } + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableAddColumns, tidb.AlterTableChangeColumn, + tidb.AlterTableAlterColumn, tidb.AlterTableModifyColumn: + for _, col := range spec.NewColumns { + switch col.Tp.Tp { + case mysql.TypeVarchar, mysql.TypeVarString: + if col.Tp.Flen > common.Config.MaxVarcharLength { + rule = HeuristicRules["COL.017"] + break + } + } + } + } + } + } + } + } + return rule +} + +// RuleNoOSCKey KEY.002 +func (q *Query4Audit) RuleNoOSCKey() Rule { + var rule = q.RuleOK() + switch s := q.Stmt.(type) { + case *sqlparser.DDL: + if s.Action == "create" { + pkReg := regexp.MustCompile(`(?i)(primary\s+key)`) + if !pkReg.MatchString(q.Query) { + ukReg := regexp.MustCompile(`(?i)(unique\s+((key)|(index)))`) + if !ukReg.MatchString(q.Query) { + rule = HeuristicRules["KEY.002"] + } + } + } + } + return rule +} + +// RuleTooManyFields COL.006 +func (q *Query4Audit) RuleTooManyFields() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + 
for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + if len(node.Cols) > common.Config.MaxColCount { + rule = HeuristicRules["COL.006"] + } + } + } + } + return rule +} + +// RuleAllowEngine TBL.002 +func (q *Query4Audit) RuleAllowEngine() Rule { + var rule = q.RuleOK() + var hasDefaultEngine bool + var allowedEngine bool + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, opt := range node.Options { + if opt.Tp == tidb.TableOptionEngine { + hasDefaultEngine = true + // 使用了非推荐的存储引擎 + for _, engine := range common.Config.TableAllowEngines { + if strings.EqualFold(opt.StrValue, engine) { + allowedEngine = true + } + } + // common.Config.TableAllowEngines 为空时不给予建议 + if !allowedEngine && len(common.Config.TableAllowEngines) > 0 { + rule = HeuristicRules["TBL.002"] + break + } + } + } + // 建表语句未指定表的存储引擎 + if !hasDefaultEngine { + rule = HeuristicRules["TBL.002"] + break + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableOption: + for _, opt := range spec.Options { + if opt.Tp == tidb.TableOptionEngine { + // 使用了非推荐的存储引擎 + for _, engine := range common.Config.TableAllowEngines { + if strings.EqualFold(opt.StrValue, engine) { + allowedEngine = true + } + } + // common.Config.TableAllowEngines 为空时不给予建议 + if !allowedEngine && len(common.Config.TableAllowEngines) > 0 { + rule = HeuristicRules["TBL.002"] + break + } + } + } + } + } + } + } + } + return rule +} + +// RulePartitionNotAllowed TBL.001 +func (q *Query4Audit) RulePartitionNotAllowed() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + if node.Partition != nil { + rule = HeuristicRules["TBL.001"] + break + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + if 
len(spec.PartDefinitions) > 0 { + rule = HeuristicRules["TBL.001"] + break + } + } + } + } + } + return rule +} + +// RuleAutoIncUnsigned COL.003: +func (q *Query4Audit) RuleAutoIncUnsigned() Rule { + var rule = q.RuleOK() + switch q.Stmt.(type) { + case *sqlparser.DDL: + for _, tiStmt := range q.TiStmt { + switch node := tiStmt.(type) { + case *tidb.CreateTableStmt: + for _, col := range node.Cols { + for _, opt := range col.Options { + if opt.Tp == tidb.ColumnOptionAutoIncrement { + if !mysql.HasUnsignedFlag(col.Tp.Flag) { + rule = HeuristicRules["COL.003"] + break + } + } + + if rule.Item == "COL.003" { + break + } + } + } + case *tidb.AlterTableStmt: + for _, spec := range node.Specs { + switch spec.Tp { + case tidb.AlterTableChangeColumn, tidb.AlterTableAlterColumn, + tidb.AlterTableModifyColumn, tidb.AlterTableAddColumns: + for _, col := range spec.NewColumns { + for _, opt := range col.Options { + if opt.Tp == tidb.ColumnOptionAutoIncrement { + if !mysql.HasUnsignedFlag(col.Tp.Flag) { + rule = HeuristicRules["COL.003"] + break + } + } + + if rule.Item == "COL.003" { + break + } + } + } + } + } + } + } + } + return rule +} + +// RuleSpaceAfterDot STA.002 +func (q *Query4Audit) RuleSpaceAfterDot() Rule { + var rule = q.RuleOK() + tks := ast.Tokenize(q.Query) + for i, tk := range tks { + switch tk.Type { + + // SELECT * FROM db. tbl + // SELECT tbl. 
col FROM tbl + case ast.TokenTypeWord: + if len(tks) > i+1 && + tks[i+1].Type == ast.TokenTypeWhitespace && + strings.HasSuffix(tk.Val, ".") { + common.Log.Debug("RuleSpaceAfterDot: ", tk.Val, tks[i+1].Val) + rule = HeuristicRules["STA.002"] + return rule + } + default: + } + } + return rule +} + +// RuleIdxPrefix STA.003 +func (q *Query4Audit) RuleIdxPrefix() Rule { + var rule = q.RuleOK() + for _, node := range q.TiStmt { + switch n := node.(type) { + case *tidb.CreateTableStmt: + for _, c := range n.Constraints { + switch c.Tp { + case tidb.ConstraintIndex, tidb.ConstraintKey: + if !strings.HasPrefix(c.Name, common.Config.IdxPrefix) { + rule = HeuristicRules["STA.003"] + } + case tidb.ConstraintUniq, tidb.ConstraintUniqKey, tidb.ConstraintUniqIndex: + if !strings.HasPrefix(c.Name, common.Config.UkPrefix) { + rule = HeuristicRules["STA.003"] + } + } + } + case *tidb.AlterTableStmt: + for _, s := range n.Specs { + switch s.Tp { + case tidb.AlterTableAddConstraint: + switch s.Constraint.Tp { + case tidb.ConstraintIndex, tidb.ConstraintKey: + if !strings.HasPrefix(s.Constraint.Name, common.Config.IdxPrefix) { + rule = HeuristicRules["STA.003"] + } + case tidb.ConstraintUniq, tidb.ConstraintUniqKey, tidb.ConstraintUniqIndex: + if !strings.HasPrefix(s.Constraint.Name, common.Config.UkPrefix) { + rule = HeuristicRules["STA.003"] + } + } + } + } + } + } + return rule +} + +// RuleStandardName STA.004 +func (q *Query4Audit) RuleStandardName() Rule { + var rule = q.RuleOK() + allowReg := regexp.MustCompile(`(?i)[a-z0-9_` + "`" + `]`) + for _, tk := range ast.Tokenize(q.Query) { + if tk.Val == "``" { + rule = HeuristicRules["STA.004"] + } + + switch tk.Type { + // 反引号中可能有乱七八糟的东西 + case ast.TokenTypeBacktickQuote: + // 特殊字符,连续下划线 + if allowReg.ReplaceAllString(tk.Val, "") != "" || strings.Contains(tk.Val, "__") { + rule = HeuristicRules["STA.004"] + } + // 统一大小写 + if !(strings.ToLower(tk.Val) == tk.Val || strings.ToUpper(tk.Val) == tk.Val) { + rule = 
HeuristicRules["STA.004"] + } + case ast.TokenTypeWord: + // TOKEN_TYPE_WORD中处理连续下划线的情况,其他情况容易误伤 + if strings.Contains(tk.Val, "__") { + rule = HeuristicRules["STA.004"] + } + default: + } + } + return rule +} + +// MergeConflictHeuristicRules merge conflict rules +func MergeConflictHeuristicRules(rules map[string]Rule) map[string]Rule { + // KWR.001 VS ERR.000 + // select sql_calc_found_rows * from film + if _, ok := rules["KWR.001"]; ok { + delete(rules, "ERR.000") + } + + // SUB.001 VS OWN.004 VS JOI.006 + if _, ok := rules["SUB.001"]; ok { + delete(rules, "ARG.005") + delete(rules, "JOI.006") + } + + // SUB.004 VS SUB.001 + if _, ok := rules["SUB.004"]; ok { + delete(rules, "SUB.001") + } + + // KEY.007 VS KEY.002 + if _, ok := rules["KEY.007"]; ok { + delete(rules, "KEY.002") + } + + // JOI.002 VS JOI.006 + if _, ok := rules["JOI.002"]; ok { + delete(rules, "JOI.006") + } + + // JOI.008 VS JOI.007 + if _, ok := rules["JOI.008"]; ok { + delete(rules, "JOI.007") + } + return rules +} + +// RuleMySQLError ERR.XXX +func RuleMySQLError(item string, err error) Rule { + + type MySQLError struct { + ErrCode string + ErrString string + } + + // vitess 语法检查出错返回的是ERR.000 + switch item { + case "ERR.000": + return Rule{ + Item: item, + Summary: "MySQL执行出错 " + err.Error(), + Severity: "L8", + Content: err.Error(), + } + } + + // Received #1146 error from MySQL server: "table xxx doesn't exist" + errReg := regexp.MustCompile(`(?i)Received #([0-9]+) error from MySQL server: ['"](.*)['"]`) + errStr := err.Error() + msg := errReg.FindStringSubmatch(errStr) + var mysqlError MySQLError + + if len(msg) == 3 { + if msg[1] != "" && msg[2] != "" { + mysqlError = MySQLError{ + ErrCode: msg[1], + ErrString: msg[2], + } + } + } else { + var errcode string + if strings.HasPrefix(err.Error(), "syntax error at position") { + errcode = "1064" + } + mysqlError = MySQLError{ + ErrCode: errcode, + ErrString: err.Error(), + } + } + switch mysqlError.ErrCode { + // 1146 ER_NO_SUCH_TABLE + case 
"", "1146": + return Rule{ + Item: item, + Summary: "MySQL执行出错", + Severity: "L0", + Content: "", + } + default: + return Rule{ + Item: item, + Summary: "MySQL执行出错 " + mysqlError.ErrString, + Severity: "L8", + Content: mysqlError.ErrString, + } + } +} diff --git a/advisor/heuristic_test.go b/advisor/heuristic_test.go new file mode 100644 index 00000000..7366c452 --- /dev/null +++ b/advisor/heuristic_test.go @@ -0,0 +1,3015 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package advisor + +import ( + "errors" + "sort" + "testing" + + "github.com/XiaoMi/soar/common" + + "github.com/kr/pretty" +) + +// ALI.001 +func TestRuleImplicitAlias(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "select col c from tbl where id < 1000", + "select col from tbl tb where id < 1000", + }, + { + "do 1", + }, + } + for _, sql := range sqls[0] { + q, _ := NewQuery4Audit(sql) + rule := q.RuleImplicitAlias() + if rule.Item != "ALI.001" { + t.Error("Rule not match:", rule.Item, "Expect : ALI.001") + } + } + for _, sql := range sqls[1] { + q, _ := NewQuery4Audit(sql) + rule := q.RuleImplicitAlias() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ALI.002 +func TestRuleStarAlias(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select tbl.* as c1,c2,c3 from tbl where id < 1000", + } + for _, sql := range sqls { + q, _ := NewQuery4Audit(sql) + rule := q.RuleStarAlias() + if rule.Item != "ALI.002" { + t.Error("Rule not match:", rule.Item, "Expect : ALI.002") + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ALI.003 +func TestRuleSameAlias(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select col as col from tbl where id < 1000", + "select col from tbl as tbl where id < 1000", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSameAlias() + if rule.Item != "ALI.003" { + t.Error("Rule not match:", rule.Item, "Expect : ALI.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ARG.001 +func TestRulePrefixLike(t *testing.T) { + common.Log.Debug("Entering function: %s", 
common.GetFunctionName()) + sqls := []string{ + "select col from tbl where id like '%abc'", + "select col from tbl where id like '_abc'", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RulePrefixLike() + if rule.Item != "ARG.001" { + t.Error("Rule not match:", rule.Item, "Expect : ARG.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ARG.002 +func TestRuleEqualLike(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select col from tbl where id like 'abc'", + "select col from tbl where id like 1", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleEqualLike() + if rule.Item != "ARG.002" { + t.Error("Rule not match:", rule.Item, "Expect : ARG.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.001 +func TestRuleNoWhere(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + {"select col from tbl", + "delete from tbl", + "update tbl set col=1", + "insert into city (country_id) select country_id from country", + }, + { + `select 1;`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNoWhere() + if rule.Item != "CLA.001" && rule.Item != "CLA.014" && rule.Item != "CLA.015" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.001/CLA.014/CLA.015") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNoWhere() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", 
common.GetFunctionName()) +} + +// CLA.002 +func TestRuleOrderByRand(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select col from tbl where id = 1 order by rand()", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleOrderByRand() + if rule.Item != "CLA.002" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.003 +func TestRuleOffsetLimit(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select c1,c2 from tbl where name=xx order by number limit 1 offset 2000", + "select c1,c2 from tbl where name=xx order by number limit 2000,1", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleOffsetLimit() + if rule.Item != "CLA.003" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.004 +func TestRuleGroupByConst(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select col1,col2 from tbl where col1='abc' group by 1", + "select col1,col2 from tbl group by 1", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleGroupByConst() + if rule.Item != "CLA.004" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.004") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.005 +func TestRuleOrderByConst(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + // "select id from test where id=1 order by id", + "select id 
from test where id=1 order by 1", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleOrderByConst() + if rule.Item != "CLA.005" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.005") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.006 +func TestRuleDiffGroupByOrderBy(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select tb1.col, tb2.col from tb1, tb2 where id=1 group by tb1.col, tb2.col", + "select tb1.col, tb2.col from tb1, tb2 where id=1 order by tb1.col, tb2.col", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleDiffGroupByOrderBy() + if rule.Item != "CLA.006" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.006") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.007 +func TestRuleMixOrderBy(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select c1,c2,c3 from t1 where c1='foo' order by c2 desc, c3 asc", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleMixOrderBy() + if rule.Item != "CLA.007" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.007") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.008 +func TestRuleExplicitOrderBy(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select c1,c2,c3 from t1 where c1='foo' group by c2", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleExplicitOrderBy() + if rule.Item != "CLA.008" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.008") + 
} + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.009 +func TestRuleOrderByExpr(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "SELECT col FROM tbl order by cola - colb;", // order by 列运算 + "SELECT cola - colb col FROM tbl order by col;", // 别名为列运算 + "SELECT cola FROM tbl order by from_unixtime(col);", // order by 函数运算 + "SELECT from_unixtime(col) cola FROM tbl order by cola;", // 别名为函数运算 + + // 反面例子 + // `SELECT tbl.col FROM tbl ORDER BY col`, + // "SELECT sum(col) AS col FROM tbl ORDER BY dt", + // "SELECT tbl.col FROM tb, tbl WHERE tbl.tag_id = tb.id ORDER BY tbl.col", + // "SELECT col FROM tbl order by `timestamp`;", // 列名为关键字 + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleOrderByExpr() + if rule.Item != "CLA.009" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.009") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.010 +func TestRuleGroupByExpr(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "SELECT col FROM tbl GROUP by cola - colb;", + "SELECT cola - colb col FROM tbl GROUP by col;", + "SELECT cola FROM tbl GROUP by from_unixtime(col);", + "SELECT from_unixtime(col) cola FROM tbl GROUP by cola;", + + // 反面例子 + // `SELECT tbl.col FROM tbl GROUP BY col`, + // "SELECT dt, sum(col) AS col FROM tbl GROUP BY dt", + // "SELECT tbl.col FROM tb, tbl WHERE tbl.tag_id = tb.id GROUP BY tbl.col", + // "SELECT col FROM tbl GROUP by `timestamp`;", // 列名为关键字 + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleGroupByExpr() + if rule.Item != "CLA.010" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.010") + } + } else { + t.Error("sqlparser.Parse Error:", 
err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.011 +func TestRuleTblCommentCheck(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "CREATE TABLE `test1`( `ID` bigint(20) NOT NULL AUTO_INCREMENT," + + " `c1` varchar(128) DEFAULT NULL, `c2` varchar(300) DEFAULT NULL," + + " `c3` varchar(32) DEFAULT NULL, `c4` int(11) NOT NULL, `c5` double NOT NULL," + + " `c6` text NOT NULL, PRIMARY KEY (`ID`), KEY `idx_c3_c2_c4_c5_c6` " + + "(`c3`,`c2`(255),`c4`,`c5`,`c6`(255)), KEY `idx_c3_c2_c4` (`c3`,`c2`,`c4`)) " + + "ENGINE=InnoDB DEFAULT CHARSET=utf8", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleTblCommentCheck() + if rule.Item != "CLA.011" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.011") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.001 +func TestRuleSelectStar(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select * from tbl where id=1", + "select col, * from tbl where id=1", + // 反面例子 + // "select count(*) from film where id=1", + // `select count(* ) from film where id=1`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSelectStar() + if rule.Item != "COL.001" { + t.Error("Rule not match:", rule.Item, "Expect : COL.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.002 +func TestRuleInsertColDef(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "insert into tbl values(1,'name')", + "replace into tbl values(1,'name')", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleInsertColDef() + if 
rule.Item != "COL.002" { + t.Error("Rule not match:", rule.Item, "Expect : COL.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.004 +func TestRuleAddDefaultValue(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "create table test(id int)", + `ALTER TABLE test change id id varchar(10);`, + `ALTER TABLE test modify id varchar(10);`, + }, + { + `ALTER TABLE test modify id varchar(10) DEFAULT '';`, + `ALTER TABLE test CHANGE id id varchar(10) DEFAULT '';`, + "create table test(id int not null default 0 comment '用户id')", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAddDefaultValue() + if rule.Item != "COL.004" { + t.Error("Rule not match:", rule.Item, "Expect : COL.004") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAddDefaultValue() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.005 +func TestRuleColCommentCheck(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "create table test(id int not null default 0)", + `alter table test add column a int`, + `ALTER TABLE t1 CHANGE b b INT NOT NULL;`, + }, + { + "create table test(id int not null default 0 comment '用户id')", + `alter table test add column a int comment 'test'`, + `ALTER TABLE t1 AUTO_INCREMENT = 13;`, + `ALTER TABLE t1 CHANGE b b INT NOT NULL COMMENT 'test';`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleColCommentCheck() + if rule.Item != "COL.005" { + t.Error("Rule not 
match:", rule.Item, "Expect : COL.005") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleColCommentCheck() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// LIT.001 +func TestRuleIPString(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "insert into tbl (IP,name) values('10.20.306.122','test')", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleIPString() + if rule.Item != "LIT.001" { + t.Error("Rule not match:", rule.Item, "Expect : LIT.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// LIT.002 +func TestRuleDataNotQuote(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select col1,col2 from tbl where time < 2018-01-10", + "select col1,col2 from tbl where time < 18-01-10", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleDataNotQuote() + if rule.Item != "LIT.002" { + t.Error("Rule not match:", rule.Item, "Expect : LIT.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KWR.001 +func TestRuleSQLCalcFoundRows(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select SQL_CALC_FOUND_ROWS col from tbl where id>1000", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSQLCalcFoundRows() + if rule.Item != "KWR.001" { + t.Error("Rule not match:", rule.Item, "Expect : KWR.001") + } + } 
else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// JOI.001 +func TestRuleCommaAnsiJoin(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select c1,c2,c3 from t1,t2 join t3 on t1.c1=t2.c1 and t1.c3=t3.c1 where id>1000;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCommaAnsiJoin() + if rule.Item != "JOI.001" { + t.Error("Rule not match:", rule.Item, "Expect : JOI.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// JOI.002 +func TestRuleDupJoin(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select tb1.col from (tb1, tb2) join tb2 on tb1.id=tb.id where tb1.id=1;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleDupJoin() + if rule.Item != "JOI.002" { + t.Error("Rule not match:", rule.Item, "Expect : JOI.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// RES.001 +func TestRuleNoDeterministicGroupby(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + // 正面CASE + { + "select c1,c2,c3 from t1 where c2='foo' group by c2", + "select col, col2, sum(col1) from tb group by col", + "select col, col1 from tb group by col,sum(col1)", + "select * from tb group by col", + }, + + // 反面CASE + { + "select id from film", + "select col, sum(col1) from tb group by col", + "select * from file", + "SELECT COUNT(*) AS cnt, language_id FROM film GROUP BY language_id;", + "SELECT COUNT(*) AS cnt FROM film GROUP BY language_id;", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := 
q.RuleNoDeterministicGroupby() + if rule.Item != "RES.001" { + t.Error("Rule not match:", rule.Item, "Expect : RES.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNoDeterministicGroupby() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// RES.002 +func TestRuleNoDeterministicLimit(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select col1,col2 from tbl where name='zhangsan' limit 10", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNoDeterministicLimit() + if rule.Item != "RES.002" { + t.Error("Rule not match:", rule.Item, "Expect : RES.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// RES.003 +func TestRuleUpdateDeleteWithLimit(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "UPDATE film SET length = 120 WHERE title = 'abc' LIMIT 1;", + }, + { + "UPDATE film SET length = 120 WHERE title = 'abc';", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUpdateDeleteWithLimit() + if rule.Item != "RES.003" { + t.Error("Rule not match:", rule.Item, "Expect : RES.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUpdateDeleteWithLimit() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", 
common.GetFunctionName()) +} + +// RES.004 +func TestRuleUpdateDeleteWithOrderby(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "UPDATE film SET length = 120 WHERE title = 'abc' ORDER BY title;", + }, + { + "UPDATE film SET length = 120 WHERE title = 'abc';", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUpdateDeleteWithOrderby() + if rule.Item != "RES.004" { + t.Error("Rule not match:", rule.Item, "Expect : RES.004") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUpdateDeleteWithOrderby() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// RES.005 +func TestRuleUpdateSetAnd(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "update tbl set col = 1 and cl = 2 where col=3;", + }, + { + "update tbl set col = 1 ,cl = 2 where col=3;", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUpdateSetAnd() + if rule.Item != "RES.005" { + t.Error("Rule not match:", rule.Item, "Expect : RES.005") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUpdateSetAnd() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// RES.006 +func TestRuleImpossibleWhere(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "select * from 
tbl where 1 != 1;", + "select * from tbl where 'a' != 'a';", + "select * from tbl where col between 10 AND 5;", + }, + { + "select * from tbl where 1 = 1;", + "select * from tbl where 'a' != 1;", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleImpossibleWhere() + if rule.Item != "RES.006" { + t.Error("Rule not match:", rule.Item, "Expect : RES.006, SQL: ", sql) + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleImpossibleWhere() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK, SQL: ", sql) + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// RES.007 +func TestRuleMeaninglessWhere(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "select * from tbl where 1 = 1;", + "select * from tbl where 'a' = 'a';", + "select * from tbl where 'a' != 1;", + }, + { + "select * from tbl where 2 = 1;", + "select * from tbl where 'b' = 'a';", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleMeaninglessWhere() + if rule.Item != "RES.007" { + t.Error("Rule not match:", rule.Item, "Expect : RES.007, SQL: ", sql) + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleMeaninglessWhere() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK, SQL: ", sql) + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// RES.008 +func TestRuleLoadFile(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "LOAD DATA INFILE 
'data.txt' INTO TABLE db2.my_table;", + "LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table;", + "LOAD /*COMMENT*/DATA INFILE 'data.txt' INTO TABLE db2.my_table;", + `SELECT a,b,a+b INTO OUTFILE '/tmp/result.txt' FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n' FROM test_table;`, + }, + { + "SELECT id, data INTO @x, @y FROM test.t1 LIMIT 1;", + }, + } + for _, sql := range sqls[0] { + q := &Query4Audit{Query: sql} + rule := q.RuleLoadFile() + if rule.Item != "RES.008" { + t.Error("Rule not match:", rule.Item, "Expect : RES.008, SQL: ", sql) + } + } + + for _, sql := range sqls[1] { + q := &Query4Audit{Query: sql} + rule := q.RuleLoadFile() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK, SQL: ", sql) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// STA.001 +func TestRuleStandardINEQ(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select col1,col2 from tbl where type!=0", + // "select col1,col2 from tbl where type<>0", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleStandardINEQ() + if rule.Item != "STA.001" { + t.Error("Rule not match:", rule.Item, "Expect : STA.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KWR.002 +func TestRuleUseKeyWord(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "CREATE TABLE tbl (`select` int)", + "CREATE TABLE `select` (a int)", + "ALTER TABLE tbl ADD COLUMN `select` varchar(10)", + }, + { + "CREATE TABLE tbl (a int)", + "ALTER TABLE tbl ADD COLUMN col varchar(10)", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUseKeyWord() + if rule.Item != "KWR.002" { + t.Error("Rule not match:", rule.Item, "Expect : 
KWR.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUseKeyWord() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KWR.003 +func TestRulePluralWord(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "CREATE TABLE tbl (`people` int)", + "CREATE TABLE people (a int)", + "ALTER TABLE tbl ADD COLUMN people varchar(10)", + }, + { + "CREATE TABLE tbl (`person` int)", + "ALTER TABLE tbl ADD COLUMN person varchar(10)", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RulePluralWord() + if rule.Item != "KWR.003" { + t.Error("Rule not match:", rule.Item, "Expect : KWR.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RulePluralWord() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// LCK.001 +func TestRuleInsertSelect(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `INSERT INTO tbl SELECT * FROM tbl2;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleInsertSelect() + if rule.Item != "LCK.001" { + t.Error("Rule not match:", rule.Item, "Expect : LCK.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// LCK.002 +func TestRuleInsertOnDup(t *testing.T) { + common.Log.Debug("Entering function: %s", 
common.GetFunctionName()) + sqls := []string{ + `INSERT INTO t1(a,b,c) VALUES (1,2,3) ON DUPLICATE KEY UPDATE c=c+1;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleInsertOnDup() + if rule.Item != "LCK.002" { + t.Error("Rule not match:", rule.Item, "Expect : LCK.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// SUB.001 +func TestRuleInSubquery(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select col1,col2,col3 from table1 where col2 in(select col from table2)", + "SELECT col1,col2,col3 from table1 where col2 =(SELECT col2 FROM `table1` limit 1)", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleInSubquery() + if rule.Item != "SUB.001" { + t.Error("Rule not match:", rule.Item, "Expect : SUB.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// LIT.003 +func TestRuleMultiValueAttribute(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "select c1,c2,c3,c4 from tab1 where col_id REGEXP '[[:<:]]12[[:>:]]'", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleMultiValueAttribute() + if rule.Item != "LIT.003" { + t.Error("Rule not match:", rule.Item, "Expect : LIT.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// LIT.003 +func TestRuleAddDelimiter(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `use sakila + select * from film`, + `use sakila`, + `show databases`, + }, + { + `use sakila;`, + }, + } + for _, sql := range sqls[0] { + q, _ := 
NewQuery4Audit(sql) + + rule := q.RuleAddDelimiter() + if rule.Item != "LIT.004" { + t.Error("Rule not match:", rule.Item, "Expect : LIT.004") + } + } + for _, sql := range sqls[1] { + q, _ := NewQuery4Audit(sql) + + rule := q.RuleAddDelimiter() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KEY.003 +func TestRuleRecursiveDependency(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `CREATE TABLE tab2 ( + p_id BIGINT UNSIGNED NOT NULL, + a_id BIGINT UNSIGNED NOT NULL, + PRIMARY KEY (p_id, a_id), + FOREIGN KEY (p_id) REFERENCES tab1(p_id), + FOREIGN KEY (a_id) REFERENCES tab3(a_id) + );`, + `ALTER TABLE tbl2 add FOREIGN KEY (p_id) REFERENCES tab1(p_id);`, + }, + { + `ALTER TABLE tbl2 ADD KEY (p_id) p_id;`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleRecursiveDependency() + if rule.Item != "KEY.003" { + t.Error("Rule not match:", rule.Item, "Expect : KEY.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleRecursiveDependency() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.009 +func TestRuleImpreciseDataType(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `CREATE TABLE tab2 ( + p_id BIGINT UNSIGNED NOT NULL, + a_id BIGINT UNSIGNED NOT NULL, + hours float NOT null, + PRIMARY KEY (p_id, a_id) + );`, + `alter table tbl add column c float not null;`, + `insert into tb (col) values (0.00001);`, + `select * from tb where col = 0.00001;`, + }, + { + "REPLACE INTO `binks3` 
(`hostname`,`storagehost`, `filename`, `starttime`, `binlogstarttime`, `uploadname`, `binlogsize`, `filesize`, `md5`, `status`) VALUES (1, 1, 1, 1, 1, 1, ?, ?);", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleImpreciseDataType() + if rule.Item != "COL.009" { + t.Error("Rule not match:", rule.Item, "Expect : COL.009") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleImpreciseDataType() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.010 +func TestRuleValuesInDefinition(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `create table tab1(status ENUM('new', 'in progress', 'fixed'))`, + `alter table tab1 add column status ENUM('new', 'in progress', 'fixed')`, + } + + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleValuesInDefinition() + if rule.Item != "COL.010" { + t.Error("Rule not match:", rule.Item, "Expect : COL.010") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KEY.004 +func TestRuleIndexAttributeOrder(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `create index idx1 on tabl(last_name,first_name);`, + `alter table tabl add index idx1 (last_name,first_name);`, + `CREATE TABLE test (id int,blob_col BLOB, INDEX(blob_col(10),id));`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleIndexAttributeOrder() + if rule.Item != "KEY.004" { + t.Error("Rule not match:", rule.Item, "Expect : KEY.004") + } + } else { + 
t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.011 +func TestRuleNullUsage(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select c1,c2,c3 from tabl where c4 is null or c4 <> 1;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNullUsage() + if rule.Item != "COL.011" { + t.Error("Rule not match:", rule.Item, "Expect : COL.011") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// FUN.003 +func TestRuleStringConcatenation(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select c1 || coalesce(' ' || c2 || ' ', ' ') || c3 as c from tabl;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleStringConcatenation() + if rule.Item != "FUN.003" { + t.Error("Rule not match:", rule.Item, "Expect : FUN.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// FUN.004 +func TestRuleSysdate(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select sysdate();`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSysdate() + if rule.Item != "FUN.004" { + t.Error("Rule not match:", rule.Item, "Expect : FUN.004") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// FUN.005 +func TestRuleCountConst(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `select count(1) from tbl;`, + `select count(col) from tbl;`, + }, + { + `select count(*) from tbl`, + `select 
count(DISTINCT col) from tbl`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCountConst() + if rule.Item != "FUN.005" { + t.Error("Rule not match:", rule.Item, "Expect : FUN.005") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCountConst() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// FUN.006 +func TestRuleSumNPE(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `select sum(1) from tbl;`, + `select sum(col) from tbl;`, + }, + { + `SELECT IF(ISNULL(SUM(COL)), 0, SUM(COL)) FROM tbl`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSumNPE() + if rule.Item != "FUN.006" { + t.Error("Rule not match:", rule.Item, "Expect : FUN.006") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSumNPE() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ARG.007 +func TestRulePatternMatchingUsage(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select c1,c2,c3,c4 from tab1 where col_id REGEXP '[[:<:]]12[[:>:]]';`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RulePatternMatchingUsage() + if rule.Item != "ARG.007" { + t.Error("Rule not match:", rule.Item, "Expect : ARG.007") + } + } else { + t.Error("sqlparser.Parse Error:", err) 
+ } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.012 +func TestRuleSpaghettiQueryAlert(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select 1`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + common.Config.SpaghettiQueryLength = 1 + rule := q.RuleSpaghettiQueryAlert() + if rule.Item != "CLA.012" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.012") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// JOI.005 +func TestRuleReduceNumberOfJoin(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select bp1.p_id, b1.d_d as l, b1.b_id from b1 join bp1 on (b1.b_id = bp1.b_id) left outer join (b1 as b2 join bp2 on (b2.b_id = bp2.b_id)) on (bp1.p_id = bp2.p_id ) join bp21 on (b1.b_id = bp1.b_id) join bp31 on (b1.b_id = bp1.b_id) join bp41 on (b1.b_id = bp1.b_id) where b2.b_id = 0; `, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleReduceNumberOfJoin() + if rule.Item != "JOI.005" { + t.Error("Rule not match:", rule.Item, "Expect : JOI.005") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// DIS.001 +func TestRuleDistinctUsage(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `SELECT DISTINCT c.c_id,count(DISTINCT c.c_name),count(DISTINCT c.c_e),count(DISTINCT c.c_n),count(DISTINCT c.c_me),c.c_d FROM (select distinct xing, name from B) as e WHERE e.country_id = c.country_id;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleDistinctUsage() + if rule.Item != "DIS.001" { + t.Error("Rule not match:", rule.Item, "Expect : DIS.001") + } + } else 
{ + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// DIS.002 +func TestRuleCountDistinctMultiCol(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "SELECT COUNT(DISTINCT col, col2) FROM tbl;", + }, + { + "SELECT COUNT(DISTINCT col) FROM tbl;", + `SELECT JSON_OBJECT( "key", p.id, "title", p.name, "manufacturer", p.manufacturer, "price", p.price, "specifications", JSON_OBJECTAGG(a.name, v.value)) as product FROM product as p JOIN value as v ON p.id = v.prod_id JOIN attribute as a ON a.id = v.attribute_id GROUP BY v.prod_id`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCountDistinctMultiCol() + if rule.Item != "DIS.002" { + t.Error("Rule not match:", rule.Item, "Expect : DIS.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCountDistinctMultiCol() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// DIS.003 +// RuleDistinctStar +func TestRuleDistinctStar(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "SELECT DISTINCT * FROM film;", + "SELECT DISTINCT film.* FROM film;", + }, + { + "SELECT DISTINCT col FROM film;", + "SELECT DISTINCT film.* FROM film, tbl;", + "SELECT DISTINCT * FROM film, tbl;", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleDistinctStar() + if rule.Item != "DIS.003" { + t.Error("Rule not match:", rule.Item, "Expect : DIS.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := 
NewQuery4Audit(sql) + if err == nil { + rule := q.RuleDistinctStar() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.013 +func TestRuleHavingClause(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `SELECT s.c_id,count(s.c_id) FROM s where c = test GROUP BY s.c_id HAVING s.c_id <> '1660' AND s.c_id <> '2' order by s.c_id;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleHavingClause() + if rule.Item != "CLA.013" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.013") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.017 +func TestRuleForbiddenSyntax(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `create view v_today (today) AS SELECT CURRENT_DATE;`, + `CREATE VIEW v (mycol) AS SELECT 'abc';`, + `CREATE FUNCTION hello (s CHAR(20));`, + `CREATE PROCEDURE simpleproc (OUT param1 INT)`, + } + for _, sql := range sqls { + q, _ := NewQuery4Audit(sql) + rule := q.RuleForbiddenSyntax() + if rule.Item != "CLA.017" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.017") + } + + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// JOI.006 +func TestRuleNestedSubQueries(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `SELECT s,p,d FROM tabl WHERE p.p_id = (SELECT s.p_id FROM tabl WHERE s.c_id = 100996 AND s.q = 1 );`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNestedSubQueries() + if rule.Item != "JOI.006" { + t.Error("Rule not match:", rule.Item, "Expect : JOI.006") + } + } else { + 
t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// JOI.007 +func TestRuleMultiDeleteUpdate(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `DELETE u FROM users u LEFT JOIN hobby tna ON u.id = tna.uid WHERE tna.hobby = 'piano'; `, + `UPDATE users u LEFT JOIN hobby h ON u.id = h.uid SET u.name = 'pianoboy' WHERE h.hobby = 'piano';`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleMultiDeleteUpdate() + if rule.Item != "JOI.007" { + t.Error("Rule not match:", rule.Item, "Expect : JOI.007") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// JOI.008 +func TestRuleMultiDBJoin(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `SELECT s,p,d FROM db1.tb1 join db2.tb2 on db1.tb1.a = db2.tb2.a where db1.tb1.a > 10;`, + `SELECT s,p,d FROM db1.tb1 join tb2 on db1.tb1.a = tb2.a where db1.tb1.a > 10;`, + // `SELECT s,p,d FROM db1.tb1 join db1.tb2 on db1.tb1.a = db1.tb2.a where db1.tb1.a > 10;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleMultiDBJoin() + if rule.Item != "JOI.008" { + t.Error("Rule not match:", rule.Item, "Expect : JOI.008") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ARG.008 +func TestRuleORUsage(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `SELECT c1,c2,c3 FROM tabl WHERE c1 = 14 OR c2 = 17;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleORUsage() + if rule.Item != "ARG.008" { + t.Error("Rule not match:", rule.Item, "Expect : ARG.008") + } + } else { + 
t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ARG.009 +func TestRuleSpaceWithQuote(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `SELECT 'a ';`, + `SELECT ' a';`, + `SELECT "a ";`, + `SELECT " a";`, + }, + { + `select ''`, + `select 'a'`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSpaceWithQuote() + if rule.Item != "ARG.009" { + t.Error("Rule not match:", rule.Item, "Expect : ARG.009") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSpaceWithQuote() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ARG.010 +func TestRuleHint(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `SELECT * FROM t1 USE INDEX (i1) ORDER BY a;`, + `SELECT * FROM t1 IGNORE INDEX (i1) ORDER BY (i2);`, + // vitess syntax not support now + // `SELECT * FROM t1 USE INDEX (i1,i2) IGNORE INDEX (i2);`, + // `SELECT * FROM t1 USE INDEX (i1) IGNORE INDEX (i2) USE INDEX (i2);`, + }, + { + `select ''`, + `select 'a'`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleHint() + if rule.Item != "ARG.010" { + t.Error("Rule not match:", rule.Item, "Expect : ARG.010") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleHint() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting 
function: %s", common.GetFunctionName()) +} + +// ARG.011 +func TestNot(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `select id from t where num not in(1,2,3);`, + `select id from t where num not like "a%"`, + }, + { + `select id from t where num in(1,2,3);`, + `select id from t where num like "a%"`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNot() + if rule.Item != "ARG.011" { + t.Error("Rule not match:", rule.Item, "Expect : ARG.011") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNot() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// SUB.002 +func TestRuleUNIONUsage(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select teacher_id as id,people_name as name from t1,t2 where t1.teacher_id=t2.people_id union select student_id as id,people_name as name from t1,t2 where t1.student_id=t2.people_id;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUNIONUsage() + if rule.Item != "SUB.002" { + t.Error("Rule not match:", rule.Item, "Expect : SUB.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// SUB.003 +func TestRuleDistinctJoinUsage(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `SELECT DISTINCT c.c_id, c.c_name FROM c,e WHERE e.c_id = c.c_id;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleDistinctJoinUsage() + if rule.Item != "SUB.003" { 
+ t.Error("Rule not match:", rule.Item, "Expect : SUB.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// SUB.005 +func TestRuleSubQueryLimit(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `SELECT * FROM staff WHERE name IN (SELECT NAME FROM customer ORDER BY name LIMIT 1)`, + }, + { + `select * from (select id from tbl limit 3) as foo`, + `select * from tbl where id in (select t.id from (select * from tbl limit 3)as t)`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSubQueryLimit() + if rule.Item != "SUB.005" { + t.Error("Rule not match:", rule.Item, "Expect : SUB.005") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSubQueryLimit() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// SUB.006 +func TestRuleSubQueryFunctions(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `SELECT * FROM staff WHERE name IN (SELECT max(NAME) FROM customer)`, + }, + { + `select * from (select id from tbl limit 3) as foo`, + `select * from tbl where id in (select t.id from (select * from tbl limit 3)as t)`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSubQueryFunctions() + if rule.Item != "SUB.006" { + t.Error("Rule not match:", rule.Item, "Expect : SUB.006") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSubQueryFunctions() + if rule.Item != "OK" { 
+ t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// SEC.002 +func TestRuleReadablePasswords(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `create table test(id int,name varchar(20) not null,password varchar(200)not null);`, + `alter table test add column password varchar(200) not null;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleReadablePasswords() + if rule.Item != "SEC.002" { + t.Error("Rule not match:", rule.Item, "Expect : SEC.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// SEC.003 +func TestRuleDataDrop(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `delete from tb where a = b;`, + `truncate table tb;`, + `drop table tb;`, + `drop database db;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleDataDrop() + if rule.Item != "SEC.003" { + t.Error("Rule not match:", rule.Item, "Expect : SEC.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// FUN.001 +func TestCompareWithFunction(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + {`select id from t where substring(name,1,3)='abc';`}, + // TODO: 右侧使用函数比较 + {`select id from t where 'abc'=substring(name,1,3);`}, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCompareWithFunction() + if rule.Item != "FUN.001" { + t.Error("Rule not match:", rule.Item, "Expect : FUN.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := 
range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCompareWithFunction() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// FUN.002 +func TestRuleCountStar(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `SELECT c3, COUNT(*) AS accounts FROM tab where c2 < 10000 GROUP BY c3 ORDER BY num;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCountStar() + if rule.Item != "FUN.002" { + t.Error("Rule not match:", rule.Item, "Expect : FUN.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// SEC.001 +func TestRuleTruncateTable(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `TRUNCATE TABLE tbl_name;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleTruncateTable() + if rule.Item != "SEC.001" { + t.Error("Rule not match:", rule.Item, "Expect : SEC.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ARG.005 +func TestRuleIn(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select id from t where num in(1,2,3);`, + `SELECT * FROM tbl WHERE col IN (NULL)`, + `SELECT * FROM tbl WHERE col NOT IN (NULL)`, + } + common.Config.MaxInCount = 0 + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleIn() + if rule.Item != "ARG.005" && rule.Item != "ARG.004" { + t.Error("Rule not match:", rule.Item, "Expect : ARG.005 OR ARG.004") + } + } else { + t.Error("sqlparser.Parse Error:", err) + 
} + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ARG.006 +func TestRuleisNullIsNotNull(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select id from t where num is null;`, + `select id from t where num is not null;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleIsNullIsNotNull() + if rule.Item != "ARG.006" { + t.Error("Rule not match:", rule.Item, "Expect : ARG.006") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.008 +func TestRuleVarcharVSChar(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `create table t1(id int,name char(20),last_time date);`, + `create table t1(id int,name binary(20),last_time date);`, + `alter table t1 add column id int, add column name binary(20), add column last_time date;`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleVarcharVSChar() + if rule.Item != "COL.008" { + t.Error("Rule not match:", rule.Item, "Expect : COL.008") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// TBL.003 +func TestRuleCreateDualTable(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `create table dual(id int, primary key (id));`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCreateDualTable() + if rule.Item != "TBL.003" { + t.Error("Rule not match:", rule.Item, "Expect : TBL.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ALT.001 +func TestRuleAlterCharset(t *testing.T) { + common.Log.Debug("Entering function: 
%s", common.GetFunctionName()) + sqls := [][]string{ + { + `alter table tbl default character set 'utf8';`, + `alter table tbl default character set='utf8';`, + `ALTER TABLE t1 CHANGE a b BIGINT NOT NULL, default character set utf8`, + `ALTER TABLE t1 CHANGE a b BIGINT NOT NULL,default character set utf8`, + `ALTER TABLE tbl_name CHARACTER SET charset_name;`, + `ALTER TABLE t1 CHANGE a b BIGINT NOT NULL, character set utf8`, + `ALTER TABLE t1 CHANGE a b BIGINT NOT NULL,character set utf8`, + `alter table t1 convert to character set utf8 collate utf8_unicode_ci;`, + `alter table t1 default collate = utf8_unicode_ci;`, + }, + { + // 反面的例子 + `ALTER TABLE t MODIFY latin1_text_col TEXT CHARACTER SET utf8`, + `ALTER TABLE t1 CHANGE c1 c1 TEXT CHARACTER SET utf8;`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAlterCharset() + if rule.Item != "ALT.001" { + t.Error(sql, " Rule not match:", rule.Item, "Expect : ALT.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAlterCharset() + if rule.Item != "OK" { + t.Error(sql, " Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ALT.003 +func TestRuleAlterDropColumn(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `alter table film drop column title;`, + }, + { + // 反面的例子 + `ALTER TABLE t1 CHANGE c1 c1 TEXT CHARACTER SET utf8;`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAlterDropColumn() + if rule.Item != "ALT.003" { + t.Error(sql, " Rule not match:", rule.Item, "Expect : ALT.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := 
NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAlterDropColumn() + if rule.Item != "OK" { + t.Error(sql, " Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// ALT.004 +func TestRuleAlterDropKey(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `alter table film drop primary key`, + `alter table film drop foreign key fk_film_language`, + }, + { + // 反面的例子 + `ALTER TABLE t1 CHANGE c1 c1 TEXT CHARACTER SET utf8;`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAlterDropKey() + if rule.Item != "ALT.004" { + t.Error(sql, " Rule not match:", rule.Item, "Expect : ALT.004") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAlterDropKey() + if rule.Item != "OK" { + t.Error(sql, " Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.012 +func TestRuleCantBeNull(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "CREATE TABLE `sbtest` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` longblob, PRIMARY KEY (`id`));", + "alter TABLE `sbtest` add column `c` longblob;", + "alter TABLE `sbtest` add column `c` text;", + "alter TABLE `sbtest` add column `c` blob;", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleCantBeNull() + if rule.Item != "COL.012" { + t.Error("Rule not match:", rule.Item, "Expect : COL.012") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KEY.006 +func 
TestRuleTooManyKeyParts(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "CREATE TABLE `sbtest` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` longblob NOT NULL DEFAULT '', PRIMARY KEY (`id`));", + "alter TABLE `sbtest` add index idx_idx (`id`);", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + common.Config.MaxIdxColsCount = 0 + rule := q.RuleTooManyKeyParts() + if rule.Item != "KEY.006" { + t.Error("Rule not match:", rule.Item, "Expect : KEY.006") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KEY.005 +func TestRuleTooManyKeys(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "create table tbl ( a char(10), b int, primary key (`a`)) engine=InnoDB;", + "create table tbl ( a varchar(64) not null, b int, PRIMARY KEY (`a`), key `idx_a_b` (`a`,`b`)) engine=InnoDB", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + common.Config.MaxIdxCount = 0 + rule := q.RuleTooManyKeys() + if rule.Item != "KEY.005" { + t.Error("Rule not match:", rule.Item, "Expect : KEY.005") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KEY.007 +func TestRulePKNotInt(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "create table tbl ( a char(10), b int, primary key (`a`)) engine=InnoDB;", + "create table tbl ( a int, b int, primary key (`a`)) engine=InnoDB;", + "create table tbl ( a bigint, b int, primary key (`a`)) engine=InnoDB;", + "create table tbl ( a int unsigned, b int, primary key (`a`)) engine=InnoDB;", + "create table tbl ( a bigint unsigned, b int, primary key (`a`)) engine=InnoDB;", + }, + { + "CREATE TABLE tbl (a int unsigned auto_increment, b 
int, primary key(`a`)) engine=InnoDB;", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RulePKNotInt() + if rule.Item != "KEY.007" && rule.Item != "KEY.001" { + t.Error("Rule not match:", rule.Item, "Expect : KEY.007 OR KEY.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RulePKNotInt() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KEY.008 +func TestRuleOrderByMultiDirection(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `SELECT col FROM tbl order by col desc, col2 asc`, + }, + { + `SELECT col FROM tbl order by col, col2`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleOrderByMultiDirection() + if rule.Item != "KEY.008" { + t.Error("Rule not match:", rule.Item, "Expect : KEY.008") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleOrderByMultiDirection() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KEY.009 +func TestRuleUniqueKeyDup(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `ALTER TABLE customer ADD UNIQUE INDEX part_of_name (name(10));`, + `CREATE UNIQUE INDEX part_of_name ON customer (name(10));`, + }, + { + `ALTER TABLE tbl add INDEX idx_col (col);`, + `CREATE INDEX part_of_name ON customer (name(10));`, + }, + } + for _, sql := range 
sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUniqueKeyDup() + if rule.Item != "KEY.009" { + t.Error("Rule not match:", rule.Item, "Expect : KEY.009") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleUniqueKeyDup() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.013 +func TestRuleTimestampDefault(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "CREATE TABLE tbl( `id` bigint not null, `create_time` timestamp) ENGINE=InnoDB DEFAULT CHARSET=utf8;", + "ALTER TABLE t1 MODIFY b timestamp NOT NULL;", + }, + { + "CREATE TABLE tbl (`id` bigint not null, `update_time` timestamp default current_timestamp)", + "ALTER TABLE t1 MODIFY b timestamp NOT NULL default current_timestamp;", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleTimestampDefault() + if rule.Item != "COL.013" { + t.Error("Rule not match:", rule.Item, "Expect : COL.013") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleTimestampDefault() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// TBL.004 +func TestRuleAutoIncrementInitNotZero(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + // 正面的例子 + { + "CREATE TABLE `sbtest` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `pad` char(60) NOT NULL DEFAULT '', PRIMARY KEY (`id`)) 
ENGINE=InnoDB AUTO_INCREMENT=13", + }, + // 反面的例子 + { + "CREATE TABLE `test1` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `pad` char(60) NOT NULL DEFAULT '', PRIMARY KEY (`id`))", + "CREATE TABLE `test1` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `pad` char(60) NOT NULL DEFAULT '', PRIMARY KEY (`id`)) auto_increment = 1", + "CREATE TABLE `test1` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `pad` char(60) NOT NULL DEFAULT '', PRIMARY KEY (`id`)) auto_increment = 1 DEFAULT CHARSET=latin1", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAutoIncrementInitNotZero() + if rule.Item != "TBL.004" { + t.Error("Rule not match:", rule.Item, "Expect : TBL.004") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAutoIncrementInitNotZero() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.014 +func TestRuleColumnWithCharset(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + // 正面的例子 + { + "CREATE TABLE `tb2` ( `id` int(11) DEFAULT NULL, `col` char(10) CHARACTER SET utf8 DEFAULT NULL)", + "alter table tb2 change col col char(10) CHARACTER SET utf8 DEFAULT NULL;", + }, + // 反面的例子 + { + "CREATE TABLE `sbtest` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` char(120) NOT NULL DEFAULT '', PRIMARY KEY (`id`))", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleColumnWithCharset() + if rule.Item != "COL.014" { + t.Error("Rule not match:", rule.Item, "Expect : COL.014") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { 
+ rule := q.RuleColumnWithCharset() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// TBL.005 +func TestRuleTableCharsetCheck(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "create table tbl (a int) DEFAULT CHARSET=latin1;", + "ALTER TABLE tbl CONVERT TO CHARACTER SET latin1;", + }, + { + "create table tlb (a int);", + "ALTER TABLE `tbl` add column a int, add column b int ;", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleTableCharsetCheck() + if rule.Item != "TBL.005" { + t.Error("Rule not match:", rule.Item, "Expect : TBL.005") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleTableCharsetCheck() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.015 +func TestRuleBlobDefaultValue(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "CREATE TABLE `sbtest` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` blob NOT NULL DEFAULT '', PRIMARY KEY (`id`));", + "alter table `sbtest` add column `c` blob NOT NULL DEFAULT '';", + }, + { + "CREATE TABLE `sbtest` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` blob NOT NULL, PRIMARY KEY (`id`));", + "alter table `sbtest` add column `c` blob NOT NULL DEFAULT NULL;", + }, + } + + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleBlobDefaultValue() + if rule.Item != "COL.015" { + t.Error("Rule not match:", rule.Item, "Expect : COL.015") + 
} + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleBlobDefaultValue() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.016 +func TestRuleIntPrecision(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "CREATE TABLE `sbtest` ( `id` int(1) );", + "CREATE TABLE `sbtest` ( `id` bigint(1) );", + "alter TABLE `sbtest` add column `id` bigint(1);", + "alter TABLE `sbtest` add column `id` int(1);", + }, + { + "CREATE TABLE `sbtest` ( `id` int(10));", + "CREATE TABLE `sbtest` ( `id` bigint(20));", + "alter TABLE `sbtest` add column `id` bigint(20);", + "alter TABLE `sbtest` add column `id` int(10);", + "CREATE TABLE `sbtest` ( `id` int);", + "alter TABLE `sbtest` add column `id` bigint;", + }, + } + + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleIntPrecision() + if rule.Item != "COL.016" { + t.Error("Rule not match:", rule.Item, "Expect : COL.016") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleIntPrecision() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.017 +func TestRuleVarcharLength(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "CREATE TABLE `sbtest` ( `id` varchar(4000) );", + "CREATE TABLE `sbtest` ( `id` varchar(3500) );", + "alter TABLE `sbtest` add column `id` varchar(3500);", + }, + { + "CREATE TABLE `sbtest` ( 
`id` varchar(1024));", + "CREATE TABLE `sbtest` ( `id` varchar(20));", + "alter TABLE `sbtest` add column `id` varchar(35);", + }, + } + + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleVarcharLength() + if rule.Item != "COL.017" { + t.Error("Rule not match:", rule.Item, "Expect : COL.017") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleVarcharLength() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// KEY.002 +func TestRuleNoOSCKey(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + // 正面的例子 + { + "CREATE TABLE tbl (a int, b int)", + }, + // 反面的例子 + { + "CREATE TABLE tbl (a int, primary key(`a`))", + "CREATE TABLE tbl (a int, unique key(`a`))", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNoOSCKey() + if rule.Item != "KEY.002" { + t.Error("Rule not match:", rule.Item, "Expect : KEY.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleNoOSCKey() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.006 +func TestRuleTooManyFields(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "create table tbl (a int);", + } + + common.Config.MaxColCount = 0 + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleTooManyFields() + if rule.Item != 
"COL.006" { + t.Error("Rule not match:", rule.Item, "Expect : COL.006") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// TBL.002 +func TestRuleAllowEngine(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "CREATE TABLE tbl (a int) engine=myisam;", + "ALTER TABLE tbl engine=myisam;", + "CREATE TABLE tbl (a int);", + }, + { + "CREATE TABLE tbl (a int) engine = InnoDB;", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAllowEngine() + if rule.Item != "TBL.002" { + t.Error("Rule not match:", rule.Item, "Expect : TBL.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAllowEngine() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// TBL.001 +func TestRulePartitionNotAllowed(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `CREATE TABLE trb3 (id INT, name VARCHAR(50), purchased DATE) PARTITION BY RANGE( YEAR(purchased) ) + ( + PARTITION p0 VALUES LESS THAN (1990), + PARTITION p1 VALUES LESS THAN (1995), + PARTITION p2 VALUES LESS THAN (2000), + PARTITION p3 VALUES LESS THAN (2005) + );`, + `ALTER TABLE t1 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2002));`, + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RulePartitionNotAllowed() + if rule.Item != "TBL.001" { + t.Error("Rule not match:", rule.Item, "Expect : TBL.001") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// COL.003 +func 
TestRuleAutoIncUnsigned(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + "CREATE TABLE `sbtest` ( `id` int(10) NOT NULL AUTO_INCREMENT, `c` longblob, PRIMARY KEY (`id`));", + "ALTER TABLE `tbl` ADD COLUMN `id` int(10) NOT NULL AUTO_INCREMENT;", + } + for _, sql := range sqls { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleAutoIncUnsigned() + if rule.Item != "COL.003" { + t.Error("Rule not match:", rule.Item, "Expect : COL.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// STA.003 +func TestRuleIdxPrefix(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "CREATE TABLE tbl (a int, unique key `xx_a` (`a`));", + "CREATE TABLE tbl (a int, key `xx_a` (`a`));", + `ALTER TABLE tbl ADD INDEX xx_a (a)`, + `ALTER TABLE tbl ADD UNIQUE INDEX xx_a (a)`, + }, + { + `ALTER TABLE tbl ADD INDEX idx_a (a)`, + `ALTER TABLE tbl ADD UNIQUE INDEX uk_a (a)`, + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleIdxPrefix() + if rule.Item != "STA.003" { + t.Error("Rule not match:", rule.Item, "Expect : STA.003") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleIdxPrefix() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// STA.004 +func TestRuleStandardName(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "CREATE TABLE `tbl-name` (a int);", + "CREATE TABLE `tbl `(a int)", + "CREATE TABLE t__bl (a int);", + }, + { + "CREATE TABLE tbl (a int)", + "CREATE TABLE 
`tbl`(a int)", + "CREATE TABLE `tbl` (a int) ENGINE=InnoDB DEFAULT CHARSET=utf8", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleStandardName() + if rule.Item != "STA.004" { + t.Error("Rule not match:", rule.Item, "Expect : STA.004") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleStandardName() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// STA.002 +func TestRuleSpaceAfterDot(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + "SELECT * FROM sakila. film", + "SELECT film. length FROM film", + }, + { + "SELECT * FROM sakila.film", + "SELECT film.length FROM film", + "SELECT * FROM t1, t2 WHERE t1.title = t2.title", + }, + } + for _, sql := range sqls[0] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSpaceAfterDot() + if rule.Item != "STA.002" { + t.Error("Rule not match:", rule.Item, "Expect : STA.002") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + + for _, sql := range sqls[1] { + q, err := NewQuery4Audit(sql) + if err == nil { + rule := q.RuleSpaceAfterDot() + if rule.Item != "OK" { + t.Error("Rule not match:", rule.Item, "Expect : OK") + } + } else { + t.Error("sqlparser.Parse Error:", err) + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +func TestRuleMySQLError(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + err := errors.New(`Received #1146 error from MySQL server: "can't xxxx"`) + if RuleMySQLError("ERR.002", err).Content != "" { + t.Error("Want: '', Bug get: ", err) + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} 
+ +func TestMergeConflictHeuristicRules(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + tmpRules := make(map[string]Rule) + for item, val := range HeuristicRules { + tmpRules[item] = val + } + err := common.GoldenDiff(func() { + suggest := MergeConflictHeuristicRules(tmpRules) + var sortedSuggest []string + for item := range suggest { + sortedSuggest = append(sortedSuggest, item) + } + sort.Strings(sortedSuggest) + for _, item := range sortedSuggest { + pretty.Println(suggest[item]) + } + }, t.Name(), update) + if err != nil { + t.Error(err) + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} diff --git a/advisor/index.go b/advisor/index.go new file mode 100644 index 00000000..046d8d1a --- /dev/null +++ b/advisor/index.go @@ -0,0 +1,1113 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package advisor + +import ( + "fmt" + "strings" + + "github.com/XiaoMi/soar/ast" + "github.com/XiaoMi/soar/common" + "github.com/XiaoMi/soar/database" + "github.com/XiaoMi/soar/env" + + "github.com/dchest/uniuri" + "vitess.io/vitess/go/vt/sqlparser" +) + +// IndexAdvisor 索引建议需要使用到的所有信息 +type IndexAdvisor struct { + vEnv *env.VirtualEnv // 线下虚拟测试环境(测试环境) + rEnv database.Connector // 线上真实环境 + Ast sqlparser.Statement // Vitess Parser生成的抽象语法树 + where []*common.Column // 所有where条件中用到的列 + whereEQ []*common.Column // where条件中可以加索引的等值条件列 + whereINEQ []*common.Column // where条件中可以加索引的非等值条件列 + groupBy []*common.Column // group by可以加索引列 + orderBy []*common.Column // order by可以加索引列 + joinCond [][]*common.Column // 由于join condition跨层级间索引不可共用,需要多一个维度用来维护层级关系 + IndexMeta map[string]map[string]*database.TableIndexInfo +} + +// IndexInfo 创建一条索引需要的信息 +type IndexInfo struct { + Name string `json:"name"` // 索引名称 + Database string `json:"database"` // 数据库名 + Table string `json:"table"` // 表名 + DDL string `json:"ddl"` // ALTER, CREATE等类型的DDL语句 + ColumnDetails []*common.Column `json:"column_details"` // 列详情 +} + +// IndexAdvises IndexAdvises列表 +type IndexAdvises []IndexInfo + +// mergeAdvices 合并索引建议 +func mergeAdvices(dst []IndexInfo, src ...IndexInfo) IndexAdvises { + if len(src) == 0 { + return dst + } + + for _, newIdx := range src { + has := false + for _, idx := range dst { + if newIdx.DDL == idx.DDL { + common.Log.Debug("merge index %s and %s", idx.Name, newIdx.Name) + has = true + } + } + + if !has { + dst = append(dst, newIdx) + } + } + + return dst +} + +// NewAdvisor 构造一个 IndexAdvisor 的时候就会对其本身结构初始化 +// 获取 condition 中的等值条件、非等值条件,以及group by 、 order by信息 +func NewAdvisor(env *env.VirtualEnv, rEnv database.Connector, q Query4Audit) (*IndexAdvisor, error) { + common.Log.Debug("Enter: NewAdvisor(), Caller: %s", common.Caller()) + if common.Config.TestDSN.Disable { + return nil, fmt.Errorf("TestDSN is Disabled: %s", common.Config.TestDSN.Addr) + } + // DDL 检测 + switch stmt := 
q.Stmt.(type) { + case *sqlparser.DDL: + // 获取ast中用到的库表 + sqlMeta := ast.GetMeta(q.Stmt, nil) + for db := range sqlMeta { + dbRef := db + if db == "" { + dbRef = rEnv.Database + } + + // DDL在Env初始化的时候已经执行过了 + if _, ok := env.TableMap[dbRef]; !ok { + env.TableMap[dbRef] = make(map[string]string) + } + + for _, tb := range sqlMeta[db].Table { + env.TableMap[dbRef][tb.TableName] = tb.TableName + } + } + + return nil, nil + + case *sqlparser.DBDDL: + // 忽略建库语句 + return nil, nil + + case *sqlparser.Use: + // 如果是use,切基础环境 + env.Database = env.DBHash(stmt.DBName.String()) + return nil, nil + } + + return &IndexAdvisor{ + vEnv: env, + rEnv: rEnv, + Ast: q.Stmt, + + // 所有的FindXXXXCols尽最大可能先排除不需要加索引的列,但由于元数据在此阶段尚未补齐,给出的列有可能也无法添加索引 + // 后续需要通过CompleteColumnsInfo + calcCardinality补全后再进一步判断 + joinCond: ast.FindJoinCols(q.Stmt), + whereEQ: ast.FindWhereEQ(q.Stmt), + whereINEQ: ast.FindWhereINEQ(q.Stmt), + groupBy: ast.FindGroupByCols(q.Stmt), + orderBy: ast.FindOrderByCols(q.Stmt), + where: ast.FindAllCols(q.Stmt, "where"), + IndexMeta: make(map[string]map[string]*database.TableIndexInfo), + }, nil +} + +/* + +关于如何添加索引: +在《Relational Database Index Design and the Optimizers》一书中,作者提出著名的的三星索引理论(Three-Star Index) + +To Qualify for the First Star: +Pick the columns from all equal predicates (WHERE COL = . . .). +Make these the first columns of the index—in any order. For CURSOR41, the three-star index will begin with +columns LNAME, CITY or CITY, LNAME. In both cases the index slice that must be scanned will be as thin as possible. + +To Qualify for the Second Star: +Add the ORDER BY columns. Do not change the order of these columns, but ignore columns that were already +picked in step 1. For example, if CURSOR41 had redundant columns in the ORDER BY, say ORDER BY LNAME, +FNAME or ORDER BY FNAME, CITY, only FNAME would be added in this step. When FNAME is the third index column, +the result table will be in the right order without sorting. 
The first FETCH call will return the row with +the smallest FNAME value. + +To Qualify for the Third Star: +Add all the remaining columns from the SELECT statement. The order of the columns added in this step +has no impact on the performance of the SELECT, but the cost of updates should be reduced by placing volatile +columns at the end. Now the index contains all the columns required for an index-only access path. + +索引添加算法正是以这个理想化索策略添为基础,尽可能的给予"三星"索引建议。 + +但又如《High Performance MySQL》一书中所说,索引并不总是最好的工具。只有当索引帮助存储引擎快速查找到记录带来的好处大于其 +带来的额外工作时,索引才是有效的。 + +因此,在三星索引理论的基础上引入启发式索引算法,在第二颗星的实现上做了部分改进,对于非等值条件只会添加散粒度最高的一列到索引中, +并基于总体列的使用情况作出判断,按需对order by、group by添加索引,由此来想`增强索引建议的通用性。 + +*/ + +// IndexAdvise 索引优化建议算法入口主函数 +// TODO 索引顺序该如何确定 +func (idxAdv *IndexAdvisor) IndexAdvise() IndexAdvises { + // 支持不依赖DB的索引建议分析 + if common.Config.TestDSN.Disable { + // 未开启Env原数据依赖,信息不全的情况下可能会给予错误的索引建议,请人工进行核查。 + common.Log.Warn("TestDSN.Disable = true") + } + + // 检查否是否含有子查询 + subQueries := ast.FindSubquery(0, idxAdv.Ast) + var subQueryAdvises []IndexInfo + // 含有子查询对子查询进行单独评审,子查询评审建议报错忽略 + if len(subQueries) > 0 { + for _, subSQL := range subQueries { + stmt, err := sqlparser.Parse(subSQL) + if err != nil { + continue + } + q := Query4Audit{ + Query: subSQL, + Stmt: stmt, + } + subIdxAdv, _ := NewAdvisor(idxAdv.vEnv, idxAdv.rEnv, q) + subQueryAdvises = append(subQueryAdvises, subIdxAdv.IndexAdvise()...) 
+ } + } + + // 变量初始化,用于存放索引信息,按照db.tb.[cols]组织 + indexList := make(map[string]map[string][]*common.Column) + + // 为用到的每一列填充库名,表名等信息 + var joinCond [][]*common.Column + for _, joinCols := range idxAdv.joinCond { + joinCond = append(joinCond, CompleteColumnsInfo(idxAdv.Ast, joinCols, idxAdv.vEnv)) + } + idxAdv.joinCond = joinCond + + idxAdv.where = CompleteColumnsInfo(idxAdv.Ast, idxAdv.where, idxAdv.vEnv) + idxAdv.whereEQ = CompleteColumnsInfo(idxAdv.Ast, idxAdv.whereEQ, idxAdv.vEnv) + idxAdv.whereINEQ = CompleteColumnsInfo(idxAdv.Ast, idxAdv.whereINEQ, idxAdv.vEnv) + idxAdv.groupBy = CompleteColumnsInfo(idxAdv.Ast, idxAdv.groupBy, idxAdv.vEnv) + idxAdv.orderBy = CompleteColumnsInfo(idxAdv.Ast, idxAdv.orderBy, idxAdv.vEnv) + + // 只要在开启使用env元数据的时候才会计算散粒度 + if !common.Config.TestDSN.Disable { + // 计算joinCond, whereEQ, whereINEQ用到的每一列的散粒度,并排序,方便后续添加复合索引 + // groupBy, orderBy列按书写顺序给索引建议,不需要按散粒度排序 + idxAdv.calcCardinality(idxAdv.whereEQ) + idxAdv.calcCardinality(idxAdv.whereINEQ) + idxAdv.calcCardinality(idxAdv.orderBy) + idxAdv.calcCardinality(idxAdv.groupBy) + + for i, joinCols := range idxAdv.joinCond { + idxAdv.calcCardinality(joinCols) + joinCols = common.ColumnSort(joinCols) + idxAdv.joinCond[i] = joinCols + } + + // 根据散粒度进行排序 + // 对所有列进行排序,按散粒度由大到小排序 + idxAdv.whereEQ = common.ColumnSort(idxAdv.whereEQ) + idxAdv.whereINEQ = common.ColumnSort(idxAdv.whereINEQ) + idxAdv.orderBy = common.ColumnSort(idxAdv.orderBy) + idxAdv.groupBy = common.ColumnSort(idxAdv.groupBy) + + } + + // 是否指定Where条件,打标签 + hasWhere := false + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch where := node.(type) { + case *sqlparser.Subquery: + return false, nil + case *sqlparser.Where: + if where != nil { + hasWhere = true + } + } + return true, nil + }, idxAdv.Ast) + common.LogIfError(err, "") + // 获取哪些列被忽略 + var ignore []*common.Column + usedCols := append(idxAdv.whereINEQ, idxAdv.whereEQ...) 
+ + for _, whereCol := range idxAdv.where { + isUsed := false + for _, used := range usedCols { + if whereCol.Equal(used) { + isUsed = true + } + } + + if !isUsed { + common.Log.Debug("column %s in `%s`.`%s` will ignore when adding index", whereCol.DB, whereCol.Table, whereCol.Name) + ignore = append(ignore, whereCol) + } + + } + + // 索引优化算法入口,从这里开始放大招 + if hasWhere { + // 有Where条件的先分析 等值条件 + for _, index := range idxAdv.whereEQ { + // 对应列在前面已经按散粒度由大到小排序好了 + mergeIndex(indexList, index) + } + // 若存在非等值查询条件,可以给第一个非等值条件添加索引 + if len(idxAdv.whereINEQ) > 0 { + mergeIndex(indexList, idxAdv.whereINEQ[0]) + } + // 有WHERE条件,但WHERE条件未能给出索引建议就不能再加GROUP BY和ORDER BY建议了 + if len(ignore) == 0 { + // 没有非等值查询条件时可以再为GroupBy和OrderBy添加索引 + for _, index := range idxAdv.groupBy { + mergeIndex(indexList, index) + } + + // OrderBy + // 没有GroupBy时可以为OrderBy加索引 + if len(idxAdv.groupBy) == 0 { + for _, index := range idxAdv.orderBy { + mergeIndex(indexList, index) + } + } + } + } else { + // 未指定Where条件的,只需要GroupBy和OrderBy的索引建议 + for _, index := range idxAdv.groupBy { + mergeIndex(indexList, index) + } + + // OrderBy + // 没有GroupBy时可以为OrderBy加索引 + if len(idxAdv.groupBy) == 0 { + for _, index := range idxAdv.orderBy { + mergeIndex(indexList, index) + } + } + } + + // 开始整合索引信息,添加索引 + var indexes []IndexInfo + + // 为join添加索引 + // 获取 join condition 中需要加索引的表有哪些 + defaultDB := "" + if !common.Config.TestDSN.Disable { + defaultDB = idxAdv.vEnv.RealDB(idxAdv.vEnv.Database) + } + if !common.Config.OnlineDSN.Disable { + defaultDB = idxAdv.rEnv.Database + } + + // 根据join table的信息给予优化建议 + joinTableMeta := ast.FindJoinTable(idxAdv.Ast, nil).SetDefault(idxAdv.rEnv.Database).SetDefault(defaultDB) + indexes = mergeAdvices(indexes, idxAdv.buildJoinIndex(joinTableMeta)...) + + if common.Config.TestDSN.Disable || common.Config.OnlineDSN.Disable { + // 无env环境下只提供单列索引,无法确定table时不给予优化建议 + // 仅有table信息时给出的建议不包含DB信息 + indexes = mergeAdvices(indexes, idxAdv.buildIndexWithNoEnv(indexList)...) 
+ } else { + // 给出尽可能详细的索引建议 + indexes = mergeAdvices(indexes, idxAdv.buildIndex(indexList)...) + } + + indexes = mergeAdvices(indexes, subQueryAdvises...) + + // 在开启env的情况下,检查数据库版本,字段类型,索引总长度 + indexes = idxAdv.idxColsTypeCheck(indexes) + + // 在开启env的情况下,会对索引进行检查,对全索引进行过滤 + // 在前几步都不会对idx生成DDL语句,DDL语句在这里生成 + return idxAdv.mergeIndexes(indexes) +} + +// idxColsTypeCheck 对超长的字段添加前缀索引,剔除无法添索引字段的列 +// TODO 暂不支持fulltext索引, +func (idxAdv *IndexAdvisor) idxColsTypeCheck(idxList []IndexInfo) []IndexInfo { + if common.Config.TestDSN.Disable { + return rmSelfDupIndex(idxList) + } + + var indexes []IndexInfo + + for _, idx := range idxList { + var newCols []*common.Column + var newColInfo []string + // 索引总长度 + idxBytesTotal := 0 + isOverFlow := false + for _, col := range idx.ColumnDetails { + // 获取字段bytes + bytes := col.GetDataBytes(common.Config.OnlineDSN.Version) + tmpCol := col.Name + overFlow := 0 + // 加上该列后是否索引长度过长 + if bytes < 0 { + // bytes < 0 说明字段的长度是无法计算的 + common.Log.Warning("%s.%s data type not support %s, can't add index", + col.Table, col.Name, col.DataType) + continue + } + + // idx bytes over flow + if total := idxBytesTotal + bytes; total > common.Config.MaxIdxBytes { + + common.Log.Debug("bytes: %d, idxBytesTotal: %d, total: %d, common.Config.MaxIdxBytes: %d", + bytes, idxBytesTotal, total, common.Config.MaxIdxBytes) + + overFlow = total - common.Config.MaxIdxBytes + isOverFlow = true + + } else { + idxBytesTotal = total + } + + // common.Config.MaxIdxColBytes 默认大小 767 + if bytes > common.Config.MaxIdxBytesPerColumn || isOverFlow { + // In 5.6, you may not include a column that equates to + // bigger than 767 bytes: VARCHAR(255) CHARACTER SET utf8 or VARCHAR(191) CHARACTER SET utf8mb4. + // In 5.7 you may not include a column that equates to + // bigger than 3072 bytes. 
+ + // v : 在 col.Character 字符集下每个字符占用 v bytes + v, ok := common.CharSets[strings.ToLower(col.Character)] + if !ok { + // 找不到对应字符集,不添加索引 + // 如果出现不认识的字符集,认为每个字符占用4个字节 + common.Log.Warning("%s.%s(%s) charset not support yet %s, use default 4 bytes length", + col.Table, col.Name, col.DataType, col.Character) + v = 4 + } + + // 保留两个字节的安全余量 + length := (common.Config.MaxIdxBytesPerColumn - 2) / v + if isOverFlow { + // 在索引中添加该列会导致索引长度过长,建议根据需求转换为合理的前缀索引 + // _OPR_SPLIT_ 是自定的用于后续处理的特殊分隔符 + common.Log.Warning("adding index '%s(%s)' to table '%s' causes the index to be too long, overflow is %d", + col.Name, col.DataType, col.Table, overFlow) + tmpCol += fmt.Sprintf("_OPR_SPLIT_(N)") + } else { + // 索引没有过长,可以加一个最长的前缀索引 + common.Log.Warning("index column too large: %s.%s --> %s.%s(%d), data type: %s", + col.Table, col.Name, col.Table, tmpCol, length, col.DataType) + tmpCol += fmt.Sprintf("_OPR_SPLIT_(%d)", length) + } + + } + + newCols = append(newCols, col) + newColInfo = append(newColInfo, tmpCol) + } + + // 为新索引重建索引语句 + idxName := "idx_" + idxCols := "" + for i, newCol := range newColInfo { + // 对名称和可能存在的长度进行拼接 + // 用等号进行分割 + tmp := strings.Split(newCol, "_OPR_SPLIT_") + idxName += tmp[0] + if len(tmp) > 1 { + idxCols += tmp[0] + "`" + tmp[1] + } else { + idxCols += tmp[0] + "`" + } + + if i+1 < len(newColInfo) { + idxName += "_" + idxCols += ",`" + } + } + + // 索引名称最大长度64 + if len(idxName) > 64 { + common.Log.Warn("index '%s' name large than 64", idxName) + idxName = strings.TrimRight(idxName[:64], "_") + } + + // 新的alter语句 + newDDL := fmt.Sprintf("alter table `%s`.`%s` add index `%s` (`%s)", idxAdv.vEnv.RealDB(idx.Database), + idx.Table, idxName, idxCols) + + // 将筛选改造后的索引信息信息加入到新的索引列表中 + idx.ColumnDetails = newCols + idx.DDL = newDDL + indexes = append(indexes, idx) + } + + return indexes +} + +// mergeIndexes 与线上环境对比,将给出的索引建议进行去重 +func (idxAdv *IndexAdvisor) mergeIndexes(idxList []IndexInfo) []IndexInfo { + // TODO 暂不支持前缀索引去重 + if common.Config.TestDSN.Disable { + 
return rmSelfDupIndex(idxList) + } + + var indexes []IndexInfo + for _, idx := range idxList { + // 将DB替换成vEnv中的数据库名称 + dbInVEnv := idx.Database + if _, ok := idxAdv.vEnv.DBRef[idx.Database]; ok { + dbInVEnv = idxAdv.vEnv.DBRef[idx.Database] + } + + // 检测索引添加的表是否是视图 + if idxAdv.vEnv.IsView(idx.Table) { + common.Log.Info("%s.%s is a view. no need indexed", idx.Database, idx.Table) + continue + } + + // 检测是否存在重复索引 + indexMeta := idxAdv.IndexMeta[dbInVEnv][idx.Table] + isExisted := false + + // 检测无索引列的情况 + if len(idx.ColumnDetails) < 1 { + continue + } + + if existedIndexes := indexMeta.FindIndex(database.IndexColumnName, idx.ColumnDetails[0].Name); len(existedIndexes) > 0 { + for _, existedIdx := range existedIndexes { + // flag: 用于标记已存在的索引是否是约束条件 + isConstraint := false + + var cols []string + var colsDetail []*common.Column + + // 把已经存在的key摘出来遍历一遍对比是否是包含关系 + for _, col := range indexMeta.FindIndex(database.IndexKeyName, existedIdx.KeyName) { + cols = append(cols, col.ColumnName) + colsDetail = append(colsDetail, &common.Column{ + Name: col.ColumnName, + Table: idx.Table, + DB: idx.ColumnDetails[0].DB, + }) + } + + // 判断已存在的索引是否属于约束条件(唯一索引、主键) + // 这里可以忽略是否含有外键的情况,因为索引已经重复了,添加了新索引后原先重复的索引是可以删除的。 + if existedIdx.NonUnique == 0 { + common.Log.Debug("%s.%s表%s为约束条件", dbInVEnv, idx.Table, existedIdx.KeyName) + isConstraint = true + } + + // 如果已存在的索引与索引建议存在重叠,则说明无需添加新索引或可能需要给出删除索引的建议 + if common.IsColsPart(colsDetail, idx.ColumnDetails) { + idxName := existedIdx.KeyName + // 如果已经存在的索引包含需要添加的索引,则无需添加 + if len(colsDetail) >= len(idx.ColumnDetails) { + common.Log.Info(" `%s`.`%s` %s already had a index `%s`", + idx.Database, idx.Table, strings.Join(cols, ","), idxName) + isExisted = true + continue + } + + // 库、表、列名需要用反撇转义 + // TODO 关于外键索引去重的优雅解决方案 + if !isConstraint { + if common.Config.AllowDropIndex { + alterSQL := fmt.Sprintf("alter table `%s`.`%s` drop index `%s`", idx.Database, idx.Table, idxName) + indexes = append(indexes, IndexInfo{ + Name: idxName, + Database: 
idx.Database, + Table: idx.Table, + DDL: alterSQL, + ColumnDetails: colsDetail, + }) + } else { + common.Log.Warning("In table `%s`, the new index of column `%s` contains index `%s`,"+ + " maybe you could drop one of them.", existedIdx.Table, + strings.Join(cols, ","), idxName) + } + } + } + } + } + + if !isExisted { + // 检测索引名称是否重复? + if existedIndexes := indexMeta.FindIndex(database.IndexKeyName, idx.Name); len(existedIndexes) > 0 { + var newName string + if len(idx.Name) < 59 { + newName = idx.Name + "_" + uniuri.New()[:4] + } else { + newName = idx.Name[:59] + "_" + uniuri.New()[:4] + } + + common.Log.Warning("duplicate index name '%s', new name is '%s'", idx.Name, newName) + idx.DDL = strings.Replace(idx.DDL, idx.Name, newName, -1) + idx.Name = newName + } + + // 添加合并 + indexes = mergeAdvices(indexes, idx) + } + + } + + // 对索引进行去重 + return rmSelfDupIndex(indexes) +} + +// rmSelfDupIndex 去重传入的[]IndexInfo中重复的索引 +func rmSelfDupIndex(indexes []IndexInfo) []IndexInfo { + var resultIndex []IndexInfo + tmpIndexList := indexes + for _, a := range indexes { + tmp := a + for i, b := range tmpIndexList { + if common.IsColsPart(tmp.ColumnDetails, b.ColumnDetails) && tmp.Name != b.Name { + if len(b.ColumnDetails) > len(tmp.ColumnDetails) { + common.Log.Debug("remove duplicate index: %s", tmp.Name) + tmp = b + } + + if i < len(tmpIndexList) { + tmpIndexList = append(tmpIndexList[:i], tmpIndexList[i+1:]...) 
+ } else { + tmpIndexList = tmpIndexList[:i] + } + + } + } + resultIndex = mergeAdvices(resultIndex, tmp) + } + + return resultIndex +} + +// buildJoinIndex 检查Join中使用的库表是否需要添加索引并给予索引建议 +func (idxAdv *IndexAdvisor) buildJoinIndex(meta common.Meta) []IndexInfo { + var indexes []IndexInfo + for _, IndexCols := range idxAdv.joinCond { + // 如果该列的库表为join condition中需要添加索引的库表 + indexColsList := make(map[string]map[string][]*common.Column) + for _, col := range IndexCols { + mergeIndex(indexColsList, col) + + } + + if common.Config.TestDSN.Disable || common.Config.OnlineDSN.Disable { + indexes = mergeAdvices(indexes, idxAdv.buildIndexWithNoEnv(indexColsList)...) + continue + } + + indexes = mergeAdvices(indexes, idxAdv.buildIndex(indexColsList)...) + } + return indexes +} + +// buildIndex 尽可能的将 map[string]map[string][]*common.Column 转换成 []IndexInfo +// 此处不判断索引是否重复 +func (idxAdv *IndexAdvisor) buildIndex(idxList map[string]map[string][]*common.Column) []IndexInfo { + var indexes []IndexInfo + for db, tbs := range idxList { + for tb, cols := range tbs { + + // 单个索引中含有的列收 config 中参数限制 + if len(cols) > common.Config.MaxIdxColsCount { + cols = cols[:common.Config.MaxIdxColsCount] + } + + var colNames []string + for _, col := range cols { + if col.DB == "" || col.Table == "" { + common.Log.Warn("can not get the meta info of column '%s'", col.Name) + continue + } + colNames = append(colNames, col.Name) + } + + if len(colNames) == 0 { + continue + } + + idxName := "idx_" + strings.Join(colNames, "_") + + // 索引名称最大长度64 + if len(idxName) > 64 { + common.Log.Warn("index '%s' name large than 64", idxName) + idxName = strings.TrimRight(idxName[:64], "_") + } + + alterSQL := fmt.Sprintf("alter table `%s`.`%s` add index `%s` (`%s`)", idxAdv.vEnv.RealDB(db), tb, + idxName, strings.Join(colNames, "`,`")) + + indexes = append(indexes, IndexInfo{ + Name: idxName, + Database: idxAdv.vEnv.RealDB(db), + Table: tb, + DDL: alterSQL, + ColumnDetails: cols, + }) + } + } + return indexes +} + +// 
buildIndexWithNoEnv 忽略原数据,给予最基础的索引 +func (idxAdv *IndexAdvisor) buildIndexWithNoEnv(indexList map[string]map[string][]*common.Column) []IndexInfo { + // 如果不获取数据库原信息,则不去判断索引是否重复,且只给单列加索引 + var indexes []IndexInfo + for _, tableIndex := range indexList { + for _, indexCols := range tableIndex { + for _, col := range indexCols { + if col.Table == "" { + common.Log.Warn("can not get the meta info of column '%s'", col.Name) + continue + } + idxName := "idx_" + col.Name + // 库、表、列名需要用反撇转义 + alterSQL := fmt.Sprintf("alter table `%s`.`%s` add index `%s` (`%s`)", idxAdv.vEnv.RealDB(col.DB), col.Table, idxName, col.Name) + if col.DB == "" { + alterSQL = fmt.Sprintf("alter table `%s` add index `%s` (`%s`)", col.Table, idxName, col.Name) + } + + indexes = append(indexes, IndexInfo{ + Name: idxName, + Database: idxAdv.vEnv.RealDB(col.DB), + Table: col.Table, + DDL: alterSQL, + ColumnDetails: []*common.Column{col}, + }) + } + + } + } + return indexes +} + +// mergeIndex 将索引用到的列去重后合并到一起 +func mergeIndex(idxList map[string]map[string][]*common.Column, column *common.Column) { + db := column.DB + tb := column.Table + if idxList[db] == nil { + idxList[db] = make(map[string][]*common.Column) + } + if idxList[db][tb] == nil { + idxList[db][tb] = make([]*common.Column, 0) + } + + // 去除重复列Append + exist := false + for _, cl := range idxList[db][tb] { + if cl.Name == column.Name { + exist = true + } + } + if !exist { + idxList[db][tb] = append(idxList[db][tb], column) + } +} + +// CompleteColumnsInfo 补全索引可能会用到列的所属库名、表名等信息 +func CompleteColumnsInfo(stmt sqlparser.Statement, cols []*common.Column, env *env.VirtualEnv) []*common.Column { + // 如果传过来的列是空的,没必要跑逻辑 + if len(cols) == 0 { + return cols + } + + // 从Ast中拿到DBStructure,包含所有表的相关信息 + dbs := ast.GetMeta(stmt, nil) + + // 此处生成的meta信息中不应该含有""db的信息,若DB为空则认为是已传入的db为默认db并进行信息补全 + // BUG Fix: + // 修补dbs中空DB的导致后续补全列信息时无法获取正确table名称的问题 + if _, ok := dbs[""]; ok { + dbs[env.Database] = dbs[""] + delete(dbs, "") + } + + tableCount := 0 + for db := 
range dbs { + for tb := range dbs[db].Table { + if tb != "" { + tableCount++ + } + } + } + + var noEnvTmp []*common.Column + for _, col := range cols { + for db := range dbs { + // 对每一列进行比对,将别名转换为正确的名称 + find := false + for _, tb := range dbs[db].Table { + for _, tbAlias := range tb.TableAliases { + if col.Table != "" && col.Table == tbAlias { + common.Log.Debug("column '%s' prefix change: %s --> %s", col.Name, col.Table, tb.TableName) + find = true + col.Table = tb.TableName + col.DB = db + break + } + } + if find { + break + } + + } + + // 如果不依赖env环境,利用ast中包含的信息推理列的库表信息 + if common.Config.TestDSN.Disable { + if tableCount == 1 { + for _, tb := range dbs[db].Table { + col.Table = tb.TableName + + // 因为tableMeta是按照库表组织的树状结构,db变量贯穿全局 + // 只有在最终赋值前才能根据逻辑变更补全 + if db == "" { + db = env.Database + } + col.DB = db + } + } + + // 如果SQL中含有的表大于一个,则使用的列中必须含有前缀,不然无法判断该列属于哪个表 + // 如果某一列未含有前缀信息,则认为两张表中都含有该列,需要由人去判断 + if tableCount > 1 { + if col.Table == "" { + for _, tb := range dbs[db].Table { + if tb.TableName == "" { + common.Log.Warn("can not get the meta info of column '%s'", col.Name) + } + + if db == "" { + db = env.RealDB(env.Database) + } + col.Table = tb.TableName + col.DB = db + + tmp := *col + tmp.Table = tb.TableName + tmp.DB = db + + noEnvTmp = append(noEnvTmp, &tmp) + } + } + + if col.DB == "" { + if db == "" { + db = env.Database + } + col.DB = db + } + } + + break + } + + // 将已经获取到正确表信息的列信息带入到env中,利用show columns where table 获取库表信息 + // 此出会传入之前从ast中,该 db 下获取的所有表来作为where限定条件, + // 防止与SQL无关的库表信息干扰准确性 + // 此处传入的是测试环境,DB是经过变换的,所以在寻找列名的时候需要将DB名称转换成测试环境中经过hash的DB名称 + // 不然会找不到col的信息 + realCols, err := env.FindColumn(col.Name, env.DBHash(db), dbs.Tables(db)...) 
+ if err != nil { + common.Log.Warn("%v", err) + continue + } + + // 对比 column 信息中的表名与从 env 中获取的库表名的一致性 + for _, realCol := range realCols { + if col.Name == realCol.Name { + // 如果查询到了列名一致,但从ast中获取的列的前缀与env中的表信息不符 + // 1.存在一个同名列,但不同表,该情况下忽略 + // 2.存在一个未正确转换的别名(如表名为),该情况下修正,大概率是正确的 + if col.Table != "" && col.Table != realCol.Table { + has, _ := env.FindColumn(col.Name, env.DBHash(db), col.Table) + if len(has) > 0 { + realCol = has[0] + } + } + + col.DataType = realCol.DataType + col.Table = realCol.Table + col.DB = env.RealDB(realCol.DB) + col.Character = realCol.Character + col.Collation = realCol.Collation + + } + } + } + + } + + // 如果不依赖env环境,将可能存在的列也加入到索引预处理列表中 + if common.Config.TestDSN.Disable { + cols = append(cols, noEnvTmp...) + } + + return cols +} + +// calcCardinality 计算每一列的散粒度 +// 这个函数需要在补全列的库表信息之后再调用,否则无法确定要计算列的归属 +func (idxAdv *IndexAdvisor) calcCardinality(cols []*common.Column) []*common.Column { + common.Log.Debug("Enter: calcCardinality(), Caller: %s", common.Caller()) + tmpDB := *idxAdv.vEnv + for _, col := range cols { + // 补全对应列的库->表->索引信息到IndexMeta + // 这将在后面用于判断某一列是否为主键或单列唯一索引,快速返回散粒度 + if col.DB == "" { + col.DB = idxAdv.vEnv.Database + } + realDB := idxAdv.vEnv.DBHash(col.DB) + if idxAdv.IndexMeta[realDB] == nil { + idxAdv.IndexMeta[realDB] = make(map[string]*database.TableIndexInfo) + } + + if idxAdv.IndexMeta[realDB][col.Table] == nil { + tmpDB.Database = realDB + indexInfo, err := tmpDB.ShowIndex(col.Table) + if err != nil { + // 如果是不存在的表就会报错,报错的可能性有三个: + // 1.数据库错误 2.表不存在 3.临时表 + // 而这三种错误都是不需要在这一层关注的,直接跳过 + common.Log.Debug("calcCardinality error: %v", err) + continue + } + + // 将获取的索引信息以db.tb维度组织到IndexMeta中 + idxAdv.IndexMeta[realDB][col.Table] = indexInfo + } + + // 检查对应列是否为主键或单列唯一索引,如果满足直接返回1,不再重复计算,提高效率 + // 多列复合唯一索引不能跳过计算,单列普通索引不能跳过计算 + for _, index := range idxAdv.IndexMeta[realDB][col.Table].IdxRows { + // 根据索引的名称判断该索引包含的列数,列数大于1即为复合索引 + columnCount := len(idxAdv.IndexMeta[realDB][col.Table].FindIndex(database.IndexKeyName, 
index.KeyName)) + if col.Name == index.ColumnName { + // 主键、唯一键 无需计算散粒度 + if (index.KeyName == "PRIMARY" || index.NonUnique == 0) && columnCount == 1 { + common.Log.Debug("column '%s' is PK or UK, no need to calculate cardinality.", col.Name) + col.Cardinality = 1 + break + } + } + + } + + // 给非 PRIMARY、UNIQUE 的列计算散粒度 + if col.Cardinality != 1 { + col.Cardinality = idxAdv.vEnv.ColumnCardinality(col.Table, col.Name) + } + } + + return cols +} + +// Format 用于格式化输出索引建议 +func (idxAdvs IndexAdvises) Format() map[string]Rule { + rulesMap := make(map[string]Rule) + number := 1 + rules := make(map[string]*Rule) + sqls := make(map[string][]string) + + for _, advise := range idxAdvs { + advKey := advise.Database + advise.Table + + if _, ok := sqls[advKey]; !ok { + sqls[advKey] = make([]string, 0) + } + + sqls[advKey] = append(sqls[advKey], advise.DDL) + + if _, ok := rules[advKey]; !ok { + summary := fmt.Sprintf("为%s库的%s表添加索引", advise.Database, advise.Table) + if advise.Database == "" { + summary = fmt.Sprintf("为%s表添加索引", advise.Table) + } + + rules[advKey] = &Rule{ + Summary: summary, + Content: "", + Severity: "L2", + } + } + + for _, col := range advise.ColumnDetails { + // 为了更好地显示效果 + if common.Config.Sampling { + cardinal := fmt.Sprintf("%0.2f", col.Cardinality*100) + if cardinal != "0.00" { + rules[advKey].Content += fmt.Sprintf("为列%s添加索引,散粒度为: %s%%; ", + col.Name, cardinal) + } + } else { + rules[advKey].Content += fmt.Sprintf("为列%s添加索引;", col.Name) + } + } + // 清理多余的标点 + rules[advKey].Content = strings.Trim(rules[advKey].Content, common.Config.Delimiter) + } + + for adv := range rules { + key := fmt.Sprintf("IDX.%03d", number) + ddl := ast.MergeAlterTables(sqls[adv]...) 
+ // 由于传入合并的SQL都是一张表的,所以一定只会输出一条ddl语句 + for _, v := range ddl { + rules[adv].Case = v + } + rulesMap[key] = *rules[adv] + + number++ + } + + return rulesMap +} + +// HeuristicCheck 依赖数据字典的启发式检查 +// IndexAdvisor会构建测试环境和数据字典,所以放在这里实现 +func (idxAdv *IndexAdvisor) HeuristicCheck(q Query4Audit) map[string]Rule { + var rule Rule + heuristicSuggest := make(map[string]Rule) + if common.Config.OnlineDSN.Disable && common.Config.TestDSN.Disable { + return heuristicSuggest + } + + ruleFuncs := []func(*IndexAdvisor) Rule{ + (*IndexAdvisor).RuleImplicitConversion, // ARG.003 + // (*IndexAdvisor).RuleImpossibleOuterJoin, // TODO: JOI.003, JOI.004 + (*IndexAdvisor).RuleGroupByConst, // CLA.004 + (*IndexAdvisor).RuleOrderByConst, // CLA.005 + (*IndexAdvisor).RuleUpdatePrimaryKey, // CLA.016 + } + + for _, f := range ruleFuncs { + rule = f(idxAdv) + if rule.Item != "OK" { + heuristicSuggest[rule.Item] = rule + } + } + return heuristicSuggest +} + +// DuplicateKeyChecker 对所有用到的库表检查是否存在重复索引 +func DuplicateKeyChecker(conn *database.Connector, databases ...string) map[string]Rule { + common.Log.Debug("Enter: DuplicateKeyChecker, Caller: %s", common.Caller()) + // 复制一份online connector,防止环境切换影响其他功能的使用 + tmpOnline := *conn + ruleMap := make(map[string]Rule) + number := 1 + + // 错误处理,用于汇总所有的错误 + funcErrCheck := func(err error) { + if err != nil { + if sug, ok := ruleMap["ERR.003"]; ok { + sug.Content += fmt.Sprintf("; %s", err.Error()) + } else { + ruleMap["ERR.003"] = Rule{ + Item: "ERR.003", + Severity: "L8", + Content: err.Error(), + } + } + } + } + + // 不指定DB的时候检查online dsn中的DB + if len(databases) == 0 { + databases = append(databases, tmpOnline.Database) + } + + for _, db := range databases { + // 获取所有的表 + tmpOnline.Database = db + tables, err := tmpOnline.ShowTables() + + if err != nil { + funcErrCheck(err) + if !common.Config.DryRun { + return ruleMap + } + } + + for _, tb := range tables { + // 获取表中所有的索引 + idxMap := make(map[string][]*common.Column) + idxInfo, err := 
tmpOnline.ShowIndex(tb) + if err != nil { + funcErrCheck(err) + if !common.Config.DryRun { + return ruleMap + } + } + + // 枚举所有的索引信息,提取用到的列 + for _, idx := range idxInfo.IdxRows { + if _, ok := idxMap[idx.KeyName]; !ok { + idxMap[idx.KeyName] = make([]*common.Column, 0) + for _, col := range idxInfo.FindIndex(database.IndexKeyName, idx.KeyName) { + idxMap[idx.KeyName] = append(idxMap[idx.KeyName], &common.Column{ + Name: col.ColumnName, + Table: tb, + DB: db, + }) + } + } + } + + // 对索引进行重复检查 + hasDup := false + content := "" + + for k1, cl1 := range idxMap { + for k2, cl2 := range idxMap { + if k1 != k2 && common.IsColsPart(cl1, cl2) { + hasDup = true + col1Str := common.JoinColumnsName(cl1, ", ") + col2Str := common.JoinColumnsName(cl2, ", ") + content += fmt.Sprintf("索引%s(%s)与%s(%s)重复;", k1, col1Str, k2, col2Str) + common.Log.Debug(" %s.%s has duplicate index %s(%s) <--> %s(%s)", db, tb, k1, col1Str, k2, col2Str) + } + } + delete(idxMap, k1) + } + + // TODO 重复索引检查添加对约束及索引的判断,提供重复索引的删除功能 + if hasDup { + tmpOnline.Database = db + ddl, _ := tmpOnline.ShowCreateTable(tb) + key := fmt.Sprintf("IDX.%03d", number) + ruleMap[key] = Rule{ + Item: key, + Severity: "L2", + Summary: fmt.Sprintf("%s.%s存在重复的索引", db, tb), + Content: content, + Case: ddl, + } + number++ + } + } + } + + return ruleMap +} diff --git a/advisor/index_test.go b/advisor/index_test.go new file mode 100644 index 00000000..a4bf8cc4 --- /dev/null +++ b/advisor/index_test.go @@ -0,0 +1,464 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package advisor + +import ( + "fmt" + "os" + "testing" + + "github.com/XiaoMi/soar/common" + "github.com/XiaoMi/soar/env" + + "github.com/kr/pretty" + "vitess.io/vitess/go/vt/sqlparser" +) + +func init() { + common.BaseDir = common.DevPath + err := common.ParseConfig("") + if err != nil { + fmt.Println(err.Error()) + } + vEnv, rEnv := env.BuildEnv() + if _, err = vEnv.Version(); err != nil { + fmt.Println(err.Error(), ", By pass all advisor test cases") + os.Exit(0) + } + + if _, err := rEnv.Version(); err != nil { + fmt.Println(err.Error(), ", By pass all advisor test cases") + os.Exit(0) + } +} + +// ARG.003 +func TestRuleImplicitConversion(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + dsn := common.Config.OnlineDSN + common.Config.OnlineDSN = common.Config.TestDSN + vEnv, rEnv := env.BuildEnv() + defer vEnv.CleanUp() + + initSQLs := []string{ + `CREATE TABLE t1 (id int, title varchar(255) CHARSET utf8 COLLATE utf8_general_ci);`, + `CREATE TABLE t2 (id int, title varchar(255) CHARSET utf8mb4);`, + `CREATE TABLE t3 (id int, title varchar(255) CHARSET utf8 COLLATE utf8_bin);`, + } + for _, sql := range initSQLs { + vEnv.BuildVirtualEnv(rEnv, sql) + } + + sqls := []string{ + "SELECT * FROM t1 WHERE title >= 60;", + "SELECT * FROM t1, t2 WHERE t1.title = t2.title;", + "SELECT * FROM t1, t3 WHERE t1.title = t3.title;", + } + for _, sql := range sqls { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.RuleImplicitConversion() + if rule.Item != "ARG.003" { + t.Error("Rule not match:", rule, "Expect : ARG.003, SQL:", 
sql) + } + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) + common.Config.OnlineDSN = dsn +} + +// JOI.003 & JOI.004 +func TestRuleImpossibleOuterJoin(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select city_id, city, country from city left outer join country using(country_id) WHERE city.city_id=59 and country.country='Algeria'`, + `select city_id, city, country from city left outer join country using(country_id) WHERE country.country='Algeria'`, + `select city_id, city, country from city left outer join country on city.country_id=country.country_id WHERE city.city_id IS NULL`, + } + + vEnv, rEnv := env.BuildEnv() + defer vEnv.CleanUp() + + for _, sql := range sqls { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.RuleImpossibleOuterJoin() + if rule.Item != "JOI.003" && rule.Item != "JOI.004" { + t.Error("Rule not match:", rule, "Expect : JOI.003 || JOI.004") + } + } + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// GRP.001 +func TestIndexAdvisorRuleGroupByConst(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `select film_id, title from film where release_year='2006' group by release_year`, + `select film_id, title from film where release_year in ('2006') group by release_year`, + }, + { + // 反面的例子 + `select film_id, title from film where release_year in ('2006', '2007') group by release_year`, + }, + } + + vEnv, rEnv := env.BuildEnv() + defer vEnv.CleanUp() + + for _, sql := range sqls[0] { + stmt, syntaxErr := 
sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.RuleGroupByConst() + if rule.Item != "GRP.001" { + t.Error("Rule not match:", rule, "Expect : GRP.001") + } + } + } + } + + for _, sql := range sqls[1] { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.RuleGroupByConst() + if rule.Item != "OK" { + t.Error("Rule not match:", rule, "Expect : OK") + } + } + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.005 +func TestIndexAdvisorRuleOrderByConst(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `select film_id, title from film where release_year='2006' order by release_year;`, + `select film_id, title from film where release_year in ('2006') order by release_year;`, + }, + { + // 反面的例子 + `select film_id, title from film where release_year in ('2006', '2007') order by release_year;`, + }, + } + + vEnv, rEnv := env.BuildEnv() + defer vEnv.CleanUp() + + for _, sql := range sqls[0] { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", 
err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.RuleOrderByConst() + if rule.Item != "CLA.005" { + t.Error("Rule not match:", rule, "Expect : CLA.005") + } + } + } + } + + for _, sql := range sqls[1] { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.RuleOrderByConst() + if rule.Item != "OK" { + t.Error("Rule not match:", rule, "Expect : OK") + } + } + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +// CLA.016 +func TestRuleUpdatePrimaryKey(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := [][]string{ + { + `update film set film_id = 1 where title='a';`, + }, + { + // 反面的例子 + `select film_id, title from film where release_year in ('2006', '2007') order by release_year;`, + }, + } + + vEnv, rEnv := env.BuildEnv() + defer vEnv.CleanUp() + + for _, sql := range sqls[0] { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.RuleUpdatePrimaryKey() + if rule.Item != "CLA.016" { + t.Error("Rule not match:", rule.Item, "Expect : CLA.016") + } + } + } + } + + for _, sql := range sqls[1] { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if 
vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.RuleUpdatePrimaryKey() + if rule.Item != "OK" { + t.Error("Rule not match:", rule, "Expect : OK") + } + } + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +func TestIndexAdvise(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + vEnv, rEnv := env.BuildEnv() + defer vEnv.CleanUp() + + for _, sql := range common.TestSQLs { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.IndexAdvise().Format() + if len(rule) > 0 { + pretty.Println(rule) + } + } + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +func TestIndexAdviseNoEnv(t *testing.T) { + common.Config.OnlineDSN.Disable = true + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + vEnv, rEnv := env.BuildEnv() + defer vEnv.CleanUp() + + for _, sql := range common.TestSQLs { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + if idxAdvisor != nil { + rule := idxAdvisor.IndexAdvise().Format() + if len(rule) > 0 { + pretty.Println(rule) + } + } + } + } + common.Log.Debug("Exiting function: %s", common.GetFunctionName()) +} + +func TestDuplicateKeyChecker(t 
*testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + _, rEnv := env.BuildEnv() + rule := DuplicateKeyChecker(rEnv, "sakila") + if len(rule) != 0 { + t.Errorf("got rules: %s", pretty.Sprint(rule)) + } +} + +func TestMergeAdvices(t *testing.T) { + dst := []IndexInfo{ + { + Name: "test", + Database: "db", + Table: "tb", + ColumnDetails: []*common.Column{ + { + Name: "test", + }, + }, + }, + } + + src := dst[0] + + advise := mergeAdvices(dst, src) + if len(advise) != 1 { + t.Error(pretty.Sprint(advise)) + } +} + +func TestIdxColsTypeCheck(t *testing.T) { + common.Log.Debug("Entering function: %s", common.GetFunctionName()) + sqls := []string{ + `select city_id, city, country from city left outer join country using(country_id) WHERE city.city_id=59 and country.country='Algeria'`, + } + + vEnv, rEnv := env.BuildEnv() + defer vEnv.CleanUp() + + for _, sql := range sqls { + stmt, syntaxErr := sqlparser.Parse(sql) + if syntaxErr != nil { + common.Log.Critical("Syntax Error: %v, SQL: %s", syntaxErr, sql) + } + + q := &Query4Audit{Query: sql, Stmt: stmt} + + if vEnv.BuildVirtualEnv(rEnv, q.Query) { + idxAdvisor, err := NewAdvisor(vEnv, *rEnv, *q) + if err != nil { + t.Error("NewAdvisor Error: ", err, "SQL: ", sql) + } + + idxList := []IndexInfo{ + { + Name: "idx_fk_country_id", + Database: "sakila", + Table: "city", + ColumnDetails: []*common.Column{ + { + Name: "country_id", + Character: "utf8", + DataType: "varchar(3000)", + }, + }, + }, + } + + if idxAdvisor != nil { + rule := idxAdvisor.idxColsTypeCheck(idxList) + if !(len(rule) > 0 && rule[0].DDL == "alter table `sakila`.`city` add index `idx_country_id` (`country_id`(N))") { + t.Error(pretty.Sprint(rule)) + } + } + } + } +} diff --git a/advisor/rules.go b/advisor/rules.go new file mode 100644 index 00000000..97a60e8e --- /dev/null +++ b/advisor/rules.go @@ -0,0 +1,1372 @@ +/* + * Copyright 2018 Xiaomi, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package advisor + +import ( + "encoding/json" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/XiaoMi/soar/ast" + "github.com/XiaoMi/soar/common" + + "github.com/kr/pretty" + "github.com/percona/go-mysql/query" + tidb "github.com/pingcap/tidb/ast" + "vitess.io/vitess/go/vt/sqlparser" +) + +// Query4Audit 待评审的SQL结构体,由原SQL和其对应的抽象语法树组成 +type Query4Audit struct { + Query string // 查询语句 + Stmt sqlparser.Statement // 通过Vitess解析出的抽象语法树 + TiStmt []tidb.StmtNode // 通过TiDB解析出的抽象语法树 +} + +// NewQuery4Audit return a struct for Query4Audit +func NewQuery4Audit(sql string, options ...string) (*Query4Audit, error) { + var err, tiErr error + var charset string + var collation string + + if len(options) > 0 { + charset = options[0] + } + + if len(options) > 1 { + collation = options[1] + } + + q := &Query4Audit{Query: sql} + // vitess语法解析 + q.Stmt, err = sqlparser.Parse(sql) + + // TiDB 语法解析仅作为补充,不检查语法错误 + // TODO: charset, collation + q.TiStmt, tiErr = ast.TiParse(sql, charset, collation) + if tiErr != nil { + common.Log.Warn("NewQuery4Audit ast.Tiparse Error: %s", tiErr.Error()) + } + return q, err +} + +// Rule 评审规则元数据结构 +type Rule struct { + Item string `json:"Item"` // 规则代号 + Severity string `json:"Severity"` // 危险等级:L[0-8], 数字越大表示级别越高 + Summary string `json:"Summary"` // 规则摘要 + Content string `json:"Content"` // 规则解释 + Case string `json:"Case"` // SQL示例 + Position int `json:"Position"` // 
建议所处SQL字符位置,默认0表示全局建议 + Func func(*Query4Audit) Rule `json:"-"` // 函数名 +} + +/* + +## Item单词缩写含义 + +* ALI Alias(AS) +* ALT Alter +* ARG Argument +* CLA Classic +* COL Column +* DIS Distinct +* ERR Error, 特指MySQL执行返回的报错信息, ERR.000为vitess语法错误,ERR.001为执行错误,ERR.002为EXPLAIN错误 +* EXP Explain, 由explain模块给 +* FUN Function +* IDX Index, 由index模块给 +* JOI Join +* KEY Key +* KWR Keyword +* LCK Lock +* LIT Literal +* PRO Profiling, 由profiling模块给 +* RES Result +* SEC Security +* STA Standard +* SUB Subquery +* TBL Table +* TRA Trace, 由trace模块给 + +*/ + +// HeuristicRules 启发式规则列表 +var HeuristicRules map[string]Rule + +func init() { + HeuristicRules = map[string]Rule{ + "OK": { + Item: "OK", + Severity: "L0", + Summary: "✔️", // heavy check mark unicode + Content: `✔️`, + Case: "✔️", + Func: (*Query4Audit).RuleOK, + }, + "ALI.001": { + Item: "ALI.001", + Severity: "L0", + Summary: "建议使用AS关键字显示声明一个别名", + Content: `在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。`, + Case: "select name from tbl t1 where id < 1000", + Func: (*Query4Audit).RuleImplicitAlias, + }, + "ALI.002": { + Item: "ALI.002", + Severity: "L8", + Summary: "不建议给列通配符'*'设置别名", + Content: `例: "SELECT tbl.* col1, col2"上面这条SQL给列通配符设置了别名,这样的SQL可能存在逻辑错误。您可能意在查询col1, 但是代替它的是重命名的是tbl的最后一列。`, + Case: "select tbl.* as c1,c2,c3 from tbl where id < 1000", + Func: (*Query4Audit).RuleStarAlias, + }, + "ALI.003": { + Item: "ALI.003", + Severity: "L1", + Summary: "别名不要与表或列的名字相同", + Content: `表或列的别名与其真实名称相同, 这样的别名会使得查询更难去分辨。`, + Case: "select name from tbl as tbl where id < 1000", + Func: (*Query4Audit).RuleSameAlias, + }, + "ALT.001": { + Item: "ALT.001", + Severity: "L4", + Summary: "修改表的默认字符集不会改表各个字段的字符集", + Content: `很多初学者会将ALTER TABLE tbl_name [DEFAULT] CHARACTER SET 'UTF8'误认为会修改所有字段的字符集,但实际上它只会影响后续新增的字段不会改表已有字段的字符集。如果想修改整张表所有字段的字符集建议使用ALTER TABLE tbl_name CONVERT TO CHARACTER SET charset_name;`, + Case: "ALTER TABLE tbl_name CONVERT TO CHARACTER SET charset_name;", + Func: (*Query4Audit).RuleAlterCharset, + }, + 
"ALT.002": { + Item: "ALT.002", + Severity: "L2", + Summary: "同一张表的多条ALTER请求建议合为一条", + Content: `每次表结构变更对线上服务都会产生影响,即使是能够通过在线工具进行调整也请尽量通过合并ALTER请求的试减少操作次数。`, + Case: "ALTER TABLE tbl ADD COLUMN col int, ADD INDEX idx_col (`col`);", + Func: (*Query4Audit).RuleOK, // 该建议在indexAdvisor中给 + }, + "ALT.003": { + Item: "ALT.003", + Severity: "L0", + Summary: "删除列为高危操作,操作前请注意检查业务逻辑是否还有依赖", + Content: `如业务逻辑依赖未完全消除,列被删除后可能导致数据无法写入或无法查询到已删除列数据导致程序异常的情况。这种情况下即使通过备份数据回滚也会丢失用户请求写入的数据。`, + Case: "ALTER TABLE tbl DROP COLUMN col;", + Func: (*Query4Audit).RuleAlterDropColumn, + }, + "ALT.004": { + Item: "ALT.004", + Severity: "L0", + Summary: "删除主键和外键为高危操作,操作前请与DBA确认影响", + Content: `主键和外键为关系型数据库中两种重要约束,删除已有约束会打破已有业务逻辑,操作前请业务开发与DBA确认影响,三思而行。`, + Case: "ALTER TABLE tbl DROP PRIMARY KEY;", + Func: (*Query4Audit).RuleAlterDropKey, + }, + "ARG.001": { + Item: "ARG.001", + Severity: "L4", + Summary: "不建议使用前项通配符查找", + Content: `例如“%foo”,查询参数有一个前项通配符的情况无法使用已有索引。`, + Case: "select c1,c2,c3 from tbl where name like '%foo'", + Func: (*Query4Audit).RulePrefixLike, + }, + "ARG.002": { + Item: "ARG.002", + Severity: "L1", + Summary: "没有通配符的LIKE查询", + Content: `不包含通配符的LIKE查询可能存在逻辑错误,因为逻辑上它与等值查询相同。`, + Case: "select c1,c2,c3 from tbl where name like 'foo'", + Func: (*Query4Audit).RuleEqualLike, + }, + "ARG.003": { + Item: "ARG.003", + Severity: "L4", + Summary: "参数比较包含隐式转换,无法使用索引", + Content: "隐式类型转换有无法命中索引的风险,在高并发、大数据量的情况下,命不中索引带来的后果非常严重。", + Case: "SELECT * FROM sakila.film WHERE length >= '60';", + Func: (*Query4Audit).RuleOK, // 该建议在indexAdvisor中给 + }, + "ARG.004": { + Item: "ARG.004", + Severity: "L4", + Summary: "IN (NULL)/NOT IN (NULL)永远非真", + Content: "正确的作法是col IN ('val1', 'val2', 'val3') OR col IS NULL", + Case: "SELECT * FROM sakila.film WHERE length >= '60';", + Func: (*Query4Audit).RuleIn, + }, + "ARG.005": { + Item: "ARG.005", + Severity: "L1", + Summary: "IN要慎用,元素过多会导致全表扫描", + Content: ` 如:select id from t where num in(1,2,3)对于连续的数值,能用BETWEEN就不要用IN了:select id from t where num 
between 1 and 3。而当IN值过多时MySQL也可能会进入全表扫描导致性能急剧下降。`, + Case: "select id from t where num in(1,2,3)", + Func: (*Query4Audit).RuleIn, + }, + "ARG.006": { + Item: "ARG.006", + Severity: "L1", + Summary: "应尽量避免在WHERE子句中对字段进行NULL值判断", + Content: `使用IS NULL或IS NOT NULL将可能导致引擎放弃使用索引而进行全表扫描,如:select id from t where num is null;可以在num上设置默认值0,确保表中num列没有null值,然后这样查询: select id from t where num=0;`, + Case: "select id from t where num is null", + Func: (*Query4Audit).RuleIsNullIsNotNull, + }, + "ARG.007": { + Item: "ARG.007", + Severity: "L3", + Summary: "避免使用模式匹配", + Content: `性能问题是使用模式匹配操作符的最大缺点。使用LIKE或正则表达式进行模式匹配进行查询的另一个问题,是可能会返回意料之外的结果。最好的方案就是使用特殊的搜索引擎技术来替代SQL,比如Apache Lucene。另一个可选方案是将结果保存起来从而减少重复的搜索开销。如果一定要使用SQL,请考虑在MySQL中使用像FULLTEXT索引这样的第三方扩展。但更广泛地说,您不一定要使用SQL来解决所有问题。`, + Case: "select c_id,c2,c3 from tbl where c2 like 'test%'", + Func: (*Query4Audit).RulePatternMatchingUsage, + }, + "ARG.008": { + Item: "ARG.008", + Severity: "L1", + Summary: "OR查询索引列时请尽量使用IN谓词", + Content: `IN-list谓词可以用于索引检索,并且优化器可以对IN-list进行排序,以匹配索引的排序序列,从而获得更有效的检索。请注意,IN-list必须只包含常量,或在查询块执行期间保持常量的值,例如外引用。`, + Case: "SELECT c1,c2,c3 FROM tbl WHERE c1 = 14 OR c1 = 17", + Func: (*Query4Audit).RuleORUsage, + }, + "ARG.009": { + Item: "ARG.009", + Severity: "L1", + Summary: "引号中的字符串开头或结尾包含空格", + Content: `如果VARCHAR列的前后存在空格将可能引起逻辑问题,如在MySQL 5.5中'a'和'a '可能会在查询中被认为是相同的值。`, + Case: "SELECT 'abc '", + Func: (*Query4Audit).RuleSpaceWithQuote, + }, + "ARG.010": { + Item: "ARG.010", + Severity: "L1", + Summary: "不要使用hint,如sql_no_cache,force index,ignore key,straight join等", + Content: `hint是用来强制SQL按照某个执行计划来执行,但随着数据量变化我们无法保证自己当初的预判是正确的。`, + Case: "SELECT 'abc '", + Func: (*Query4Audit).RuleHint, + }, + "ARG.011": { + Item: "ARG.011", + Severity: "L3", + Summary: "不要使用负向查询,如:NOT IN/NOT LIKE", + Content: `请尽量不要使用负向查询,这将导致全表扫描,对查询性能影响较大。`, + Case: "select id from t where num not in(1,2,3);", + Func: (*Query4Audit).RuleNot, + }, + "CLA.001": { + Item: "CLA.001", + Severity: "L4", + Summary: "最外层SELECT未指定WHERE条件", + 
Content: `SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。`, + Case: "select id from tbl", + Func: (*Query4Audit).RuleNoWhere, + }, + "CLA.002": { + Item: "CLA.002", + Severity: "L3", + Summary: "不建议使用ORDER BY RAND()", + Content: `ORDER BY RAND()是从结果集中检索随机行的一种非常低效的方法,因为它会对整个结果进行排序并丢弃其大部分数据。`, + Case: "select name from tbl where id < 1000 order by rand(number)", + Func: (*Query4Audit).RuleOrderByRand, + }, + + "CLA.003": { + Item: "CLA.003", + Severity: "L2", + Summary: "不建议使用带OFFSET的LIMIT查询", + Content: `使用LIMIT和OFFSET对结果集分页的复杂度是O(n^2),并且会随着数据增大而导致性能问题。采用“书签”扫描的方法实现分页效率更高。`, + Case: "select c1,c2 from tbl where name=xx order by number limit 1 offset 20", + Func: (*Query4Audit).RuleOffsetLimit, + }, + "CLA.004": { + Item: "CLA.004", + Severity: "L2", + Summary: "不建议对常量进行GROUP BY", + Content: `GROUP BY 1 表示按第一列进行GROUP BY。如果在GROUP BY子句中使用数字,而不是表达式或列名称,当查询列顺序改变时,可能会导致问题。`, + Case: "select col1,col2 from tbl group by 1", + Func: (*Query4Audit).RuleGroupByConst, + }, + "CLA.005": { + Item: "CLA.005", + Severity: "L2", + Summary: "ORDER BY常数列没有任何意义", + Content: `SQL逻辑上可能存在错误; 最多只是一个无用的操作,不会更改查询结果。`, + Case: "select id from test where id=1 order by id", + Func: (*Query4Audit).RuleOrderByConst, + }, + "CLA.006": { + Item: "CLA.006", + Severity: "L4", + Summary: "在不同的表中GROUP BY或ORDER BY", + Content: `这将强制使用临时表和filesort,可能产生巨大性能隐患,并且可能消耗大量内存和磁盘上的临时空间。`, + Case: "select tb1.col, tb2.col from tb1, tb2 where id=1 group by tb1.col, tb2.col", + Func: (*Query4Audit).RuleDiffGroupByOrderBy, + }, + "CLA.007": { + Item: "CLA.007", + Severity: "L2", + Summary: "ORDER BY语句对多个不同条件使用不同方向的排序无法使用索引", + Content: `ORDER BY子句中的所有表达式必须按统一的ASC或DESC方向排序,以便利用索引。`, + Case: "select c1,c2,c3 from t1 where c1='foo' order by c2 desc, c3 asc", + Func: (*Query4Audit).RuleMixOrderBy, + }, + "CLA.008": { + Item: "CLA.008", + Severity: "L2", + Summary: "请为GROUP BY显示添加ORDER BY条件", + Content: `默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, 
...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。`, + Case: "select c1,c2,c3 from t1 where c1='foo' group by c2", + Func: (*Query4Audit).RuleExplicitOrderBy, + }, + "CLA.009": { + Item: "CLA.009", + Severity: "L2", + Summary: "ORDER BY的条件为表达式", + Content: `当ORDER BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。`, + Case: "select description from film where title ='ACADEMY DINOSAUR' order by length-language_id;", + Func: (*Query4Audit).RuleOrderByExpr, + }, + "CLA.010": { + Item: "CLA.010", + Severity: "L2", + Summary: "GROUP BY的条件为表达式", + Content: `当GROUP BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。`, + Case: "select description from film where title ='ACADEMY DINOSAUR' GROUP BY length-language_id;", + Func: (*Query4Audit).RuleGroupByExpr, + }, + "CLA.011": { + Item: "CLA.011", + Severity: "L1", + Summary: "建议为表添加注释", + Content: `为表添加注释能够使得表的意义更明确,从而为日后的维护带来极大的便利。`, + Case: "CREATE TABLE `test1` (`ID` bigint(20) NOT NULL AUTO_INCREMENT,`c1` varchar(128) DEFAULT NULL,PRIMARY KEY (`ID`)) ENGINE=InnoDB DEFAULT CHARSET=utf8", + Func: (*Query4Audit).RuleTblCommentCheck, + }, + "CLA.012": { + Item: "CLA.012", + Severity: "L2", + Summary: "将复杂的裹脚布式查询分解成几个简单的查询", + Content: `SQL是一门极具表现力的语言,您可以在单个SQL查询或者单条语句中完成很多事情。但这并不意味着必须强制只使用一行代码,或者认为使用一行代码就搞定每个任务是个好主意。通过一个查询来获得所有结果的常见后果是得到了一个笛卡儿积。当查询中的两张表之间没有条件限制它们的关系时,就会发生这种情况。没有对应的限制而直接使用两张表进行联结查询,就会得到第一张表中的每一行和第二张表中的每一行的一个组合。每一个这样的组合就会成为结果集中的一行,最终您就会得到一个行数很多的结果集。重要的是要考虑这些查询很难编写、难以修改和难以调试。数据库查询请求的日益增加应该是预料之中的事。经理们想要更复杂的报告以及在用户界面上添加更多的字段。如果您的设计很复杂,并且是一个单一查询,要扩展它们就会很费时费力。不论对您还是项目来说,时间花在这些事情上面不值得。将复杂的意大利面条式查询分解成几个简单的查询。当您拆分一个复杂的SQL查询时,得到的结果可能是很多类似的查询,可能仅仅在数据类型上有所不同。编写所有的这些查询是很乏味的,因此,最好能够有个程序自动生成这些代码。SQL代码生成是一个很好的应用。尽管SQL支持用一行代码解决复杂的问题,但也别做不切实际的事情。`, + Case: "这是一条很长很长的SQL,案例略。", + Func: (*Query4Audit).RuleSpaghettiQueryAlert, + }, + /* + https://www.datacamp.com/community/tutorials/sql-tutorial-query + The HAVING Clause + The HAVING clause was originally added to SQL because the WHERE keyword could not 
be used with aggregate functions. HAVING is typically used with the GROUP BY clause to restrict the groups of returned rows to only those that meet certain conditions. However, if you use this clause in your query, the index is not used, which -as you already know- can result in a query that doesn't really perform all that well. + + If you’re looking for an alternative, consider using the WHERE clause. Consider the following queries: + + SELECT state, COUNT(*) + FROM Drivers + WHERE state IN ('GA', 'TX') + GROUP BY state + ORDER BY state + SELECT state, COUNT(*) + FROM Drivers + GROUP BY state + HAVING state IN ('GA', 'TX') + ORDER BY state + The first query uses the WHERE clause to restrict the number of rows that need to be summed, whereas the second query sums up all the rows in the table and then uses HAVING to throw away the sums it calculated. In these types of cases, the alternative with the WHERE clause is obviously the better one, as you don’t waste any resources. + + You see that this is not about limiting the result set, rather about limiting the intermediate number of records within a query. + + Note that the difference between these two clauses lies in the fact that the WHERE clause introduces a condition on individual rows, while the HAVING clause introduces a condition on aggregations or results of a selection where a single result, such as MIN, MAX, SUM,… has been produced from multiple rows. 
+ */ + "CLA.013": { + Item: "CLA.013", + Severity: "L3", + Summary: "不建议使用HAVING子句", + Content: `将查询的HAVING子句改写为WHERE中的查询条件,可以在查询处理期间使用索引。`, + Case: "SELECT s.c_id,count(s.c_id) FROM s where c = test GROUP BY s.c_id HAVING s.c_id <> '1660' AND s.c_id <> '2' order by s.c_id", + Func: (*Query4Audit).RuleHavingClause, + }, + "CLA.014": { + Item: "CLA.014", + Severity: "L2", + Summary: "删除全表时建议使用TRUNCATE替代DELETE", + Content: `删除全表时建议使用TRUNCATE替代DELETE`, + Case: "delete from tbl", + Func: (*Query4Audit).RuleNoWhere, + }, + "CLA.015": { + Item: "CLA.015", + Severity: "L4", + Summary: "UPDATE未指定WHERE条件", + Content: `UPDATE不指定WHERE条件一般是致命的,请您三思后行`, + Case: "update tbl set col=1", + Func: (*Query4Audit).RuleNoWhere, + }, + "CLA.016": { + Item: "CLA.016", + Severity: "L2", + Summary: "不要UPDATE主键", + Content: `主键是数据表中记录的唯一标识符,不建议频繁更新主键列,这将影响元数据统计信息进而影响正常的查询。`, + Case: "update tbl set col=1", + Func: (*Query4Audit).RuleOK, // 该建议在indexAdvisor中给 + }, + "CLA.017": { + Item: "CLA.017", + Severity: "L2", + Summary: "不建议使用存储过程、视图、触发器、临时表等", + Content: `这些功能的使用在一定程度上会使得程序难以调试和拓展,更没有移植性,且会极大的增加出现BUG的概率。`, + Case: "CREATE VIEW v_today (today) AS SELECT CURRENT_DATE;", + Func: (*Query4Audit).RuleForbiddenSyntax, + }, + "COL.001": { + Item: "COL.001", + Severity: "L1", + Summary: "不建议使用SELECT * 类型查询", + Content: `当表结构变更时,使用*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。`, + Case: "select * from tbl where id=1", + Func: (*Query4Audit).RuleSelectStar, + }, + "COL.002": { + Item: "COL.002", + Severity: "L2", + Summary: "INSERT未指定列名", + Content: `当表结构发生变更,如果INSERT或REPLACE请求不明确指定列名,请求的结果将会与预想的不同; 建议使用“INSERT INTO tbl(col1,col2)VALUES ...”代替。`, + Case: "insert into tbl values(1,'name')", + Func: (*Query4Audit).RuleInsertColDef, + }, + "COL.003": { + Item: "COL.003", + Severity: "L2", + Summary: "建议修改自增ID为无符号类型", + Content: `建议修改自增ID为无符号类型`, + Case: "create table test(`id` int(11) NOT NULL AUTO_INCREMENT)", + Func: (*Query4Audit).RuleAutoIncUnsigned, + }, + "COL.004": { + Item: "COL.004", + Severity: 
"L1", + Summary: "请为列添加默认值", + Content: `请为列添加默认值,如果是ALTER操作,请不要忘记将原字段的默认值写上。字段无默认值,当表较大时无法在线变更表结构。`, + Case: "CREATE TABLE tbl (col int) ENGINE=InnoDB;", + Func: (*Query4Audit).RuleAddDefaultValue, + }, + "COL.005": { + Item: "COL.005", + Severity: "L1", + Summary: "列未添加注释", + Content: `建议对表中每个列添加注释,来明确每个列在表中的含义及作用。`, + Case: "CREATE TABLE tbl (col int) ENGINE=InnoDB;", + Func: (*Query4Audit).RuleColCommentCheck, + }, + "COL.006": { + Item: "COL.006", + Severity: "L3", + Summary: "表中包含有太多的列", + Content: `表中包含有太多的列`, + Case: "CREATE TABLE tbl ( cols ....);", + Func: (*Query4Audit).RuleTooManyFields, + }, + "COL.008": { + Item: "COL.008", + Severity: "L1", + Summary: "可使用VARCHAR代替CHAR,VARBINARY代替BINARY", + Content: `为首先变长字段存储空间小,可以节省存储空间。其次对于查询来说,在一个相对较小的字段内搜索效率显然要高些。`, + Case: "create table t1(id int,name char(20),last_time date)", + Func: (*Query4Audit).RuleVarcharVSChar, + }, + "COL.009": { + Item: "COL.009", + Severity: "L2", + Summary: "建议使用精确的数据类型", + Content: `实际上,任何使用FLOAT、REAL或DOUBLE PRECISION数据类型的设计都有可能是反模式。大多数应用程序使用的浮点数的取值范围并不需要达到IEEE 754标准所定义的最大/最小区间。在计算总量时,非精确浮点数所积累的影响是严重的。使用SQL中的NUMERIC或DECIMAL类型来代替FLOAT及其类似的数据类型进行固定精度的小数存储。这些数据类型精确地根据您定义这一列时指定的精度来存储数据。尽可能不要使用浮点数。`, + Case: "CREATE TABLE tab2 (p_id BIGINT UNSIGNED NOT NULL,a_id BIGINT UNSIGNED NOT NULL,hours float not null,PRIMARY KEY (p_id, a_id))", + Func: (*Query4Audit).RuleImpreciseDataType, + }, + "COL.010": { + Item: "COL.010", + Severity: "L2", + Summary: "不建议使用ENUM数据类型", + Content: `ENUM定义了列中值的类型,使用字符串表示ENUM里的值时,实际存储在列中的数据是这些值在定义时的序数。因此,这列的数据是字节对齐的,当您进行一次排序查询时,结果是按照实际存储的序数值排序的,而不是按字符串值的字母顺序排序的。这可能不是您所希望的。没有什么语法支持从ENUM或者check约束中添加或删除一个值;您只能使用一个新的集合重新定义这一列。如果您打算废弃一个选项,您可能会为历史数据而烦恼。作为一种策略,改变元数据——也就是说,改变表和列的定义——应该是不常见的,并且要注意测试和质量保证。有一个更好的解决方案来约束一列中的可选值:创建一张检查表,每一行包含一个允许在列中出现的候选值;然后在引用新表的旧表上声明一个外键约束。`, + Case: "create table tab1(status ENUM('new','in progress','fixed'))", + Func: (*Query4Audit).RuleValuesInDefinition, + }, + // 这个建议从sqlcheck迁移来的,实际生产环境每条建表SQL都会给这条建议,看多了会不开心。 + "COL.011": { + Item: 
"COL.011", + Severity: "L0", + Summary: "当需要唯一约束时才使用NULL,仅当列不能有缺失值时才使用NOT NULL", + Content: `NULL和0是不同的,10乘以NULL还是NULL。NULL和空字符串是不一样的。将一个字符串和标准SQL中的NULL联合起来的结果还是NULL。NULL和FALSE也是不同的。AND、OR和NOT这三个布尔操作如果涉及NULL,其结果也让很多人感到困惑。当您将一列声明为NOT NULL时,也就是说这列中的每一个值都必须存在且是有意义的。使用NULL来表示任意类型不存在的空值。 当您将一列声明为NOT NULL时,也就是说这列中的每一个值都必须存在且是有意义的。`, + Case: "select c1,c2,c3 from tbl where c4 is null or c4 <> 1", + Func: (*Query4Audit).RuleNullUsage, + }, + "COL.012": { + Item: "COL.012", + Severity: "L5", + Summary: "BLOB和TEXT类型的字段不可设置为NULL", + Content: `BLOB和TEXT类型的字段不可设置为NULL`, + Case: "CREATE TABLE `tbl` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` longblob, PRIMARY KEY (`id`));", + Func: (*Query4Audit).RuleCantBeNull, + }, + "COL.013": { + Item: "COL.013", + Severity: "L4", + Summary: "TIMESTAMP类型未设置默认值", + Content: `TIMESTAMP类型未设置默认值`, + Case: "CREATE TABLE tbl( `id` bigint not null, `create_time` timestamp);", + Func: (*Query4Audit).RuleTimestampDefault, + }, + "COL.014": { + Item: "COL.014", + Severity: "L5", + Summary: "为列指定了字符集", + Content: `建议列与表使用同一个字符集,不要单独指定列的字符集。`, + Case: "CREATE TABLE `tb2` ( `id` int(11) DEFAULT NULL, `col` char(10) CHARACTER SET utf8 DEFAULT NULL)", + Func: (*Query4Audit).RuleColumnWithCharset, + }, + "COL.015": { + Item: "COL.015", + Severity: "L4", + Summary: "BLOB类型的字段不可指定默认值", + Content: `BLOB类型的字段不可指定默认值`, + Case: "CREATE TABLE `tbl` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` blob NOT NULL DEFAULT '', PRIMARY KEY (`id`));", + Func: (*Query4Audit).RuleBlobDefaultValue, + }, + "COL.016": { + Item: "COL.016", + Severity: "L1", + Summary: "整型定义建议采用INT(10)或BIGINT(20)", + Content: `INT(M) 在 integer 数据类型中,M 表示最大显示宽度。 在 INT(M) 中,M 的值跟 INT(M) 所占多少存储空间并无任何关系。 INT(3)、INT(4)、INT(8) 在磁盘上都是占用 4 bytes 的存储空间。`, + Case: "CREATE TABLE tab (a INT(1));", + Func: (*Query4Audit).RuleIntPrecision, + }, + "COL.017": { + Item: "COL.017", + Severity: "L2", + Summary: "varchar定义长度过长", + Content: fmt.Sprintf(`varchar 
是可变长字符串,不预先分配存储空间,长度不要超过%d,如果存储长度过长MySQL将定义字段类型为text,独立出来一张表,用主键来对应,避免影响其它字段索引效率。`, common.Config.MaxVarcharLength), + Case: "CREATE TABLE tab (a varchar(3500));", + Func: (*Query4Audit).RuleVarcharLength, + }, + "DIS.001": { + Item: "DIS.001", + Severity: "L1", + Summary: "消除不必要的DISTINCT条件", + Content: `太多DISTINCT条件是复杂的裹脚布式查询的症状。考虑将复杂查询分解成许多简单的查询,并减少DISTINCT条件的数量。如果主键列是列的结果集的一部分,则DISTINCT条件可能没有影响。`, + Case: "SELECT DISTINCT c.c_id,count(DISTINCT c.c_name),count(DISTINCT c.c_e),count(DISTINCT c.c_n),count(DISTINCT c.c_me),c.c_d FROM (select distinct xing, name from B) as e WHERE e.country_id = c.country_id", + Func: (*Query4Audit).RuleDistinctUsage, + }, + "DIS.002": { + Item: "DIS.002", + Severity: "L3", + Summary: "COUNT(DISTINCT)多列时结果可能和你预想的不同", + Content: `COUNT(DISTINCT col)计算该列除NULL之外的不重复行数,注意COUNT(DISTINCT col, col2)如果其中一列全为NULL那么即使另一列有不同的值,也返回0。`, + Case: "SELECT COUNT(DISTINCT col, col2) FROM tbl;", + Func: (*Query4Audit).RuleCountDistinctMultiCol, + }, + // DIS.003灵感来源于如下链接 + // http://www.ijstr.org/final-print/oct2015/Query-Optimization-Techniques-Tips-For-Writing-Efficient-And-Faster-Sql-Queries.pdf + "DIS.003": { + Item: "DIS.003", + Severity: "L3", + Summary: "DISTINCT *对有主键的表没有意义", + Content: `当表已经有主键时,对所有列进行DISTINCT的输出结果与不进行DISTINCT操作的结果相同,请不要画蛇添足。`, + Case: "SELECT DISTINCT * FROM film;", + Func: (*Query4Audit).RuleDistinctStar, + }, + "FUN.001": { + Item: "FUN.001", + Severity: "L2", + Summary: "避免在WHERE条件中使用函数或其他运算符", + Content: `虽然在SQL中使用函数可以简化很多复杂的查询,但使用了函数的查询无法利用表中已经建立的索引,该查询将会是全表扫描,性能较差。通常建议将列名写在比较运算符左侧,将查询过滤条件放在比较运算符右侧。`, + Case: "select id from t where substring(name,1,3)='abc'", + Func: (*Query4Audit).RuleCompareWithFunction, + }, + "FUN.002": { + Item: "FUN.002", + Severity: "L1", + Summary: "指定了WHERE条件或非MyISAM引擎时使用COUNT(*)操作性能不佳", + Content: 
`COUNT(*)的作用是统计表行数,COUNT(COL)的作用是统计指定列非NULL的行数。MyISAM表对于COUNT(*)统计全表行数进行了特殊的优化,通常情况下非常快。但对于非MyISAM表或指定了某些WHERE条件,COUNT(*)操作需要扫描大量的行才能获取精确的结果,性能也因此不佳。有时候某些业务场景并不需要完全精确的COUNT值,此时可以用近似值来代替。EXPLAIN出来的优化器估算的行数就是一个不错的近似值,执行EXPLAIN并不需要真正去执行查询,所以成本很低。`, + Case: "SELECT c3, COUNT(*) AS accounts FROM tab where c2 < 10000 GROUP BY c3 ORDER BY num", + Func: (*Query4Audit).RuleCountStar, + }, + "FUN.003": { + Item: "FUN.003", + Severity: "L3", + Summary: "使用了合并为可空列的字符串连接", + Content: `在一些查询请求中,您需要强制让某一列或者某个表达式返回非NULL的值,从而让查询逻辑变得更简单,担忧不想将这个值存下来。使用COALESCE()函数来构造连接的表达式,这样即使是空值列也不会使整表达式变为NULL。`, + Case: "select c1 || coalesce(' ' || c2 || ' ', ' ') || c3 as c from tbl", + Func: (*Query4Audit).RuleStringConcatenation, + }, + "FUN.004": { + Item: "FUN.004", + Severity: "L4", + Summary: "不建议使用SYSDATE()函数", + Content: `SYSDATE()函数可能导致主从数据不一致,请使用NOW()函数替代SYSDATE()。`, + Case: "SELECT SYSDATE();", + Func: (*Query4Audit).RuleSysdate, + }, + "FUN.005": { + Item: "FUN.005", + Severity: "L1", + Summary: "不建议使用COUNT(col)或COUNT(常量)", + Content: `不要使用COUNT(col)或COUNT(常量)来替代COUNT(*),COUNT(*)是SQL92定义的标准统计行数的方法,跟数据无关,跟NULL和非NULL也无关。`, + Case: "SELECT COUNT(1) FROM tbl;", + Func: (*Query4Audit).RuleCountConst, + }, + "FUN.006": { + Item: "FUN.006", + Severity: "L1", + Summary: "使用SUM(COL)时需注意NPE问题", + Content: `当某一列的值全是NULL时,COUNT(COL)的返回结果为0,但SUM(COL)的返回结果为NULL,因此使用SUM()时需注意NPE问题。可以使用如下方式来避免SUM的NPE问题: SELECT IF(ISNULL(SUM(COL)), 0, SUM(COL)) FROM tbl`, + Case: "SELECT SUM(COL) FROM tbl;", + Func: (*Query4Audit).RuleSumNPE, + }, + "GRP.001": { + Item: "GRP.001", + Severity: "L2", + Summary: "不建议对等值查询列使用GROUP BY", + Content: `GROUP BY中的列在前面的WHERE条件中使用了等值查询,对这样的列进行GROUP BY意义不大。`, + Case: "select film_id, title from film where release_year='2006' group by release_year", + Func: (*Query4Audit).RuleOK, // 该建议在indexAdvisor中给 + }, + "JOI.001": { + Item: "JOI.001", + Severity: "L2", + Summary: "JOIN语句混用逗号和ANSI模式", + Content: `表连接的时候混用逗号和ANSI JOIN不便于人类理解,并且MySQL不同版本的表连接行为和优先级均有所不同,当MySQL版本变化后可能会引入错误。`, + 
Case: "select c1,c2,c3 from t1,t2 join t3 on t1.c1=t2.c1,t1.c3=t3,c1 where id>1000", + Func: (*Query4Audit).RuleCommaAnsiJoin, + }, + "JOI.002": { + Item: "JOI.002", + Severity: "L4", + Summary: "同一张表被连接两次", + Content: `相同的表在FROM子句中至少出现两次,可以简化为对该表的单次访问。`, + Case: "select tb1.col from (tb1, tb2) join tb2 on tb1.id=tb.id where tb1.id=1", + Func: (*Query4Audit).RuleDupJoin, + }, + "JOI.003": { + Item: "JOI.003", + Severity: "L4", + Summary: "OUTER JOIN失效", + Content: `由于WHERE条件错误使得OUTER JOIN的外部表无数据返回,这会将查询隐式转换为 INNER JOIN 。如:select c from L left join R using(c) where L.a=5 and R.b=10。这种SQL逻辑上可能存在错误或程序员对OUTER JOIN如何工作存在误解,因为LEFT/RIGHT JOIN是LEFT/RIGHT OUTER JOIN的缩写。`, + Case: "select c1,c2,c3 from t1 left outer join t2 using(c1) where t1.c2=2 and t2.c3=4", + Func: (*Query4Audit).RuleOK, // TODO + }, + "JOI.004": { + Item: "JOI.004", + Severity: "L4", + Summary: "不建议使用排它JOIN", + Content: `只在右侧表为NULL的带WHERE子句的LEFT OUTER JOIN语句,有可能是在WHERE子句中使用错误的列,如:“... FROM l LEFT OUTER JOIN r ON l.l = r.r WHERE r.z IS NULL”,这个查询正确的逻辑可能是 WHERE r.r IS NULL。`, + Case: "select c1,c2,c3 from t1 left outer join t2 on t1.c1=t2.c1 where t2.c2 is null", + Func: (*Query4Audit).RuleOK, // TODO + }, + "JOI.005": { + Item: "JOI.005", + Severity: "L2", + Summary: "减少JOIN的数量", + Content: `太多的JOIN是复杂的裹脚布式查询的症状。考虑将复杂查询分解成许多简单的查询,并减少JOIN的数量。`, + Case: "select bp1.p_id, b1.d_d as l, b1.b_id from b1 join bp1 on (b1.b_id = bp1.b_id) left outer join (b1 as b2 join bp2 on (b2.b_id = bp2.b_id)) on (bp1.p_id = bp2.p_id ) join bp21 on (b1.b_id = bp1.b_id) join bp31 on (b1.b_id = bp1.b_id) join bp41 on (b1.b_id = bp1.b_id) where b2.b_id = 0", + Func: (*Query4Audit).RuleReduceNumberOfJoin, + }, + "JOI.006": { + Item: "JOI.006", + Severity: "L4", + Summary: "将嵌套查询重写为JOIN通常会导致更高效的执行和更有效的优化", + Content: `一般来说,非嵌套子查询总是用于关联子查询,最多是来自FROM子句中的一个表,这些子查询用于ANY、ALL和EXISTS的谓词。如果可以根据查询语义决定子查询最多返回一个行,那么一个不相关的子查询或来自FROM子句中的多个表的子查询就被压平了。`, + Case: "SELECT s,p,d FROM tbl WHERE p.p_id = (SELECT s.p_id FROM tbl WHERE s.c_id = 100996 
AND s.q = 1 )", + Func: (*Query4Audit).RuleNestedSubQueries, + }, + "JOI.007": { + Item: "JOI.007", + Severity: "L4", + Summary: "不建议使用联表更新", + Content: `当需要同时更新多张表时建议使用简单SQL,一条SQL只更新一张表,尽量不要将多张表的更新在同一条SQL中完成。`, + Case: "UPDATE users u LEFT JOIN hobby h ON u.id = h.uid SET u.name = 'pianoboy' WHERE h.hobby = 'piano';", + Func: (*Query4Audit).RuleMultiDeleteUpdate, + }, + "JOI.008": { + Item: "JOI.008", + Severity: "L4", + Summary: "不要使用跨DB的Join查询", + Content: `一般来说,跨DB的Join查询意味着查询语句跨越了两个不同的子系统,这可能意味着系统耦合度过高或库表结构设计不合理。`, + Case: "SELECT s,p,d FROM tbl WHERE p.p_id = (SELECT s.p_id FROM tbl WHERE s.c_id = 100996 AND s.q = 1 )", + Func: (*Query4Audit).RuleMultiDBJoin, + }, + // TODO: 跨库事务的检查,目前SOAR未对事务做处理 + "KEY.001": { + Item: "KEY.001", + Severity: "L2", + Summary: "建议使用自增列作为主键,如使用联合自增主键时请将自增键作为第一列", + Content: `建议使用自增列作为主键,如使用联合自增主键时请将自增键作为第一列`, + Case: "create table test(`id` int(11) NOT NULL PRIMARY KEY (`id`))", + Func: (*Query4Audit).RulePKNotInt, + }, + "KEY.002": { + Item: "KEY.002", + Severity: "L4", + Summary: "无主键或唯一键,无法在线变更表结构", + Content: `无主键或唯一键,无法在线变更表结构`, + Case: "create table test(col varchar(5000))", + Func: (*Query4Audit).RuleNoOSCKey, + }, + "KEY.003": { + Item: "KEY.003", + Severity: "L4", + Summary: "避免外键等递归关系", + Content: `存在递归关系的数据很常见,数据常会像树或者以层级方式组织。然而,创建一个外键约束来强制执行同一表中两列之间的关系,会导致笨拙的查询。树的每一层对应着另一个连接。您将需要发出递归查询,以获得节点的所有后代或所有祖先。解决方案是构造一个附加的闭包表。它记录了树中所有节点间的关系,而不仅仅是那些具有直接的父子关系。您也可以比较不同层次的数据设计:闭包表,路径枚举,嵌套集。然后根据应用程序的需要选择一个。`, + Case: "CREATE TABLE tab2 (p_id BIGINT UNSIGNED NOT NULL,a_id BIGINT UNSIGNED NOT NULL,PRIMARY KEY (p_id, a_id),FOREIGN KEY (p_id) REFERENCES tab1(p_id),FOREIGN KEY (a_id) REFERENCES tab3(a_id))", + Func: (*Query4Audit).RuleRecursiveDependency, + }, + // TODO: 新增复合索引,字段按散粒度是否由大到小排序,区分度最高的在最左边 + "KEY.004": { + Item: "KEY.004", + Severity: "L0", + Summary: "提醒:请将索引属性顺序与查询对齐", + Content: `如果为列创建复合索引,请确保查询属性与索引属性的顺序相同,以便DBMS在处理查询时使用索引。如果查询和索引属性订单没有对齐,那么DBMS可能无法在查询处理期间使用索引。`, + Case: "create index idx1 on tbl 
(last_name,first_name)", + Func: (*Query4Audit).RuleIndexAttributeOrder, + }, + "KEY.005": { + Item: "KEY.005", + Severity: "L2", + Summary: "表建的索引过多", + Content: `表建的索引过多`, + Case: "CREATE TABLE tbl ( a int, b int, c int, KEY idx_a (`a`),KEY idx_b(`b`),KEY idx_c(`c`));", + Func: (*Query4Audit).RuleTooManyKeys, + }, + "KEY.006": { + Item: "KEY.006", + Severity: "L4", + Summary: "主键中的列过多", + Content: `主键中的列过多`, + Case: "CREATE TABLE tbl ( a int, b int, c int, PRIMARY KEY(`a`,`b`,`c`));", + Func: (*Query4Audit).RuleTooManyKeyParts, + }, + "KEY.007": { + Item: "KEY.007", + Severity: "L4", + Summary: "未指定主键或主键非int或bigint", + Content: `未指定主键或主键非int或bigint,建议将主键设置为int unsigned或bigint unsigned。`, + Case: "CREATE TABLE tbl (a int);", + Func: (*Query4Audit).RulePKNotInt, + }, + "KEY.008": { + Item: "KEY.008", + Severity: "L4", + Summary: "ORDER BY多个列但排序方向不同时可能无法使用索引", + Content: `在MySQL 8.0之前当ORDER BY多个列指定的排序方向不同时将无法使用已经建立的索引。`, + Case: "SELECT * FROM tbl ORDER BY a DESC, b ASC;", + Func: (*Query4Audit).RuleOrderByMultiDirection, + }, + "KEY.009": { + Item: "KEY.009", + Severity: "L0", + Summary: "添加唯一索引前请注意检查数据唯一性", + Content: `请提前检查添加唯一索引列的数据唯一性,如果数据不唯一在线表结构调整时将有可能自动将重复列删除,这有可能导致数据丢失。`, + Case: "CREATE UNIQUE INDEX part_of_name ON customer (name(10));", + Func: (*Query4Audit).RuleUniqueKeyDup, + }, + "KWR.001": { + Item: "KWR.001", + Severity: "L2", + Summary: "SQL_CALC_FOUND_ROWS效率低下", + Content: `因为SQL_CALC_FOUND_ROWS不能很好地扩展,所以可能导致性能问题; 建议业务使用其他策略来替代SQL_CALC_FOUND_ROWS提供的计数功能,比如:分页结果展示等。`, + Case: "select SQL_CALC_FOUND_ROWS col from tbl where id>1000", + Func: (*Query4Audit).RuleSQLCalcFoundRows, + }, + "KWR.002": { + Item: "KWR.002", + Severity: "L2", + Summary: "不建议使用MySQL关键字做列名或表名", + Content: `当使用关键字做为列名或表名时程序需要对列名和表名进行转义,如果疏忽被将导致请求无法执行。`, + Case: "CREATE TABLE tbl ( `select` int )", + Func: (*Query4Audit).RuleUseKeyWord, + }, + "KWR.003": { + Item: "KWR.003", + Severity: "L1", + Summary: "不建议使用复数做列名或表名", + Content: `表名应该仅仅表示表里面的实体内容,不应该表示实体数量,对应于 DO 
类名也是单数形式,符合表达习惯。`, + Case: "CREATE TABLE tbl ( `books` int )", + Func: (*Query4Audit).RulePluralWord, + }, + "LCK.001": { + Item: "LCK.001", + Severity: "L3", + Summary: "INSERT INTO xx SELECT加锁粒度较大请谨慎", + Content: `INSERT INTO xx SELECT加锁粒度较大请谨慎`, + Case: "INSERT INTO tbl SELECT * FROM tbl2;", + Func: (*Query4Audit).RuleInsertSelect, + }, + "LCK.002": { + Item: "LCK.002", + Severity: "L3", + Summary: "请慎用INSERT ON DUPLICATE KEY UPDATE", + Content: `当主键为自增键时使用INSERT ON DUPLICATE KEY UPDATE可能会导致主键出现大量不连续快速增长,导致主键快速溢出无法继续写入。极端情况下还有可能导致主从数据不一致。`, + Case: "INSERT INTO t1(a,b,c) VALUES (1,2,3) ON DUPLICATE KEY UPDATE c=c+1;", + Func: (*Query4Audit).RuleInsertOnDup, + }, + "LIT.001": { + Item: "LIT.001", + Severity: "L2", + Summary: "用字符类型存储IP地址", + Content: `字符串字面上看起来像IP地址,但不是INET_ATON()的参数,表示数据被存储为字符而不是整数。将IP地址存储为整数更为有效。`, + Case: "insert into tbl (IP,name) values('10.20.306.122','test')", + Func: (*Query4Audit).RuleIPString, + }, + "LIT.002": { + Item: "LIT.002", + Severity: "L4", + Summary: "日期/时间未使用引号括起", + Content: `诸如“WHERE col <2010-02-12”之类的查询是有效的SQL,但可能是一个错误,因为它将被解释为“WHERE col <1996”; 日期/时间文字应该加引号。`, + Case: "select col1,col2 from tbl where time < 2018-01-10", + Func: (*Query4Audit).RuleDataNotQuote, + }, + "LIT.003": { + Item: "LIT.003", + Severity: "L3", + Summary: "一列中存储一系列相关数据的集合", + Content: `将ID存储为一个列表,作为VARCHAR/TEXT列,这样能导致性能和数据完整性问题。查询这样的列需要使用模式匹配的表达式。使用逗号分隔的列表来做多表联结查询定位一行数据是极不优雅和耗时的。这将使验证ID更加困难。考虑一下,列表最多支持存放多少数据呢?将ID存储在一张单独的表中,代替使用多值属性,从而每个单独的属性值都可以占据一行。这样交叉表实现了两张表之间的多对多关系。这将更好地简化查询,也更有效地验证ID。`, + Case: "select c1,c2,c3,c4 from tab1 where col_id REGEXP '[[:<:]]12[[:>:]]'", + Func: (*Query4Audit).RuleMultiValueAttribute, + }, + "LIT.004": { + Item: "LIT.004", + Severity: "L1", + Summary: "请使用分号或已设定的DELIMITER结尾", + Content: `USE database, SHOW DATABASES等命令也需要使用使用分号或已设定的DELIMITER结尾。`, + Case: "USE db", + Func: (*Query4Audit).RuleOK, // TODO: RuleAddDelimiter + }, + "RES.001": { + Item: "RES.001", + Severity: "L4", + Summary: "非确定性的GROUP BY", + Content: 
`SQL返回的列既不在聚合函数中也不是GROUP BY表达式的列中,因此这些值的结果将是非确定性的。如:select a, b, c from tbl where foo="bar" group by a,该SQL返回的结果就是不确定的。`, + Case: "select c1,c2,c3 from t1 where c2='foo' group by c2", + Func: (*Query4Audit).RuleNoDeterministicGroupby, + }, + "RES.002": { + Item: "RES.002", + Severity: "L4", + Summary: "未使用ORDER BY的LIMIT查询", + Content: `没有ORDER BY的LIMIT会导致非确定性的结果,这取决于查询执行计划。`, + Case: "select col1,col2 from tbl where name=xx limit 10", + Func: (*Query4Audit).RuleNoDeterministicLimit, + }, + "RES.003": { + Item: "RES.003", + Severity: "L4", + Summary: "UPDATE/DELETE操作使用了LIMIT条件", + Content: `UPDATE/DELETE操作使用LIMIT条件和不添加WHERE条件一样危险,它可将会导致主从数据不一致或从库同步中断。`, + Case: "UPDATE film SET length = 120 WHERE title = 'abc' LIMIT 1;", + Func: (*Query4Audit).RuleUpdateDeleteWithLimit, + }, + "RES.004": { + Item: "RES.004", + Severity: "L4", + Summary: "UPDATE/DELETE操作指定了ORDER BY条件", + Content: `UPDATE/DELETE操作不要指定ORDER BY条件。`, + Case: "UPDATE film SET length = 120 WHERE title = 'abc' ORDER BY title", + Func: (*Query4Audit).RuleUpdateDeleteWithOrderby, + }, + "RES.005": { + Item: "RES.005", + Severity: "L4", + Summary: "UPDATE可能存在逻辑错误,导致数据损坏", + Content: "", + Case: "update tbl set col = 1 and cl = 2 where col=3;", + Func: (*Query4Audit).RuleUpdateSetAnd, + }, + "RES.006": { + Item: "RES.006", + Severity: "L4", + Summary: "永远不真的比较条件", + Content: "查询条件永远非真,这将导致查询无匹配到的结果。", + Case: "select * from tbl where 1 != 1;", + Func: (*Query4Audit).RuleImpossibleWhere, + }, + "RES.007": { + Item: "RES.007", + Severity: "L4", + Summary: "永远为真的比较条件", + Content: "查询条件永远为真,这将导致WHERE条件失效进行全表查询。", + Case: "select * from tbl where 1 = 1;", + Func: (*Query4Audit).RuleMeaninglessWhere, + }, + "RES.008": { + Item: "RES.008", + Severity: "L2", + Summary: "不建议使用LOAD DATA/SELECT ... 
INTO OUTFILE", + Content: "SELECT INTO OUTFILE需要授予FILE权限,这通过会引入安全问题。LOAD DATA虽然可以提高数据导入速度,但同时也可能导致从库同步延迟过大。", + Case: "LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table;", + Func: (*Query4Audit).RuleLoadFile, + }, + "SEC.001": { + Item: "SEC.001", + Severity: "L0", + Summary: "请谨慎使用TRUNCATE操作", + Content: `一般来说想清空一张表最快速的做法就是使用TRUNCATE TABLE tbl_name;语句。但TRUNCATE操作也并非是毫无代价的,TRUNCATE TABLE无法返回被删除的准确行数,如果需要返回被删除的行数建议使用DELETE语法。TRUNCATE操作还会重置AUTO_INCREMENT,如果不想重置该值建议使用DELETE FROM tbl_name WHERE 1;替代。TRUNCATE操作会对数据字典添加源数据锁(MDL),当一次需要TRUNCATE很多表时会影响整个实例的所有请求,因此如果要TRUNCATE多个表建议用DROP+CREATE的方式以减少锁时长。`, + Case: "TRUNCATE TABLE tbl_name", + Func: (*Query4Audit).RuleTruncateTable, + }, + "SEC.002": { + Item: "SEC.002", + Severity: "L0", + Summary: "不使用明文存储密码", + Content: `使用明文存储密码或者使用明文在网络上传递密码都是不安全的。如果攻击者能够截获您用来插入密码的SQL语句,他们就能直接读到密码。另外,将用户输入的字符串以明文的形式插入到纯SQL语句中,也会让攻击者发现它。如果您能够读取密码,黑客也可以。解决方案是使用单向哈希函数对原始密码进行加密编码。哈希是指将输入字符串转化成另一个新的、不可识别的字符串的函数。对密码加密表达式加点随机串来防御“字典攻击”。不要将明文密码输入到SQL查询语句中。在应用程序代码中计算哈希串,只在SQL查询中使用哈希串。`, + Case: "create table test(id int,name varchar(20) not null,password varchar(200)not null)", + Func: (*Query4Audit).RuleReadablePasswords, + }, + "SEC.003": { + Item: "SEC.003", + Severity: "L0", + Summary: "使用DELETE/DROP/TRUNCATE等操作时注意备份", + Content: `在执行高危操作之前对数据进行备份是十分有必要的。`, + Case: "delete from table where col = 'condition'", + Func: (*Query4Audit).RuleDataDrop, + }, + "STA.001": { + Item: "STA.001", + Severity: "L0", + Summary: "'!=' 运算符是非标准的", + Content: `"<>"才是标准SQL中的不等于运算符。`, + Case: "select col1,col2 from tbl where type!=0", + Func: (*Query4Audit).RuleStandardINEQ, + }, + "STA.002": { + Item: "STA.002", + Severity: "L1", + Summary: "库名或表名点后建议不要加空格", + Content: `当使用db.table或table.column格式访问表或字段时,请不要在点号后面添加空格,虽然这样语法正确。`, + Case: "select col from sakila. 
film", + Func: (*Query4Audit).RuleSpaceAfterDot, + }, + "STA.003": { + Item: "STA.003", + Severity: "L1", + Summary: "索引起名不规范", + Content: `建议普通二级索引以idx_为前缀,唯一索引以uk_为前缀。`, + Case: "select col from now where type!=0", + Func: (*Query4Audit).RuleIdxPrefix, + }, + "STA.004": { + Item: "STA.004", + Severity: "L1", + Summary: "起名时请不要使用字母、数字和下划线之外的字符", + Content: `以字母或下划线开头,名字只允许使用字母、数字和下划线。请统一大小写,不要使用驼峰命名法。不要在名字中出现连续下划线'__',这样很难辨认。`, + Case: "CREATE TABLE ` abc` (a int);", + Func: (*Query4Audit).RuleStandardName, + }, + "SUB.001": { + Item: "SUB.001", + Severity: "L4", + Summary: "MySQL对子查询的优化效果不佳", + Content: `MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。这可能会在 MySQL 5.6版本中得到改善, 但对于5.1及更早版本, 建议将该类查询分别重写为JOIN或LEFT OUTER JOIN。`, + Case: "select col1,col2,col3 from table1 where col2 in(select col from table2)", + Func: (*Query4Audit).RuleInSubquery, + }, + "SUB.002": { + Item: "SUB.002", + Severity: "L2", + Summary: "如果您不在乎重复的话,建议使用UNION ALL替代UNION", + Content: `与去除重复的UNION不同,UNION ALL允许重复元组。如果您不关心重复元组,那么使用UNION ALL将是一个更快的选项。`, + Case: "select teacher_id as id,people_name as name from t1,t2 where t1.teacher_id=t2.people_id union select student_id as id,people_name as name from t1,t2 where t1.student_id=t2.people_id", + Func: (*Query4Audit).RuleUNIONUsage, + }, + "SUB.003": { + Item: "SUB.003", + Severity: "L3", + Summary: "考虑使用EXISTS而不是DISTINCT子查询", + Content: `DISTINCT关键字在对元组排序后删除重复。相反,考虑使用一个带有EXISTS关键字的子查询,您可以避免返回整个表。`, + Case: "SELECT DISTINCT c.c_id, c.c_name FROM c,e WHERE e.c_id = c.c_id", + Func: (*Query4Audit).RuleDistinctJoinUsage, + }, + // TODO: 5.6有了semi join还要把in转成exists么? + // Use EXISTS instead of IN to check existence of data. 
+ // http://www.winwire.com/25-tips-to-improve-sql-query-performance/ + "SUB.004": { + Item: "SUB.004", + Severity: "L3", + Summary: "执行计划中嵌套连接深度过深", + Content: `MySQL对子查询的优化效果不佳,MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。`, + Case: "SELECT * from tb where id in (select id from (select id from tb))", + Func: (*Query4Audit).RuleSubqueryDepth, + }, + // SUB.005灵感来自 https://blog.csdn.net/zhuocr/article/details/61192418 + "SUB.005": { + Item: "SUB.005", + Severity: "L8", + Summary: "子查询不支持LIMIT", + Content: `当前MySQL版本不支持在子查询中进行'LIMIT & IN/ALL/ANY/SOME'。`, + Case: "SELECT * FROM staff WHERE name IN (SELECT NAME FROM customer ORDER BY name LIMIT 1)", + Func: (*Query4Audit).RuleSubQueryLimit, + }, + "SUB.006": { + Item: "SUB.006", + Severity: "L2", + Summary: "不建议在子查询中使用函数", + Content: `MySQL将外部查询中的每一行作为依赖子查询执行子查询,如果在子查询中使用函数,即使是semi-join也很难进行高效的查询。可以将子查询重写为OUTER JOIN语句并用连接条件对数据进行过滤。`, + Case: "SELECT * FROM staff WHERE name IN (SELECT max(NAME) FROM customer)", + Func: (*Query4Audit).RuleSubQueryFunctions, + }, + "TBL.001": { + Item: "TBL.001", + Severity: "L4", + Summary: "不建议使用分区表", + Content: `不建议使用分区表`, + Case: "CREATE TABLE trb3(id INT, name VARCHAR(50), purchased DATE) PARTITION BY RANGE(YEAR(purchased)) (PARTITION p0 VALUES LESS THAN (1990), PARTITION p1 VALUES LESS THAN (1995), PARTITION p2 VALUES LESS THAN (2000), PARTITION p3 VALUES LESS THAN (2005) );", + Func: (*Query4Audit).RulePartitionNotAllowed, + }, + "TBL.002": { + Item: "TBL.002", + Severity: "L4", + Summary: "请为表选择合适的存储引擎", + Content: `建表或修改表的存储引擎时建议使用推荐的存储引擎,如:` + strings.Join(common.Config.TableAllowEngines, ","), + Case: "create table test(`id` int(11) NOT NULL AUTO_INCREMENT)", + Func: (*Query4Audit).RuleAllowEngine, + }, + "TBL.003": { + Item: "TBL.003", + Severity: "L8", + Summary: "以DUAL命名的表在数据库中有特殊含义", + Content: `DUAL表为虚拟表,不需要创建即可使用,也不建议服务以DUAL命名表。`, + Case: "create table dual(id int, primary key (id));", + Func: (*Query4Audit).RuleCreateDualTable, + }, + "TBL.004": { + Item: "TBL.004", + 
Severity: "L2", + Summary: "表的初始AUTO_INCREMENT值不为0", + Content: `AUTO_INCREMENT不为0会导致数据空洞。`, + Case: "CREATE TABLE tbl (a int) AUTO_INCREMENT = 10;", + Func: (*Query4Audit).RuleAutoIncrementInitNotZero, + }, + "TBL.005": { + Item: "TBL.005", + Severity: "L4", + Summary: "请使用推荐的字符集", + Content: `表字符集只允许设置为` + strings.Join(common.Config.TableAllowCharsets, ","), + Case: "CREATE TABLE tbl (a int) DEFAULT CHARSET = latin1;", + Func: (*Query4Audit).RuleTableCharsetCheck, + }, + } +} + +// IsIgnoreRule 判断是否是过滤规则 +// 支持XXX*前缀匹配,OK规则不可设置过滤 +func IsIgnoreRule(item string) bool { + + for _, ir := range common.Config.IgnoreRules { + ir = strings.Trim(ir, "*") + if strings.HasPrefix(item, ir) && ir != "OK" && ir != "" { + common.Log.Debug("IsIgnoreRule: %s", item) + return true + } + } + return false +} + +// InBlackList 判断一条请求是否在黑名单列表中 +// 如果在返回true,表示不需要评审 +// 注意这里没有做指纹判断,是否用指纹在这个函数的外面处理 +func InBlackList(sql string) bool { + in := false + for _, r := range common.BlackList { + if sql == r { + in = true + break + } + re, err := regexp.Compile("(?i)" + r) + if err == nil { + if re.FindString(sql) != "" { + common.Log.Debug("InBlackList: true, regexp: %s, sql: %s", "(?i)"+r, sql) + in = true + break + } + common.Log.Debug("InBlackList: false, regexp: %s, sql: %s", "(?i)"+r, sql) + } + } + return in +} + +// FormatSuggest 格式化输出优化建议 +// 目前支持:json, text两种形式,其他形式会给结构体的pretty.Println +func FormatSuggest(sql string, format string, suggests ...map[string]Rule) (map[string]Rule, string) { + var fingerprint, id string + var buf []string + var score = 100 + type Result struct { + ID string + Fingerprint string + Sample string + Suggest map[string]Rule + } + + // 生成指纹和ID + if sql != "" { + fingerprint = query.Fingerprint(sql) + id = query.Id(fingerprint) + } + + // 合并重复的建议 + suggest := make(map[string]Rule) + for _, s := range suggests { + for item, rule := range s { + suggest[item] = rule + } + } + suggest = MergeConflictHeuristicRules(suggest) + + // 
是否忽略显示OK建议,测试的时候大家都喜欢看OK,线上跑起来的时候OK太多反而容易看花眼 + ignoreOK := false + for _, r := range common.Config.IgnoreRules { + if "OK" == r { + ignoreOK = true + } + } + + // 先保证suggest中有元素,然后再根据ignore配置删除不需要的项 + if len(suggest) < 1 { + suggest = map[string]Rule{"OK": HeuristicRules["OK"]} + } + if ignoreOK || len(suggest) > 1 { + delete(suggest, "OK") + } + for k := range suggest { + if IsIgnoreRule(k) { + delete(suggest, k) + } + } + + switch format { + case "json": + js, err := json.MarshalIndent(Result{ + ID: id, + Fingerprint: fingerprint, + Sample: sql, + Suggest: suggest, + }, "", " ") + if err == nil { + buf = append(buf, fmt.Sprintln(string(js))) + } else { + common.Log.Error("FormatSuggest json.Marshal Error: %v", err) + } + + case "text": + for item, rule := range suggest { + buf = append(buf, fmt.Sprintln("Query: ", sql)) + buf = append(buf, fmt.Sprintln("ID: ", id)) + buf = append(buf, fmt.Sprintln("Item: ", item)) + buf = append(buf, fmt.Sprintln("Severity: ", rule.Severity)) + buf = append(buf, fmt.Sprintln("Summary: ", rule.Summary)) + buf = append(buf, fmt.Sprintln("Content: ", rule.Content)) + } + case "lint": + for item, rule := range suggest { + // lint 中无需关注 OK 和 EXP + if item != "OK" && !strings.HasPrefix(item, "EXP") { + buf = append(buf, fmt.Sprintf("%s %s", item, rule.Summary)) + } + } + + case "markdown", "html", "explain-digest", "duplicate-key-checker": + if sql != "" && len(suggest) > 0 { + switch common.Config.ExplainSQLReportType { + case "fingerprint": + buf = append(buf, fmt.Sprintf("# Query: %s\n", id)) + buf = append(buf, fmt.Sprintf("```sql\n%s\n```\n", fingerprint)) + case "sample": + buf = append(buf, fmt.Sprintf("# Query: %s\n", id)) + buf = append(buf, fmt.Sprintf("```sql\n%s\n```\n", sql)) + default: + buf = append(buf, fmt.Sprintf("# Query: %s\n", id)) + buf = append(buf, fmt.Sprintf("```sql\n%s\n```\n", ast.Pretty(sql, format))) + } + } + // MySQL + var sortedMySQLSuggest []string + for item := range suggest { + if 
strings.HasPrefix(item, "ERR") { + if suggest[item].Content == "" { + delete(suggest, item) + } else { + sortedMySQLSuggest = append(sortedMySQLSuggest, item) + } + } + } + sort.Strings(sortedMySQLSuggest) + if len(sortedMySQLSuggest) > 0 { + buf = append(buf, "## MySQL执行出错\n") + } + for _, item := range sortedMySQLSuggest { + buf = append(buf, fmt.Sprintln(suggest[item].Content)) + score = 0 + delete(suggest, item) + } + + // Explain + if suggest["EXP.000"].Item != "" { + buf = append(buf, fmt.Sprintln("## ", suggest["EXP.000"].Summary)) + buf = append(buf, fmt.Sprintln(suggest["EXP.000"].Content)) + buf = append(buf, fmt.Sprint(suggest["EXP.000"].Case, "\n")) + delete(suggest, "EXP.000") + } + var sortedExplainSuggest []string + for item := range suggest { + if strings.HasPrefix(item, "EXP") { + sortedExplainSuggest = append(sortedExplainSuggest, item) + } + } + sort.Strings(sortedExplainSuggest) + for _, item := range sortedExplainSuggest { + buf = append(buf, fmt.Sprintln("### ", suggest[item].Summary)) + buf = append(buf, fmt.Sprintln(suggest[item].Content)) + buf = append(buf, fmt.Sprint(suggest[item].Case, "\n")) + } + + // Profiling + var sortedProfilingSuggest []string + for item := range suggest { + if strings.HasPrefix(item, "PRO") { + sortedProfilingSuggest = append(sortedProfilingSuggest, item) + } + } + sort.Strings(sortedProfilingSuggest) + if len(sortedProfilingSuggest) > 0 { + buf = append(buf, "## Profiling信息\n") + } + for _, item := range sortedProfilingSuggest { + buf = append(buf, fmt.Sprintln(suggest[item].Content)) + delete(suggest, item) + } + + // Trace + var sortedTraceSuggest []string + for item := range suggest { + if strings.HasPrefix(item, "TRA") { + sortedTraceSuggest = append(sortedTraceSuggest, item) + } + } + sort.Strings(sortedTraceSuggest) + if len(sortedTraceSuggest) > 0 { + buf = append(buf, "## Trace信息\n") + } + for _, item := range sortedTraceSuggest { + buf = append(buf, fmt.Sprintln(suggest[item].Content)) + delete(suggest, 
item) + } + + // Index + var sortedIdxSuggest []string + for item := range suggest { + if strings.HasPrefix(item, "IDX") { + sortedIdxSuggest = append(sortedIdxSuggest, item) + } + } + sort.Strings(sortedIdxSuggest) + for _, item := range sortedIdxSuggest { + buf = append(buf, fmt.Sprintln("## ", common.MarkdownEscape(suggest[item].Summary))) + buf = append(buf, fmt.Sprintln("* **Item:** ", item)) + buf = append(buf, fmt.Sprintln("* **Severity:** ", suggest[item].Severity)) + minus, err := strconv.Atoi(strings.Trim(suggest[item].Severity, "L")) + if err == nil { + score = score - minus*5 + } else { + common.Log.Debug("FormatSuggest, sortedIdxSuggest, strconv.Atoi, Error: ", err) + score = 0 + } + buf = append(buf, fmt.Sprintln("* **Content:** ", common.MarkdownEscape(suggest[item].Content))) + + if format == "duplicate-key-checker" { + buf = append(buf, fmt.Sprintf("* **原建表语句:** \n```sql\n%s\n```\n", suggest[item].Case), "\n\n") + } else { + buf = append(buf, fmt.Sprint("* **Case:** ", common.MarkdownEscape(suggest[item].Case), "\n\n")) + } + } + + // Heuristic + var sortedHeuristicSuggest []string + for item := range suggest { + if !strings.HasPrefix(item, "EXP") && + !strings.HasPrefix(item, "IDX") && + !strings.HasPrefix(item, "PRO") { + sortedHeuristicSuggest = append(sortedHeuristicSuggest, item) + } + } + sort.Strings(sortedHeuristicSuggest) + for _, item := range sortedHeuristicSuggest { + buf = append(buf, fmt.Sprintln("## ", suggest[item].Summary)) + if item == "OK" { + continue + } + buf = append(buf, fmt.Sprintln("* **Item:** ", item)) + buf = append(buf, fmt.Sprintln("* **Severity:** ", suggest[item].Severity)) + minus, err := strconv.Atoi(strings.Trim(suggest[item].Severity, "L")) + if err == nil { + score = score - minus*5 + } else { + common.Log.Debug("FormatSuggest, sortedHeuristicSuggest, strconv.Atoi, Error: ", err) + score = 0 + } + buf = append(buf, fmt.Sprintln("* **Content:** ", common.MarkdownEscape(suggest[item].Content))) + // buf = 
append(buf, fmt.Sprint("* **Case:** ", common.MarkdownEscape(suggest[item].Case), "\n\n")) + } + + default: + buf = append(buf, fmt.Sprintln("Query: ", sql)) + for _, rule := range suggest { + buf = append(buf, pretty.Sprint(rule)) + } + } + + // 打分 + var str string + switch common.Config.ReportType { + case "explain-digest", "lint": + str = strings.Join(buf, "\n") + default: + if len(buf) > 1 { + str = buf[0] + "\n" + common.Score(score) + "\n\n" + strings.Join(buf[1:], "\n") + } + } + + return suggest, str +} + +// ListHeuristicRules 打印支持的启发式规则,对应命令行参数-list-heuristic-rules +func ListHeuristicRules(rules ...map[string]Rule) { + switch common.Config.ReportType { + case "json": + js, err := json.MarshalIndent(rules, "", " ") + if err == nil { + fmt.Println(string(js)) + } + default: + fmt.Print("# 启发式规则建议\n\n[toc]\n\n") + for _, r := range rules { + delete(r, "OK") + for _, item := range common.SortedKey(r) { + fmt.Print("## ", common.MarkdownEscape(r[item].Summary), + "\n\n* **Item**:", r[item].Item, + "\n* **Severity**:", r[item].Severity, + "\n* **Content**:", common.MarkdownEscape(r[item].Content), + "\n* **Case**:\n\n```sql\n", r[item].Case, "\n```\n") + } + } + } +} + +// ListTestSQLs 打印测试用的SQL,方便测试,对应命令行参数-list-test-sqls +func ListTestSQLs() { + for _, sql := range common.TestSQLs { + fmt.Println(sql) + } +} diff --git a/advisor/rules_test.go b/advisor/rules_test.go new file mode 100644 index 00000000..938a5460 --- /dev/null +++ b/advisor/rules_test.go @@ -0,0 +1,54 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package advisor + +import ( + "flag" + "testing" + + "github.com/XiaoMi/soar/common" +) + +var update = flag.Bool("update", false, "update .golden files") + +func TestListTestSQLs(t *testing.T) { + err := common.GoldenDiff(func() { ListTestSQLs() }, t.Name(), update) + if nil != err { + t.Fatal(err) + } +} + +func TestListHeuristicRules(t *testing.T) { + err := common.GoldenDiff(func() { ListHeuristicRules(HeuristicRules) }, t.Name(), update) + if nil != err { + t.Fatal(err) + } +} + +func TestInBlackList(t *testing.T) { + common.BlackList = []string{"select"} + if !InBlackList("select 1") { + t.Error("should be true") + } +} + +func TestIsIgnoreRule(t *testing.T) { + common.Config.IgnoreRules = []string{"test"} + if !IsIgnoreRule("test") { + t.Error("should be true") + } +} diff --git a/advisor/testdata/TestDigestExplainText.golden b/advisor/testdata/TestDigestExplainText.golden new file mode 100644 index 00000000..b0311993 --- /dev/null +++ b/advisor/testdata/TestDigestExplainText.golden @@ -0,0 +1,26 @@ +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *country* | NULL | index | PRIMARY,
country\_id | country | 152 | NULL | 0 | 0.00% | ☠️ **O(n)** | Using index | +| 1 | SIMPLE | *city* | NULL | ref | idx\_fk\_country\_id,
idx\_country\_id\_city,
idx\_all,
idx\_other | idx\_fk\_country\_id | 2 | sakila.country.country\_id | 0 | 0.00% | ☠️ **O(n)** | Using index | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **index**: 全表扫描, 只是扫描表的时候按照索引次序进行而不是行. 主要优点就是避免了排序, 但是开销仍然非常大. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. + + diff --git a/advisor/testdata/TestIndexAdviseNoEnv.golden b/advisor/testdata/TestIndexAdviseNoEnv.golden new file mode 100644 index 00000000..ac966f08 --- /dev/null +++ b/advisor/testdata/TestIndexAdviseNoEnv.golden @@ -0,0 +1,115 @@ +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 
14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; 为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`), add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; 为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`), add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": 
{Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的address表添加索引", Content:"为列address添加索引,散粒度为: 100.00%; 为列district添加索引,散粒度为: 100.00%; ", Case:"ALTER TABLE `sakila`.`address` add index `idx_address` (`address`), add index `idx_district` (`district`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; 为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`), add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; 为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`), add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", 
Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; 为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`), add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; 为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`), add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列release_year添加索引,散粒度为: 0.10%; 为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_release_year` (`release_year`), add index `idx_length` 
(`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; 为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`), add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; 为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`), add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; 为列release_year添加索引,散粒度为: 0.10%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`), add index `idx_release_year` (`release_year`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} 
+map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的country表添加索引", Content:"为列last_update添加索引,散粒度为: 0.92%; ", Case:"ALTER TABLE `sakila`.`country` add index `idx_last_update` (`last_update`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的city表添加索引", Content:"为列last_update添加索引,散粒度为: 0.17%; ", Case:"ALTER TABLE `sakila`.`city` add index `idx_last_update` (`last_update`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的country表添加索引", Content:"为列last_update添加索引,散粒度为: 0.92%; ", Case:"ALTER TABLE `sakila`.`country` add index `idx_last_update` (`last_update`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, + "IDX.002": {Item:"", Severity:"L2", Summary:"为sakila库的city表添加索引", Content:"为列last_update添加索引,散粒度为: 0.17%; ", Case:"ALTER TABLE `sakila`.`city` add index `idx_last_update` (`last_update`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的country表添加索引", Content:"为列country添加索引,散粒度为: 100.00%; ", Case:"ALTER TABLE `sakila`.`country` add index `idx_country` (`country`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列length添加索引,散粒度为: 14.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_length` (`length`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的actor表添加索引", Content:"为列last_update添加索引,散粒度为: 0.50%; 为列first_name添加索引,散粒度为: 64.00%; ", Case:"ALTER TABLE `sakila`.`actor` add index `idx_last_update` (`last_update`), add index `idx_first_name` (`first_name`) ;\n", 
Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的city表添加索引", Content:"为列city添加索引,散粒度为: 99.83%; ", Case:"ALTER TABLE `sakila`.`city` add index `idx_city` (`city`) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} +map[string]advisor.Rule{ + "IDX.001": {Item:"", Severity:"L2", Summary:"为sakila库的film表添加索引", Content:"为列description添加索引,散粒度为: 100.00%; ", Case:"ALTER TABLE `sakila`.`film` add index `idx_description` (`description`(255)) ;\n", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}}, +} diff --git a/advisor/testdata/TestListHeuristicRules.golden b/advisor/testdata/TestListHeuristicRules.golden new file mode 100644 index 00000000..1e93d48a --- /dev/null +++ b/advisor/testdata/TestListHeuristicRules.golden @@ -0,0 +1,1134 @@ +# 启发式规则建议 + +[toc] + +## 建议使用AS关键字显示声明一个别名 + +* **Item**:ALI.001 +* **Severity**:L0 +* **Content**:在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 +* **Case**: + +```sql +select name from tbl t1 where id < 1000 +``` +## 不建议给列通配符'\*'设置别名 + +* **Item**:ALI.002 +* **Severity**:L8 +* **Content**:例: "SELECT tbl.\* col1, col2"上面这条SQL给列通配符设置了别名,这样的SQL可能存在逻辑错误。您可能意在查询col1, 但是代替它的是重命名的是tbl的最后一列。 +* **Case**: + +```sql +select tbl.* as c1,c2,c3 from tbl where id < 1000 +``` +## 别名不要与表或列的名字相同 + +* **Item**:ALI.003 +* **Severity**:L1 +* **Content**:表或列的别名与其真实名称相同, 这样的别名会使得查询更难去分辨。 +* **Case**: + +```sql +select name from tbl as tbl where id < 1000 +``` +## 修改表的默认字符集不会改表各个字段的字符集 + +* **Item**:ALT.001 +* **Severity**:L4 +* **Content**:很多初学者会将ALTER TABLE tbl\_name [DEFAULT] CHARACTER SET 'UTF8'误认为会修改所有字段的字符集,但实际上它只会影响后续新增的字段不会改表已有字段的字符集。如果想修改整张表所有字段的字符集建议使用ALTER TABLE tbl\_name CONVERT TO CHARACTER SET charset\_name; +* **Case**: + +```sql +ALTER TABLE tbl_name CONVERT TO CHARACTER SET charset_name; +``` +## 同一张表的多条ALTER请求建议合为一条 + +* **Item**:ALT.002 +* **Severity**:L2 +* 
**Content**:每次表结构变更对线上服务都会产生影响,即使是能够通过在线工具进行调整也请尽量通过合并ALTER请求的试减少操作次数。 +* **Case**: + +```sql +ALTER TABLE tbl ADD COLUMN col int, ADD INDEX idx_col (`col`); +``` +## 删除列为高危操作,操作前请注意检查业务逻辑是否还有依赖 + +* **Item**:ALT.003 +* **Severity**:L0 +* **Content**:如业务逻辑依赖未完全消除,列被删除后可能导致数据无法写入或无法查询到已删除列数据导致程序异常的情况。这种情况下即使通过备份数据回滚也会丢失用户请求写入的数据。 +* **Case**: + +```sql +ALTER TABLE tbl DROP COLUMN col; +``` +## 删除主键和外键为高危操作,操作前请与DBA确认影响 + +* **Item**:ALT.004 +* **Severity**:L0 +* **Content**:主键和外键为关系型数据库中两种重要约束,删除已有约束会打破已有业务逻辑,操作前请业务开发与DBA确认影响,三思而行。 +* **Case**: + +```sql +ALTER TABLE tbl DROP PRIMARY KEY; +``` +## 不建议使用前项通配符查找 + +* **Item**:ARG.001 +* **Severity**:L4 +* **Content**:例如“%foo”,查询参数有一个前项通配符的情况无法使用已有索引。 +* **Case**: + +```sql +select c1,c2,c3 from tbl where name like '%foo' +``` +## 没有通配符的LIKE查询 + +* **Item**:ARG.002 +* **Severity**:L1 +* **Content**:不包含通配符的LIKE查询可能存在逻辑错误,因为逻辑上它与等值查询相同。 +* **Case**: + +```sql +select c1,c2,c3 from tbl where name like 'foo' +``` +## 参数比较包含隐式转换,无法使用索引 + +* **Item**:ARG.003 +* **Severity**:L4 +* **Content**:隐式类型转换有无法命中索引的风险,在高并发、大数据量的情况下,命不中索引带来的后果非常严重。 +* **Case**: + +```sql +SELECT * FROM sakila.film WHERE length >= '60'; +``` +## IN (NULL)/NOT IN (NULL)永远非真 + +* **Item**:ARG.004 +* **Severity**:L4 +* **Content**:正确的作法是col IN ('val1', 'val2', 'val3') OR col IS NULL +* **Case**: + +```sql +SELECT * FROM sakila.film WHERE length >= '60'; +``` +## IN要慎用,元素过多会导致全表扫描 + +* **Item**:ARG.005 +* **Severity**:L1 +* **Content**: 如:select id from t where num in(1,2,3)对于连续的数值,能用BETWEEN就不要用IN了:select id from t where num between 1 and 3。而当IN值过多时MySQL也可能会进入全表扫描导致性能急剧下降。 +* **Case**: + +```sql +select id from t where num in(1,2,3) +``` +## 应尽量避免在WHERE子句中对字段进行NULL值判断 + +* **Item**:ARG.006 +* **Severity**:L1 +* **Content**:使用IS NULL或IS NOT NULL将可能导致引擎放弃使用索引而进行全表扫描,如:select id from t where num is null;可以在num上设置默认值0,确保表中num列没有null值,然后这样查询: select id from t where num=0; +* **Case**: + +```sql +select id from t where num is null +``` +## 避免使用模式匹配 + +* 
**Item**:ARG.007 +* **Severity**:L3 +* **Content**:性能问题是使用模式匹配操作符的最大缺点。使用LIKE或正则表达式进行模式匹配进行查询的另一个问题,是可能会返回意料之外的结果。最好的方案就是使用特殊的搜索引擎技术来替代SQL,比如Apache Lucene。另一个可选方案是将结果保存起来从而减少重复的搜索开销。如果一定要使用SQL,请考虑在MySQL中使用像FULLTEXT索引这样的第三方扩展。但更广泛地说,您不一定要使用SQL来解决所有问题。 +* **Case**: + +```sql +select c_id,c2,c3 from tbl where c2 like 'test%' +``` +## OR查询索引列时请尽量使用IN谓词 + +* **Item**:ARG.008 +* **Severity**:L1 +* **Content**:IN-list谓词可以用于索引检索,并且优化器可以对IN-list进行排序,以匹配索引的排序序列,从而获得更有效的检索。请注意,IN-list必须只包含常量,或在查询块执行期间保持常量的值,例如外引用。 +* **Case**: + +```sql +SELECT c1,c2,c3 FROM tbl WHERE c1 = 14 OR c1 = 17 +``` +## 引号中的字符串开头或结尾包含空格 + +* **Item**:ARG.009 +* **Severity**:L1 +* **Content**:如果VARCHAR列的前后存在空格将可能引起逻辑问题,如在MySQL 5.5中'a'和'a '可能会在查询中被认为是相同的值。 +* **Case**: + +```sql +SELECT 'abc ' +``` +## 不要使用hint,如sql\_no\_cache,force index,ignore key,straight join等 + +* **Item**:ARG.010 +* **Severity**:L1 +* **Content**:hint是用来强制SQL按照某个执行计划来执行,但随着数据量变化我们无法保证自己当初的预判是正确的。 +* **Case**: + +```sql +SELECT 'abc ' +``` +## 不要使用负向查询,如:NOT IN/NOT LIKE + +* **Item**:ARG.011 +* **Severity**:L3 +* **Content**:请尽量不要使用负向查询,这将导致全表扫描,对查询性能影响较大。 +* **Case**: + +```sql +select id from t where num not in(1,2,3); +``` +## 最外层SELECT未指定WHERE条件 + +* **Item**:CLA.001 +* **Severity**:L4 +* **Content**:SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 +* **Case**: + +```sql +select id from tbl +``` +## 不建议使用ORDER BY RAND() + +* **Item**:CLA.002 +* **Severity**:L3 +* **Content**:ORDER BY RAND()是从结果集中检索随机行的一种非常低效的方法,因为它会对整个结果进行排序并丢弃其大部分数据。 +* **Case**: + +```sql +select name from tbl where id < 1000 order by rand(number) +``` +## 不建议使用带OFFSET的LIMIT查询 + +* **Item**:CLA.003 +* **Severity**:L2 +* **Content**:使用LIMIT和OFFSET对结果集分页的复杂度是O(n^2),并且会随着数据增大而导致性能问题。采用“书签”扫描的方法实现分页效率更高。 +* **Case**: + +```sql +select c1,c2 from tbl where name=xx order by number limit 1 offset 20 +``` +## 不建议对常量进行GROUP BY + +* **Item**:CLA.004 +* **Severity**:L2 +* **Content**:GROUP BY 1 表示按第一列进行GROUP 
BY。如果在GROUP BY子句中使用数字,而不是表达式或列名称,当查询列顺序改变时,可能会导致问题。 +* **Case**: + +```sql +select col1,col2 from tbl group by 1 +``` +## ORDER BY常数列没有任何意义 + +* **Item**:CLA.005 +* **Severity**:L2 +* **Content**:SQL逻辑上可能存在错误; 最多只是一个无用的操作,不会更改查询结果。 +* **Case**: + +```sql +select id from test where id=1 order by id +``` +## 在不同的表中GROUP BY或ORDER BY + +* **Item**:CLA.006 +* **Severity**:L4 +* **Content**:这将强制使用临时表和filesort,可能产生巨大性能隐患,并且可能消耗大量内存和磁盘上的临时空间。 +* **Case**: + +```sql +select tb1.col, tb2.col from tb1, tb2 where id=1 group by tb1.col, tb2.col +``` +## ORDER BY语句对多个不同条件使用不同方向的排序无法使用索引 + +* **Item**:CLA.007 +* **Severity**:L2 +* **Content**:ORDER BY子句中的所有表达式必须按统一的ASC或DESC方向排序,以便利用索引。 +* **Case**: + +```sql +select c1,c2,c3 from t1 where c1='foo' order by c2 desc, c3 asc +``` +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item**:CLA.008 +* **Severity**:L2 +* **Content**:默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 +* **Case**: + +```sql +select c1,c2,c3 from t1 where c1='foo' group by c2 +``` +## ORDER BY的条件为表达式 + +* **Item**:CLA.009 +* **Severity**:L2 +* **Content**:当ORDER BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。 +* **Case**: + +```sql +select description from film where title ='ACADEMY DINOSAUR' order by length-language_id; +``` +## GROUP BY的条件为表达式 + +* **Item**:CLA.010 +* **Severity**:L2 +* **Content**:当GROUP BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。 +* **Case**: + +```sql +select description from film where title ='ACADEMY DINOSAUR' GROUP BY length-language_id; +``` +## 建议为表添加注释 + +* **Item**:CLA.011 +* **Severity**:L1 +* **Content**:为表添加注释能够使得表的意义更明确,从而为日后的维护带来极大的便利。 +* **Case**: + +```sql +CREATE TABLE `test1` (`ID` bigint(20) NOT NULL AUTO_INCREMENT,`c1` varchar(128) DEFAULT NULL,PRIMARY KEY (`ID`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 +``` +## 将复杂的裹脚布式查询分解成几个简单的查询 + +* **Item**:CLA.012 +* **Severity**:L2 +* 
**Content**:SQL是一门极具表现力的语言,您可以在单个SQL查询或者单条语句中完成很多事情。但这并不意味着必须强制只使用一行代码,或者认为使用一行代码就搞定每个任务是个好主意。通过一个查询来获得所有结果的常见后果是得到了一个笛卡儿积。当查询中的两张表之间没有条件限制它们的关系时,就会发生这种情况。没有对应的限制而直接使用两张表进行联结查询,就会得到第一张表中的每一行和第二张表中的每一行的一个组合。每一个这样的组合就会成为结果集中的一行,最终您就会得到一个行数很多的结果集。重要的是要考虑这些查询很难编写、难以修改和难以调试。数据库查询请求的日益增加应该是预料之中的事。经理们想要更复杂的报告以及在用户界面上添加更多的字段。如果您的设计很复杂,并且是一个单一查询,要扩展它们就会很费时费力。不论对您还是项目来说,时间花在这些事情上面不值得。将复杂的意大利面条式查询分解成几个简单的查询。当您拆分一个复杂的SQL查询时,得到的结果可能是很多类似的查询,可能仅仅在数据类型上有所不同。编写所有的这些查询是很乏味的,因此,最好能够有个程序自动生成这些代码。SQL代码生成是一个很好的应用。尽管SQL支持用一行代码解决复杂的问题,但也别做不切实际的事情。 +* **Case**: + +```sql +这是一条很长很长的SQL,案例略。 +``` +## 不建议使用HAVING子句 + +* **Item**:CLA.013 +* **Severity**:L3 +* **Content**:将查询的HAVING子句改写为WHERE中的查询条件,可以在查询处理期间使用索引。 +* **Case**: + +```sql +SELECT s.c_id,count(s.c_id) FROM s where c = test GROUP BY s.c_id HAVING s.c_id <> '1660' AND s.c_id <> '2' order by s.c_id +``` +## 删除全表时建议使用TRUNCATE替代DELETE + +* **Item**:CLA.014 +* **Severity**:L2 +* **Content**:删除全表时建议使用TRUNCATE替代DELETE +* **Case**: + +```sql +delete from tbl +``` +## UPDATE未指定WHERE条件 + +* **Item**:CLA.015 +* **Severity**:L4 +* **Content**:UPDATE不指定WHERE条件一般是致命的,请您三思后行 +* **Case**: + +```sql +update tbl set col=1 +``` +## 不要UPDATE主键 + +* **Item**:CLA.016 +* **Severity**:L2 +* **Content**:主键是数据表中记录的唯一标识符,不建议频繁更新主键列,这将影响元数据统计信息进而影响正常的查询。 +* **Case**: + +```sql +update tbl set col=1 +``` +## 不建议使用存储过程、视图、触发器、临时表等 + +* **Item**:CLA.017 +* **Severity**:L2 +* **Content**:这些功能的使用在一定程度上会使得程序难以调试和拓展,更没有移植性,且会极大的增加出现BUG的概率。 +* **Case**: + +```sql +CREATE VIEW v_today (today) AS SELECT CURRENT_DATE; +``` +## 不建议使用SELECT \* 类型查询 + +* **Item**:COL.001 +* **Severity**:L1 +* **Content**:当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 +* **Case**: + +```sql +select * from tbl where id=1 +``` +## INSERT未指定列名 + +* **Item**:COL.002 +* **Severity**:L2 +* **Content**:当表结构发生变更,如果INSERT或REPLACE请求不明确指定列名,请求的结果将会与预想的不同; 建议使用“INSERT INTO tbl(col1,col2)VALUES ...”代替。 +* **Case**: + +```sql +insert into tbl values(1,'name') +``` +## 建议修改自增ID为无符号类型 + +* 
**Item**:COL.003 +* **Severity**:L2 +* **Content**:建议修改自增ID为无符号类型 +* **Case**: + +```sql +create table test(`id` int(11) NOT NULL AUTO_INCREMENT) +``` +## 请为列添加默认值 + +* **Item**:COL.004 +* **Severity**:L1 +* **Content**:请为列添加默认值,如果是ALTER操作,请不要忘记将原字段的默认值写上。字段无默认值,当表较大时无法在线变更表结构。 +* **Case**: + +```sql +CREATE TABLE tbl (col int) ENGINE=InnoDB; +``` +## 列未添加注释 + +* **Item**:COL.005 +* **Severity**:L1 +* **Content**:建议对表中每个列添加注释,来明确每个列在表中的含义及作用。 +* **Case**: + +```sql +CREATE TABLE tbl (col int) ENGINE=InnoDB; +``` +## 表中包含有太多的列 + +* **Item**:COL.006 +* **Severity**:L3 +* **Content**:表中包含有太多的列 +* **Case**: + +```sql +CREATE TABLE tbl ( cols ....); +``` +## 可使用VARCHAR代替CHAR,VARBINARY代替BINARY + +* **Item**:COL.008 +* **Severity**:L1 +* **Content**:为首先变长字段存储空间小,可以节省存储空间。其次对于查询来说,在一个相对较小的字段内搜索效率显然要高些。 +* **Case**: + +```sql +create table t1(id int,name char(20),last_time date) +``` +## 建议使用精确的数据类型 + +* **Item**:COL.009 +* **Severity**:L2 +* **Content**:实际上,任何使用FLOAT、REAL或DOUBLE PRECISION数据类型的设计都有可能是反模式。大多数应用程序使用的浮点数的取值范围并不需要达到IEEE 754标准所定义的最大/最小区间。在计算总量时,非精确浮点数所积累的影响是严重的。使用SQL中的NUMERIC或DECIMAL类型来代替FLOAT及其类似的数据类型进行固定精度的小数存储。这些数据类型精确地根据您定义这一列时指定的精度来存储数据。尽可能不要使用浮点数。 +* **Case**: + +```sql +CREATE TABLE tab2 (p_id BIGINT UNSIGNED NOT NULL,a_id BIGINT UNSIGNED NOT NULL,hours float not null,PRIMARY KEY (p_id, a_id)) +``` +## 不建议使用ENUM数据类型 + +* **Item**:COL.010 +* **Severity**:L2 +* **Content**:ENUM定义了列中值的类型,使用字符串表示ENUM里的值时,实际存储在列中的数据是这些值在定义时的序数。因此,这列的数据是字节对齐的,当您进行一次排序查询时,结果是按照实际存储的序数值排序的,而不是按字符串值的字母顺序排序的。这可能不是您所希望的。没有什么语法支持从ENUM或者check约束中添加或删除一个值;您只能使用一个新的集合重新定义这一列。如果您打算废弃一个选项,您可能会为历史数据而烦恼。作为一种策略,改变元数据——也就是说,改变表和列的定义——应该是不常见的,并且要注意测试和质量保证。有一个更好的解决方案来约束一列中的可选值:创建一张检查表,每一行包含一个允许在列中出现的候选值;然后在引用新表的旧表上声明一个外键约束。 +* **Case**: + +```sql +create table tab1(status ENUM('new','in progress','fixed')) +``` +## 当需要唯一约束时才使用NULL,仅当列不能有缺失值时才使用NOT NULL + +* **Item**:COL.011 +* **Severity**:L0 +* 
**Content**:NULL和0是不同的,10乘以NULL还是NULL。NULL和空字符串是不一样的。将一个字符串和标准SQL中的NULL联合起来的结果还是NULL。NULL和FALSE也是不同的。AND、OR和NOT这三个布尔操作如果涉及NULL,其结果也让很多人感到困惑。当您将一列声明为NOT NULL时,也就是说这列中的每一个值都必须存在且是有意义的。使用NULL来表示任意类型不存在的空值。 当您将一列声明为NOT NULL时,也就是说这列中的每一个值都必须存在且是有意义的。 +* **Case**: + +```sql +select c1,c2,c3 from tbl where c4 is null or c4 <> 1 +``` +## BLOB和TEXT类型的字段不可设置为NULL + +* **Item**:COL.012 +* **Severity**:L5 +* **Content**:BLOB和TEXT类型的字段不可设置为NULL +* **Case**: + +```sql +CREATE TABLE `tbl` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` longblob, PRIMARY KEY (`id`)); +``` +## TIMESTAMP类型未设置默认值 + +* **Item**:COL.013 +* **Severity**:L4 +* **Content**:TIMESTAMP类型未设置默认值 +* **Case**: + +```sql +CREATE TABLE tbl( `id` bigint not null, `create_time` timestamp); +``` +## 为列指定了字符集 + +* **Item**:COL.014 +* **Severity**:L5 +* **Content**:建议列与表使用同一个字符集,不要单独指定列的字符集。 +* **Case**: + +```sql +CREATE TABLE `tb2` ( `id` int(11) DEFAULT NULL, `col` char(10) CHARACTER SET utf8 DEFAULT NULL) +``` +## BLOB类型的字段不可指定默认值 + +* **Item**:COL.015 +* **Severity**:L4 +* **Content**:BLOB类型的字段不可指定默认值 +* **Case**: + +```sql +CREATE TABLE `tbl` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` blob NOT NULL DEFAULT '', PRIMARY KEY (`id`)); +``` +## 整型定义建议采用INT(10)或BIGINT(20) + +* **Item**:COL.016 +* **Severity**:L1 +* **Content**:INT(M) 在 integer 数据类型中,M 表示最大显示宽度。 在 INT(M) 中,M 的值跟 INT(M) 所占多少存储空间并无任何关系。 INT(3)、INT(4)、INT(8) 在磁盘上都是占用 4 bytes 的存储空间。 +* **Case**: + +```sql +CREATE TABLE tab (a INT(1)); +``` +## varchar定义长度过长 + +* **Item**:COL.017 +* **Severity**:L2 +* **Content**:varchar 是可变长字符串,不预先分配存储空间,长度不要超过1024,如果存储长度过长MySQL将定义字段类型为text,独立出来一张表,用主键来对应,避免影响其它字段索引效率。 +* **Case**: + +```sql +CREATE TABLE tab (a varchar(3500)); +``` +## 消除不必要的DISTINCT条件 + +* **Item**:DIS.001 +* **Severity**:L1 +* **Content**:太多DISTINCT条件是复杂的裹脚布式查询的症状。考虑将复杂查询分解成许多简单的查询,并减少DISTINCT条件的数量。如果主键列是列的结果集的一部分,则DISTINCT条件可能没有影响。 +* **Case**: + +```sql +SELECT DISTINCT c.c_id,count(DISTINCT c.c_name),count(DISTINCT c.c_e),count(DISTINCT 
c.c_n),count(DISTINCT c.c_me),c.c_d FROM (select distinct xing, name from B) as e WHERE e.country_id = c.country_id +``` +## COUNT(DISTINCT)多列时结果可能和你预想的不同 + +* **Item**:DIS.002 +* **Severity**:L3 +* **Content**:COUNT(DISTINCT col)计算该列除NULL之外的不重复行数,注意COUNT(DISTINCT col, col2)如果其中一列全为NULL那么即使另一列有不同的值,也返回0。 +* **Case**: + +```sql +SELECT COUNT(DISTINCT col, col2) FROM tbl; +``` +## DISTINCT \*对有主键的表没有意义 + +* **Item**:DIS.003 +* **Severity**:L3 +* **Content**:当表已经有主键时,对所有列进行DISTINCT的输出结果与不进行DISTINCT操作的结果相同,请不要画蛇添足。 +* **Case**: + +```sql +SELECT DISTINCT * FROM film; +``` +## 避免在WHERE条件中使用函数或其他运算符 + +* **Item**:FUN.001 +* **Severity**:L2 +* **Content**:虽然在SQL中使用函数可以简化很多复杂的查询,但使用了函数的查询无法利用表中已经建立的索引,该查询将会是全表扫描,性能较差。通常建议将列名写在比较运算符左侧,将查询过滤条件放在比较运算符右侧。 +* **Case**: + +```sql +select id from t where substring(name,1,3)='abc' +``` +## 指定了WHERE条件或非MyISAM引擎时使用COUNT(\*)操作性能不佳 + +* **Item**:FUN.002 +* **Severity**:L1 +* **Content**:COUNT(\*)的作用是统计表行数,COUNT(COL)的作用是统计指定列非NULL的行数。MyISAM表对于COUNT(\*)统计全表行数进行了特殊的优化,通常情况下非常快。但对于非MyISAM表或指定了某些WHERE条件,COUNT(\*)操作需要扫描大量的行才能获取精确的结果,性能也因此不佳。有时候某些业务场景并不需要完全精确的COUNT值,此时可以用近似值来代替。EXPLAIN出来的优化器估算的行数就是一个不错的近似值,执行EXPLAIN并不需要真正去执行查询,所以成本很低。 +* **Case**: + +```sql +SELECT c3, COUNT(*) AS accounts FROM tab where c2 < 10000 GROUP BY c3 ORDER BY num +``` +## 使用了合并为可空列的字符串连接 + +* **Item**:FUN.003 +* **Severity**:L3 +* **Content**:在一些查询请求中,您需要强制让某一列或者某个表达式返回非NULL的值,从而让查询逻辑变得更简单,担忧不想将这个值存下来。使用COALESCE()函数来构造连接的表达式,这样即使是空值列也不会使整表达式变为NULL。 +* **Case**: + +```sql +select c1 || coalesce(' ' || c2 || ' ', ' ') || c3 as c from tbl +``` +## 不建议使用SYSDATE()函数 + +* **Item**:FUN.004 +* **Severity**:L4 +* **Content**:SYSDATE()函数可能导致主从数据不一致,请使用NOW()函数替代SYSDATE()。 +* **Case**: + +```sql +SELECT SYSDATE(); +``` +## 不建议使用COUNT(col)或COUNT(常量) + +* **Item**:FUN.005 +* **Severity**:L1 +* **Content**:不要使用COUNT(col)或COUNT(常量)来替代COUNT(\*),COUNT(\*)是SQL92定义的标准统计行数的方法,跟数据无关,跟NULL和非NULL也无关。 +* **Case**: + +```sql +SELECT COUNT(1) FROM tbl; +``` +## 使用SUM(COL)时需注意NPE问题 + +* 
**Item**:FUN.006 +* **Severity**:L1 +* **Content**:当某一列的值全是NULL时,COUNT(COL)的返回结果为0,但SUM(COL)的返回结果为NULL,因此使用SUM()时需注意NPE问题。可以使用如下方式来避免SUM的NPE问题: SELECT IF(ISNULL(SUM(COL)), 0, SUM(COL)) FROM tbl +* **Case**: + +```sql +SELECT SUM(COL) FROM tbl; +``` +## 不建议对等值查询列使用GROUP BY + +* **Item**:GRP.001 +* **Severity**:L2 +* **Content**:GROUP BY中的列在前面的WHERE条件中使用了等值查询,对这样的列进行GROUP BY意义不大。 +* **Case**: + +```sql +select film_id, title from film where release_year='2006' group by release_year +``` +## JOIN语句混用逗号和ANSI模式 + +* **Item**:JOI.001 +* **Severity**:L2 +* **Content**:表连接的时候混用逗号和ANSI JOIN不便于人类理解,并且MySQL不同版本的表连接行为和优先级均有所不同,当MySQL版本变化后可能会引入错误。 +* **Case**: + +```sql +select c1,c2,c3 from t1,t2 join t3 on t1.c1=t2.c1,t1.c3=t3,c1 where id>1000 +``` +## 同一张表被连接两次 + +* **Item**:JOI.002 +* **Severity**:L4 +* **Content**:相同的表在FROM子句中至少出现两次,可以简化为对该表的单次访问。 +* **Case**: + +```sql +select tb1.col from (tb1, tb2) join tb2 on tb1.id=tb.id where tb1.id=1 +``` +## OUTER JOIN失效 + +* **Item**:JOI.003 +* **Severity**:L4 +* **Content**:由于WHERE条件错误使得OUTER JOIN的外部表无数据返回,这会将查询隐式转换为 INNER JOIN 。如:select c from L left join R using(c) where L.a=5 and R.b=10。这种SQL逻辑上可能存在错误或程序员对OUTER JOIN如何工作存在误解,因为LEFT/RIGHT JOIN是LEFT/RIGHT OUTER JOIN的缩写。 +* **Case**: + +```sql +select c1,c2,c3 from t1 left outer join t2 using(c1) where t1.c2=2 and t2.c3=4 +``` +## 不建议使用排它JOIN + +* **Item**:JOI.004 +* **Severity**:L4 +* **Content**:只在右侧表为NULL的带WHERE子句的LEFT OUTER JOIN语句,有可能是在WHERE子句中使用错误的列,如:“... 
FROM l LEFT OUTER JOIN r ON l.l = r.r WHERE r.z IS NULL”,这个查询正确的逻辑可能是 WHERE r.r IS NULL。 +* **Case**: + +```sql +select c1,c2,c3 from t1 left outer join t2 on t1.c1=t2.c1 where t2.c2 is null +``` +## 减少JOIN的数量 + +* **Item**:JOI.005 +* **Severity**:L2 +* **Content**:太多的JOIN是复杂的裹脚布式查询的症状。考虑将复杂查询分解成许多简单的查询,并减少JOIN的数量。 +* **Case**: + +```sql +select bp1.p_id, b1.d_d as l, b1.b_id from b1 join bp1 on (b1.b_id = bp1.b_id) left outer join (b1 as b2 join bp2 on (b2.b_id = bp2.b_id)) on (bp1.p_id = bp2.p_id ) join bp21 on (b1.b_id = bp1.b_id) join bp31 on (b1.b_id = bp1.b_id) join bp41 on (b1.b_id = bp1.b_id) where b2.b_id = 0 +``` +## 将嵌套查询重写为JOIN通常会导致更高效的执行和更有效的优化 + +* **Item**:JOI.006 +* **Severity**:L4 +* **Content**:一般来说,非嵌套子查询总是用于关联子查询,最多是来自FROM子句中的一个表,这些子查询用于ANY、ALL和EXISTS的谓词。如果可以根据查询语义决定子查询最多返回一个行,那么一个不相关的子查询或来自FROM子句中的多个表的子查询就被压平了。 +* **Case**: + +```sql +SELECT s,p,d FROM tbl WHERE p.p_id = (SELECT s.p_id FROM tbl WHERE s.c_id = 100996 AND s.q = 1 ) +``` +## 不建议使用联表更新 + +* **Item**:JOI.007 +* **Severity**:L4 +* **Content**:当需要同时更新多张表时建议使用简单SQL,一条SQL只更新一张表,尽量不要将多张表的更新在同一条SQL中完成。 +* **Case**: + +```sql +UPDATE users u LEFT JOIN hobby h ON u.id = h.uid SET u.name = 'pianoboy' WHERE h.hobby = 'piano'; +``` +## 不要使用跨DB的Join查询 + +* **Item**:JOI.008 +* **Severity**:L4 +* **Content**:一般来说,跨DB的Join查询意味着查询语句跨越了两个不同的子系统,这可能意味着系统耦合度过高或库表结构设计不合理。 +* **Case**: + +```sql +SELECT s,p,d FROM tbl WHERE p.p_id = (SELECT s.p_id FROM tbl WHERE s.c_id = 100996 AND s.q = 1 ) +``` +## 建议使用自增列作为主键,如使用联合自增主键时请将自增键作为第一列 + +* **Item**:KEY.001 +* **Severity**:L2 +* **Content**:建议使用自增列作为主键,如使用联合自增主键时请将自增键作为第一列 +* **Case**: + +```sql +create table test(`id` int(11) NOT NULL PRIMARY KEY (`id`)) +``` +## 无主键或唯一键,无法在线变更表结构 + +* **Item**:KEY.002 +* **Severity**:L4 +* **Content**:无主键或唯一键,无法在线变更表结构 +* **Case**: + +```sql +create table test(col varchar(5000)) +``` +## 避免外键等递归关系 + +* **Item**:KEY.003 +* **Severity**:L4 +* 
**Content**:存在递归关系的数据很常见,数据常会像树或者以层级方式组织。然而,创建一个外键约束来强制执行同一表中两列之间的关系,会导致笨拙的查询。树的每一层对应着另一个连接。您将需要发出递归查询,以获得节点的所有后代或所有祖先。解决方案是构造一个附加的闭包表。它记录了树中所有节点间的关系,而不仅仅是那些具有直接的父子关系。您也可以比较不同层次的数据设计:闭包表,路径枚举,嵌套集。然后根据应用程序的需要选择一个。 +* **Case**: + +```sql +CREATE TABLE tab2 (p_id BIGINT UNSIGNED NOT NULL,a_id BIGINT UNSIGNED NOT NULL,PRIMARY KEY (p_id, a_id),FOREIGN KEY (p_id) REFERENCES tab1(p_id),FOREIGN KEY (a_id) REFERENCES tab3(a_id)) +``` +## 提醒:请将索引属性顺序与查询对齐 + +* **Item**:KEY.004 +* **Severity**:L0 +* **Content**:如果为列创建复合索引,请确保查询属性与索引属性的顺序相同,以便DBMS在处理查询时使用索引。如果查询和索引属性顺序没有对齐,那么DBMS可能无法在查询处理期间使用索引。 +* **Case**: + +```sql +create index idx1 on tbl (last_name,first_name) +``` +## 表建的索引过多 + +* **Item**:KEY.005 +* **Severity**:L2 +* **Content**:表建的索引过多 +* **Case**: + +```sql +CREATE TABLE tbl ( a int, b int, c int, KEY idx_a (`a`),KEY idx_b(`b`),KEY idx_c(`c`)); +``` +## 主键中的列过多 + +* **Item**:KEY.006 +* **Severity**:L4 +* **Content**:主键中的列过多 +* **Case**: + +```sql +CREATE TABLE tbl ( a int, b int, c int, PRIMARY KEY(`a`,`b`,`c`)); +``` +## 未指定主键或主键非int或bigint + +* **Item**:KEY.007 +* **Severity**:L4 +* **Content**:未指定主键或主键非int或bigint,建议将主键设置为int unsigned或bigint unsigned。 +* **Case**: + +```sql +CREATE TABLE tbl (a int); +``` +## ORDER BY多个列但排序方向不同时可能无法使用索引 + +* **Item**:KEY.008 +* **Severity**:L4 +* **Content**:在MySQL 8.0之前当ORDER BY多个列指定的排序方向不同时将无法使用已经建立的索引。 +* **Case**: + +```sql +SELECT * FROM tbl ORDER BY a DESC, b ASC; +``` +## 添加唯一索引前请注意检查数据唯一性 + +* **Item**:KEY.009 +* **Severity**:L0 +* **Content**:请提前检查添加唯一索引列的数据唯一性,如果数据不唯一在线表结构调整时将有可能自动将重复列删除,这有可能导致数据丢失。 +* **Case**: + +```sql +CREATE UNIQUE INDEX part_of_name ON customer (name(10)); +``` +## SQL\_CALC\_FOUND\_ROWS效率低下 + +* **Item**:KWR.001 +* **Severity**:L2 +* **Content**:因为SQL\_CALC\_FOUND\_ROWS不能很好地扩展,所以可能导致性能问题; 建议业务使用其他策略来替代SQL\_CALC\_FOUND\_ROWS提供的计数功能,比如:分页结果展示等。 +* **Case**: + +```sql +select SQL_CALC_FOUND_ROWS col from tbl where id>1000 +``` +## 不建议使用MySQL关键字做列名或表名 + +* **Item**:KWR.002 +* **Severity**:L2 +* 
**Content**:当使用关键字做为列名或表名时程序需要对列名和表名进行转义,如果疏忽将会导致请求无法执行。 +* **Case**: + +```sql +CREATE TABLE tbl ( `select` int ) +``` +## 不建议使用复数做列名或表名 + +* **Item**:KWR.003 +* **Severity**:L1 +* **Content**:表名应该仅仅表示表里面的实体内容,不应该表示实体数量,对应于 DO 类名也是单数形式,符合表达习惯。 +* **Case**: + +```sql +CREATE TABLE tbl ( `books` int ) +``` +## INSERT INTO xx SELECT加锁粒度较大请谨慎 + +* **Item**:LCK.001 +* **Severity**:L3 +* **Content**:INSERT INTO xx SELECT加锁粒度较大请谨慎 +* **Case**: + +```sql +INSERT INTO tbl SELECT * FROM tbl2; +``` +## 请慎用INSERT ON DUPLICATE KEY UPDATE + +* **Item**:LCK.002 +* **Severity**:L3 +* **Content**:当主键为自增键时使用INSERT ON DUPLICATE KEY UPDATE可能会导致主键出现大量不连续快速增长,导致主键快速溢出无法继续写入。极端情况下还有可能导致主从数据不一致。 +* **Case**: + +```sql +INSERT INTO t1(a,b,c) VALUES (1,2,3) ON DUPLICATE KEY UPDATE c=c+1; +``` +## 用字符类型存储IP地址 + +* **Item**:LIT.001 +* **Severity**:L2 +* **Content**:字符串字面上看起来像IP地址,但不是INET\_ATON()的参数,表示数据被存储为字符而不是整数。将IP地址存储为整数更为有效。 +* **Case**: + +```sql +insert into tbl (IP,name) values('10.20.306.122','test') +``` +## 日期/时间未使用引号括起 + +* **Item**:LIT.002 +* **Severity**:L4 +* **Content**:诸如“WHERE col <2010-02-12”之类的查询是有效的SQL,但可能是一个错误,因为它将被解释为“WHERE col <1996”; 日期/时间文字应该加引号。 +* **Case**: + +```sql +select col1,col2 from tbl where time < 2018-01-10 +``` +## 一列中存储一系列相关数据的集合 + +* **Item**:LIT.003 +* **Severity**:L3 +* **Content**:将ID存储为一个列表,作为VARCHAR/TEXT列,这样可能导致性能和数据完整性问题。查询这样的列需要使用模式匹配的表达式。使用逗号分隔的列表来做多表联结查询定位一行数据是极不优雅和耗时的。这将使验证ID更加困难。考虑一下,列表最多支持存放多少数据呢?将ID存储在一张单独的表中,代替使用多值属性,从而每个单独的属性值都可以占据一行。这样交叉表实现了两张表之间的多对多关系。这将更好地简化查询,也更有效地验证ID。 +* **Case**: + +```sql +select c1,c2,c3,c4 from tab1 where col_id REGEXP '[[:<:]]12[[:>:]]' +``` +## 请使用分号或已设定的DELIMITER结尾 + +* **Item**:LIT.004 +* **Severity**:L1 +* **Content**:USE database, SHOW DATABASES等命令也需要使用分号或已设定的DELIMITER结尾。 +* **Case**: + +```sql +USE db +``` +## 非确定性的GROUP BY + +* **Item**:RES.001 +* **Severity**:L4 +* **Content**:SQL返回的列既不在聚合函数中也不是GROUP BY表达式的列中,因此这些值的结果将是非确定性的。如:select a, b, c from tbl where foo="bar" group by a,该SQL返回的结果就是不确定的。 +* 
**Case**: + +```sql +select c1,c2,c3 from t1 where c2='foo' group by c2 +``` +## 未使用ORDER BY的LIMIT查询 + +* **Item**:RES.002 +* **Severity**:L4 +* **Content**:没有ORDER BY的LIMIT会导致非确定性的结果,这取决于查询执行计划。 +* **Case**: + +```sql +select col1,col2 from tbl where name=xx limit 10 +``` +## UPDATE/DELETE操作使用了LIMIT条件 + +* **Item**:RES.003 +* **Severity**:L4 +* **Content**:UPDATE/DELETE操作使用LIMIT条件和不添加WHERE条件一样危险,它可能会导致主从数据不一致或从库同步中断。 +* **Case**: + +```sql +UPDATE film SET length = 120 WHERE title = 'abc' LIMIT 1; +``` +## UPDATE/DELETE操作指定了ORDER BY条件 + +* **Item**:RES.004 +* **Severity**:L4 +* **Content**:UPDATE/DELETE操作不要指定ORDER BY条件。 +* **Case**: + +```sql +UPDATE film SET length = 120 WHERE title = 'abc' ORDER BY title +``` +## UPDATE可能存在逻辑错误,导致数据损坏 + +* **Item**:RES.005 +* **Severity**:L4 +* **Content**: +* **Case**: + +```sql +update tbl set col = 1 and cl = 2 where col=3; +``` +## 永远不真的比较条件 + +* **Item**:RES.006 +* **Severity**:L4 +* **Content**:查询条件永远非真,这将导致查询无匹配到的结果。 +* **Case**: + +```sql +select * from tbl where 1 != 1; +``` +## 永远为真的比较条件 + +* **Item**:RES.007 +* **Severity**:L4 +* **Content**:查询条件永远为真,这将导致WHERE条件失效进行全表查询。 +* **Case**: + +```sql +select * from tbl where 1 = 1; +``` +## 不建议使用LOAD DATA/SELECT ... 
INTO OUTFILE + +* **Item**:RES.008 +* **Severity**:L2 +* **Content**:SELECT INTO OUTFILE需要授予FILE权限,这通常会引入安全问题。LOAD DATA虽然可以提高数据导入速度,但同时也可能导致从库同步延迟过大。 +* **Case**: + +```sql +LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table; +``` +## 请谨慎使用TRUNCATE操作 + +* **Item**:SEC.001 +* **Severity**:L0 +* **Content**:一般来说想清空一张表最快速的做法就是使用TRUNCATE TABLE tbl\_name;语句。但TRUNCATE操作也并非是毫无代价的,TRUNCATE TABLE无法返回被删除的准确行数,如果需要返回被删除的行数建议使用DELETE语法。TRUNCATE操作还会重置AUTO\_INCREMENT,如果不想重置该值建议使用DELETE FROM tbl\_name WHERE 1;替代。TRUNCATE操作会对数据字典添加元数据锁(MDL),当一次需要TRUNCATE很多表时会影响整个实例的所有请求,因此如果要TRUNCATE多个表建议用DROP+CREATE的方式以减少锁时长。 +* **Case**: + +```sql +TRUNCATE TABLE tbl_name +``` +## 不使用明文存储密码 + +* **Item**:SEC.002 +* **Severity**:L0 +* **Content**:使用明文存储密码或者使用明文在网络上传递密码都是不安全的。如果攻击者能够截获您用来插入密码的SQL语句,他们就能直接读到密码。另外,将用户输入的字符串以明文的形式插入到纯SQL语句中,也会让攻击者发现它。如果您能够读取密码,黑客也可以。解决方案是使用单向哈希函数对原始密码进行加密编码。哈希是指将输入字符串转化成另一个新的、不可识别的字符串的函数。对密码加密表达式加点随机串来防御“字典攻击”。不要将明文密码输入到SQL查询语句中。在应用程序代码中计算哈希串,只在SQL查询中使用哈希串。 +* **Case**: + +```sql +create table test(id int,name varchar(20) not null,password varchar(200)not null) +``` +## 使用DELETE/DROP/TRUNCATE等操作时注意备份 + +* **Item**:SEC.003 +* **Severity**:L0 +* **Content**:在执行高危操作之前对数据进行备份是十分有必要的。 +* **Case**: + +```sql +delete from table where col = 'condition' +``` +## '!=' 运算符是非标准的 + +* **Item**:STA.001 +* **Severity**:L0 +* **Content**:"<>"才是标准SQL中的不等于运算符。 +* **Case**: + +```sql +select col1,col2 from tbl where type!=0 +``` +## 库名或表名点后建议不要加空格 + +* **Item**:STA.002 +* **Severity**:L1 +* **Content**:当使用db.table或table.column格式访问表或字段时,请不要在点号后面添加空格,虽然这样语法正确。 +* **Case**: + +```sql +select col from sakila. 
film +``` +## 索引起名不规范 + +* **Item**:STA.003 +* **Severity**:L1 +* **Content**:建议普通二级索引以idx\_为前缀,唯一索引以uk\_为前缀。 +* **Case**: + +```sql +select col from now where type!=0 +``` +## 起名时请不要使用字母、数字和下划线之外的字符 + +* **Item**:STA.004 +* **Severity**:L1 +* **Content**:以字母或下划线开头,名字只允许使用字母、数字和下划线。请统一大小写,不要使用驼峰命名法。不要在名字中出现连续下划线'\_\_',这样很难辨认。 +* **Case**: + +```sql +CREATE TABLE ` abc` (a int); +``` +## MySQL对子查询的优化效果不佳 + +* **Item**:SUB.001 +* **Severity**:L4 +* **Content**:MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。这可能会在 MySQL 5.6版本中得到改善, 但对于5.1及更早版本, 建议将该类查询分别重写为JOIN或LEFT OUTER JOIN。 +* **Case**: + +```sql +select col1,col2,col3 from table1 where col2 in(select col from table2) +``` +## 如果您不在乎重复的话,建议使用UNION ALL替代UNION + +* **Item**:SUB.002 +* **Severity**:L2 +* **Content**:与去除重复的UNION不同,UNION ALL允许重复元组。如果您不关心重复元组,那么使用UNION ALL将是一个更快的选项。 +* **Case**: + +```sql +select teacher_id as id,people_name as name from t1,t2 where t1.teacher_id=t2.people_id union select student_id as id,people_name as name from t1,t2 where t1.student_id=t2.people_id +``` +## 考虑使用EXISTS而不是DISTINCT子查询 + +* **Item**:SUB.003 +* **Severity**:L3 +* **Content**:DISTINCT关键字在对元组排序后删除重复。相反,考虑使用一个带有EXISTS关键字的子查询,您可以避免返回整个表。 +* **Case**: + +```sql +SELECT DISTINCT c.c_id, c.c_name FROM c,e WHERE e.c_id = c.c_id +``` +## 执行计划中嵌套连接深度过深 + +* **Item**:SUB.004 +* **Severity**:L3 +* **Content**:MySQL对子查询的优化效果不佳,MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。 +* **Case**: + +```sql +SELECT * from tb where id in (select id from (select id from tb)) +``` +## 子查询不支持LIMIT + +* **Item**:SUB.005 +* **Severity**:L8 +* **Content**:当前MySQL版本不支持在子查询中进行'LIMIT & IN/ALL/ANY/SOME'。 +* **Case**: + +```sql +SELECT * FROM staff WHERE name IN (SELECT NAME FROM customer ORDER BY name LIMIT 1) +``` +## 不建议在子查询中使用函数 + +* **Item**:SUB.006 +* **Severity**:L2 +* **Content**:MySQL将外部查询中的每一行作为依赖子查询执行子查询,如果在子查询中使用函数,即使是semi-join也很难进行高效的查询。可以将子查询重写为OUTER JOIN语句并用连接条件对数据进行过滤。 +* **Case**: + +```sql +SELECT * FROM staff WHERE name IN (SELECT max(NAME) 
FROM customer) +``` +## 不建议使用分区表 + +* **Item**:TBL.001 +* **Severity**:L4 +* **Content**:不建议使用分区表 +* **Case**: + +```sql +CREATE TABLE trb3(id INT, name VARCHAR(50), purchased DATE) PARTITION BY RANGE(YEAR(purchased)) (PARTITION p0 VALUES LESS THAN (1990), PARTITION p1 VALUES LESS THAN (1995), PARTITION p2 VALUES LESS THAN (2000), PARTITION p3 VALUES LESS THAN (2005) ); +``` +## 请为表选择合适的存储引擎 + +* **Item**:TBL.002 +* **Severity**:L4 +* **Content**:建表或修改表的存储引擎时建议使用推荐的存储引擎,如:innodb +* **Case**: + +```sql +create table test(`id` int(11) NOT NULL AUTO_INCREMENT) +``` +## 以DUAL命名的表在数据库中有特殊含义 + +* **Item**:TBL.003 +* **Severity**:L8 +* **Content**:DUAL表为虚拟表,不需要创建即可使用,也不建议服务以DUAL命名表。 +* **Case**: + +```sql +create table dual(id int, primary key (id)); +``` +## 表的初始AUTO\_INCREMENT值不为0 + +* **Item**:TBL.004 +* **Severity**:L2 +* **Content**:AUTO\_INCREMENT不为0会导致数据空洞。 +* **Case**: + +```sql +CREATE TABLE tbl (a int) AUTO_INCREMENT = 10; +``` +## 请使用推荐的字符集 + +* **Item**:TBL.005 +* **Severity**:L4 +* **Content**:表字符集只允许设置为utf8,utf8mb4 +* **Case**: + +```sql +CREATE TABLE tbl (a int) DEFAULT CHARSET = latin1; +``` diff --git a/advisor/testdata/TestListTestSQLs.golden b/advisor/testdata/TestListTestSQLs.golden new file mode 100644 index 00000000..4ac6be8c --- /dev/null +++ b/advisor/testdata/TestListTestSQLs.golden @@ -0,0 +1,80 @@ +SELECT * FROM film WHERE length = 86; +SELECT * FROM film WHERE length IS NULL; +SELECT * FROM film HAVING title = 'abc'; +SELECT * FROM sakila.film WHERE length >= 60; +SELECT * FROM sakila.film WHERE length >= '60'; +SELECT * FROM film WHERE length BETWEEN 60 AND 84; +SELECT * FROM film WHERE title LIKE 'AIR%'; +SELECT * FROM film WHERE title IS NOT NULL; +SELECT * FROM film WHERE length = 114 and title = 'ALABAMA DEVIL'; +SELECT * FROM film WHERE length > 100 and title = 'ALABAMA DEVIL'; +SELECT * FROM film WHERE length > 100 and language_id < 10 and title = 'xyz'; +SELECT * FROM film WHERE length > 100 and language_id < 10; +SELECT release_year, 
sum(length) FROM film WHERE length = 123 AND language_id = 1 GROUP BY release_year; +SELECT release_year, sum(length) FROM film WHERE length >= 123 GROUP BY release_year; +SELECT release_year, language_id, sum(length) FROM film GROUP BY release_year, language_id; +SELECT release_year, sum(length) FROM film WHERE length = 123 GROUP BY release_year,(length+language_id); +SELECT release_year, sum(film_id) FROM film GROUP BY release_year; +SELECT * FROM address GROUP BY address,district; +SELECT title FROM film WHERE ABS(language_id) = 3 GROUP BY title; +SELECT language_id FROM film WHERE length = 123 GROUP BY release_year ORDER BY language_id; +SELECT release_year FROM film WHERE length = 123 GROUP BY release_year ORDER BY release_year; +SELECT * FROM film WHERE length = 123 ORDER BY release_year ASC, language_id DESC; +SELECT release_year FROM film WHERE length = 123 GROUP BY release_year ORDER BY release_year LIMIT 10; +SELECT * FROM film WHERE length = 123 ORDER BY release_year LIMIT 10; +SELECT * FROM film ORDER BY release_year LIMIT 10; +SELECT * FROM film WHERE length > 100 ORDER BY length LIMIT 10; +SELECT * FROM film WHERE length < 100 ORDER BY length LIMIT 10; +SELECT * FROM customer WHERE address_id in (224,510) ORDER BY last_name; +SELECT * FROM film WHERE release_year = 2016 AND length != 1 ORDER BY title; +SELECT title FROM film WHERE release_year = 1995; +SELECT title, replacement_cost FROM film WHERE language_id = 5 AND length = 70; +SELECT title FROM film WHERE language_id > 5 AND length > 70; +SELECT * FROM film WHERE length = 100 and title = 'xyz' ORDER BY release_year; +SELECT * FROM film WHERE length > 100 and title = 'xyz' ORDER BY release_year; +SELECT * FROM film WHERE length > 100 ORDER BY release_year; +SELECT * FROM city a INNER JOIN country b ON a.country_id=b.country_id; +SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id; +SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id; +SELECT * FROM city a LEFT 
JOIN country b ON a.country_id=b.country_id WHERE b.last_update IS NULL; +SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id WHERE a.last_update IS NULL; +SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id UNION SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id; +SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id WHERE a.last_update IS NULL UNION SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id WHERE b.last_update IS NULL; +SELECT country_id, last_update FROM city NATURAL JOIN country; +SELECT country_id, last_update FROM city NATURAL LEFT JOIN country; +SELECT country_id, last_update FROM city NATURAL RIGHT JOIN country; +SELECT a.country_id, a.last_update FROM city a STRAIGHT_JOIN country b ON a.country_id=b.country_id; +SELECT d.deptno,d.dname,d.loc FROM scott.dept d WHERE d.deptno IN (SELECT e.deptno FROM scott.emp e); +SELECT visitor_id, url FROM (SELECT id FROM log WHERE ip="123.45.67.89" order by tsdesc limit 50, 10) I JOIN log ON (I.id=log.id) JOIN url ON (url.id=log.url_id) order by TS desc; +DELETE city, country FROM city INNER JOIN country using (country_id) WHERE city.city_id = 1; +DELETE city FROM city LEFT JOIN country ON city.country_id = country.country_id WHERE country.country IS NULL; +DELETE a1, a2 FROM city AS a1 INNER JOIN country AS a2 WHERE a1.country_id=a2.country_id; +DELETE FROM a1, a2 USING city AS a1 INNER JOIN country AS a2 WHERE a1.country_id=a2.country_id; +DELETE FROM film WHERE length > 100; +UPDATE city INNER JOIN country USING(country_id) SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.city_id=10; +UPDATE city INNER JOIN country ON city.country_id = country.country_id INNER JOIN address ON city.city_id = address.city_id SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.city_id=10; +UPDATE city, country SET 
city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.country_id = country.country_id AND city.city_id=10; +UPDATE film SET length = 10 WHERE language_id = 20; +INSERT INTO city (country_id) SELECT country_id FROM country; +INSERT INTO city (country_id) VALUES (1),(2),(3); +INSERT INTO city (country_id) VALUES (10); +INSERT INTO city (country_id) SELECT 10 FROM DUAL; +REPLACE INTO city (country_id) SELECT country_id FROM country; +REPLACE INTO city (country_id) VALUES (1),(2),(3); +REPLACE INTO city (country_id) VALUES (10); +REPLACE INTO city (country_id) SELECT 10 FROM DUAL; +SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film; +SELECT * FROM film WHERE language_id = (SELECT language_id FROM language LIMIT 1); +SELECT * FROM city i left JOIN country o ON i.city_id=o.country_id union SELECT * FROM city i right JOIN country o ON i.city_id=o.country_id; +SELECT * FROM (SELECT * FROM actor WHERE last_update='2006-02-15 04:34:33' and last_name='CHASE') t WHERE last_update='2006-02-15 04:34:33' and last_name='CHASE' GROUP BY first_name; +SELECT * FROM city i left JOIN country o ON i.city_id=o.country_id union SELECT * FROM city i right JOIN country o ON i.city_id=o.country_id; +SELECT * FROM city i left JOIN country o ON i.city_id=o.country_id WHERE o.country_id is null union SELECT * FROM city i right JOIN country o ON i.city_id=o.country_id WHERE i.city_id is null; +SELECT first_name,last_name,email FROM customer STRAIGHT_JOIN address ON customer.address_id=address.address_id; +SELECT 
ID,name FROM (SELECT address FROM customer_list WHERE SID=1 order by phone limit 50,10) a JOIN customer_list l ON (a.address=l.address) JOIN city c ON (c.city=l.city) order by phone desc; +SELECT * FROM film WHERE date(last_update)='2006-02-15'; +SELECT last_update FROM film GROUP BY date(last_update); +SELECT last_update FROM film order by date(last_update); +SELECT description FROM film WHERE description IN('NEWS','asd') GROUP BY description; +alter table address add index idx_city_id(city_id); +alter table inventory add index `idx_store_film` (`store_id`,`film_id`); +alter table inventory add index `idx_store_film` (`store_id`,`film_id`),add index `idx_store_film` (`store_id`,`film_id`),add index `idx_store_film` (`store_id`,`film_id`); diff --git a/advisor/testdata/TestMergeConflictHeuristicRules.golden b/advisor/testdata/TestMergeConflictHeuristicRules.golden new file mode 100644 index 00000000..90232655 --- /dev/null +++ b/advisor/testdata/TestMergeConflictHeuristicRules.golden @@ -0,0 +1,109 @@ +advisor.Rule{Item:"ALI.001", Severity:"L0", Summary:"建议使用AS关键字显示声明一个别名", Content:"在列或表别名(如\"tbl AS alias\")中, 明确使用AS关键字比隐含别名(如\"tbl alias\")更易懂。", Case:"select name from tbl t1 where id < 1000", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ALI.002", Severity:"L8", Summary:"不建议给列通配符'*'设置别名", Content:"例: \"SELECT tbl.* col1, col2\"上面这条SQL给列通配符设置了别名,这样的SQL可能存在逻辑错误。您可能意在查询col1, 但是代替它的是重命名的是tbl的最后一列。", Case:"select tbl.* as c1,c2,c3 from tbl where id < 1000", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ALI.003", Severity:"L1", Summary:"别名不要与表或列的名字相同", Content:"表或列的别名与其真实名称相同, 这样的别名会使得查询更难去分辨。", Case:"select name from tbl as tbl where id < 1000", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ALT.001", Severity:"L4", Summary:"修改表的默认字符集不会改表各个字段的字符集", Content:"很多初学者会将ALTER TABLE tbl_name [DEFAULT] CHARACTER SET 
'UTF8'误认为会修改所有字段的字符集,但实际上它只会影响后续新增的字段不会改表已有字段的字符集。如果想修改整张表所有字段的字符集建议使用ALTER TABLE tbl_name CONVERT TO CHARACTER SET charset_name;", Case:"ALTER TABLE tbl_name CONVERT TO CHARACTER SET charset_name;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ALT.002", Severity:"L2", Summary:"同一张表的多条ALTER请求建议合为一条", Content:"每次表结构变更对线上服务都会产生影响,即使是能够通过在线工具进行调整也请尽量通过合并ALTER请求的试减少操作次数。", Case:"ALTER TABLE tbl ADD COLUMN col int, ADD INDEX idx_col (`col`);", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ALT.003", Severity:"L0", Summary:"删除列为高危操作,操作前请注意检查业务逻辑是否还有依赖", Content:"如业务逻辑依赖未完全消除,列被删除后可能导致数据无法写入或无法查询到已删除列数据导致程序异常的情况。这种情况下即使通过备份数据回滚也会丢失用户请求写入的数据。", Case:"ALTER TABLE tbl DROP COLUMN col;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ALT.004", Severity:"L0", Summary:"删除主键和外键为高危操作,操作前请与DBA确认影响", Content:"主键和外键为关系型数据库中两种重要约束,删除已有约束会打破已有业务逻辑,操作前请业务开发与DBA确认影响,三思而行。", Case:"ALTER TABLE tbl DROP PRIMARY KEY;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.001", Severity:"L4", Summary:"不建议使用前项通配符查找", Content:"例如“%foo”,查询参数有一个前项通配符的情况无法使用已有索引。", Case:"select c1,c2,c3 from tbl where name like '%foo'", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.002", Severity:"L1", Summary:"没有通配符的LIKE查询", Content:"不包含通配符的LIKE查询可能存在逻辑错误,因为逻辑上它与等值查询相同。", Case:"select c1,c2,c3 from tbl where name like 'foo'", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.003", Severity:"L4", Summary:"参数比较包含隐式转换,无法使用索引", Content:"隐式类型转换有无法命中索引的风险,在高并发、大数据量的情况下,命不中索引带来的后果非常严重。", Case:"SELECT * FROM sakila.film WHERE length >= '60';", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.004", Severity:"L4", Summary:"IN (NULL)/NOT IN (NULL)永远非真", Content:"正确的作法是col IN ('val1', 'val2', 'val3') OR col IS NULL", Case:"SELECT * FROM sakila.film WHERE length 
>= '60';", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.006", Severity:"L1", Summary:"应尽量避免在WHERE子句中对字段进行NULL值判断", Content:"使用IS NULL或IS NOT NULL将可能导致引擎放弃使用索引而进行全表扫描,如:select id from t where num is null;可以在num上设置默认值0,确保表中num列没有null值,然后这样查询: select id from t where num=0;", Case:"select id from t where num is null", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.007", Severity:"L3", Summary:"避免使用模式匹配", Content:"性能问题是使用模式匹配操作符的最大缺点。使用LIKE或正则表达式进行模式匹配进行查询的另一个问题,是可能会返回意料之外的结果。最好的方案就是使用特殊的搜索引擎技术来替代SQL,比如Apache Lucene。另一个可选方案是将结果保存起来从而减少重复的搜索开销。如果一定要使用SQL,请考虑在MySQL中使用像FULLTEXT索引这样的第三方扩展。但更广泛地说,您不一定要使用SQL来解决所有问题。", Case:"select c_id,c2,c3 from tbl where c2 like 'test%'", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.008", Severity:"L1", Summary:"OR查询索引列时请尽量使用IN谓词", Content:"IN-list谓词可以用于索引检索,并且优化器可以对IN-list进行排序,以匹配索引的排序序列,从而获得更有效的检索。请注意,IN-list必须只包含常量,或在查询块执行期间保持常量的值,例如外引用。", Case:"SELECT c1,c2,c3 FROM tbl WHERE c1 = 14 OR c1 = 17", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.009", Severity:"L1", Summary:"引号中的字符串开头或结尾包含空格", Content:"如果VARCHAR列的前后存在空格将可能引起逻辑问题,如在MySQL 5.5中'a'和'a '可能会在查询中被认为是相同的值。", Case:"SELECT 'abc '", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.010", Severity:"L1", Summary:"不要使用hint,如sql_no_cache,force index,ignore key,straight join等", Content:"hint是用来强制SQL按照某个执行计划来执行,但随着数据量变化我们无法保证自己当初的预判是正确的。", Case:"SELECT 'abc '", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"ARG.011", Severity:"L3", Summary:"不要使用负向查询,如:NOT IN/NOT LIKE", Content:"请尽量不要使用负向查询,这将导致全表扫描,对查询性能影响较大。", Case:"select id from t where num not in(1,2,3);", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.001", Severity:"L4", Summary:"最外层SELECT未指定WHERE条件", Content:"SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT 
COUNT(*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。", Case:"select id from tbl", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.002", Severity:"L3", Summary:"不建议使用ORDER BY RAND()", Content:"ORDER BY RAND()是从结果集中检索随机行的一种非常低效的方法,因为它会对整个结果进行排序并丢弃其大部分数据。", Case:"select name from tbl where id < 1000 order by rand(number)", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.003", Severity:"L2", Summary:"不建议使用带OFFSET的LIMIT查询", Content:"使用LIMIT和OFFSET对结果集分页的复杂度是O(n^2),并且会随着数据增大而导致性能问题。采用“书签”扫描的方法实现分页效率更高。", Case:"select c1,c2 from tbl where name=xx order by number limit 1 offset 20", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.004", Severity:"L2", Summary:"不建议对常量进行GROUP BY", Content:"GROUP BY 1 表示按第一列进行GROUP BY。如果在GROUP BY子句中使用数字,而不是表达式或列名称,当查询列顺序改变时,可能会导致问题。", Case:"select col1,col2 from tbl group by 1", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.005", Severity:"L2", Summary:"ORDER BY常数列没有任何意义", Content:"SQL逻辑上可能存在错误; 最多只是一个无用的操作,不会更改查询结果。", Case:"select id from test where id=1 order by id", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.006", Severity:"L4", Summary:"在不同的表中GROUP BY或ORDER BY", Content:"这将强制使用临时表和filesort,可能产生巨大性能隐患,并且可能消耗大量内存和磁盘上的临时空间。", Case:"select tb1.col, tb2.col from tb1, tb2 where id=1 group by tb1.col, tb2.col", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.007", Severity:"L2", Summary:"ORDER BY语句对多个不同条件使用不同方向的排序无法使用索引", Content:"ORDER BY子句中的所有表达式必须按统一的ASC或DESC方向排序,以便利用索引。", Case:"select c1,c2,c3 from t1 where c1='foo' order by c2 desc, c3 asc", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.008", Severity:"L2", Summary:"请为GROUP BY显示添加ORDER BY条件", Content:"默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER 
BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。", Case:"select c1,c2,c3 from t1 where c1='foo' group by c2", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.009", Severity:"L2", Summary:"ORDER BY的条件为表达式", Content:"当ORDER BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。", Case:"select description from film where title ='ACADEMY DINOSAUR' order by length-language_id;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.010", Severity:"L2", Summary:"GROUP BY的条件为表达式", Content:"当GROUP BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。", Case:"select description from film where title ='ACADEMY DINOSAUR' GROUP BY length-language_id;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.011", Severity:"L1", Summary:"建议为表添加注释", Content:"为表添加注释能够使得表的意义更明确,从而为日后的维护带来极大的便利。", Case:"CREATE TABLE `test1` (`ID` bigint(20) NOT NULL AUTO_INCREMENT,`c1` varchar(128) DEFAULT NULL,PRIMARY KEY (`ID`)) ENGINE=InnoDB DEFAULT CHARSET=utf8", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.012", Severity:"L2", Summary:"将复杂的裹脚布式查询分解成几个简单的查询", Content:"SQL是一门极具表现力的语言,您可以在单个SQL查询或者单条语句中完成很多事情。但这并不意味着必须强制只使用一行代码,或者认为使用一行代码就搞定每个任务是个好主意。通过一个查询来获得所有结果的常见后果是得到了一个笛卡儿积。当查询中的两张表之间没有条件限制它们的关系时,就会发生这种情况。没有对应的限制而直接使用两张表进行联结查询,就会得到第一张表中的每一行和第二张表中的每一行的一个组合。每一个这样的组合就会成为结果集中的一行,最终您就会得到一个行数很多的结果集。重要的是要考虑这些查询很难编写、难以修改和难以调试。数据库查询请求的日益增加应该是预料之中的事。经理们想要更复杂的报告以及在用户界面上添加更多的字段。如果您的设计很复杂,并且是一个单一查询,要扩展它们就会很费时费力。不论对您还是项目来说,时间花在这些事情上面不值得。将复杂的意大利面条式查询分解成几个简单的查询。当您拆分一个复杂的SQL查询时,得到的结果可能是很多类似的查询,可能仅仅在数据类型上有所不同。编写所有的这些查询是很乏味的,因此,最好能够有个程序自动生成这些代码。SQL代码生成是一个很好的应用。尽管SQL支持用一行代码解决复杂的问题,但也别做不切实际的事情。", Case:"这是一条很长很长的SQL,案例略。", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.013", Severity:"L3", Summary:"不建议使用HAVING子句", Content:"将查询的HAVING子句改写为WHERE中的查询条件,可以在查询处理期间使用索引。", Case:"SELECT s.c_id,count(s.c_id) FROM s where c = test GROUP BY 
s.c_id HAVING s.c_id <> '1660' AND s.c_id <> '2' order by s.c_id", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.014", Severity:"L2", Summary:"删除全表时建议使用TRUNCATE替代DELETE", Content:"删除全表时建议使用TRUNCATE替代DELETE", Case:"delete from tbl", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.015", Severity:"L4", Summary:"UPDATE未指定WHERE条件", Content:"UPDATE不指定WHERE条件一般是致命的,请您三思后行", Case:"update tbl set col=1", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.016", Severity:"L2", Summary:"不要UPDATE主键", Content:"主键是数据表中记录的唯一标识符,不建议频繁更新主键列,这将影响元数据统计信息进而影响正常的查询。", Case:"update tbl set col=1", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"CLA.017", Severity:"L2", Summary:"不建议使用存储过程、视图、触发器、临时表等", Content:"这些功能的使用在一定程度上会使得程序难以调试和拓展,更没有移植性,且会极大的增加出现BUG的概率。", Case:"CREATE VIEW v_today (today) AS SELECT CURRENT_DATE;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.001", Severity:"L1", Summary:"不建议使用SELECT * 类型查询", Content:"当表结构变更时,使用*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。", Case:"select * from tbl where id=1", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.002", Severity:"L2", Summary:"INSERT未指定列名", Content:"当表结构发生变更,如果INSERT或REPLACE请求不明确指定列名,请求的结果将会与预想的不同; 建议使用“INSERT INTO tbl(col1,col2)VALUES ...”代替。", Case:"insert into tbl values(1,'name')", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.003", Severity:"L2", Summary:"建议修改自增ID为无符号类型", Content:"建议修改自增ID为无符号类型", Case:"create table test(`id` int(11) NOT NULL AUTO_INCREMENT)", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.004", Severity:"L1", Summary:"请为列添加默认值", Content:"请为列添加默认值,如果是ALTER操作,请不要忘记将原字段的默认值写上。字段无默认值,当表较大时无法在线变更表结构。", Case:"CREATE TABLE tbl (col int) ENGINE=InnoDB;", Position:0, Func:func(*advisor.Query4Audit) 
advisor.Rule {...}} +advisor.Rule{Item:"COL.005", Severity:"L1", Summary:"列未添加注释", Content:"建议对表中每个列添加注释,来明确每个列在表中的含义及作用。", Case:"CREATE TABLE tbl (col int) ENGINE=InnoDB;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.006", Severity:"L3", Summary:"表中包含有太多的列", Content:"表中包含有太多的列", Case:"CREATE TABLE tbl ( cols ....);", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.008", Severity:"L1", Summary:"可使用VARCHAR代替CHAR,VARBINARY代替BINARY", Content:"为首先变长字段存储空间小,可以节省存储空间。其次对于查询来说,在一个相对较小的字段内搜索效率显然要高些。", Case:"create table t1(id int,name char(20),last_time date)", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.009", Severity:"L2", Summary:"建议使用精确的数据类型", Content:"实际上,任何使用FLOAT、REAL或DOUBLE PRECISION数据类型的设计都有可能是反模式。大多数应用程序使用的浮点数的取值范围并不需要达到IEEE 754标准所定义的最大/最小区间。在计算总量时,非精确浮点数所积累的影响是严重的。使用SQL中的NUMERIC或DECIMAL类型来代替FLOAT及其类似的数据类型进行固定精度的小数存储。这些数据类型精确地根据您定义这一列时指定的精度来存储数据。尽可能不要使用浮点数。", Case:"CREATE TABLE tab2 (p_id BIGINT UNSIGNED NOT NULL,a_id BIGINT UNSIGNED NOT NULL,hours float not null,PRIMARY KEY (p_id, a_id))", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.010", Severity:"L2", Summary:"不建议使用ENUM数据类型", Content:"ENUM定义了列中值的类型,使用字符串表示ENUM里的值时,实际存储在列中的数据是这些值在定义时的序数。因此,这列的数据是字节对齐的,当您进行一次排序查询时,结果是按照实际存储的序数值排序的,而不是按字符串值的字母顺序排序的。这可能不是您所希望的。没有什么语法支持从ENUM或者check约束中添加或删除一个值;您只能使用一个新的集合重新定义这一列。如果您打算废弃一个选项,您可能会为历史数据而烦恼。作为一种策略,改变元数据——也就是说,改变表和列的定义——应该是不常见的,并且要注意测试和质量保证。有一个更好的解决方案来约束一列中的可选值:创建一张检查表,每一行包含一个允许在列中出现的候选值;然后在引用新表的旧表上声明一个外键约束。", Case:"create table tab1(status ENUM('new','in progress','fixed'))", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.011", Severity:"L0", Summary:"当需要唯一约束时才使用NULL,仅当列不能有缺失值时才使用NOT NULL", Content:"NULL和0是不同的,10乘以NULL还是NULL。NULL和空字符串是不一样的。将一个字符串和标准SQL中的NULL联合起来的结果还是NULL。NULL和FALSE也是不同的。AND、OR和NOT这三个布尔操作如果涉及NULL,其结果也让很多人感到困惑。当您将一列声明为NOT 
NULL时,也就是说这列中的每一个值都必须存在且是有意义的。使用NULL来表示任意类型不存在的空值。 当您将一列声明为NOT NULL时,也就是说这列中的每一个值都必须存在且是有意义的。", Case:"select c1,c2,c3 from tbl where c4 is null or c4 <> 1", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.012", Severity:"L5", Summary:"BLOB和TEXT类型的字段不可设置为NULL", Content:"BLOB和TEXT类型的字段不可设置为NULL", Case:"CREATE TABLE `tbl` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` longblob, PRIMARY KEY (`id`));", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.013", Severity:"L4", Summary:"TIMESTAMP类型未设置默认值", Content:"TIMESTAMP类型未设置默认值", Case:"CREATE TABLE tbl( `id` bigint not null, `create_time` timestamp);", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.014", Severity:"L5", Summary:"为列指定了字符集", Content:"建议列与表使用同一个字符集,不要单独指定列的字符集。", Case:"CREATE TABLE `tb2` ( `id` int(11) DEFAULT NULL, `col` char(10) CHARACTER SET utf8 DEFAULT NULL)", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.015", Severity:"L4", Summary:"BLOB类型的字段不可指定默认值", Content:"BLOB类型的字段不可指定默认值", Case:"CREATE TABLE `tbl` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` blob NOT NULL DEFAULT '', PRIMARY KEY (`id`));", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.016", Severity:"L1", Summary:"整型定义建议采用INT(10)或BIGINT(20)", Content:"INT(M) 在 integer 数据类型中,M 表示最大显示宽度。 在 INT(M) 中,M 的值跟 INT(M) 所占多少存储空间并无任何关系。 INT(3)、INT(4)、INT(8) 在磁盘上都是占用 4 bytes 的存储空间。", Case:"CREATE TABLE tab (a INT(1));", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"COL.017", Severity:"L2", Summary:"varchar定义长度过长", Content:"varchar 是可变长字符串,不预先分配存储空间,长度不要超过1024,如果存储长度过长MySQL将定义字段类型为text,独立出来一张表,用主键来对应,避免影响其它字段索引效率。", Case:"CREATE TABLE tab (a varchar(3500));", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"DIS.001", Severity:"L1", Summary:"消除不必要的DISTINCT条件", 
Content:"太多DISTINCT条件是复杂的裹脚布式查询的症状。考虑将复杂查询分解成许多简单的查询,并减少DISTINCT条件的数量。如果主键列是列的结果集的一部分,则DISTINCT条件可能没有影响。", Case:"SELECT DISTINCT c.c_id,count(DISTINCT c.c_name),count(DISTINCT c.c_e),count(DISTINCT c.c_n),count(DISTINCT c.c_me),c.c_d FROM (select distinct xing, name from B) as e WHERE e.country_id = c.country_id", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"DIS.002", Severity:"L3", Summary:"COUNT(DISTINCT)多列时结果可能和你预想的不同", Content:"COUNT(DISTINCT col)计算该列除NULL之外的不重复行数,注意COUNT(DISTINCT col, col2)如果其中一列全为NULL那么即使另一列有不同的值,也返回0。", Case:"SELECT COUNT(DISTINCT col, col2) FROM tbl;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"DIS.003", Severity:"L3", Summary:"DISTINCT *对有主键的表没有意义", Content:"当表已经有主键时,对所有列进行DISTINCT的输出结果与不进行DISTINCT操作的结果相同,请不要画蛇添足。", Case:"SELECT DISTINCT * FROM film;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"FUN.001", Severity:"L2", Summary:"避免在WHERE条件中使用函数或其他运算符", Content:"虽然在SQL中使用函数可以简化很多复杂的查询,但使用了函数的查询无法利用表中已经建立的索引,该查询将会是全表扫描,性能较差。通常建议将列名写在比较运算符左侧,将查询过滤条件放在比较运算符右侧。", Case:"select id from t where substring(name,1,3)='abc'", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"FUN.002", Severity:"L1", Summary:"指定了WHERE条件或非MyISAM引擎时使用COUNT(*)操作性能不佳", Content:"COUNT(*)的作用是统计表行数,COUNT(COL)的作用是统计指定列非NULL的行数。MyISAM表对于COUNT(*)统计全表行数进行了特殊的优化,通常情况下非常快。但对于非MyISAM表或指定了某些WHERE条件,COUNT(*)操作需要扫描大量的行才能获取精确的结果,性能也因此不佳。有时候某些业务场景并不需要完全精确的COUNT值,此时可以用近似值来代替。EXPLAIN出来的优化器估算的行数就是一个不错的近似值,执行EXPLAIN并不需要真正去执行查询,所以成本很低。", Case:"SELECT c3, COUNT(*) AS accounts FROM tab where c2 < 10000 GROUP BY c3 ORDER BY num", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"FUN.003", Severity:"L3", Summary:"使用了合并为可空列的字符串连接", Content:"在一些查询请求中,您需要强制让某一列或者某个表达式返回非NULL的值,从而让查询逻辑变得更简单,担忧不想将这个值存下来。使用COALESCE()函数来构造连接的表达式,这样即使是空值列也不会使整表达式变为NULL。", Case:"select c1 || coalesce(' ' || c2 || ' ', ' ') || c3 as c 
from tbl", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"FUN.004", Severity:"L4", Summary:"不建议使用SYSDATE()函数", Content:"SYSDATE()函数可能导致主从数据不一致,请使用NOW()函数替代SYSDATE()。", Case:"SELECT SYSDATE();", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"FUN.005", Severity:"L1", Summary:"不建议使用COUNT(col)或COUNT(常量)", Content:"不要使用COUNT(col)或COUNT(常量)来替代COUNT(*),COUNT(*)是SQL92定义的标准统计行数的方法,跟数据无关,跟NULL和非NULL也无关。", Case:"SELECT COUNT(1) FROM tbl;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"FUN.006", Severity:"L1", Summary:"使用SUM(COL)时需注意NPE问题", Content:"当某一列的值全是NULL时,COUNT(COL)的返回结果为0,但SUM(COL)的返回结果为NULL,因此使用SUM()时需注意NPE问题。可以使用如下方式来避免SUM的NPE问题: SELECT IF(ISNULL(SUM(COL)), 0, SUM(COL)) FROM tbl", Case:"SELECT SUM(COL) FROM tbl;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"GRP.001", Severity:"L2", Summary:"不建议对等值查询列使用GROUP BY", Content:"GROUP BY中的列在前面的WHERE条件中使用了等值查询,对这样的列进行GROUP BY意义不大。", Case:"select film_id, title from film where release_year='2006' group by release_year", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"JOI.001", Severity:"L2", Summary:"JOIN语句混用逗号和ANSI模式", Content:"表连接的时候混用逗号和ANSI JOIN不便于人类理解,并且MySQL不同版本的表连接行为和优先级均有所不同,当MySQL版本变化后可能会引入错误。", Case:"select c1,c2,c3 from t1,t2 join t3 on t1.c1=t2.c1,t1.c3=t3,c1 where id>1000", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"JOI.002", Severity:"L4", Summary:"同一张表被连接两次", Content:"相同的表在FROM子句中至少出现两次,可以简化为对该表的单次访问。", Case:"select tb1.col from (tb1, tb2) join tb2 on tb1.id=tb.id where tb1.id=1", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"JOI.003", Severity:"L4", Summary:"OUTER JOIN失效", Content:"由于WHERE条件错误使得OUTER JOIN的外部表无数据返回,这会将查询隐式转换为 INNER JOIN 。如:select c from L left join R using(c) where L.a=5 and R.b=10。这种SQL逻辑上可能存在错误或程序员对OUTER JOIN如何工作存在误解,因为LEFT/RIGHT 
JOIN是LEFT/RIGHT OUTER JOIN的缩写。", Case:"select c1,c2,c3 from t1 left outer join t2 using(c1) where t1.c2=2 and t2.c3=4", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"JOI.004", Severity:"L4", Summary:"不建议使用排它JOIN", Content:"只在右侧表为NULL的带WHERE子句的LEFT OUTER JOIN语句,有可能是在WHERE子句中使用错误的列,如:“... FROM l LEFT OUTER JOIN r ON l.l = r.r WHERE r.z IS NULL”,这个查询正确的逻辑可能是 WHERE r.r IS NULL。", Case:"select c1,c2,c3 from t1 left outer join t2 on t1.c1=t2.c1 where t2.c2 is null", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"JOI.005", Severity:"L2", Summary:"减少JOIN的数量", Content:"太多的JOIN是复杂的裹脚布式查询的症状。考虑将复杂查询分解成许多简单的查询,并减少JOIN的数量。", Case:"select bp1.p_id, b1.d_d as l, b1.b_id from b1 join bp1 on (b1.b_id = bp1.b_id) left outer join (b1 as b2 join bp2 on (b2.b_id = bp2.b_id)) on (bp1.p_id = bp2.p_id ) join bp21 on (b1.b_id = bp1.b_id) join bp31 on (b1.b_id = bp1.b_id) join bp41 on (b1.b_id = bp1.b_id) where b2.b_id = 0", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"JOI.008", Severity:"L4", Summary:"不要使用跨DB的Join查询", Content:"一般来说,跨DB的Join查询意味着查询语句跨越了两个不同的子系统,这可能意味着系统耦合度过高或库表结构设计不合理。", Case:"SELECT s,p,d FROM tbl WHERE p.p_id = (SELECT s.p_id FROM tbl WHERE s.c_id = 100996 AND s.q = 1 )", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KEY.001", Severity:"L2", Summary:"建议使用自增列作为主键,如使用联合自增主键时请将自增键作为第一列", Content:"建议使用自增列作为主键,如使用联合自增主键时请将自增键作为第一列", Case:"create table test(`id` int(11) NOT NULL PRIMARY KEY (`id`))", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KEY.003", Severity:"L4", Summary:"避免外键等递归关系", Content:"存在递归关系的数据很常见,数据常会像树或者以层级方式组织。然而,创建一个外键约束来强制执行同一表中两列之间的关系,会导致笨拙的查询。树的每一层对应着另一个连接。您将需要发出递归查询,以获得节点的所有后代或所有祖先。解决方案是构造一个附加的闭包表。它记录了树中所有节点间的关系,而不仅仅是那些具有直接的父子关系。您也可以比较不同层次的数据设计:闭包表,路径枚举,嵌套集。然后根据应用程序的需要选择一个。", Case:"CREATE TABLE tab2 (p_id BIGINT UNSIGNED NOT NULL,a_id BIGINT UNSIGNED NOT NULL,PRIMARY 
KEY (p_id, a_id),FOREIGN KEY (p_id) REFERENCES tab1(p_id),FOREIGN KEY (a_id) REFERENCES tab3(a_id))", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KEY.004", Severity:"L0", Summary:"提醒:请将索引属性顺序与查询对齐", Content:"如果为列创建复合索引,请确保查询属性与索引属性的顺序相同,以便DBMS在处理查询时使用索引。如果查询和索引属性订单没有对齐,那么DBMS可能无法在查询处理期间使用索引。", Case:"create index idx1 on tbl (last_name,first_name)", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KEY.005", Severity:"L2", Summary:"表建的索引过多", Content:"表建的索引过多", Case:"CREATE TABLE tbl ( a int, b int, c int, KEY idx_a (`a`),KEY idx_b(`b`),KEY idx_c(`c`));", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KEY.006", Severity:"L4", Summary:"主键中的列过多", Content:"主键中的列过多", Case:"CREATE TABLE tbl ( a int, b int, c int, PRIMARY KEY(`a`,`b`,`c`));", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KEY.007", Severity:"L4", Summary:"未指定主键或主键非int或bigint", Content:"未指定主键或主键非int或bigint,建议将主键设置为int unsigned或bigint unsigned。", Case:"CREATE TABLE tbl (a int);", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KEY.008", Severity:"L4", Summary:"ORDER BY多个列但排序方向不同时可能无法使用索引", Content:"在MySQL 8.0之前当ORDER BY多个列指定的排序方向不同时将无法使用已经建立的索引。", Case:"SELECT * FROM tbl ORDER BY a DESC, b ASC;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KEY.009", Severity:"L0", Summary:"添加唯一索引前请注意检查数据唯一性", Content:"请提前检查添加唯一索引列的数据唯一性,如果数据不唯一在线表结构调整时将有可能自动将重复列删除,这有可能导致数据丢失。", Case:"CREATE UNIQUE INDEX part_of_name ON customer (name(10));", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KWR.001", Severity:"L2", Summary:"SQL_CALC_FOUND_ROWS效率低下", Content:"因为SQL_CALC_FOUND_ROWS不能很好地扩展,所以可能导致性能问题; 建议业务使用其他策略来替代SQL_CALC_FOUND_ROWS提供的计数功能,比如:分页结果展示等。", Case:"select SQL_CALC_FOUND_ROWS col from tbl where id>1000", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule 
{...}} +advisor.Rule{Item:"KWR.002", Severity:"L2", Summary:"不建议使用MySQL关键字做列名或表名", Content:"当使用关键字做为列名或表名时程序需要对列名和表名进行转义,如果疏忽被将导致请求无法执行。", Case:"CREATE TABLE tbl ( `select` int )", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"KWR.003", Severity:"L1", Summary:"不建议使用复数做列名或表名", Content:"表名应该仅仅表示表里面的实体内容,不应该表示实体数量,对应于 DO 类名也是单数形式,符合表达习惯。", Case:"CREATE TABLE tbl ( `books` int )", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"LCK.001", Severity:"L3", Summary:"INSERT INTO xx SELECT加锁粒度较大请谨慎", Content:"INSERT INTO xx SELECT加锁粒度较大请谨慎", Case:"INSERT INTO tbl SELECT * FROM tbl2;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"LCK.002", Severity:"L3", Summary:"请慎用INSERT ON DUPLICATE KEY UPDATE", Content:"当主键为自增键时使用INSERT ON DUPLICATE KEY UPDATE可能会导致主键出现大量不连续快速增长,导致主键快速溢出无法继续写入。极端情况下还有可能导致主从数据不一致。", Case:"INSERT INTO t1(a,b,c) VALUES (1,2,3) ON DUPLICATE KEY UPDATE c=c+1;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"LIT.001", Severity:"L2", Summary:"用字符类型存储IP地址", Content:"字符串字面上看起来像IP地址,但不是INET_ATON()的参数,表示数据被存储为字符而不是整数。将IP地址存储为整数更为有效。", Case:"insert into tbl (IP,name) values('10.20.306.122','test')", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"LIT.002", Severity:"L4", Summary:"日期/时间未使用引号括起", Content:"诸如“WHERE col <2010-02-12”之类的查询是有效的SQL,但可能是一个错误,因为它将被解释为“WHERE col <1996”; 日期/时间文字应该加引号。", Case:"select col1,col2 from tbl where time < 2018-01-10", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"LIT.003", Severity:"L3", Summary:"一列中存储一系列相关数据的集合", Content:"将ID存储为一个列表,作为VARCHAR/TEXT列,这样能导致性能和数据完整性问题。查询这样的列需要使用模式匹配的表达式。使用逗号分隔的列表来做多表联结查询定位一行数据是极不优雅和耗时的。这将使验证ID更加困难。考虑一下,列表最多支持存放多少数据呢?将ID存储在一张单独的表中,代替使用多值属性,从而每个单独的属性值都可以占据一行。这样交叉表实现了两张表之间的多对多关系。这将更好地简化查询,也更有效地验证ID。", Case:"select c1,c2,c3,c4 from tab1 where col_id REGEXP '[[:<:]]12[[:>:]]'", Position:0, 
Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"LIT.004", Severity:"L1", Summary:"请使用分号或已设定的DELIMITER结尾", Content:"USE database, SHOW DATABASES等命令也需要使用使用分号或已设定的DELIMITER结尾。", Case:"USE db", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"OK", Severity:"L0", Summary:"✔️", Content:"✔️", Case:"✔️", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"RES.001", Severity:"L4", Summary:"非确定性的GROUP BY", Content:"SQL返回的列既不在聚合函数中也不是GROUP BY表达式的列中,因此这些值的结果将是非确定性的。如:select a, b, c from tbl where foo=\"bar\" group by a,该SQL返回的结果就是不确定的。", Case:"select c1,c2,c3 from t1 where c2='foo' group by c2", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"RES.002", Severity:"L4", Summary:"未使用ORDER BY的LIMIT查询", Content:"没有ORDER BY的LIMIT会导致非确定性的结果,这取决于查询执行计划。", Case:"select col1,col2 from tbl where name=xx limit 10", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"RES.003", Severity:"L4", Summary:"UPDATE/DELETE操作使用了LIMIT条件", Content:"UPDATE/DELETE操作使用LIMIT条件和不添加WHERE条件一样危险,它可将会导致主从数据不一致或从库同步中断。", Case:"UPDATE film SET length = 120 WHERE title = 'abc' LIMIT 1;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"RES.004", Severity:"L4", Summary:"UPDATE/DELETE操作指定了ORDER BY条件", Content:"UPDATE/DELETE操作不要指定ORDER BY条件。", Case:"UPDATE film SET length = 120 WHERE title = 'abc' ORDER BY title", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"RES.005", Severity:"L4", Summary:"UPDATE可能存在逻辑错误,导致数据损坏", Content:"", Case:"update tbl set col = 1 and cl = 2 where col=3;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"RES.006", Severity:"L4", Summary:"永远不真的比较条件", Content:"查询条件永远非真,这将导致查询无匹配到的结果。", Case:"select * from tbl where 1 != 1;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"RES.007", Severity:"L4", 
Summary:"永远为真的比较条件", Content:"查询条件永远为真,这将导致WHERE条件失效进行全表查询。", Case:"select * from tbl where 1 = 1;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"RES.008", Severity:"L2", Summary:"不建议使用LOAD DATA/SELECT ... INTO OUTFILE", Content:"SELECT INTO OUTFILE需要授予FILE权限,这通过会引入安全问题。LOAD DATA虽然可以提高数据导入速度,但同时也可能导致从库同步延迟过大。", Case:"LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"SEC.001", Severity:"L0", Summary:"请谨慎使用TRUNCATE操作", Content:"一般来说想清空一张表最快速的做法就是使用TRUNCATE TABLE tbl_name;语句。但TRUNCATE操作也并非是毫无代价的,TRUNCATE TABLE无法返回被删除的准确行数,如果需要返回被删除的行数建议使用DELETE语法。TRUNCATE操作还会重置AUTO_INCREMENT,如果不想重置该值建议使用DELETE FROM tbl_name WHERE 1;替代。TRUNCATE操作会对数据字典添加源数据锁(MDL),当一次需要TRUNCATE很多表时会影响整个实例的所有请求,因此如果要TRUNCATE多个表建议用DROP+CREATE的方式以减少锁时长。", Case:"TRUNCATE TABLE tbl_name", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"SEC.002", Severity:"L0", Summary:"不使用明文存储密码", Content:"使用明文存储密码或者使用明文在网络上传递密码都是不安全的。如果攻击者能够截获您用来插入密码的SQL语句,他们就能直接读到密码。另外,将用户输入的字符串以明文的形式插入到纯SQL语句中,也会让攻击者发现它。如果您能够读取密码,黑客也可以。解决方案是使用单向哈希函数对原始密码进行加密编码。哈希是指将输入字符串转化成另一个新的、不可识别的字符串的函数。对密码加密表达式加点随机串来防御“字典攻击”。不要将明文密码输入到SQL查询语句中。在应用程序代码中计算哈希串,只在SQL查询中使用哈希串。", Case:"create table test(id int,name varchar(20) not null,password varchar(200)not null)", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"SEC.003", Severity:"L0", Summary:"使用DELETE/DROP/TRUNCATE等操作时注意备份", Content:"在执行高危操作之前对数据进行备份是十分有必要的。", Case:"delete from table where col = 'condition'", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"STA.001", Severity:"L0", Summary:"'!=' 运算符是非标准的", Content:"\"<>\"才是标准SQL中的不等于运算符。", Case:"select col1,col2 from tbl where type!=0", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"STA.002", Severity:"L1", Summary:"库名或表名点后建议不要加空格", 
Content:"当使用db.table或table.column格式访问表或字段时,请不要在点号后面添加空格,虽然这样语法正确。", Case:"select col from sakila. film", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"STA.003", Severity:"L1", Summary:"索引起名不规范", Content:"建议普通二级索引以idx_为前缀,唯一索引以uk_为前缀。", Case:"select col from now where type!=0", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"STA.004", Severity:"L1", Summary:"起名时请不要使用字母、数字和下划线之外的字符", Content:"以字母或下划线开头,名字只允许使用字母、数字和下划线。请统一大小写,不要使用驼峰命名法。不要在名字中出现连续下划线'__',这样很难辨认。", Case:"CREATE TABLE ` abc` (a int);", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"SUB.002", Severity:"L2", Summary:"如果您不在乎重复的话,建议使用UNION ALL替代UNION", Content:"与去除重复的UNION不同,UNION ALL允许重复元组。如果您不关心重复元组,那么使用UNION ALL将是一个更快的选项。", Case:"select teacher_id as id,people_name as name from t1,t2 where t1.teacher_id=t2.people_id union select student_id as id,people_name as name from t1,t2 where t1.student_id=t2.people_id", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"SUB.003", Severity:"L3", Summary:"考虑使用EXISTS而不是DISTINCT子查询", Content:"DISTINCT关键字在对元组排序后删除重复。相反,考虑使用一个带有EXISTS关键字的子查询,您可以避免返回整个表。", Case:"SELECT DISTINCT c.c_id, c.c_name FROM c,e WHERE e.c_id = c.c_id", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"SUB.004", Severity:"L3", Summary:"执行计划中嵌套连接深度过深", Content:"MySQL对子查询的优化效果不佳,MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。", Case:"SELECT * from tb where id in (select id from (select id from tb))", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"SUB.005", Severity:"L8", Summary:"子查询不支持LIMIT", Content:"当前MySQL版本不支持在子查询中进行'LIMIT & IN/ALL/ANY/SOME'。", Case:"SELECT * FROM staff WHERE name IN (SELECT NAME FROM customer ORDER BY name LIMIT 1)", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"SUB.006", Severity:"L2", Summary:"不建议在子查询中使用函数", 
Content:"MySQL将外部查询中的每一行作为依赖子查询执行子查询,如果在子查询中使用函数,即使是semi-join也很难进行高效的查询。可以将子查询重写为OUTER JOIN语句并用连接条件对数据进行过滤。", Case:"SELECT * FROM staff WHERE name IN (SELECT max(NAME) FROM customer)", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"TBL.001", Severity:"L4", Summary:"不建议使用分区表", Content:"不建议使用分区表", Case:"CREATE TABLE trb3(id INT, name VARCHAR(50), purchased DATE) PARTITION BY RANGE(YEAR(purchased)) (PARTITION p0 VALUES LESS THAN (1990), PARTITION p1 VALUES LESS THAN (1995), PARTITION p2 VALUES LESS THAN (2000), PARTITION p3 VALUES LESS THAN (2005) );", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"TBL.002", Severity:"L4", Summary:"请为表选择合适的存储引擎", Content:"建表或修改表的存储引擎时建议使用推荐的存储引擎,如:innodb", Case:"create table test(`id` int(11) NOT NULL AUTO_INCREMENT)", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"TBL.003", Severity:"L8", Summary:"以DUAL命名的表在数据库中有特殊含义", Content:"DUAL表为虚拟表,不需要创建即可使用,也不建议服务以DUAL命名表。", Case:"create table dual(id int, primary key (id));", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"TBL.004", Severity:"L2", Summary:"表的初始AUTO_INCREMENT值不为0", Content:"AUTO_INCREMENT不为0会导致数据空洞。", Case:"CREATE TABLE tbl (a int) AUTO_INCREMENT = 10;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} +advisor.Rule{Item:"TBL.005", Severity:"L4", Summary:"请使用推荐的字符集", Content:"表字符集只允许设置为utf8,utf8mb4", Case:"CREATE TABLE tbl (a int) DEFAULT CHARSET = latin1;", Position:0, Func:func(*advisor.Query4Audit) advisor.Rule {...}} diff --git a/ast/doc.go b/ast/doc.go new file mode 100644 index 00000000..43bbb74f --- /dev/null +++ b/ast/doc.go @@ -0,0 +1,18 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package ast is an interface for Abstract Syntax Tree parser +package ast diff --git a/ast/meta.go b/ast/meta.go new file mode 100644 index 00000000..12a3f815 --- /dev/null +++ b/ast/meta.go @@ -0,0 +1,754 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ast + +import ( + "fmt" + "strings" + + "github.com/XiaoMi/soar/common" + "vitess.io/vitess/go/vt/sqlparser" +) + +// GetTableFromExprs 从sqlparser.Exprs中获取所有的库表 +func GetTableFromExprs(exprs sqlparser.TableExprs, metas ...common.Meta) common.Meta { + meta := make(map[string]*common.DB) + if len(metas) >= 1 { + meta = metas[0] + } + + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case *sqlparser.AliasedTableExpr: + + switch table := expr.Expr.(type) { + case sqlparser.TableName: + db := table.Qualifier.String() + tb := table.Name.String() + + if meta[db] == nil { + meta[db] = common.NewDB(db) + } + + meta[db].Table[tb] = common.NewTable(tb) + + // alias去重 + aliasExist := false + for _, existedAlias := range meta[db].Table[tb].TableAliases { + if existedAlias == expr.As.String() { + aliasExist = true + } + } + + if !aliasExist { + meta[db].Table[tb].TableAliases = append(meta[db].Table[tb].TableAliases, expr.As.String()) + } + } + } + return true, nil + }, exprs) + common.LogIfWarn(err, "") + return meta +} + +// GetMeta 获取元数据信息,构建到db->table层级。 +// 从 SQL 或 Statement 中获取表信息,并返回。当 meta 不为 nil 时,返回值会将新老 meta 合并去重 +func GetMeta(stmt sqlparser.Statement, meta common.Meta) common.Meta { + // 初始化meta + if meta == nil { + meta = make(map[string]*common.DB) + } + + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case *sqlparser.DDL: + // 如果SQL是一个DDL,则不需要继续遍历语法树了 + db1 := expr.Table.Qualifier.String() + tb1 := expr.Table.Name.String() + db2 := expr.NewName.Qualifier.String() + tb2 := expr.NewName.Name.String() + + if tb1 != "" { + if _, ok := meta[db1]; !ok { + meta[db1] = common.NewDB(db1) + } + + meta[db1].Table[tb1] = common.NewTable(tb1) + } + + if tb2 != "" { + if _, ok := meta[db2]; !ok { + meta[db2] = common.NewDB(db2) + } + + meta[db1].Table[tb2] = common.NewTable(tb2) + } + + return false, nil + case 
*sqlparser.AliasedTableExpr: + // 非 DDL 情况下处理 TableExpr + // 在 sqlparser 中存在三种 TableExpr: AliasedTableExpr,ParenTableExpr 以及 JoinTableExpr。 + // 其中 AliasedTableExpr 是其他两种 TableExpr 的基础组成,SQL中的 表信息(别名、前缀)在这个结构体中。 + + switch table := expr.Expr.(type) { + + // 获取表名、别名与前缀名(数据库名) + // 表名存放在 AST 中 TableName 里,包含表名与表前缀名。 + // 当与 As 相对应的 Expr 为 TableName 的时候,别名才是一张实体表的别名,否则为结果集的别名。 + case sqlparser.TableName: + db := table.Qualifier.String() + tb := table.Name.String() + + if meta[db] == nil { + meta[db] = common.NewDB(db) + } + + meta[db].Table[tb] = common.NewTable(tb) + + // alias去重 + aliasExist := false + for _, existedAlias := range meta[db].Table[tb].TableAliases { + if existedAlias == expr.As.String() { + aliasExist = true + } + } + if !aliasExist { + meta[db].Table[tb].TableAliases = append(meta[db].Table[tb].TableAliases, expr.As.String()) + } + + default: + // 如果 AliasedTableExpr 中的 Expr 不是 TableName 结构体,则表示该表为一个查询结果集(子查询或临时表)。 + // 在这里记录一下别名,但将列名制空,用来保证在其他环节中判断列前缀的时候不会有遗漏 + // 最终结果为所有的子查询别名都会归于 ""(空) 数据库 ""(空) 表下,对于空数据库,空表后续在索引优化时直接PASS + if meta == nil { + meta = make(map[string]*common.DB) + } + + if meta[""] == nil { + meta[""] = common.NewDB("") + } + + meta[""].Table[""] = common.NewTable("") + meta[""].Table[""].TableAliases = append(meta[""].Table[""].TableAliases, expr.As.String()) + } + } + return true, nil + }, stmt) + common.LogIfWarn(err, "") + return meta +} + +// eqOperators 等值条件判断关键字 +var eqOperators = map[string]string{ + "=": "eq", + "<=>": "eq", + "is true": "eq", + "is false": "eq", + "is not true": "eq", + "is not false": "eq", + "is null": "eq", + "in": "eq", // 单值的in属于等值条件 +} + +// inEqOperators 非等值条件判断关键字 +var inEqOperators = map[string]string{ + "<": "inEq", + ">": "inEq", + "<=": "inEq", + ">=": "inEq", + "!=": "inEq", + "is not null": "inEq", + "like": "inEq", + "not like": "inEq", + "->": "inEq", + "->>": "inEq", + "between": "inEq", + "not between": "inEq", + "in": "inEq", // 多值in属于非等值条件 + + // 某些非等值条件无需添加索引,所以忽略即可 + // 比如"not 
in",比如"exists"、 "not exists"等 +} + +// FindColumn 从传入的node中获取所有可能加索引的的column信息 +func FindColumn(node sqlparser.SQLNode) []*common.Column { + common.Log.Debug("Enter: FindColumn, Caller: %s", common.Caller()) + var result []*common.Column + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch col := node.(type) { + case *sqlparser.FuncExpr: + // 忽略function + return false, nil + case *sqlparser.ColName: + result = common.MergeColumn(result, &common.Column{ + Name: col.Name.String(), + Table: col.Qualifier.Name.String(), + DB: col.Qualifier.Qualifier.String(), + Alias: make([]string, 0), + }) + } + + return true, nil + }, node) + common.LogIfWarn(err, "") + return result +} + +// inEqIndexAble 判断非等值查询条件是否可以复用到索引 +// Output: true 可以考虑添加索引, false 不需要添加索引 +func inEqIndexAble(node sqlparser.SQLNode) bool { + common.Log.Debug("Enter: inEqIndexAble(), Caller: %s", common.Caller()) + var indexAble bool + switch expr := node.(type) { + case *sqlparser.ComparisonExpr: + // like前百分号查询无法使用索引 + // TODO date类型的like属于隐式数据类型转换,会导致无法使用索引 + if expr.Operator == "like" || expr.Operator == "not like" { + switch right := expr.Right.(type) { + case *sqlparser.SQLVal: + return !(strings.HasPrefix(string(right.Val), "%")) + } + } + + // 如果是in查询,则需要判断in查询是否是多值查询 + if expr.Operator == "in" { + switch right := expr.Right.(type) { + case sqlparser.ValTuple: + // 若是单值查询则应该属于等值条件而非非等值条件 + return len(right) > 1 + } + } + + _, indexAble = inEqOperators[expr.Operator] + + case *sqlparser.IsExpr: + _, indexAble = inEqOperators[expr.Operator] + + case *sqlparser.RangeCond: + _, indexAble = inEqOperators[expr.Operator] + + default: + indexAble = false + } + return indexAble +} + +// FindWhereEQ 找到Where中的等值条件 +func FindWhereEQ(node sqlparser.SQLNode) []*common.Column { + common.Log.Debug("Enter: FindWhereEQ(), Caller: %s", common.Caller()) + return append(FindEQColsInWhere(node), FindEQColsInJoinCond(node)...) 
+} + +// FindWhereINEQ 找到Where条件中的非等值条件 +func FindWhereINEQ(node sqlparser.SQLNode) []*common.Column { + common.Log.Debug("Enter: FindWhereINEQ(), Caller: %s", common.Caller()) + return append(FindINEQColsInWhere(node), FindINEQColsInJoinCond(node)...) +} + +// FindEQColsInWhere 获取等值条件信息 +// 将所有值得加索引的condition条件信息进行过滤 +func FindEQColsInWhere(node sqlparser.SQLNode) []*common.Column { + common.Log.Debug("Enter: FindEQColsInWhere(), Caller: %s", common.Caller()) + var columns []*common.Column + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + // 对AST中所有节点进行扫描 + case *sqlparser.Subquery, *sqlparser.JoinTableExpr, *sqlparser.BinaryExpr, *sqlparser.OrExpr: + // 忽略子查询,join condition,数值计算,or condition + return false, nil + + case *sqlparser.ComparisonExpr: + var newCols []*common.Column + // ComparisonExpr中可能含有等值查询列条件 + switch node.Operator { + case "in": + // 对in进行特别判断,只有单值的in条件才算做是等值查询 + switch right := node.Right.(type) { + case sqlparser.ValTuple: + if len(right) == 1 { + newCols = FindColumn(node) + } + } + + default: + if _, ok := eqOperators[node.Operator]; ok { + newCols = FindColumn(node) + } + } + + // operator两边都为列的情况不提供索引建议 + // 如果该列位于function中则不予提供索引建议 + if len(newCols) == 1 { + columns = common.MergeColumn(columns, newCols[0]) + } + + case *sqlparser.IsExpr: + // IsExpr中可能含有等值查询列条件 + if _, ok := eqOperators[node.Operator]; ok { + newCols := FindColumn(node) + if len(newCols) == 1 { + columns = common.MergeColumn(columns, newCols[0]) + } + } + } + return true, nil + + }, node) + common.LogIfWarn(err, "") + return columns +} + +// FindINEQColsInWhere 获取非等值条件中可能需要加索引的列 +// 将所有值得加索引的condition条件信息进行过滤 +// TODO: 将where条件中隐含的join条件合并到join condition中 +func FindINEQColsInWhere(node sqlparser.SQLNode) []*common.Column { + common.Log.Debug("Enter: FindINEQColsInWhere(), Caller: %s", common.Caller()) + var columns []*common.Column + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err 
error) { + switch node := node.(type) { + // 对AST中所有节点进行扫描 + case *sqlparser.Subquery, *sqlparser.JoinTableExpr, *sqlparser.BinaryExpr, *sqlparser.OrExpr: + // 忽略子查询,join condition,数值计算,or condition + return false, nil + + case *sqlparser.ComparisonExpr: + // ComparisonExpr中可能含有非等值查询列条件 + if inEqIndexAble(node) { + newCols := FindColumn(node) + // operator两边都为列的情况不提供索引建议 + if len(newCols) == 1 { + columns = common.MergeColumn(columns, newCols[0]) + } + } + case *sqlparser.IsExpr: + // IsExpr中可能含有非等值查询列条件 + if inEqIndexAble(node) { + newCols := FindColumn(node) + if len(newCols) == 1 { + columns = common.MergeColumn(columns, newCols[0]) + } + } + + case *sqlparser.RangeCond: + // RangeCond中只存在非等值条件查询 + if inEqIndexAble(node) { + columns = common.MergeColumn(columns, FindColumn(node)...) + } + } + + return true, nil + + }, node) + common.LogIfWarn(err, "") + return columns +} + +// FindGroupByCols 获取groupBy中可能需要加索引的列信息 +func FindGroupByCols(node sqlparser.SQLNode) []*common.Column { + common.Log.Debug("Enter: FindGroupByCols(), Caller: %s", common.Caller()) + isIgnore := false + var columns []*common.Column + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case sqlparser.GroupBy: + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.BinaryExpr, *sqlparser.FuncExpr: + // 如果group by中出现数值计算、函数等 + isIgnore = true + return false, nil + default: + columns = common.MergeColumn(columns, FindColumn(node)...) 
+ } + return true, nil + }, expr) + common.LogIfWarn(err, "") + case *sqlparser.Subquery, *sqlparser.JoinTableExpr, *sqlparser.BinaryExpr: + // 忽略子查询,join condition以及数值计算 + return false, nil + } + return true, nil + }, node) + common.LogIfWarn(err, "") + if isIgnore { + return []*common.Column{} + } + + return columns +} + +// FindOrderByCols 为索引优化获取orderBy中可能添加索引的列信息 +func FindOrderByCols(node sqlparser.SQLNode) []*common.Column { + common.Log.Debug("Enter: FindOrderByCols(), Caller: %s", common.Caller()) + var columns []*common.Column + lastDirection := "" + directionNotEq := false + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case *sqlparser.Order: + // MySQL对于排序顺序不同的查询无法使用索引(8.0后支持) + if lastDirection != "" && expr.Direction != lastDirection { + directionNotEq = true + return false, nil + } + lastDirection = expr.Direction + columns = common.MergeColumn(columns, FindColumn(expr)...) + case *sqlparser.Subquery, *sqlparser.JoinTableExpr, *sqlparser.BinaryExpr: + // 忽略子查询,join condition以及数值计算 + return false, nil + } + return true, nil + }, node) + common.LogIfWarn(err, "") + if directionNotEq { + // 当发现Order by中排序顺序不同时,即放弃Oder by条件中的字段 + return []*common.Column{} + } + + return columns +} + +// FindJoinTable 获取 Join 中需要添加索引的表 +// join 优化添加索引分为三种类型:1. inner join, 2. left join, 3.right join +// 针对三种优化类型,需要三种不同的索引添加方案: +// 1. inner join 需要对 join 左右的表添加索引 +// 2. left join 由于左表为全表扫描,需要对右表的关联列添加索引。 +// 3. 
right join 与 left join 相反,需要对左表的关联列添加索引。 +// 以上添加索引的策略前提为join的表为实体表而非临时表。 +func FindJoinTable(node sqlparser.SQLNode, meta common.Meta) common.Meta { + common.Log.Debug("Enter: FindJoinTable(), Caller: %s", common.Caller()) + if meta == nil { + meta = make(common.Meta) + } + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case *sqlparser.JoinTableExpr: + switch expr.Join { + case "join", "natural join": + // 两边表都需要 + findJoinTable(expr.LeftExpr, meta) + findJoinTable(expr.RightExpr, meta) + case "left join", "natural left join", "straight_join": + // 只需要右表 + findJoinTable(expr.RightExpr, meta) + + case "right join", "natural right join": + // 只需要左表 + findJoinTable(expr.LeftExpr, meta) + } + } + return true, nil + }, node) + common.LogIfWarn(err, "") + return meta +} + +// findJoinTable 获取join table +func findJoinTable(expr sqlparser.TableExpr, meta common.Meta) { + common.Log.Debug("Enter: findJoinTable(), Caller: %s", common.Caller()) + if meta == nil { + meta = make(common.Meta) + } + switch tableExpr := expr.(type) { + case *sqlparser.AliasedTableExpr: + switch table := tableExpr.Expr.(type) { + // 获取表名、别名与前缀名(数据库名) + // 表名存放在 AST 中 TableName 里,包含表名与表前缀名。 + // 当与 As 相对应的 Expr 为 TableName 的时候,别名才是一张实体表的别名,否则为结果集的别名。 + case sqlparser.TableName: + db := table.Qualifier.String() + tb := table.Name.String() + + if meta == nil { + meta = make(map[string]*common.DB) + } + + if meta[db] == nil { + meta[db] = common.NewDB(db) + } + + meta[db].Table[tb] = common.NewTable(tb) + + // alias去重 + aliasExist := false + for _, existedAlias := range meta[db].Table[tb].TableAliases { + if existedAlias == tableExpr.As.String() { + aliasExist = true + } + } + if !aliasExist { + meta[db].Table[tb].TableAliases = append(meta[db].Table[tb].TableAliases, tableExpr.As.String()) + } + } + case *sqlparser.ParenTableExpr: + // join 时可能会同时 join 多张表 + for _, tbExpr := range tableExpr.Exprs { + findJoinTable(tbExpr, meta) + } + 
default: + // 如果是如上两种类型都没有命中,说明join的表为临时表,递归调用 FindJoinTable 继续下探查找。 + // NOTE: 这里需要注意的是,如果不递归寻找,如果存在子查询结果集的join表,subquery也会把这个查询提取出。 + // 所以针对default这一段理论上可以忽略处理(待测试) + FindJoinTable(tableExpr, meta) + } +} + +// FindJoinCols 获取 join condition 中使用到的列(必须是 `列 operator 列` 的情况。 +// 如果列对应的值或是function,则应该移到where condition中) +// 某些where条件隐含在Join条件中(INNER JOIN) +func FindJoinCols(node sqlparser.SQLNode) [][]*common.Column { + common.Log.Debug("Enter: FindJoinCols(), Caller: %s", common.Caller()) + var columns [][]*common.Column + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case *sqlparser.JoinTableExpr: + // on + if on := expr.Condition.On; on != nil { + cols := FindColumn(expr.Condition.On) + if len(cols) > 1 { + columns = append(columns, cols) + } + } + + // using + if using := expr.Condition.Using; using != nil { + left := "" + right := "" + + switch tableExpr := expr.LeftExpr.(type) { + case *sqlparser.AliasedTableExpr: + switch table := tableExpr.Expr.(type) { + case sqlparser.TableName: + left = table.Name.String() + } + } + + switch tableExpr := expr.RightExpr.(type) { + case *sqlparser.AliasedTableExpr: + switch table := tableExpr.Expr.(type) { + case sqlparser.TableName: + right = table.Name.String() + } + } + + var cols []*common.Column + for _, col := range using { + if left != "" { + cols = append(cols, &common.Column{ + Name: col.String(), + Table: left, + Alias: make([]string, 0), + }) + } + + if right != "" { + cols = append(cols, &common.Column{ + Name: col.String(), + Table: right, + Alias: make([]string, 0), + }) + } + + } + columns = append(columns, cols) + + } + + } + return true, nil + }, node) + common.LogIfWarn(err, "") + return columns +} + +// FindEQColsInJoinCond 获取 join condition 中应转为whereEQ条件的列 +func FindEQColsInJoinCond(node sqlparser.SQLNode) []*common.Column { + common.Log.Debug("Enter: FindEQColsInJoinCond(), Caller: %s", common.Caller()) + var columns []*common.Column + 
err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case sqlparser.JoinCondition: + columns = append(columns, FindEQColsInWhere(expr)...) + } + return true, nil + }, node) + common.LogIfWarn(err, "") + return columns +} + +// FindINEQColsInJoinCond 获取 join condition 中应转为whereINEQ条件的列 +func FindINEQColsInJoinCond(node sqlparser.SQLNode) []*common.Column { + common.Log.Debug("Enter: FindINEQColsInJoinCond(), Caller: %s", common.Caller()) + var columns []*common.Column + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + case sqlparser.JoinCondition: + columns = append(columns, FindINEQColsInWhere(expr)...) + } + return true, nil + }, node) + common.LogIfWarn(err, "") + return columns +} + +// FindSubquery 拆分subquery,获取最深层的subquery +// 为索引优化获取subquery中包含的列信息 +func FindSubquery(depth int, node sqlparser.SQLNode, queries ...string) []string { + common.Log.Debug("Enter: FindSubquery(), Caller: %s", common.Caller()) + if queries == nil { + queries = make([]string, 0) + } + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch expr := node.(type) { + // 查找SQL中的子查询 + case *sqlparser.Subquery: + noSub := true + // 查看子查询中是否还包含子查询,如果包含,递归找到最深层的子查询 + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch sub := node.(type) { + case *sqlparser.Subquery: + noSub = false + // 查找深度depth,超过最大深度后不再向下查找 + depth = depth + 1 + if depth < common.Config.MaxSubqueryDepth { + queries = append(queries, FindSubquery(depth, sub.Select)...) 
+ } + } + return true, nil + }, expr.Select) + common.LogIfWarn(err, "") + + // 如果没有嵌套的子查询了,返回子查询的SQL + if noSub { + sql := sqlparser.String(expr) + // 去除SQL前后的括号 + queries = append(queries, sql[1:len(sql)-1]) + } + + } + return true, nil + }, node) + common.LogIfWarn(err, "") + return queries +} + +// FindAllCondition 获取AST中所有的condition条件 +func FindAllCondition(node sqlparser.SQLNode) []interface{} { + common.Log.Debug("Enter: FindAllCondition(), Caller: %s", common.Caller()) + var conditions []interface{} + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.ComparisonExpr, *sqlparser.RangeCond, *sqlparser.IsExpr: + conditions = append(conditions, node) + } + return true, nil + }, node) + common.LogIfWarn(err, "") + return conditions +} + +// FindAllCols 获取AST中某个节点下所有的columns +func FindAllCols(node sqlparser.SQLNode, targets ...string) []*common.Column { + var result []*common.Column + // 获取节点内所有的列 + f := func(node sqlparser.SQLNode) { + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch col := node.(type) { + case *sqlparser.ColName: + result = common.MergeColumn(result, &common.Column{ + Name: col.Name.String(), + Table: col.Qualifier.Name.String(), + DB: col.Qualifier.Qualifier.String(), + Alias: make([]string, 0), + }) + } + return true, nil + }, node) + common.LogIfWarn(err, "") + } + + if len(targets) == 0 { + // 如果不指定具体节点类型,则获取全部的column + f(node) + } else { + // 根据target获取所有的节点 + for _, target := range targets { + target = strings.Replace(strings.ToLower(target), " ", "", -1) + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.Subquery: + // 忽略子查询 + case *sqlparser.JoinTableExpr: + if target == "join" { + f(node) + } + case *sqlparser.Where: + if target == "where" { + f(node) + } + case *sqlparser.GroupBy: + if target == "groupby" { + f(node) + } + case 
sqlparser.OrderBy: + if target == "orderby" { + f(node) + } + } + return true, nil + }, node) + common.LogIfWarn(err, "") + } + } + + return result +} + +// GetSubqueryDepth 获取一条SQL的嵌套深度 +func GetSubqueryDepth(node sqlparser.SQLNode) int { + depth := 1 + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.Subquery: + depth++ + } + return true, nil + }, node) + common.LogIfWarn(err, "") + return depth +} + +// getColumnName 获取node中Column具体的定义以及名称 +func getColumnName(node sqlparser.SQLNode) (*sqlparser.ColName, string) { + var colName *sqlparser.ColName + str := "" + switch c := node.(type) { + case *sqlparser.ColName: + if c.Qualifier.Name.IsEmpty() { + str = fmt.Sprintf("`%s`", c.Name.String()) + } else { + if c.Qualifier.Qualifier.IsEmpty() { + str = fmt.Sprintf("`%s`.`%s`", c.Qualifier.Name.String(), c.Name.String()) + } else { + str = fmt.Sprintf("`%s`.`%s`.`%s`", + c.Qualifier.Qualifier.String(), c.Qualifier.Name.String(), c.Name.String()) + } + } + colName = c + } + return colName, str +} diff --git a/ast/meta_test.go b/ast/meta_test.go new file mode 100644 index 00000000..2da7a706 --- /dev/null +++ b/ast/meta_test.go @@ -0,0 +1,324 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ast + +import ( + "fmt" + "testing" + + "github.com/XiaoMi/soar/common" + + "github.com/kr/pretty" + "vitess.io/vitess/go/vt/sqlparser" +) + +func TestGetTableFromExprs(t *testing.T) { + tbExprs := sqlparser.TableExprs{ + &sqlparser.AliasedTableExpr{ + Expr: sqlparser.TableName{ + Name: sqlparser.NewTableIdent("table"), + Qualifier: sqlparser.NewTableIdent("db"), + }, + As: sqlparser.NewTableIdent("as"), + }, + } + meta := GetTableFromExprs(tbExprs) + if tb, ok := meta["db"]; !ok { + t.Errorf("no table qualifier, meta: %s", pretty.Sprint(tb)) + } +} + +func TestGetParseTableWithStmt(t *testing.T) { + for _, sql := range common.TestSQLs { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + if err != nil { + t.Errorf("SQL Parsed error: %v", err) + } + meta := GetMeta(stmt, nil) + pretty.Println(meta) + fmt.Println() + } +} + +func TestFindCondition(t *testing.T) { + for _, sql := range common.TestSQLs { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + //pretty.Println(stmt) + if err != nil { + panic(err) + } + eq := FindEQColsInWhere(stmt) + inEq := FindINEQColsInWhere(stmt) + fmt.Println("WherEQ:") + pretty.Println(eq) + fmt.Println("WherINEQ:") + pretty.Println(inEq) + fmt.Println() + } +} + +func TestFindGroupBy(t *testing.T) { + sqlList := []string{ + "select a from t group by c", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + if err != nil { + panic(err) + } + res := FindGroupByCols(stmt) + pretty.Println(res) + fmt.Println() + } +} + +func TestFindOrderBy(t *testing.T) { + sqlList := []string{ + "select a from t group by c order by d, c desc", + "select a from t group by c order by d desc", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + if err != nil { + panic(err) + } + res := FindOrderByCols(stmt) + pretty.Println(res) + fmt.Println() + } +} + +func TestFindSubquery(t *testing.T) { + sqlList := []string{ + "SELECT * FROM t1 WHERE 
column1 = (SELECT column1 FROM (SELECT column1 FROM t2) a);", + "select column1 from t2", + "SELECT * FROM t1 WHERE column1 = (SELECT column1 FROM t2);", + "select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + if err != nil { + panic(err) + } + + subquery := FindSubquery(0, stmt) + fmt.Println(len(subquery)) + pretty.Println(subquery) + } + +} + +func TestFindJoinTable(t *testing.T) { + sqlList := []string{ + "SELECT * FROM t1 LEFT JOIN (t2 CROSS JOIN t3 CROSS JOIN t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;", + "SELECT * FROM t1 LEFT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT * FROM t1 RIGHT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT left_tbl.* FROM left_tbl LEFT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT left_tbl.* FROM left_tbl RIGHT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + //pretty.Println(stmt) + if err != nil { + panic(err) + } + + joinMeta := FindJoinTable(stmt, nil) + pretty.Println(joinMeta) + } +} + +func TestFindJoinCols(t *testing.T) { + sqlList := []string{ + "SELECT * FROM t1 LEFT JOIN (t2 CROSS JOIN t3 CROSS JOIN t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "select t from a LEFT JOIN b USING (c1, c2, c3)", + "select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c 
on (c.city=l.city) order by phone desc;", + "SELECT * FROM t1 LEFT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT * FROM t1 RIGHT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT left_tbl.* FROM left_tbl LEFT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT left_tbl.* FROM left_tbl RIGHT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + //pretty.Println(stmt) + if err != nil { + panic(err) + } + + columns := FindJoinCols(stmt) + pretty.Println(columns) + } +} + +func TestFindJoinColBeWhereEQ(t *testing.T) { + sqlList := []string{ + "select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;", + "SELECT * FROM t1 LEFT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT * FROM t1 RIGHT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT left_tbl.* FROM left_tbl LEFT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT left_tbl.* FROM left_tbl RIGHT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + //pretty.Println(stmt) + if err != nil { + panic(err) + } + + columns := FindEQColsInJoinCond(stmt) + pretty.Println(columns) + } +} + +func TestFindJoinColBeWhereINEQ(t *testing.T) { + sqlList := []string{ + "select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;", + "SELECT * FROM t1 LEFT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT * FROM t1 RIGHT 
JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b > 'b' AND t4.c = t1.c)", + "SELECT left_tbl.* FROM left_tbl LEFT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT left_tbl.* FROM left_tbl RIGHT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + //pretty.Println(stmt) + if err != nil { + panic(err) + } + + columns := FindINEQColsInJoinCond(stmt) + pretty.Println(columns) + } +} + +func TestFindAllCondition(t *testing.T) { + sqlList := []string{ + "SELECT * FROM t1 LEFT JOIN (t2 CROSS JOIN t3 CROSS JOIN t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "select t from a LEFT JOIN b USING (c1, c2, c3)", + "select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;", + "SELECT * FROM t1 LEFT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT * FROM t1 RIGHT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT left_tbl.* FROM left_tbl LEFT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT left_tbl.* FROM left_tbl RIGHT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT * FROM t1 where a in ('a','b')", + "SELECT * FROM t1 where a BETWEEN 'bar' AND 'foo'", + "SELECT * FROM t1 where a = sum(a,b)", + "SELECT distinct a FROM t1 where a = '2001-01-01 01:01:01'", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + //pretty.Println(stmt) + if err != nil { + panic(err) + } + + columns := FindAllCondition(stmt) + pretty.Println(columns) + } +} + +func TestFindColumn(t *testing.T) { + sqlList := []string{ + "select col, col2, sum(col1) from tb group by col", + "select col from tb group by col,sum(col1)", + "select col, sum(col1) 
from tb group by col", + } + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + //pretty.Println(stmt) + if err != nil { + panic(err) + } + + columns := FindColumn(stmt) + pretty.Println(columns) + } +} + +func TestFindAllCols(t *testing.T) { + sqlList := []string{ + "SELECT * FROM t1 LEFT JOIN (t2 CROSS JOIN t3 CROSS JOIN t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "select t from a LEFT JOIN b USING (c1, c2, c3)", + "select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;", + "SELECT * FROM t1 LEFT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT * FROM t1 RIGHT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT left_tbl.* FROM left_tbl LEFT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT left_tbl.* FROM left_tbl RIGHT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT * FROM t1 where a in ('a','b')", + "SELECT * FROM t1 where a BETWEEN 'bar' AND 'foo'", + "SELECT * FROM t1 where a = sum(a,b)", + "SELECT distinct a FROM t1 where a = '2001-01-01 01:01:01'", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + //pretty.Println(stmt) + if err != nil { + panic(err) + } + + columns := FindAllCols(stmt, "order by") + pretty.Println(columns) + } +} + +func TestGetSubqueryDepth(t *testing.T) { + sqlList := []string{ + "SELECT * FROM t1 LEFT JOIN (t2 CROSS JOIN t3 CROSS JOIN t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "select t from a LEFT JOIN b USING (c1, c2, c3)", + "select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;", + "SELECT * FROM t1 LEFT JOIN (t2, t3, t4) ON 
(t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT * FROM t1 RIGHT JOIN (t2, t3, t4) ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c)", + "SELECT left_tbl.* FROM left_tbl LEFT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT left_tbl.* FROM left_tbl RIGHT JOIN right_tbl ON left_tbl.id = right_tbl.id WHERE right_tbl.id IS NULL;", + "SELECT * FROM t1 where a in ('a','b')", + "SELECT * FROM t1 where a BETWEEN 'bar' AND 'foo'", + "SELECT * FROM t1 where a = sum(a,b)", + "SELECT distinct a FROM t1 where a = '2001-01-01 01:01:01'", + } + + for _, sql := range sqlList { + fmt.Println(sql) + stmt, err := sqlparser.Parse(sql) + if err != nil { + t.Error("syntax check error.") + } + + dep := GetSubqueryDepth(stmt) + fmt.Println(dep) + } +} diff --git a/ast/node_array.go b/ast/node_array.go new file mode 100644 index 00000000..f2164f4a --- /dev/null +++ b/ast/node_array.go @@ -0,0 +1,123 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package ast + +import ( + "errors" + + "github.com/XiaoMi/soar/common" + "vitess.io/vitess/go/vt/sqlparser" +) + +// 该文件用于构造一个存储AST生成节点的链表 +// 以能够更好的对AST中的每个节点进行查询、跳转、重建等 + +// NodeItem 链表节点 +type NodeItem struct { + ID int // NodeItem在List中的编号,与顺序有关 + Prev *NodeItem // 前一个节点 + Self sqlparser.SQLNode // 自身指向的AST Node + Next *NodeItem // 后一个节点 + Array *NodeList // 指针指向所在的链表,用于快速跳转node +} + +// NodeList 链表结构体 +type NodeList struct { + Length int + Head *NodeItem + NodeMap map[int]*NodeItem +} + +// NewNodeList 从抽象语法树中构造一个链表 +func NewNodeList(statement sqlparser.Statement) *NodeList { + // 将AST构造成链表 + l := &NodeList{NodeMap: make(map[int]*NodeItem)} + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + l.Add(node) + return true, nil + }, statement) + common.LogIfWarn(err, "") + return l +} + +// Add 将会把一个sqlparser.SQLNode添加到节点中 +func (l *NodeList) Add(node sqlparser.SQLNode) *NodeItem { + if l.Length == 0 { + l.Head = &NodeItem{ + ID: 0, + Self: node, + Next: nil, + Prev: nil, + Array: l, + } + l.NodeMap[l.Length] = l.Head + } else { + if n, ok := l.NodeMap[l.Length-1]; ok { + n.Next = &NodeItem{ + ID: l.Length - 1, + Prev: n, + Self: node, + Next: nil, + Array: l, + } + l.NodeMap[l.Length] = n.Next + } + } + l.Length++ + + return l.NodeMap[l.Length-1] +} + +// Remove 从链表中移除一个节点 +func (l *NodeList) Remove(node *NodeItem) error { + var err error + defer func() { + err := recover() + if err != nil { + common.Log.Error("func (l *NodeList) Remove recovered: %v", err) + } + }() + + if node.Array != l { + return errors.New("node not belong to this array") + } + + if node.Prev == nil { + // 如果是头结点 + node.Next.Prev = nil + } else if node.Next == nil { + // 如果是尾节点 + node.Prev.Next = nil + } else { + // 删除节点,连接断开的链表 + node.Prev.Next = node.Next + node.Next.Prev = node.Prev + delete(l.NodeMap, node.ID) + } + + return err +} + +// First 返回链表头结点 +func (l *NodeList) First() *NodeItem { + return l.Head +} + +// Last 返回链表末尾节点 +func (l *NodeList) 
Last() *NodeItem { + return l.NodeMap[l.Length-1] +} diff --git a/ast/pretty.go b/ast/pretty.go new file mode 100644 index 00000000..8933513a --- /dev/null +++ b/ast/pretty.go @@ -0,0 +1,347 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ast + +import ( + "container/list" + "regexp" + "strings" + + "github.com/XiaoMi/soar/common" + + "github.com/percona/go-mysql/query" +) + +// Pretty 格式化输出SQL +func Pretty(sql string, method string) (output string) { + // 超出 Config.MaxPrettySQLLength 长度的SQL会对其指纹进行pretty + if len(sql) > common.Config.MaxPrettySQLLength { + fingerprint := query.Fingerprint(sql) + // 超出 Config.MaxFpPrettySqlLength 长度的指纹不会进行pretty + if len(fingerprint) > common.Config.MaxPrettySQLLength { + return sql + } + sql = fingerprint + } + + switch method { + case "builtin", "markdown": + return format(sql) + default: + return sql + } +} + +// format the whitespace in a SQL string to make it easier to read. +// @param string $query The SQL string +// @return String The SQL string with HTML styles and formatting wrapped in a
 `<pre>` tag
+// format normalizes the whitespace of a SQL token stream: top-level clauses
+// start new lines, nested blocks are indented with two spaces, and runs of
+// whitespace inside reserved phrases are collapsed. Every non-whitespace
+// token of the input is emitted unchanged; only layout differs.
+func format(query string) string {
+	// The formatted SQL is accumulated here.
+	result := ""
+	// Indent unit; any literal tabs are also swapped for this at the end.
+	tab := "  "
+	indentLevel := 0
+	var newline bool
+	var inlineParentheses bool
+	var increaseSpecialIndent bool
+	var increaseBlockIndent bool
+	var addedNewline bool
+	var inlineCount int
+	var inlineIndented bool
+	var clauseLimit bool
+	indentTypes := list.New()
+
+	// Compiled once outside the token loop (the pattern is constant, so
+	// MustCompile cannot fail); used to collapse internal whitespace in
+	// reserved phrases such as "ORDER   BY".
+	whitespaceRE := regexp.MustCompile(`\s+`)
+
+	// Tokenize String
+	originalTokens := Tokenize(query)
+
+	// Drop whitespace tokens, remembering each token's index in the original
+	// stream (token.i) so the original spacing can be consulted later.
+	var tokens []Token
+	for i, token := range originalTokens {
+		if token.Type != TokenTypeWhitespace {
+			token.i = i
+			tokens = append(tokens, token)
+		}
+	}
+
+	for i, token := range tokens {
+		highlighted := token.Val
+
+		// If we are increasing the special indent level now
+		if increaseSpecialIndent {
+			indentLevel++
+			increaseSpecialIndent = false
+			indentTypes.PushFront("special")
+		}
+
+		// If we are increasing the block indent level now
+		if increaseBlockIndent {
+			indentLevel++
+			increaseBlockIndent = false
+			indentTypes.PushFront("block")
+		}
+
+		// If we need a new line before the token
+		if newline {
+			result += "\n" + strings.Repeat(tab, indentLevel)
+			newline = false
+			addedNewline = true
+		} else {
+			addedNewline = false
+		}
+
+		// Display comments directly where they appear in the source
+		if token.Type == TokenTypeComment || token.Type == TokenTypeBlockComment {
+			if token.Type == TokenTypeBlockComment {
+				indent := strings.Repeat(tab, indentLevel)
+				result += "\n" + indent
+				highlighted = strings.Replace(highlighted, "\n", "\n"+indent, -1)
+			}
+
+			result += highlighted
+			newline = true
+			continue
+		}
+
+		if inlineParentheses {
+			// End of inline parentheses
+			if token.Val == ")" {
+				result = strings.TrimRight(result, " ")
+
+				if inlineIndented {
+					indentTypes.Remove(indentTypes.Front())
+					if indentLevel > 0 {
+						indentLevel--
+					}
+					result += strings.Repeat(tab, indentLevel)
+				}
+
+				inlineParentheses = false
+
+				result += highlighted + " "
+				continue
+			}
+
+			if token.Val == "," {
+				if inlineCount >= 30 {
+					inlineCount = 0
+					newline = true
+				}
+			}
+
+			inlineCount += len(token.Val)
+		}
+
+		// Opening parentheses increase the block indent level and start a new line
+		if token.Val == "(" {
+			// First check if this should be an inline parentheses block
+			// Examples are "NOW()", "COUNT(*)", "int(10)", key(`somecolumn`), DECIMAL(7,2)
+			// Allow up to 3 non-whitespace tokens inside inline parentheses
+			length := 0
+			for j := 1; j <= 250; j++ {
+				// Reached end of string
+				if i+j >= len(tokens) {
+					break
+				}
+
+				next := tokens[i+j]
+
+				// Reached closing parentheses, able to inline it
+				if next.Val == ")" {
+					inlineParentheses = true
+					inlineCount = 0
+					inlineIndented = false
+					break
+				}
+
+				// Reached an invalid token for inline parentheses
+				if next.Val == ";" || next.Val == "(" {
+					break
+				}
+
+				// Reached an invalid token type for inline parentheses
+				if next.Type == TokenTypeReservedToplevel ||
+					next.Type == TokenTypeReservedNewline ||
+					next.Type == TokenTypeComment ||
+					next.Type == TokenTypeBlockComment {
+					break
+				}
+
+				length += len(next.Val)
+			}
+
+			if inlineParentheses && length > 30 {
+				increaseBlockIndent = true
+				inlineIndented = true
+				newline = true
+			}
+
+			// Take out the preceding space unless there was whitespace there in
+			// the original query. The comparison previously used ">", which is
+			// unsatisfiable (tokens is a subset of originalTokens) and made this
+			// branch dead; "<=" is the in-bounds check the comment describes.
+			if token.i != 0 && (token.i-1) <= len(originalTokens)-1 &&
+				originalTokens[token.i-1].Type != TokenTypeWhitespace {
+
+				result = strings.TrimRight(result, " ")
+			}
+
+			// Only non-inline parentheses open an indented block; inline groups
+			// such as "NOW()" must stay on one line. The "!" was missing here,
+			// which inverted the inline detection performed above.
+			if !inlineParentheses {
+				increaseBlockIndent = true
+				// Add a newline after the parentheses
+				newline = true
+			}
+
+		} else if token.Val == ")" {
+			// Closing parentheses decrease the block indent level
+			// Remove whitespace before the closing parentheses
+			result = strings.TrimRight(result, " ")
+
+			if indentLevel > 0 {
+				indentLevel--
+			}
+
+			// Pop any "special" indents opened since the matching "(".
+			// The front element must be re-fetched on every iteration: the old
+			// loop kept reusing the first (already removed) *list.Element, whose
+			// stale Value made this spin forever once two or more indent types
+			// were stacked with "special" in front.
+			// NOTE(review): the matching "block" entry is deliberately left on
+			// the list here, mirroring the original control flow — confirm
+			// against the upstream formatter whether it should be popped too.
+			for indentTypes.Len() > 0 {
+				front := indentTypes.Front()
+				if front.Value.(string) != "special" {
+					break
+				}
+				if indentLevel > 0 {
+					indentLevel--
+				}
+				indentTypes.Remove(front)
+			}
+
+			if indentLevel < 0 {
+				// This is an error
+				indentLevel = 0
+			}
+
+			// Add a newline before the closing parentheses (if not already added)
+			if !addedNewline {
+				result += "\n" + strings.Repeat(tab, indentLevel)
+			}
+
+		} else if token.Type == TokenTypeReservedToplevel {
+			// Top level reserved words start a new line and increase the special indent level
+			increaseSpecialIndent = true
+
+			// If the last indent type was 'special', decrease the special indent for this round
+			if indentTypes.Len() > 0 && indentTypes.Front().Value.(string) == "special" {
+				if indentLevel > 0 {
+					indentLevel--
+				}
+				indentTypes.Remove(indentTypes.Front())
+			}
+
+			// Add a newline after the top level reserved word
+			newline = true
+			// Add a newline before the top level reserved word (if not already added)
+			if !addedNewline {
+				result += "\n" + strings.Repeat(tab, indentLevel)
+			} else {
+				// If we already added a newline, redo the indentation since it may be different now
+				result = strings.TrimSuffix(result, tab) + strings.Repeat(tab, indentLevel)
+			}
+
+			// Collapse any internal whitespace in the reserved phrase
+			// (e.g. "ORDER   BY" -> "ORDER BY"); a no-op when there is none.
+			// The previous guard condition was tautologically true, so applying
+			// the replacement unconditionally preserves behavior exactly.
+			highlighted = whitespaceRE.ReplaceAllString(highlighted, " ")
+
+			// If this is a SQL 'LIMIT' clause, arm clauseLimit so the following
+			// comma does not start a new line. The "!" was missing here, which
+			// made the comma suppression below unreachable.
+			if token.Val == "LIMIT" && !inlineParentheses {
+				clauseLimit = true
+			}
+
+		} else if clauseLimit && token.Val != "," &&
+			token.Type != TokenTypeNumber &&
+			token.Type != TokenTypeWhitespace {
+			// Checks if we are out of the limit clause
+
+			clauseLimit = false
+
+		} else if token.Val == "," && !inlineParentheses {
+			// Commas start a new line (unless within inline parentheses or SQL 'LIMIT' clause)
+			if clauseLimit {
+				newline = false
+				clauseLimit = false
+			} else {
+				// All other cases of commas
+				newline = true
+			}
+
+		} else if token.Type == TokenTypeReservedNewline {
+			// Newline reserved words start a new line
+			// Add a newline before the reserved word (if not already added)
+			if !addedNewline {
+				result += "\n" + strings.Repeat(tab, indentLevel)
+			}
+
+			// Collapse any internal whitespace in the reserved phrase.
+			highlighted = whitespaceRE.ReplaceAllString(highlighted, " ")
+
+		} else if token.Type == TokenTypeBoundary {
+			// Multiple boundary characters in a row should not have spaces between them (not including parentheses)
+			if i != 0 && i < len(tokens) &&
+				tokens[i-1].Type == TokenTypeBoundary {
+
+				if token.i != 0 && token.i < len(originalTokens) &&
+					originalTokens[token.i-1].Type != TokenTypeWhitespace {
+
+					result = strings.TrimRight(result, " ")
+				}
+			}
+		}
+
+		// If the token shouldn't have a space before it
+		if token.Val == "." || token.Val == "," || token.Val == ";" {
+			result = strings.TrimRight(result, " ")
+		}
+
+		result += highlighted + " "
+
+		// If the token shouldn't have a space after it
+		if token.Val == "(" || token.Val == "." {
+			result = strings.TrimRight(result, " ")
+		}
+
+		// If this is the "-" of a negative number, it shouldn't have a space after it
+		if token.Val == "-" && i+1 < len(tokens) && tokens[i+1].Type == TokenTypeNumber && i != 0 {
+			prev := tokens[i-1].Type
+			if prev != TokenTypeQuote &&
+				prev != TokenTypeBacktickQuote &&
+				prev != TokenTypeWord &&
+				prev != TokenTypeNumber {
+
+				result = strings.TrimRight(result, " ")
+			}
+		}
+	}
+
+	// Replace tab characters with the configuration tab character
+	result = strings.TrimRight(strings.Replace(result, "\t", tab, -1), " ")
+
+	return result
+}
diff --git a/ast/pretty_test.go b/ast/pretty_test.go
new file mode 100644
index 00000000..877c83ba
--- /dev/null
+++ b/ast/pretty_test.go
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ast
+
+import (
+	"flag"
+	"fmt"
+	"testing"
+
+	"github.com/XiaoMi/soar/common"
+
+	"vitess.io/vitess/go/vt/sqlparser"
+)
+
+// update is the `-update` test flag; per its usage string it requests that
+// .golden fixture files be rewritten — presumably instead of being compared
+// against (verify at the call sites that consume this flag).
+var update = flag.Bool("update", false, "update .golden files")
+
+var TestSqlsPretty = []string{
+	"select sourcetable, if(f.lastcontent = ?, f.lastupdate, f.lastcontent) as lastactivity, f.totalcount as activity, type.class as type, (f.nodeoptions & ?) as nounsubscribe from node as f inner join contenttype as type on type.contenttypeid = f.contenttypeid inner join subscribed as sd on sd.did = f.nodeid and sd.userid = ? union all select f.name as title, f.userid as keyval, ? as sourcetable, ifnull(f.lastpost, f.joindate) as lastactivity, f.posts as activity, ? as type, ? as nounsubscribe from user as f inner join userlist as ul on ul.relationid = f.userid and ul.userid = ? where ul.type = ? and ul.aq = ? order by title limit ?",
+	"administrator command: Init DB",
+	"CALL foo(1, 2, 3)",
+	"### Channels ###\n\u0009\u0009\u0009\u0009\u0009SELECT sourcetable, IF(f.lastcontent = 0, f.lastupdate, f.lastcontent) AS lastactivity,\n\u0009\u0009\u0009\u0009\u0009f.totalcount AS activity, type.class AS type,\n\u0009\u0009\u0009\u0009\u0009(f.nodeoptions \u0026 512) AS noUnsubscribe\n\u0009\u0009\u0009\u0009\u0009FROM node AS f\n\u0009\u0009\u0009\u0009\u0009INNER JOIN contenttype AS type ON type.contenttypeid = f.contenttypeid \n\n\u0009\u0009\u0009\u0009\u0009INNER JOIN subscribed AS sd ON sd.did = f.nodeid AND sd.userid = 15965\n UNION  ALL \n\n\u0009\u0009\u0009\u0009\u0009### Users ###\n\u0009\u0009\u0009\u0009\u0009SELECT f.name AS title, f.userid AS keyval, 'user' AS sourcetable, IFNULL(f.lastpost, f.joindate) AS lastactivity,\n\u0009\u0009\u0009\u0009\u0009f.posts as activity, 'Member' AS type,\n\u0009\u0009\u0009\u0009\u00090 AS noUnsubscribe\n\u0009\u0009\u0009\u0009\u0009FROM user AS f\n\u0009\u0009\u0009\u0009\u0009INNER JOIN userlist AS ul ON ul.relationid = f.userid AND ul.userid = 15965\n\u0009\u0009\u0009\u0009\u0009WHERE ul.type = 'f' AND ul.aq = 'yes'\n ORDER BY title ASC LIMIT 100",
+	"CREATE DATABASE org235_percona345 COLLATE 'utf8_general_ci'",
+	"insert into abtemp.coxed select foo.bar from foo",
+	"insert into foo(a, b, c) value(2, 4, 5)",
+	"insert into foo(a, b, c) values(2, 4, 5)",
+	"insert into foo(a, b, c) values(2, 4, 5) , (2,4,5)",
+	"insert into foo values (1, '(2)', 'This is a trick: ). More values.', 4)",
+	"insert into tb values (1)",
+	"INSERT INTO t (ts) VALUES ('()', '\\(', '\\)')",
+	"INSERT INTO t (ts) VALUES (NOW())",
+	"INSERT INTO t () VALUES ()",
+	"insert into t values (1), (2), (3)\n\n\ton duplicate key update query_count=1",
+	"insert into t values (1) on duplicate key update query_count=COALESCE(query_count, 0) + VALUES(query_count)",
+	"LOAD DATA INFILE '/tmp/foo.txt' INTO db.tbl",
+	"select 0e0, +6e-30, -6.00 from foo where a = 5.5 or b=0.5 or c=.5",
+	"select 0x0, x'123', 0b1010, b'10101' from foo",
+	"select 123_foo from 123_foo",
+	"select 123foo from 123foo",
+	`SELECT 	1 AS one FROM calls USE INDEX(index_name)`,
+	"SELECT /*!40001 SQL_NO_CACHE */ * FROM `film`",
+	"SELECT 'a' 'b' 'c' 'd' FROM kamil",
+	"SELECT BENCHMARK(100000000, pow(rand(), rand())), 1 FROM `-hj-7d6-shdj5-7jd-kf-g988h-`.`-aaahj-7d6-shdj5-7&^%$jd-kf-g988h-9+4-5*6ab-`",
+	"SELECT c FROM org235.t WHERE id=0xdeadbeaf",
+	"select c from t where i=1 order by c asc",
+	"SELECT c FROM t WHERE id=0xdeadbeaf",
+	"SELECT c FROM t WHERE id=1",
+	"select `col` from `table-1` where `id` = 5",
+	"SELECT `db`.*, (CASE WHEN (`date_start` <=  '2014-09-10 09:17:59' AND `date_end` >=  '2014-09-10 09:17:59') THEN 'open' WHEN (`date_start` >  '2014-09-10 09:17:59' AND `date_end` >  '2014-09-10 09:17:59') THEN 'tbd' ELSE 'none' END) AS `status` FROM `foo` AS `db` WHERE (a_b in ('1', '10101'))",
+	"select field from `-master-db-1`.`-table-1-` order by id, ?;",
+	"select   foo",
+	"select foo_1 from foo_2_3",
+	"select foo -- bar\n",
+	"select foo-- bar\n,foo",
+	"select '\\\\' from foo",
+	"select * from foo limit 5",
+	"select * from foo limit 5, 10",
+	"select * from foo limit 5 offset 10",
+	"SELECT * from foo where a = 5",
+	"select * from foo where a in (5) and b in (5, 8,9 ,9 , 10)",
+	"SELECT '' '' '' FROM kamil",
+	" select  * from\nfoo where a = 5",
+	"SELECT * FROM prices.rt_5min where id=1",
+	"SELECT * FROM table WHERE field = 'value' /*arbitrary/31*/ ",
+	"SELECT * FROM table WHERE field = 'value' /*arbitrary31*/ ",
+	"SELECT *    FROM t WHERE 1=1 AND id=1",
+	"select * from t where (base.nid IN  ('1412', '1410', '1411'))",
+	`select * from t where i=1      order            by
+             a,  b          ASC, d    DESC,
+
+                                    e asc`,
+	"select * from t where i=1 order by a, b ASC, d DESC, e asc",
+	"select 'hello'\n",
+	"select 'hello', '\nhello\n', \"hello\", '\\'' from foo",
+	"SELECT ID, name, parent, type FROM posts WHERE _name IN ('perf','caching') AND (type = 'page' OR type = 'attachment')",
+	"SELECT name, value FROM variable",
+	"select \n-- bar\n foo",
+	"select null, 5.001, 5001. from foo",
+	"select sleep(2) from test.n",
+	"SELECT t FROM field WHERE  (entity_type = 'node') AND (entity_id IN  ('609')) AND (language IN  ('und')) AND (deleted = '0') ORDER BY delta ASC",
+	"select  t.table_schema,t.table_name,engine  from information_schema.tables t  inner join information_schema.columns c  on t.table_schema=c.table_schema and t.table_name=c.table_name group by t.table_schema,t.table_name having  sum(if(column_key in ('PRI','UNI'),1,0))=0",
+	"/* -- S++ SU ABORTABLE -- spd_user: rspadim */SELECT SQL_SMALL_RESULT SQL_CACHE DISTINCT centro_atividade FROM est_dia WHERE unidade_id=1001 AND item_id=67 AND item_id_red=573",
+	`UPDATE groups_search SET  charter = '   -------3\'\' XXXXXXXXX.\n    \n    -----------------------------------------------------', show_in_list = 'Y' WHERE group_id='aaaaaaaa'`,
+	"use `foo`",
+	"select sourcetable, if(f.lastcontent = ?, f.lastupdate, f.lastcontent) as lastactivity, f.totalcount as activity, type.class as type, (f.nodeoptions & ?) as nounsubscribe from node as f inner join contenttype as type on type.contenttypeid = f.contenttypeid inner join subscribed as sd on sd.did = f.nodeid and sd.userid = ? union all select f.name as title, f.userid as keyval, ? as sourcetable, ifnull(f.lastpost, f.joindate) as lastactivity, f.posts as activity, ? as type, ? as nounsubscribe from user as f inner join userlist as ul on ul.relationid = f.userid and ul.userid = ? where ul.type = ? and ul.aq = ? order by title limit ?",
+	"CREATE INDEX part_of_name ON customer (name(10));",
+	"alter table `sakila`.`t1` add index `idx_col`(`col`)",
+	"alter table `sakila`.`t1` add UNIQUE index `idx_col`(`col`)",
+	"alter table `sakila`.`t1` add index `idx_ID`(`ID`)",
+
+	// ADD|DROP COLUMN
+	"ALTER TABLE t2 DROP COLUMN c, DROP COLUMN d;",
+	"ALTER TABLE T2 ADD COLUMN C int;",
+	"ALTER TABLE T2 ADD COLUMN D int FIRST;",
+	"ALTER TABLE T2 ADD COLUMN E int AFTER D;",
+
+	// RENMAE COLUMN
+	"ALTER TABLE t1 RENAME COLUMN a TO b",
+
+	// RENAME INDEX
+	"ALTER TABLE t1 RENAME INDEX idx_a TO idx_b",
+	"ALTER TABLE t1 RENAME KEY idx_a TO idx_b",
+
+	// RENAME TABLE
+	"ALTER TABLE db.old_table RENAME new_table;",
+	"ALTER TABLE old_table RENAME TO new_table;",
+	"ALTER TABLE old_table RENAME AS new_table;",
+
+	// MODIFY & CHANGE
+	"ALTER TABLE t1 MODIFY col1 BIGINT UNSIGNED DEFAULT 1 COMMENT 'my column';",
+	"ALTER TABLE t1 CHANGE b a INT NOT NULL;",
+}
+
+func TestPretty(t *testing.T) {
+	err := common.GoldenDiff(func() {
+		for _, sql := range append(TestSqlsPretty, common.TestSQLs...) {
+			fmt.Println(sql)
+			fmt.Println(Pretty(sql, "builtin"))
+		}
+	}, t.Name(), update)
+	if nil != err {
+		t.Fatal(err)
+	}
+}
+
+func TestIsKeyword(t *testing.T) {
+	tks := map[string]bool{
+		"AGAINST":        true,
+		"AUTO_INCREMENT": true,
+		"ADD":            true,
+		"BETWEEN":        true,
+		".":              false,
+		"actions":        false,
+		`"`:              false,
+		":":              false,
+	}
+	for tk, v := range tks {
+		if IsMysqlKeyword(tk) != v {
+			t.Error("isKeyword:", tk)
+		}
+	}
+}
+
+func TestRemoveComments(t *testing.T) {
+	for _, sql := range TestSqlsPretty {
+		stmt, _ := sqlparser.Parse(sql)
+		newSQL := sqlparser.String(stmt)
+		if newSQL != sql {
+			fmt.Print(newSQL)
+		}
+	}
+}
+
+func TestMysqlEscapeString(t *testing.T) {
+	var strs = []map[string]string{
+		{
+			"input":  "abc",
+			"output": "abc",
+		},
+		{
+			"input":  "'abc",
+			"output": "\\'abc",
+		},
+		{
+			"input": `
+abc`,
+			"output": `\
+abc`,
+		},
+		{
+			"input":  "\"abc",
+			"output": "\\\"abc",
+		},
+	}
+	for _, str := range strs {
+		output, err := MysqlEscapeString(str["input"])
+		if err != nil {
+			t.Error("TestMysqlEscapeString", err)
+		} else {
+			if output != str["output"] {
+				t.Error("TestMysqlEscapeString", output, str["output"])
+			}
+		}
+	}
+}
diff --git a/ast/rewrite.go b/ast/rewrite.go
new file mode 100644
index 00000000..a21bbcdd
--- /dev/null
+++ b/ast/rewrite.go
@@ -0,0 +1,1728 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ast
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+
+	"github.com/XiaoMi/soar/common"
+
+	"github.com/kr/pretty"
+	"vitess.io/vitess/go/vt/sqlparser"
+)
+
// Rule describes a single SQL rewrite rule.
type Rule struct {
	Name        string                  `json:"Name"`
	Description string                  `json:"Description"`
	Original    string                  `json:"Original"` // Bad example. When empty or "暂不支持" (not yet supported) the rule is hidden from list-rewrite-rules.
	Suggest     string                  `json:"Suggest"`  // Good example.
	Func        func(*Rewrite) *Rewrite `json:"-"`        // Left nil when the rewrite needs several SQL statements rewritten together (e.g. mergealter).
}
+
// RewriteRules are the SQL rewrite rules. NOTE: this list is ordered — the
// rules are applied in sequence and must not be reordered.
var RewriteRules = []Rule{
	{
		Name:        "dml2select",
		Description: "将数据库更新请求转换为只读查询请求,便于执行EXPLAIN",
		Original:    "DELETE FROM film WHERE length > 100",
		Suggest:     "select * from film where length > 100",
		Func:        (*Rewrite).RewriteDML2Select,
	},
	{
		Name:        "star2columns",
		Description: "为SELECT *补全表的列信息",
		Original:    "SELECT * FROM film",
		Suggest:     "select film.film_id, film.title from film",
		Func:        (*Rewrite).RewriteStar2Columns,
	},
	{
		Name:        "insertcolumns",
		Description: "为INSERT补全表的列信息",
		Original:    "insert into film values(1,2,3,4,5)",
		Suggest:     "insert into film(film_id, title, description, release_year, language_id) values (1, 2, 3, 4, 5)",
		Func:        (*Rewrite).RewriteInsertColumns,
	},
	{
		Name:        "having",
		Description: "将查询的HAVING子句改写为WHERE中的查询条件",
		Original:    "SELECT state, COUNT(*) FROM Drivers GROUP BY state HAVING state IN ('GA', 'TX') ORDER BY state",
		Suggest:     "select state, COUNT(*) from Drivers where state in ('GA', 'TX') group by state order by state asc",
		Func:        (*Rewrite).RewriteHaving,
	},
	{
		Name:        "orderbynull",
		Description: "如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加ORDER BY NULL",
		Original:    "SELECT sum(col1) FROM tbl GROUP BY col",
		Suggest:     "select sum(col1) from tbl group by col order by null",
		Func:        (*Rewrite).RewriteAddOrderByNull,
	},
	{
		Name:        "unionall",
		Description: "可以接受重复的时间,使用UNION ALL替代UNION以提高查询效率",
		Original:    "select country_id from city union select country_id from country",
		Suggest:     "select country_id from city union all select country_id from country",
		Func:        (*Rewrite).RewriteUnionAll,
	},
	{
		Name:        "or2in",
		Description: "将同一列不同条件的OR查询转写为IN查询",
		Original:    "select country_id from city where col1 = 1 or (col2 = 1 or col2 = 2 ) or col1 = 3;",
		Suggest:     "select country_id from city where (col2 in (1, 2)) or col1 in (1, 3);",
		Func:        (*Rewrite).RewriteOr2In,
	},
	{
		Name:        "innull",
		Description: "如果IN条件中可能有NULL值而又想匹配NULL值时,建议添加OR col IS NULL",
		Original:    "暂不支持",
		Suggest:     "暂不支持",
		Func:        (*Rewrite).RewriteInNull,
	},
	// or2union must run only after every OR-related rewrite has finished.
	{
		Name:        "or2union",
		Description: "将不同列的OR查询转为UNION查询,建议结合unionall重写策略一起使用",
		Original:    "暂不支持",
		Suggest:     "暂不支持",
		Func:        (*Rewrite).RewriteOr2Union,
	},
	{
		Name:        "dmlorderby",
		Description: "删除DML更新操作中无意义的ORDER BY",
		Original:    "DELETE FROM tbl WHERE col1=1 ORDER BY col",
		Suggest:     "delete from tbl where col1 = 1",
		Func:        (*Rewrite).RewriteRemoveDMLOrderBy,
	},
	/*
		{
			Name:        "groupbyconst",
			Description: "删除无意义的GROUP BY常量",
			Original:    "SELECT sum(col1) FROM tbl GROUP BY 1;",
			Suggest:     "select sum(col1) from tbl",
			Func:        (*Rewrite).RewriteGroupByConst,
		},
	*/
	{
		Name:        "sub2join",
		Description: "将子查询转换为JOIN查询",
		Original:    "暂不支持",
		Suggest:     "暂不支持",
		Func:        (*Rewrite).RewriteSubQuery2Join,
	},
	{
		Name:        "join2sub",
		Description: "将JOIN查询转换为子查询",
		Original:    "暂不支持",
		Suggest:     "暂不支持",
		Func:        (*Rewrite).RewriteJoin2SubQuery,
	},
	{
		Name:        "distinctstar",
		Description: "DISTINCT *对有主键的表没有意义,可以将DISTINCT删掉",
		Original:    "SELECT DISTINCT * FROM film;",
		Suggest:     "SELECT * FROM film",
		Func:        (*Rewrite).RewriteDistinctStar,
	},
	{
		Name:        "standard",
		Description: "SQL标准化,如:关键字转换为小写",
		Original:    "SELECT sum(col1) FROM tbl GROUP BY 1;",
		Suggest:     "select sum(col1) from tbl group by 1",
		Func:        (*Rewrite).RewriteStandard,
	},
	{
		Name:        "mergealter",
		Description: "合并同一张表的多条ALTER语句",
		Original:    "ALTER TABLE t2 DROP COLUMN c;ALTER TABLE t2 DROP COLUMN d;",
		Suggest:     "ALTER TABLE t2 DROP COLUMN c, DROP COLUMN d;",
	},
	{
		Name:        "alwaystrue",
		Description: "删除无用的恒真判断条件",
		Original:    "SELECT count(col) FROM tbl where 'a'= 'a' or ('b' = 'b' and a = 'b');",
		Suggest:     "select count(col) from tbl where (a = 'b');",
		Func:        (*Rewrite).RewriteAlwaysTrue,
	},
	{
		Name:        "countstar",
		Description: "不建议使用COUNT(col)或COUNT(常量),建议改写为COUNT(*)",
		Original:    "SELECT count(col) FROM tbl GROUP BY 1;",
		Suggest:     "SELECT count(*) FROM tbl GROUP BY 1;",
		Func:        (*Rewrite).RewriteCountStar,
	},
	{
		Name:        "innodb",
		Description: "建表时建议使用InnoDB引擎,非InnoDB引擎表自动转InnoDB",
		Original:    "CREATE TABLE t1(id bigint(20) NOT NULL AUTO_INCREMENT);",
		Suggest:     "create table t1 (\n\tid bigint(20) not null auto_increment\n) ENGINE=InnoDB;",
		Func:        (*Rewrite).RewriteInnoDB,
	},
	{
		Name:        "autoincrement",
		Description: "将autoincrement初始化为1",
		Original:    "CREATE TABLE t1(id bigint(20) NOT NULL AUTO_INCREMENT) ENGINE=InnoDB AUTO_INCREMENT=123802;",
		Suggest:     "create table t1(id bigint(20) not null auto_increment) ENGINE=InnoDB auto_increment=1;",
		Func:        (*Rewrite).RewriteAutoIncrement,
	},
	{
		Name:        "intwidth",
		Description: "整型数据类型修改默认显示宽度",
		Original:    "create table t1 (id int(20) not null auto_increment) ENGINE=InnoDB;",
		Suggest:     "create table t1 (id int(10) not null auto_increment) ENGINE=InnoDB;",
		Func:        (*Rewrite).RewriteIntWidth,
	},
	{
		Name:        "truncate",
		Description: "不带WHERE条件的DELETE操作建议修改为TRUNCATE",
		Original:    "DELETE FROM tbl",
		Suggest:     "truncate table tbl",
		Func:        (*Rewrite).RewriteTruncate,
	},
	{
		Name:        "rmparenthesis",
		Description: "去除没有意义的括号",
		Original:    "select col from table where (col = 1);",
		Suggest:     "select col from table where col = 1;",
		Func:        (*Rewrite).RewriteRmParenthesis,
	},
	// delimiter must stay last, otherwise the delimiter cannot be appended
	// to the output of later rewrites.
	{
		Name:        "delimiter",
		Description: "补全DELIMITER",
		Original:    "use sakila",
		Suggest:     "use sakila;",
		Func:        (*Rewrite).RewriteDelimiter,
	},
	// TODO in to exists
	// TODO exists to in
}
+
+// ListRewriteRules 打印SQL重写规则
+func ListRewriteRules(rules []Rule) {
+	switch common.Config.ReportType {
+	case "json":
+		js, err := json.MarshalIndent(rules, "", "  ")
+		if err == nil {
+			fmt.Println(string(js))
+		}
+	default:
+
+		fmt.Print("# 重写规则\n\n[toc]\n\n")
+		for _, r := range rules {
+			if !common.Config.Verbose && (r.Original == "" || r.Original == "暂不支持") {
+				continue
+			}
+
+			fmt.Print("## ", common.MarkdownEscape(r.Name),
+				"\n* **Description**:", r.Description+"\n",
+				"\n* **Original**:\n\n```sql\n", r.Original, "\n```\n",
+				"\n* **Suggest**:\n\n```sql\n", r.Suggest, "\n```\n")
+
+		}
+	}
+}
+
// Rewrite carries the state of one SQL rewrite pass.
type Rewrite struct {
	SQL     string              // input SQL
	NewSQL  string              // rewritten SQL, empty until a rule produces output
	Stmt    sqlparser.Statement // AST of the SQL being rewritten
	Columns common.TableColumns // column metadata consumed by star2columns/insertcolumns
}
+
+// NewRewrite 返回一个*Rewrite对象,如果SQL无法被正常解析,将错误输出到日志中,返回一个nil
+func NewRewrite(sql string) *Rewrite {
+	stmt, err := sqlparser.Parse(sql)
+	if err != nil {
+		common.Log.Error(err.Error(), sql)
+		return nil
+	}
+
+	return &Rewrite{
+		SQL:  sql,
+		Stmt: stmt,
+	}
+}
+
+// Rewrite 入口函数
+func (rw *Rewrite) Rewrite() *Rewrite {
+	defer func() {
+		if err := recover(); err != nil {
+			common.Log.Error("Query rewrite Error: %s, maybe hit a bug.\nQuery: %s \nAST: %s",
+				err, rw.SQL, pretty.Sprint(rw.Stmt))
+			return
+		}
+	}()
+
+	for _, rule := range RewriteRules {
+		if RewriteRuleMatch(rule.Name) && rule.Func != nil {
+			rule.Func(rw)
+			common.Log.Debug("Rewrite Rule:%s Output NewSQL: %s", rule.Name, rw.NewSQL)
+		}
+	}
+	if rw.NewSQL == "" {
+		rw.NewSQL = rw.SQL
+	}
+	rw.Stmt, _ = sqlparser.Parse(rw.NewSQL)
+
+	// TODO: 重新前后返回结果一致性对比
+
+	// TODO: 前后SQL性能对比
+	return rw
+}
+
+// RewriteDelimiter delimiter: 补分号,可以指定不同的DELIMITER
+func (rw *Rewrite) RewriteDelimiter() *Rewrite {
+	if rw.NewSQL != "" {
+		rw.NewSQL = strings.TrimSuffix(rw.NewSQL, common.Config.Delimiter) + common.Config.Delimiter
+	} else {
+		rw.NewSQL = strings.TrimSuffix(rw.SQL, common.Config.Delimiter) + common.Config.Delimiter
+	}
+	return rw
+}
+
+// RewriteStandard standard: 使用vitess提供的String功能将抽象语法树转写回SQL,注意:这可能转写失败。
+func (rw *Rewrite) RewriteStandard() *Rewrite {
+	if _, err := sqlparser.Parse(rw.SQL); err == nil {
+		rw.NewSQL = sqlparser.String(rw.Stmt)
+	}
+	return rw
+}
+
+// RewriteAlwaysTrue alwaystrue: 删除恒真条件
+func (rw *Rewrite) RewriteAlwaysTrue() (reWriter *Rewrite) {
+	array := NewNodeList(rw.Stmt)
+	tNode := array.Head
+	for {
+		omitAwaysTrue(tNode)
+		tNode = tNode.Next
+		if tNode == nil {
+			break
+		}
+	}
+
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
// isAlwaysTrue reports whether a ComparisonExpr between two literal values is
// always true (e.g. 'a' = 'a'). A nil expr is treated as always true.
func isAlwaysTrue(expr *sqlparser.ComparisonExpr) bool {
	if expr == nil {
		return true
	}

	var result bool
	// Normalize the operator; anything outside the supported set is
	// conservatively treated as not-always-true.
	switch expr.Operator {
	case "<>":
		expr.Operator = "!=" // NOTE: normalization mutates the caller's AST in place
	case "<=>":
		expr.Operator = "="
	case ">=", "<=", "!=", "=":
	default:
		return false
	}

	var left []byte
	var right []byte

	// Left operand: only plain literals (SQLVal) are considered.
	switch l := expr.Left.(type) {
	case *sqlparser.SQLVal:
		left = l.Val
	default:
		return false
	}

	// Right operand: same restriction as the left.
	switch r := expr.Right.(type) {
	case *sqlparser.SQLVal:
		right = r.Val
	default:
		return false
	}

	// Compare the raw literal bytes. NOTE(review): this is a byte-wise
	// (lexicographic) comparison, so ordering operators over numeric
	// literals of different lengths could misjudge (e.g. "10" vs "9") —
	// confirm whether non-equality operators are ever fed numeric
	// literals. The ">" and "<" cases below are currently unreachable
	// because the first switch never lets those operators through.
	switch expr.Operator {
	case "=":
		result = bytes.Equal(left, right)
	case "!=":
		result = !bytes.Equal(left, right)
	case ">":
		result = bytes.Compare(left, right) > 0
	case ">=":
		result = bytes.Compare(left, right) >= 0
	case "<":
		result = bytes.Compare(left, right) < 0
	case "<=":
		result = bytes.Compare(left, right) <= 0
	default:
		result = false
	}

	return result
}
+
// omitAwaysTrue removes always-true conditions from the AST, starting at the
// given linked-list node and recursing toward the list head (node.Prev) so
// that simplifications cascade upward into parent expressions.
func omitAwaysTrue(node *NodeItem) {
	if node == nil {
		return
	}

	switch self := node.Self.(type) {
	case *sqlparser.Where:
		if self != nil {
			switch cond := self.Expr.(type) {
			case *sqlparser.ComparisonExpr:
				// WHERE <always-true> -> drop the whole WHERE expression.
				if isAlwaysTrue(cond) {
					self.Expr = nil
				}
			case *sqlparser.ParenExpr:
				// WHERE ( <already emptied> ) -> drop the WHERE expression.
				if cond.Expr == nil {
					self.Expr = nil
				}
			}
		}
	case *sqlparser.ParenExpr:
		if self != nil {
			switch cond := self.Expr.(type) {
			case *sqlparser.ComparisonExpr:
				// ( <always-true> ) -> empty the parenthesized expression.
				if isAlwaysTrue(cond) {
					self.Expr = nil
				}
			}
		}
	case *sqlparser.AndExpr:
		if self != nil {
			// tmp holds the surviving child expression (or nil when both
			// sides are always true) that will replace this AndExpr in its
			// parent.
			var tmp sqlparser.Expr
			isRightTrue := false
			isLeftTrue := false
			tmp = nil

			// Inspect the left subtree.
			switch l := self.Left.(type) {
			case *sqlparser.ComparisonExpr:
				if isAlwaysTrue(l) {
					self.Left = nil
					isLeftTrue = true
					tmp = self.Right
				}
			case *sqlparser.ParenExpr:
				if l.Expr == nil {
					self.Left = nil
					isLeftTrue = true
					tmp = self.Right
				}
			default:
				if l == nil {
					isLeftTrue = true
					tmp = self.Right
				}
			}

			// Inspect the right subtree.
			switch r := self.Right.(type) {
			case *sqlparser.ComparisonExpr:
				if isAlwaysTrue(r) {
					self.Right = nil
					isRightTrue = true
					tmp = self.Left
				}
			case *sqlparser.ParenExpr:
				if r.Expr == nil {
					self.Right = nil
					isRightTrue = true
					tmp = self.Left
				}
			default:
				if r == nil {
					isRightTrue = true
					tmp = self.Left
				}
			}

			if isRightTrue && isLeftTrue {
				tmp = nil
			} else if !isLeftTrue && !isRightTrue {
				// Neither side simplified: nothing to propagate upward.
				return
			}

			// Replace this node inside its parent according to the
			// parent's type.
			switch l := node.Prev.Self.(type) {
			case *sqlparser.Where:
				l.Expr = tmp
			case *sqlparser.ParenExpr:
				l.Expr = tmp
			case *sqlparser.AndExpr:
				if l.Left == self {
					l.Left = tmp
				} else if l.Right == self {
					l.Right = tmp
				}
			case *sqlparser.OrExpr:
				if l.Left == self {
					l.Left = tmp
				} else if l.Right == self {
					l.Right = tmp
				}
			default:
				// No matching parent type: drop the node from the linked list.
				err := node.Array.Remove(node.Prev)
				common.LogIfError(err, "")
			}

		}

	case *sqlparser.OrExpr:
		// Same handling as AndExpr.
		if self != nil {
			var tmp sqlparser.Expr
			isRightTrue := false
			isLeftTrue := false
			tmp = nil

			switch l := self.Left.(type) {
			case *sqlparser.ComparisonExpr:
				if isAlwaysTrue(l) {
					self.Left = nil
					isLeftTrue = true
					tmp = self.Right
				}
			case *sqlparser.ParenExpr:
				if l.Expr == nil {
					self.Left = nil
					isLeftTrue = true
					tmp = self.Right
				}
			default:
				if l == nil {
					isLeftTrue = true
					tmp = self.Right
				}
			}

			switch r := self.Right.(type) {
			case *sqlparser.ComparisonExpr:
				if isAlwaysTrue(r) {
					self.Right = nil
					isRightTrue = true
					tmp = self.Left
				}
			case *sqlparser.ParenExpr:
				if r.Expr == nil {
					self.Right = nil
					isRightTrue = true
					tmp = self.Left
				}
			default:
				if r == nil {
					isRightTrue = true
					tmp = self.Left
				}
			}

			if isRightTrue && isLeftTrue {
				tmp = nil
			} else if !isLeftTrue && !isRightTrue {
				return
			}

			switch l := node.Prev.Self.(type) {
			case *sqlparser.Where:
				l.Expr = tmp
			case *sqlparser.ParenExpr:
				l.Expr = tmp
			case *sqlparser.AndExpr:
				if l.Left == self {
					l.Left = tmp
				} else if l.Right == self {
					l.Right = tmp
				}
			case *sqlparser.OrExpr:
				if l.Left == self {
					l.Left = tmp
				} else if l.Right == self {
					l.Right = tmp
				}
			default:
				err := node.Array.Remove(node.Prev)
				common.LogIfError(err, "")
			}
		}
	}

	// Recurse toward the head so parents see the simplified children.
	omitAwaysTrue(node.Prev)
}
+
+// RewriteCountStar countstar: 将COUNT(col)改写为COUNT(*)
+// COUNT(DISTINCT col)不能替换为COUNT(*)
+func (rw *Rewrite) RewriteCountStar() *Rewrite {
+	err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+		switch f := node.(type) {
+		case *sqlparser.FuncExpr:
+			if strings.ToLower(f.Name.String()) == "count" && len(f.Exprs) > 0 {
+				switch colExpr := f.Exprs[0].(type) {
+				case *sqlparser.AliasedExpr:
+					switch col := colExpr.Expr.(type) {
+					case *sqlparser.ColName:
+						f.Exprs[0] = &sqlparser.StarExpr{TableName: col.Qualifier}
+					}
+				}
+			}
+		}
+		return true, nil
+	}, rw.Stmt)
+	common.LogIfError(err, "")
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
+// RewriteInnoDB innodb: 为未指定Engine的表默认添加InnoDB引擎,将其他存储引擎转为InnoDB
+func (rw *Rewrite) RewriteInnoDB() *Rewrite {
+	switch create := rw.Stmt.(type) {
+	case *sqlparser.DDL:
+		if create.Action != "create" {
+			return rw
+		}
+
+		if strings.Contains(strings.ToLower(create.TableSpec.Options), "engine=") {
+			reg := regexp.MustCompile(`(?i)engine=[a-z]+`)
+			create.TableSpec.Options = reg.ReplaceAllString(create.TableSpec.Options, "ENGINE=InnoDB ")
+		} else {
+			create.TableSpec.Options = " ENGINE=InnoDB " + create.TableSpec.Options
+		}
+
+	}
+
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
+// RewriteAutoIncrement autoincrement: 将auto_increment设置为1
+func (rw *Rewrite) RewriteAutoIncrement() *Rewrite {
+	switch create := rw.Stmt.(type) {
+	case *sqlparser.DDL:
+		if create.Action != "create" || create.TableSpec == nil {
+			return rw
+		}
+		if strings.Contains(strings.ToLower(create.TableSpec.Options), "auto_increment=") {
+			reg := regexp.MustCompile(`(?i)auto_increment=[0-9]+`)
+			create.TableSpec.Options = reg.ReplaceAllString(create.TableSpec.Options, "auto_increment=1 ")
+		}
+	}
+
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
+// RewriteIntWidth intwidth: int类型转为int(10),bigint类型转为bigint(20)
+func (rw *Rewrite) RewriteIntWidth() *Rewrite {
+	switch create := rw.Stmt.(type) {
+	case *sqlparser.DDL:
+		if create.Action != "create" || create.TableSpec == nil {
+			return rw
+		}
+		for _, col := range create.TableSpec.Columns {
+			switch col.Type.Type {
+			case "int", "integer":
+				if col.Type.Length != nil &&
+					(string(col.Type.Length.Val) != "10" && string(col.Type.Length.Val) != "11") {
+					col.Type.Length = sqlparser.NewIntVal([]byte("10"))
+				}
+			case "bigint":
+				if col.Type.Length != nil && string(col.Type.Length.Val) != "20" || col.Type.Length == nil {
+					col.Type.Length = sqlparser.NewIntVal([]byte("20"))
+				}
+			default:
+			}
+		}
+	}
+
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
// RewriteStar2Columns implements star2columns (heuristic COL.001): it expands
// SELECT * into the explicit column list taken from rw.Columns.
func (rw *Rewrite) RewriteStar2Columns() *Rewrite {
	// Without a usable MySQL environment (or when column metadata could not
	// be fetched) the * is left as-is.
	if common.Config.TestDSN.Disable || len(rw.Columns) == 0 {
		common.Log.Debug("(rw *Rewrite) RewriteStar2Columns(): Rewrite failed. TestDSN.Disable: %v, len(rw.Columns):%d",
			common.Config.TestDSN.Disable, len(rw.Columns))
		return rw
	}

	err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
		switch n := node.(type) {
		case *sqlparser.Select:

			// Forms SELECT * can take:
			// 1. select * from tb;
			// 2. select * from tb1,tb2;
			// 3. select tb1.* from tb1;
			// 4. select tb1.*,tb2.col from tb1,tb2;
			// 5. select db.tb1.* from tb1;
			// 6. select db.tb1.*,db.tb2.col from db.tb1,db.tb2;

			newSelectExprs := make(sqlparser.SelectExprs, 0)
			for _, expr := range n.SelectExprs {
				switch e := expr.(type) {
				case *sqlparser.StarExpr:
					// The two outer loops normally cover no more than two
					// levels (db -> table) in practice.
					for _, tables := range rw.Columns {
						for _, cols := range tables {
							for _, col := range cols {
								newExpr := &sqlparser.AliasedExpr{
									Expr: &sqlparser.ColName{
										Metadata: nil,
										Name:     sqlparser.NewColIdent(col.Name),
										Qualifier: sqlparser.TableName{
											Name: sqlparser.NewTableIdent(col.Table),
											// Cross-DB queries are discouraged,
											// so the db qualifier is deliberately
											// not filled in here.
											Qualifier: sqlparser.TableIdent{},
										},
									},
									As: sqlparser.ColIdent{},
								}

								if e.TableName.Name.IsEmpty() {
									// Cases 1 and 2: bare *, every column qualifies.
									newSelectExprs = append(newSelectExprs, newExpr)
								} else {
									// Other cases: replace only when the star's
									// table qualifier matches the column's table.
									if e.TableName.Name.String() == col.Table {
										newSelectExprs = append(newSelectExprs, newExpr)
									}
								}
							}
						}
					}
				default:
					// Non-star select expressions are kept unchanged.
					newSelectExprs = append(newSelectExprs, e)
				}
			}

			n.SelectExprs = newSelectExprs
		}
		return true, nil
	}, rw.Stmt)
	common.LogIfError(err, "")
	rw.NewSQL = sqlparser.String(rw.Stmt)
	return rw
}
+
+// RewriteInsertColumns insertcolumns: 对应COL.002,INSERT补全列名
+func (rw *Rewrite) RewriteInsertColumns() *Rewrite {
+
+	switch insert := rw.Stmt.(type) {
+	case *sqlparser.Insert:
+		switch insert.Action {
+		case "insert", "replace":
+			if insert.Columns != nil {
+				return rw
+			}
+
+			newColumns := make(sqlparser.Columns, 0)
+			db := insert.Table.Qualifier.String()
+			table := insert.Table.Name.String()
+			// 支持INSERT/REPLACE INTO VALUES形式,支持INSERT/REPLACE INTO SELECT
+			colCount := 0
+			switch v := insert.Rows.(type) {
+			case sqlparser.Values:
+				if len(v) > 0 {
+					colCount = len(v[0])
+				}
+
+			case *sqlparser.Select:
+				if l := len(v.SelectExprs); l > 0 {
+					colCount = l
+				}
+			}
+
+			// 开始对ast进行替换,补全前N列
+			counter := 0
+			for dbName, tb := range rw.Columns {
+				for tbName, cols := range tb {
+					for _, col := range cols {
+						// 只有全部列补全完成的时候才会替换ast
+						if counter == colCount {
+							insert.Columns = newColumns
+							rw.NewSQL = sqlparser.String(rw.Stmt)
+							return rw
+						}
+
+						if db != "" {
+							// 指定了DB的时候,只能怼指定DB的列
+							if db == dbName && table == tbName {
+								newColumns = append(newColumns, sqlparser.NewColIdent(col.Name))
+								counter++
+							}
+						} else {
+							// 没有指定DB的时候,将column中的列按顺序往里怼
+							if table == tbName {
+								newColumns = append(newColumns, sqlparser.NewColIdent(col.Name))
+								counter++
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	return rw
+}
+
+// RewriteHaving having: 对应CLA.013,使用WHERE过滤条件替代HAVING
+func (rw *Rewrite) RewriteHaving() *Rewrite {
+	err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+		switch n := node.(type) {
+		case *sqlparser.Select:
+			if n.Having != nil {
+				if n.Where == nil {
+					// WHERE条件为空直接用HAVING替代WHERE即可
+					n.Where = n.Having
+				} else {
+					// WHERE条件不为空,需要对已有的条件进行括号保护,然后再AND+HAVING
+					n.Where = &sqlparser.Where{
+						Expr: &sqlparser.AndExpr{
+							Left: &sqlparser.ParenExpr{
+								Expr: n.Where.Expr,
+							},
+							Right: n.Having.Expr,
+						},
+					}
+				}
+				// 别忘了重置HAVING和Where.Type
+				n.Where.Type = "where"
+				n.Having = nil
+			}
+		}
+		return true, nil
+	}, rw.Stmt)
+	common.LogIfError(err, "")
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
+// RewriteAddOrderByNull orderbynull: 对应CLA.008,GROUP BY无排序要求时添加ORDER BY NULL
+func (rw *Rewrite) RewriteAddOrderByNull() *Rewrite {
+	err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+		switch n := node.(type) {
+		case *sqlparser.Select:
+			if n.GroupBy != nil && n.OrderBy == nil {
+				n.OrderBy = sqlparser.OrderBy{
+					&sqlparser.Order{
+						Expr:      &sqlparser.NullVal{},
+						Direction: "asc",
+					},
+				}
+			}
+		}
+		return true, nil
+	}, rw.Stmt)
+	common.LogIfError(err, "")
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
// RewriteOr2Union is intended to rewrite OR conditions on different columns
// into a UNION query. Not implemented yet (it currently returns the Rewrite
// unchanged), and has no matching HeuristicRule.
// https://sqlperformance.com/2014/09/sql-plan/rewriting-queries-improve-performance
func (rw *Rewrite) RewriteOr2Union() *Rewrite {
	return rw
}
+
+// RewriteUnionAll unionall: 不介意重复数据的情况下使用union all替换union
+func (rw *Rewrite) RewriteUnionAll() *Rewrite {
+	err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+		switch n := node.(type) {
+		case *sqlparser.Union:
+			n.Type = "union all"
+		}
+		return true, nil
+	}, rw.Stmt)
+	common.LogIfError(err, "")
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
+// RewriteOr2In or2in: 同一列的OR过滤条件使用IN()替代,如果值有相等的会进行合并
+func (rw *Rewrite) RewriteOr2In() *Rewrite {
+	// 通过AST生成node的双向链表,链表顺序为书写顺序
+	nodeList := NewNodeList(rw.Stmt)
+	tNode := nodeList.First()
+
+	for {
+		tNode.or2in()
+		if tNode.Next == nil {
+			break
+		}
+		tNode = tNode.Next
+	}
+
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
// or2in converts OR predicates on the same column into IN predicates,
// rewriting this node inside its parent and then recursing toward the list
// head so newly mergeable nodes created by the rewrite are picked up.
func (node *NodeItem) or2in() {
	if node == nil || node.Self == nil {
		return
	}

	switch selfNode := node.Self.(type) {
	case *sqlparser.OrExpr:
		newExpr := mergeExprs(selfNode.Left, selfNode.Right)
		if newExpr != nil {
			// The OR's own two children merged: replace the expr in the
			// parent node with the merged expression.
			switch pre := node.Prev.Self.(type) {
			case *sqlparser.OrExpr:
				if pre.Left == node.Self {
					node.Self = newExpr
					pre.Left = newExpr
				} else if pre.Right == node.Self {
					node.Self = newExpr
					pre.Right = newExpr
				}
			case *sqlparser.AndExpr:
				if pre.Left == node.Self {
					node.Self = newExpr
					pre.Left = newExpr
				} else if pre.Right == node.Self {
					node.Self = newExpr
					pre.Right = newExpr
				}
			case *sqlparser.Where:
				node.Self = newExpr
				pre.Expr = newExpr
			case *sqlparser.ParenExpr:
				// Parentheses written in the SQL are respected: no merging
				// across them for now. TODO: flatten meaningless parentheses
				// via a dedicated rewrite rule.
				node.Self = newExpr
				pre.Expr = newExpr
			}
		} else {
			// The OR's own children cannot merge; try merging with the
			// parent instead. That merge must not cross AND or parentheses,
			// which could change the query's meaning — only OR can merge
			// with OR.
			switch pre := node.Prev.Self.(type) {
			case *sqlparser.OrExpr:
				// In this AST a compound condition always sits in the left
				// subtree, so checking the left side is sufficient.
				if pre.Left == selfNode {
					switch n := pre.Right.(type) {
					case *sqlparser.ComparisonExpr:
						newLeftExpr := mergeExprs(selfNode.Left, n)
						newRightExpr := mergeExprs(selfNode.Right, n)

						// Exactly one of newLeftExpr / newRightExpr must be
						// nil here; otherwise the two children of this OR
						// were mergeable themselves and the backward
						// recursion below will merge them via pre.
						if newLeftExpr == nil || newRightExpr == nil {
							if newLeftExpr != nil {
								pre.Right = newLeftExpr
								pre.Left = selfNode.Right
								err := node.Array.Remove(node)
								common.LogIfError(err, "")
							}

							if newRightExpr != nil {
								pre.Right = newRightExpr
								pre.Left = selfNode.Left
								err := node.Array.Remove(node)
								common.LogIfError(err, "")
							}
						}
					}
				}
			}
		}
	}

	// Recurse backward to merge any nodes made mergeable by the AST change.
	node.Prev.or2in()
}
+
+// mergeExprs merges two ComparisonExpr on the same column into a single
+// expression (an IN list, or an equality when only one value remains).
+// It returns nil when the two expressions can not be merged.
+func mergeExprs(left, right sqlparser.Expr) *sqlparser.ComparisonExpr {
+	// column names used to check that both sides reference the same column
+	colInLeft := ""
+	colInRight := ""
+	lOperator := ""
+	rOperator := ""
+
+	// values collected from the left and right subtrees
+	var values []sqlparser.SQLNode
+
+	// the column referenced by the SQL
+	var colName *sqlparser.ColName
+
+	// left subtree
+	switch l := left.(type) {
+	case *sqlparser.ComparisonExpr:
+		// resolve the column name
+		colName, colInLeft = getColumnName(l.Left)
+		// collect the value
+		if colInLeft != "" {
+			switch v := l.Right.(type) {
+			case *sqlparser.SQLVal, sqlparser.ValTuple, *sqlparser.BoolVal, *sqlparser.NullVal:
+				values = append(values, v)
+			}
+		}
+		// remember the operator
+		lOperator = l.Operator
+	default:
+		return nil
+	}
+
+	// right subtree
+	switch r := right.(type) {
+	case *sqlparser.ComparisonExpr:
+		// BUGFIX: guard against a nil colName — getColumnName may find no
+		// column on the left side, and dereferencing would panic here
+		if colName != nil && colName.Name.String() != "" {
+			common.Log.Warn("colName shouldn't have a value, but now it's %s", colName.Name.String())
+		}
+		colName, colInRight = getColumnName(r.Left)
+		// collect the value
+		if colInRight != "" {
+			switch v := r.Right.(type) {
+			case *sqlparser.SQLVal, sqlparser.ValTuple, *sqlparser.BoolVal, *sqlparser.NullVal:
+				values = append(values, v)
+			}
+		}
+		// remember the operator
+		rOperator = r.Operator
+	default:
+		return nil
+	}
+
+	// normalize operators so mergeability can be decided below
+	switch lOperator {
+	case "in", "=":
+		lOperator = "="
+	default:
+		return nil
+	}
+
+	switch rOperator {
+	case "in", "=":
+		rOperator = "="
+	default:
+		return nil
+	}
+
+	// both sides must reference the same column with compatible operators
+	if colInLeft == "" || colInLeft != colInRight ||
+		lOperator == "" || lOperator != rOperator {
+		return nil
+	}
+
+	// merge the values of both subtrees
+	newValTuple := make(sqlparser.ValTuple, 0)
+	for _, v := range values {
+		switch v := v.(type) {
+		case *sqlparser.SQLVal:
+			newValTuple = append(newValTuple, v)
+		case *sqlparser.BoolVal:
+			newValTuple = append(newValTuple, v)
+		case *sqlparser.NullVal:
+			newValTuple = append(newValTuple, v)
+		case sqlparser.ValTuple:
+			newValTuple = append(newValTuple, v...)
+		}
+	}
+
+	// drop duplicated values from the merged expression
+	newValTuple = removeDup(newValTuple...)
+	newExpr := &sqlparser.ComparisonExpr{
+		Operator: "in",
+		Left:     colName,
+		Right:    newValTuple,
+	}
+	// a single value is a plain equality; no need to rewrite it as IN
+	if len(newValTuple) == 1 {
+		newExpr = &sqlparser.ComparisonExpr{
+			Operator: lOperator,
+			Left:     colName,
+			Right:    newValTuple[0],
+		}
+	}
+
+	return newExpr
+}
+
+// removeDup removes duplicate values from sqlparser.ValTuple elements.
+// SQLVal values are keyed by "Type:Val" so that values of different types
+// which render to the same string stay distinct; nested ValTuples are
+// flattened recursively. Input order of first occurrences is preserved.
+func removeDup(vt ...sqlparser.Expr) sqlparser.ValTuple {
+	uni := make(sqlparser.ValTuple, 0)
+	m := make(map[string]sqlparser.SQLNode)
+
+	// dedupKey builds the map key of a value; ok is false for unsupported types.
+	dedupKey := func(value sqlparser.Expr) (key string, ok bool) {
+		switch v := value.(type) {
+		case *sqlparser.SQLVal:
+			// Type:Val — the colon separates Type and Val so two values of
+			// different types can not collide after concatenation
+			return string(v.Type) + ":" + sqlparser.String(v), true
+		case *sqlparser.BoolVal, *sqlparser.NullVal:
+			return sqlparser.String(v), true
+		}
+		return "", false
+	}
+
+	// appendUniq flattens nested tuples and appends unseen values to uni.
+	var appendUniq func(values []sqlparser.Expr)
+	appendUniq = func(values []sqlparser.Expr) {
+		for _, value := range values {
+			if tuple, isTuple := value.(sqlparser.ValTuple); isTuple {
+				// flatten nested tuples before deduplication
+				appendUniq(tuple)
+				continue
+			}
+			if key, ok := dedupKey(value); ok {
+				if _, dup := m[key]; !dup {
+					uni = append(uni, value)
+					m[key] = value
+				}
+			}
+		}
+	}
+	appendUniq(vt)
+
+	return uni
+}
+
+// RewriteInNull innull: TODO: maps to heuristic rule ARG.004, not implemented yet
+func (rw *Rewrite) RewriteInNull() *Rewrite {
+	return rw
+}
+
+// RewriteRmParenthesis rmparenthesis: remove meaningless parentheses
+func (rw *Rewrite) RewriteRmParenthesis() *Rewrite {
+	rw.rmParenthesis()
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
+// rmParenthesis strips meaningless parentheses from the statement, e.g.
+// `WHERE (a = 1)` -> `WHERE a = 1`. Only parens that wrap a single
+// comparison are removed, so the semantics can never change.
+func (rw *Rewrite) rmParenthesis() {
+	continueFlag := false
+	// unwrap returns the inner comparison when expr is a ParenExpr wrapping
+	// exactly one ComparisonExpr, otherwise nil.
+	unwrap := func(expr sqlparser.Expr) sqlparser.Expr {
+		if paren, ok := expr.(*sqlparser.ParenExpr); ok {
+			if cmp, ok := paren.Expr.(*sqlparser.ComparisonExpr); ok {
+				return cmp
+			}
+		}
+		return nil
+	}
+	err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+		switch node := node.(type) {
+		case *sqlparser.Where:
+			// Walk may hand us a typed nil *Where
+			if node == nil {
+				return true, nil
+			}
+			if inner := unwrap(node.Expr); inner != nil {
+				node.Expr = inner
+				continueFlag = true
+			}
+
+		case *sqlparser.ParenExpr:
+			// collapses doubly nested parens: ((cmp)) -> (cmp)
+			if inner := unwrap(node.Expr); inner != nil {
+				node.Expr = inner
+				continueFlag = true
+			}
+
+		case *sqlparser.AndExpr:
+			if inner := unwrap(node.Left); inner != nil {
+				node.Left = inner
+				continueFlag = true
+			}
+			if inner := unwrap(node.Right); inner != nil {
+				node.Right = inner
+				continueFlag = true
+			}
+
+		case *sqlparser.OrExpr:
+			if inner := unwrap(node.Left); inner != nil {
+				node.Left = inner
+				continueFlag = true
+			}
+			if inner := unwrap(node.Right); inner != nil {
+				node.Right = inner
+				continueFlag = true
+			}
+		}
+		return true, nil
+	}, rw.Stmt)
+	common.LogIfError(err, "")
+	// a removal at this level may expose new meaningless parentheses, so
+	// re-scan the tree until a full pass changes nothing
+	if continueFlag {
+		rw.rmParenthesis()
+	}
+}
+
+// RewriteRemoveDMLOrderBy dmlorderby: maps to heuristic rule RES.004.
+// Remove ORDER BY from UPDATE/DELETE statements (and their sub-selects)
+// that carry no LIMIT clause, where the ordering has no effect.
+func (rw *Rewrite) RewriteRemoveDMLOrderBy() *Rewrite {
+	// rmSubSelectOrderBy strips ORDER BY from embedded SELECTs without LIMIT;
+	// shared by the UPDATE and DELETE branches below.
+	rmSubSelectOrderBy := func() {
+		err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+			switch n := node.(type) {
+			case *sqlparser.Select:
+				if n.OrderBy != nil && n.Limit == nil {
+					n.OrderBy = nil
+				}
+				return false, nil
+			}
+			return true, nil
+		}, rw.Stmt)
+		common.LogIfError(err, "")
+	}
+
+	switch st := rw.Stmt.(type) {
+	case *sqlparser.Update:
+		rmSubSelectOrderBy()
+		if st.OrderBy != nil && st.Limit == nil {
+			st.OrderBy = nil
+		}
+	case *sqlparser.Delete:
+		rmSubSelectOrderBy()
+		if st.OrderBy != nil && st.Limit == nil {
+			st.OrderBy = nil
+		}
+	}
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
+// RewriteGroupByConst maps to heuristic rule CLA.004: replace GROUP BY <const> with the column name
+// TODO:
+func (rw *Rewrite) RewriteGroupByConst() *Rewrite {
+	err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+		switch n := node.(type) {
+		case *sqlparser.Select:
+			// groupByCol becomes true when any GROUP BY item is not a constant
+			groupByCol := false
+			if n.GroupBy != nil {
+				for _, group := range n.GroupBy {
+					switch group.(type) {
+					case *sqlparser.SQLVal:
+					default:
+						groupByCol = true
+					}
+				}
+				if !groupByCol {
+					// TODO: this only removes the GROUP BY clause; it does not
+					// actually resolve the constant to the matching column
+					n.GroupBy = nil
+				}
+			}
+		}
+		return true, nil
+	}, rw.Stmt)
+	common.LogIfError(err, "")
+	rw.NewSQL = sqlparser.String(rw.Stmt)
+	return rw
+}
+
+// RewriteSubQuery2Join rewrites subqueries as JOINs.
+// The rewrite is done on SQL strings (innermost subquery first) and the
+// result is re-parsed; on parse failure the original SQL is restored.
+func (rw *Rewrite) RewriteSubQuery2Join() *Rewrite {
+	var err error
+	// bail out when no MySQL test environment is configured or when column
+	// metadata could not be fetched from it
+	if common.Config.TestDSN.Disable || len(rw.Columns) == 0 {
+		common.Log.Debug("(rw *Rewrite) RewriteSubQuery2Join(): Rewrite failed. TestDSN.Disable: %v, len(rw.Columns):%d",
+			common.Config.TestDSN.Disable, len(rw.Columns))
+		return rw
+	}
+
+	if rw.NewSQL == "" {
+		rw.NewSQL = sqlparser.String(rw.Stmt)
+	}
+
+	// query backup
+	backup := rw.NewSQL
+	var subQueryList []string
+	err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+		switch sub := node.(type) {
+		case sqlparser.SelectStatement:
+			subStr := sqlparser.String(sub)
+			// strip the wrapping parens of a parenthesized subquery
+			if strings.HasPrefix(subStr, "(") {
+				subStr = subStr[1 : len(subStr)-1]
+			}
+			subQueryList = append(subQueryList, subStr)
+		}
+		return true, nil
+	}, rw.Stmt)
+	common.LogIfError(err, "")
+	if length := len(subQueryList); length > 1 {
+		// merge from the innermost subquery outwards
+		lastResult := ""
+		for i := length - 1; i > 0; i-- {
+			if lastResult == "" {
+				lastResult, err = rw.sub2Join(subQueryList[i-1], subQueryList[i])
+			} else {
+				// replace the subquery part with the result of the previous merge
+				subQueryList[i-1] = strings.Replace(subQueryList[i-1], subQueryList[i], lastResult, -1)
+				lastResult, err = rw.sub2Join(subQueryList[i-1], lastResult)
+			}
+
+			if err != nil {
+				common.Log.Error("RewriteSubQuery2Join Error: %v", err)
+				return rw
+			}
+		}
+		rw.NewSQL = lastResult
+	} else if length == 1 {
+		var newSQL string
+		newSQL, err = rw.sub2Join(rw.NewSQL, subQueryList[0])
+		if err == nil {
+			rw.NewSQL = newSQL
+		}
+	}
+
+	// this rewrite does not touch rw.Stmt directly, so re-parse to keep it in sync
+	newStmt, err := sqlparser.Parse(rw.NewSQL)
+	if err != nil {
+		rw.NewSQL = backup
+		rw.Stmt, _ = sqlparser.Parse(backup)
+	} else {
+		rw.Stmt = newStmt
+	}
+
+	return rw
+}
+
+// sub2Join rewrites the subquery `sub` inside the query `parent` as a JOIN,
+// returning the rewritten SQL. Both arguments must be SELECT statements and
+// `sub` must literally occur inside `parent`; otherwise ("", nil) is returned.
+func (rw *Rewrite) sub2Join(parent, sub string) (string, error) {
+	// only SELECT statements are handled
+	if sqlparser.Preview(parent) != sqlparser.StmtSelect || sqlparser.Preview(sub) != sqlparser.StmtSelect {
+		return "", nil
+	}
+
+	// skip when the subquery does not belong to the parent query
+	if !strings.Contains(parent, sub) {
+		return "", nil
+	}
+
+	// parse the outer SQL
+	stmt, err := sqlparser.Parse(parent)
+	if err != nil {
+		common.Log.Warn("(rw *Rewrite) RewriteSubQuery2Join() sub2Join sql `%s` parsed error: %v", parent, err)
+		return "", err
+	}
+
+	switch stmt.(type) {
+	case sqlparser.SelectStatement:
+	default:
+		common.Log.Debug("Query `%s` not select statement.", parent)
+		return "", nil
+	}
+
+	// parse the subquery
+	subStmt, err := sqlparser.Parse(sub)
+	if err != nil {
+		common.Log.Warn("(rw *Rewrite) RewriteSubQuery2Join() sub2Join sql `%s` parsed error: %v", sub, err)
+		return "", err
+	}
+
+	// tables used by the outer SQL
+	stmtMeta := GetTableFromExprs(stmt.(*sqlparser.Select).From)
+	// tables used by the inner SQL
+	subMeta := GetTableFromExprs(subStmt.(*sqlparser.Select).From)
+
+	// handle the join condition
+	err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+		switch p := node.(type) {
+		case *sqlparser.ComparisonExpr:
+			// a in (select * from tb)
+			switch subquery := p.Right.(type) {
+			case *sqlparser.Subquery:
+
+				// the column on the left-hand side of the comparison
+				var leftColumn *sqlparser.ColName
+
+				switch l := p.Left.(type) {
+				case *sqlparser.ColName:
+					leftColumn = l
+				default:
+					return false, nil
+				}
+
+				// holds the single column selected by the subquery
+				var rightColumn sqlparser.SQLNode
+
+				// replace the column inside the subquery
+				switch subSelectStmt := subquery.Select.(type) {
+				case *sqlparser.Select:
+					cachingOperator := p.Operator
+
+					rightColumn = subSelectStmt.SelectExprs[0]
+
+					rightCol, _ := getColumnName(rightColumn.(*sqlparser.AliasedExpr).Expr)
+					if rightCol != nil {
+						// replace the subquery with an equality condition
+						p.Operator = "="
+
+						// complete table/db qualifiers of the selectExprs
+						var newExprs []sqlparser.SelectExpr
+						err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+							switch col := node.(type) {
+							case *sqlparser.StarExpr:
+								if col.TableName.Name.IsEmpty() {
+									for dbName, db := range stmtMeta {
+										for tbName := range db.Table {
+
+											col.TableName.Name = sqlparser.NewTableIdent(tbName)
+											if dbName != "" {
+												col.TableName.Qualifier = sqlparser.NewTableIdent(dbName)
+											}
+
+											newExprs = append(newExprs, col)
+										}
+									}
+								}
+							case *sqlparser.AliasedExpr:
+								switch n := col.Expr.(type) {
+								case *sqlparser.ColName:
+									col.Expr = columnFromWhere(n, stmtMeta, rw.Columns)
+								}
+							}
+							return true, nil
+						}, stmt.(*sqlparser.Select).SelectExprs)
+						common.LogIfError(err, "")
+
+						// complete the qualifier of the original left-hand column
+						p.Left = columnFromWhere(leftColumn, stmtMeta, rw.Columns)
+
+						// hoist the subquery column, adding its table prefix
+						p.Right = columnFromWhere(rightCol, subMeta, rw.Columns)
+
+						// complete column qualifiers in the subquery's WHERE clause
+						subWhereExpr := subStmt.(*sqlparser.Select).Where
+						err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+							switch n := node.(type) {
+							case *sqlparser.ComparisonExpr:
+								switch left := n.Left.(type) {
+								case *sqlparser.ColName:
+									n.Left = columnFromWhere(left, subMeta, rw.Columns)
+								}
+
+								switch right := n.Right.(type) {
+								case *sqlparser.ColName:
+									n.Right = columnFromWhere(right, subMeta, rw.Columns)
+								}
+							}
+							return true, nil
+						}, subWhereExpr)
+						common.LogIfError(err, "")
+						// if the subquery has a WHERE clause, append it to the parent's WHERE
+						if subWhereExpr != nil {
+							if stmt.(*sqlparser.Select).Where != nil {
+								stmt.(*sqlparser.Select).Where.Expr = &sqlparser.AndExpr{
+									Left:  stmt.(*sqlparser.Select).Where.Expr,
+									Right: subWhereExpr.Expr,
+								}
+							} else {
+								stmt.(*sqlparser.Select).Where = subWhereExpr
+							}
+						}
+
+						switch cachingOperator {
+						case "in":
+							// append the subquery's tables to the parent's FROM
+							// as an inner join, skipping tables already present
+							var newTables []sqlparser.TableExpr
+							for _, subExpr := range subStmt.(*sqlparser.Select).From {
+								has := false
+								for _, expr := range stmt.(*sqlparser.Select).From {
+									if reflect.DeepEqual(expr, subExpr) {
+										has = true
+									}
+								}
+								if !has {
+									newTables = append(newTables, subExpr)
+								}
+							}
+							stmt.(*sqlparser.Select).From = append(stmt.(*sqlparser.Select).From, newTables...)
+						case "not in":
+							// append the tables as a LEFT JOIN
+							// TODO
+						}
+					}
+
+				}
+			}
+		}
+		return true, nil
+	}, stmt)
+	common.LogIfError(err, "")
+	newSQL := sqlparser.String(stmt)
+	return newSQL, nil
+}
+
+// columnFromWhere resolves which table a column belongs to and fills in the
+// column's qualifier (table name, and database when known). The column is
+// returned unchanged when no owning table can be determined.
+func columnFromWhere(col *sqlparser.ColName, meta common.Meta, columns common.TableColumns) *sqlparser.ColName {
+	for dbName, db := range meta {
+		for tbName := range db.Table {
+			// NOTE: renamed loop variables — the originals shadowed the
+			// `columns` parameter, which made this walk very hard to read
+			for _, tblColumns := range columns {
+				for _, colList := range tblColumns {
+					for _, column := range colList {
+						if !strings.EqualFold(col.Name.String(), column.Name) {
+							continue
+						}
+						// unqualified column matching a table in the query
+						if col.Qualifier.Name.IsEmpty() && tbName == column.Table {
+							col.Qualifier.Name = sqlparser.NewTableIdent(column.Table)
+							return col
+						}
+						if (dbName == "" && tbName == column.Table) || (tbName == column.Table && dbName == column.DB) {
+							col.Qualifier.Name = sqlparser.NewTableIdent(column.Table)
+							if dbName != "" {
+								col.Qualifier.Qualifier = sqlparser.NewTableIdent(column.DB)
+							}
+							return col
+						}
+					}
+				}
+			}
+		}
+	}
+	return col
+}
+
+// RewriteJoin2SubQuery join2sub: TODO: not implemented yet
+// https://mariadb.com/kb/en/library/subqueries-and-joins/
+func (rw *Rewrite) RewriteJoin2SubQuery() *Rewrite {
+	return rw
+}
+
+// RewriteDistinctStar distinctstar: maps to heuristic rule DIS.003, drop a redundant `DISTINCT *`
+func (rw *Rewrite) RewriteDistinctStar() *Rewrite {
+	// NOTE: no primary-key check is done here; by our SQL coding standard
+	// every table must have a primary key, making DISTINCT * redundant
+	switch rw.Stmt.(type) {
+	case *sqlparser.Select:
+		meta := GetMeta(rw.Stmt, nil)
+		for _, m := range meta {
+			// only rewrite single-table queries
+			if len(m.Table) == 1 {
+				// matches: distinct tbl.*, distinct *, count(distinct *)
+				re := regexp.MustCompile(`(?i)((distinct\s*\*)|(distinct\s+[0-9a-z_` + "`" + `]*\.\*))`)
+				if re.MatchString(rw.SQL) {
+					rw.NewSQL = re.ReplaceAllString(rw.SQL, "*")
+				}
+			}
+			// only the first database entry is inspected
+			break
+		}
+	}
+	if rw.NewSQL == "" {
+		rw.NewSQL = rw.SQL
+	}
+	rw.Stmt, _ = sqlparser.Parse(rw.NewSQL)
+	return rw
+}
+
+// RewriteTruncate truncate: DELETE全表修改为TRUNCATE TABLE
+func (rw *Rewrite) RewriteTruncate() *Rewrite {
+	switch n := rw.Stmt.(type) {
+	case *sqlparser.Delete:
+		meta := GetMeta(rw.Stmt, nil)
+		if len(meta) == 1 && n.Where == nil {
+			for _, db := range meta {
+				for _, tbl := range db.Table {
+					rw.NewSQL = "truncate table " + tbl.TableName
+				}
+			}
+		}
+	}
+	return rw
+}
+
+// RewriteDML2Select dml2select: rewrite DML as a SELECT so that older MySQL
+// versions, whose EXPLAIN does not support DML, can still analyze the query.
+func (rw *Rewrite) RewriteDML2Select() *Rewrite {
+	if rw.Stmt == nil {
+		return rw
+	}
+
+	switch stmt := rw.Stmt.(type) {
+	case *sqlparser.Select:
+		// a SELECT can be explained as-is
+		rw.NewSQL = rw.SQL
+	case *sqlparser.Delete: // Multi DELETE not support yet.
+		rw.NewSQL = delete2Select(stmt)
+	case *sqlparser.Insert:
+		rw.NewSQL = insert2Select(stmt)
+	case *sqlparser.Update: // Multi UPDATE not support yet.
+		rw.NewSQL = update2Select(stmt)
+	}
+	rw.Stmt, _ = sqlparser.Parse(rw.NewSQL)
+	return rw
+}
+
+// delete2Select rewrites a DELETE statement as the equivalent SELECT.
+func delete2Select(stmt *sqlparser.Delete) string {
+	newSQL := &sqlparser.Select{
+		SelectExprs: []sqlparser.SelectExpr{
+			new(sqlparser.StarExpr),
+		},
+		From:    stmt.TableExprs,
+		Where:   stmt.Where,
+		OrderBy: stmt.OrderBy,
+		// keep LIMIT, for consistency with update2Select, so the generated
+		// SELECT covers the same row set the DELETE would touch
+		Limit: stmt.Limit,
+	}
+	return sqlparser.String(newSQL)
+}
+
+// update2Select rewrites an UPDATE statement as the equivalent SELECT.
+func update2Select(stmt *sqlparser.Update) string {
+	// select * over the same tables, keeping WHERE/ORDER BY/LIMIT intact
+	sel := new(sqlparser.Select)
+	sel.SelectExprs = []sqlparser.SelectExpr{new(sqlparser.StarExpr)}
+	sel.From = stmt.TableExprs
+	sel.Where = stmt.Where
+	sel.OrderBy = stmt.OrderBy
+	sel.Limit = stmt.Limit
+	return sqlparser.String(sel)
+}
+
+// insert2Select 将Insert语句改写成Select
+func insert2Select(stmt *sqlparser.Insert) string {
+	switch row := stmt.Rows.(type) {
+	// 如果insert包含子查询,只需要explain该子树
+	case *sqlparser.Select, *sqlparser.Union, *sqlparser.ParenSelect:
+		return sqlparser.String(row)
+	}
+
+	return "select 1 from DUAL"
+}
+
+// AlterAffectTable 获取ALTER影响的库表名,返回:`db`.`table`
+func AlterAffectTable(stmt sqlparser.Statement) string {
+	switch n := stmt.(type) {
+	case *sqlparser.DDL:
+		tableName := strings.ToLower(n.Table.Name.String())
+		dbName := strings.ToLower(n.Table.Qualifier.String())
+		if tableName != "" && tableName != "dual" {
+			if dbName == "" {
+				return "`" + tableName + "`"
+			}
+
+			return "`" + dbName + "`.`" + tableName + "`"
+		}
+	}
+	return ""
+}
+
+// MergeAlterTables mergealter: merge multiple ALTER statements on the same table into one
+// @input: sql, alter string
+// @output: map[[db.]table]sql; the key is `table` when the DB is unknown,
+// and `db`.`table` when it is known
+func MergeAlterTables(sqls ...string) map[string]string {
+	alterStrs := make(map[string][]string)
+	mergedAlterStr := make(map[string]string)
+
+	alterExp := regexp.MustCompile(`(?i)alter\s*table\s*[^\s]*\s*`)   // ALTER TABLE
+	renameExp := regexp.MustCompile(`(?i)rename\s*table\s*[^\s]*\s*`) // RENAME TABLE
+	// CREATE [UNIQUE|FULLTEXT|SPATIAL|PRIMARY] [KEY|INDEX] idx_name ON tbl_name
+	createIndexExp := regexp.MustCompile(`(?i)create((unique)|(fulltext)|(spatial)|(primary)|(\s*)\s*)((index)|(key))\s*`)
+	indexNameExp := regexp.MustCompile(`(?i)[^\s]*\s*`)
+	indexColsExp := regexp.MustCompile(`(?i)[^\s]*\s*on\s*[^\s]*\s*`)
+
+	for _, sql := range sqls {
+		sql = strings.Trim(sql, common.Config.Delimiter)
+		stmt, _ := sqlparser.Parse(sql)
+		alterStr := ""
+		dbName := ""
+		tableName := ""
+		switch n := stmt.(type) {
+		case *sqlparser.DDL:
+			// NOTE: table and database names are case insensitive
+			tableName = strings.ToLower(n.Table.Name.String())
+			dbName = strings.ToLower(n.Table.Qualifier.String())
+			switch n.Action {
+			case "rename":
+				if alterExp.MatchString(sql) {
+					common.Log.Debug("rename alterExp: ALTER %v %v", tableName, alterExp.ReplaceAllString(sql, ""))
+					alterStr = alterExp.ReplaceAllString(sql, "")
+				} else if renameExp.MatchString(sql) {
+					// BUGFIX: this branch must strip with renameExp; alterExp can
+					// not match `RENAME TABLE ...` and left the SQL unstripped
+					common.Log.Debug("rename renameExp: ALTER %v %v", tableName, renameExp.ReplaceAllString(sql, ""))
+					alterStr = renameExp.ReplaceAllString(sql, "")
+				} else {
+					common.Log.Warn("rename not match: ALTER %v %v", tableName, sql)
+				}
+			case "alter":
+				if alterExp.MatchString(sql) {
+					common.Log.Debug("alter alterExp: ALTER %v %v", tableName, alterExp.ReplaceAllString(sql, ""))
+					alterStr = alterExp.ReplaceAllString(sql, "")
+				} else if createIndexExp.MatchString(sql) {
+					// convert CREATE INDEX into an ALTER ... ADD INDEX fragment
+					buf := createIndexExp.ReplaceAllString(sql, "")
+					idxName := strings.TrimSpace(indexNameExp.FindString(buf))
+					buf = indexColsExp.ReplaceAllString(buf, "")
+					// BUGFIX: the format string had three verbs for four args
+					// (go vet: Debug call needs matching arg count)
+					common.Log.Debug("alter createIndexExp: ALTER %v ADD INDEX %v %v", tableName, idxName, buf)
+					alterStr = fmt.Sprint("ADD INDEX", " "+idxName+" ", buf)
+				}
+			}
+		}
+		if alterStr != "" && tableName != "" && tableName != "dual" {
+			key := "`" + tableName + "`"
+			if dbName != "" {
+				key = "`" + dbName + "`.`" + tableName + "`"
+			}
+			alterStrs[key] = append(alterStrs[key], alterStr)
+		}
+	}
+	for k, v := range alterStrs {
+		mergedAlterStr[k] = fmt.Sprintln("ALTER TABLE", k, strings.Join(v, ", "), common.Config.Delimiter)
+	}
+	return mergedAlterStr
+}
+
+// RewriteRuleMatch reports whether the named rewrite rule is enabled in the
+// configured rule list.
+func RewriteRuleMatch(name string) bool {
+	for _, rule := range common.Config.RewriteRules {
+		if rule == name {
+			return true
+		}
+	}
+	return false
+}
diff --git a/ast/rewrite_test.go b/ast/rewrite_test.go
new file mode 100644
index 00000000..f84250f3
--- /dev/null
+++ b/ast/rewrite_test.go
@@ -0,0 +1,685 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ast
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/XiaoMi/soar/common"
+)
+
+// TestRewrite runs the whole rewrite pipeline against a fixed set of column
+// metadata. Duplicated insert/replace cases from the original table removed.
+func TestRewrite(t *testing.T) {
+	common.Config.TestDSN.Disable = false
+	testSQL := []map[string]string{
+		{
+			"input":  `SELECT * FROM film`,
+			"output": `select film.film_id, film.title, film.description, film.release_year, film.language_id, film.original_language_id, film.rental_duration from film;`,
+		},
+		{
+			"input":  `SELECT film.*, actor.actor_id FROM film,actor`,
+			"output": `select film.film_id, film.title, film.description, film.release_year, film.language_id, film.original_language_id, film.rental_duration, actor.actor_id from film, actor;`,
+		},
+		{
+			"input":  `insert into film values(1,2,3,4,5)`,
+			"output": `insert into film(film_id, title, description, release_year, language_id) values (1, 2, 3, 4, 5);`,
+		},
+		{
+			"input":  `insert into sakila.film values(1,2)`,
+			"output": `insert into sakila.film(film_id, title) values (1, 2);`,
+		},
+		{
+			"input":  `replace into sakila.film select id from tb`,
+			"output": `replace into sakila.film(film_id) select id from tb;`,
+		},
+		{
+			"input":  `replace into sakila.film select id, title, description from tb`,
+			"output": `replace into sakila.film(film_id, title, description) select id, title, description from tb;`,
+		},
+		{
+			"input":  "DELETE FROM tbl WHERE col1=1 ORDER BY col",
+			"output": "delete from tbl where col1 = 1;",
+		},
+		{
+			"input":  "UPDATE tbl SET col =1 WHERE col1=1 ORDER BY col",
+			"output": "update tbl set col = 1 where col1 = 1;",
+		},
+	}
+
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"])
+		rw.Columns = map[string]map[string][]*common.Column{
+			"sakila": {
+				"film": {
+					{Name: "film_id", Table: "film"},
+					{Name: "title", Table: "film"},
+					{Name: "description", Table: "film"},
+					{Name: "release_year", Table: "film"},
+					{Name: "language_id", Table: "film"},
+					{Name: "original_language_id", Table: "film"},
+					{Name: "rental_duration", Table: "film"},
+				},
+			},
+		}
+		rw.Rewrite()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteStar2Columns checks that `*` is expanded into explicit column lists.
+func TestRewriteStar2Columns(t *testing.T) {
+	common.Config.TestDSN.Disable = false
+	testSQL := []map[string]string{
+		{
+			"input":  `SELECT * FROM film`,
+			"output": `select film.film_id, film.title from film`,
+		},
+		{
+			"input":  `SELECT film.*, actor.actor_id FROM film,actor`,
+			"output": `select film.film_id, film.title, actor.actor_id from film, actor`,
+		},
+	}
+
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"])
+		rw.Columns = map[string]map[string][]*common.Column{
+			"sakila": {
+				"film": {
+					{Name: "film_id", Table: "film"},
+					{Name: "title", Table: "film"},
+				},
+			},
+		}
+		rw.RewriteStar2Columns()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteInsertColumns checks that column lists are added to bare INSERT/REPLACE statements.
+func TestRewriteInsertColumns(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  `insert into film values(1,2,3,4,5)`,
+			"output": `insert into film(film_id, title, description, release_year, language_id) values (1, 2, 3, 4, 5)`,
+		},
+		{
+			"input":  `insert into sakila.film values(1,2)`,
+			"output": `insert into sakila.film(film_id, title) values (1, 2)`,
+		},
+		{
+			"input":  `replace into sakila.film select id from tb`,
+			"output": `replace into sakila.film(film_id) select id from tb`,
+		},
+		{
+			"input":  `replace into sakila.film select id, title, description from tb`,
+			"output": `replace into sakila.film(film_id, title, description) select id, title, description from tb`,
+		},
+	}
+
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"])
+		rw.Columns = map[string]map[string][]*common.Column{
+			"sakila": {
+				"film": {
+					{Name: "film_id", Table: "film"},
+					{Name: "title", Table: "film"},
+					{Name: "description", Table: "film"},
+					{Name: "release_year", Table: "film"},
+					{Name: "language_id", Table: "film"},
+					{Name: "original_language_id", Table: "film"},
+					{Name: "rental_duration", Table: "film"},
+				},
+			},
+		}
+		rw.RewriteInsertColumns()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteHaving checks that HAVING conditions are folded into WHERE.
+func TestRewriteHaving(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  `SELECT state, COUNT(*) FROM Drivers GROUP BY state HAVING state IN ('GA', 'TX') ORDER BY state`,
+			"output": "select state, COUNT(*) from Drivers where state in ('GA', 'TX') group by state order by state asc",
+		},
+		{
+			"input":  `SELECT state, COUNT(*) FROM Drivers WHERE col =1 GROUP BY state HAVING state IN ('GA', 'TX') ORDER BY state`,
+			"output": "select state, COUNT(*) from Drivers where (col = 1) and state in ('GA', 'TX') group by state order by state asc",
+		},
+		{
+			"input":  `SELECT state, COUNT(*) FROM Drivers WHERE col =1 or col1 =2 GROUP BY state HAVING state IN ('GA', 'TX') ORDER BY state`,
+			"output": "select state, COUNT(*) from Drivers where (col = 1 or col1 = 2) and state in ('GA', 'TX') group by state order by state asc",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteHaving()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteAddOrderByNull checks that ORDER BY NULL is appended to GROUP BY queries.
+func TestRewriteAddOrderByNull(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "SELECT sum(col1) FROM tbl GROUP BY col",
+			"output": "select sum(col1) from tbl group by col order by null",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteAddOrderByNull()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteRemoveDMLOrderBy checks that ORDER BY is dropped from DML without LIMIT.
+func TestRewriteRemoveDMLOrderBy(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "DELETE FROM tbl WHERE col1=1 ORDER BY col",
+			"output": "delete from tbl where col1 = 1",
+		},
+		{
+			"input":  "UPDATE tbl SET col =1 WHERE col1=1 ORDER BY col",
+			"output": "update tbl set col = 1 where col1 = 1",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteRemoveDMLOrderBy()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteGroupByConst checks GROUP BY <const> handling; the commented-out
+// cases below document the intended behavior once CLA.004 is fully implemented.
+func TestRewriteGroupByConst(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "select 1;",
+			"output": "select 1 from dual",
+		},
+		/*
+				{
+					"input":  "SELECT col1 FROM tbl GROUP BY 1;",
+					"output": "select col1 from tbl GROUP BY col1",
+				},
+			    {
+					"input":  "SELECT col1, col2 FROM tbl GROUP BY 1, 2;",
+					"output": "select col1, col2 from tbl GROUP BY col1, col2",
+				},
+			    {
+					"input":  "SELECT col1, col2, col3 FROM tbl GROUP BY 1, 3;",
+					"output": "select col1, col2, col3 from tbl GROUP BY col1, col3",
+				},
+		*/
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteGroupByConst()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteStandard checks SQL normalization into a standard lower-case form.
+func TestRewriteStandard(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "SELECT sum(col1) FROM tbl GROUP BY 1;",
+			"output": "select sum(col1) from tbl group by 1",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteStandard()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteCountStar checks that COUNT(col) is rewritten to COUNT(*).
+func TestRewriteCountStar(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "SELECT count(col) FROM tbl GROUP BY 1;",
+			"output": "select count(*) from tbl group by 1",
+		},
+		{
+			"input":  "SELECT COUNT(tb.col) FROM tbl GROUP BY 1;",
+			"output": "select COUNT(tb.*) from tbl group by 1",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteCountStar()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteInnoDB checks that CREATE TABLE gets/keeps ENGINE=InnoDB.
+func TestRewriteInnoDB(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "CREATE TABLE t1(id bigint(20) NOT NULL AUTO_INCREMENT);",
+			"output": "create table t1 (\n\tid bigint(20) not null auto_increment\n) ENGINE=InnoDB ",
+		},
+		{
+			"input":  "create table t1 (\n\tid bigint(20) not null auto_increment\n) ENGINE=memory ",
+			"output": "create table t1 (\n\tid bigint(20) not null auto_increment\n) ENGINE=InnoDB ",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteInnoDB()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteAutoIncrement checks that AUTO_INCREMENT=N is reset to 1.
+func TestRewriteAutoIncrement(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "CREATE TABLE t1(id bigint(20) NOT NULL AUTO_INCREMENT) ENGINE=InnoDB AUTO_INCREMENT=123802;",
+			"output": "create table t1 (\n\tid bigint(20) not null auto_increment\n) ENGINE=InnoDB auto_increment=1 ",
+		},
+		{
+			"input":  "create table t1 (\n\tid bigint(20) not null auto_increment\n) ENGINE=InnoDB",
+			"output": "create table t1 (\n\tid bigint(20) not null auto_increment\n) ENGINE=InnoDB",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteAutoIncrement()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteIntWidth verifies that integer display widths are normalized to
+// their defaults: bigint gets (20) (added or corrected), int(20) is corrected
+// to int(10), and a bare int with no width is left unchanged.
+func TestRewriteIntWidth(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "CREATE TABLE t1(id bigint(10) NOT NULL AUTO_INCREMENT) ENGINE=InnoDB AUTO_INCREMENT=123802;",
+			"output": "create table t1 (\n\tid bigint(20) not null auto_increment\n) ENGINE=InnoDB auto_increment=123802",
+		},
+		{
+			"input":  "CREATE TABLE t1(id bigint NOT NULL AUTO_INCREMENT) ENGINE=InnoDB AUTO_INCREMENT=123802;",
+			"output": "create table t1 (\n\tid bigint(20) not null auto_increment\n) ENGINE=InnoDB auto_increment=123802",
+		},
+		{
+			"input":  "create table t1(id int(20) not null auto_increment) ENGINE=InnoDB;",
+			"output": "create table t1 (\n\tid int(10) not null auto_increment\n) ENGINE=InnoDB",
+		},
+		{
+			// int without an explicit width is not rewritten
+			"input":  "create table t1(id int not null auto_increment) ENGINE=InnoDB;",
+			"output": "create table t1 (\n\tid int not null auto_increment\n) ENGINE=InnoDB",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteIntWidth()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteAlwaysTrue verifies removal of always-true predicates (1=1,
+// 1>=1, 'a'='a', col=col, ...) from WHERE clauses, including AND/OR
+// combinations and parenthesized groups; conditions that are not provably
+// true (col=col2, 2=3) must be kept.
+func TestRewriteAlwaysTrue(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "SELECT count(col) FROM tbl where 1=1;",
+			"output": "select count(col) from tbl",
+		},
+		{
+			// self-comparison of a column is kept: col could be NULL,
+			// so col=col is not always true
+			"input":  "SELECT count(col) FROM tbl where col=col;",
+			"output": "select count(col) from tbl where col = col",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where col=col2;",
+			"output": "select count(col) from tbl where col = col2",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where 1>=1;",
+			"output": "select count(col) from tbl",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where 1<=1;",
+			"output": "select count(col) from tbl",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where 1=1 and 2=2;",
+			"output": "select count(col) from tbl",
+		},
+		{
+			// always-true OR'ed with a real condition: only the real
+			// condition survives
+			"input":  "SELECT count(col) FROM tbl where 1=1 or 2=3;",
+			"output": "select count(col) from tbl where 2 = 3",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where 1=1 and 3=3 or 2=3;",
+			"output": "select count(col) from tbl where 2 = 3",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where 1=1 and 3=3 or 2!=3;",
+			"output": "select count(col) from tbl",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where 1=1 or 2=3 and 3=3 ;",
+			"output": "select count(col) from tbl where 2 = 3",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where (1=1);",
+			"output": "select count(col) from tbl",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where ('a'= 'a' or 'b' = 'b') and a = 'b';",
+			"output": "select count(col) from tbl where a = 'b'",
+		},
+		{
+			// outer parentheses around the surviving condition are preserved
+			"input":  "SELECT count(col) FROM tbl where (('a'= 'a' or 'b' = 'b') and a = 'b');",
+			"output": "select count(col) from tbl where (a = 'b')",
+		},
+		{
+			"input":  "SELECT count(col) FROM tbl where 'a'= 'a' or ('b' = 'b' and a = 'b');",
+			"output": "select count(col) from tbl where (a = 'b')",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteAlwaysTrue()
+		if rw == nil {
+			// NOTE(review): t.Error would be preferable to t.Errorf with
+			// no format verbs here.
+			t.Errorf("NoRw")
+		} else if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TODO:
+// TestRewriteSubQuery2Join covers the subquery-to-JOIN rewrite. The rewrite
+// is not implemented for these cases yet, so every expected output is the
+// empty string; the commented-out outputs show the intended rewrites.
+func TestRewriteSubQuery2Join(t *testing.T) {
+	// disable the test DSN so no database connection is attempted
+	common.Config.TestDSN.Disable = true
+	testSQL := []map[string]string{
+		{
+			// This case comes from the official MySQL documentation, but it
+			// is not necessarily correct; whether the rewrite is valid
+			// depends on the table structure definitions.
+			"input":  `SELECT * FROM t1 WHERE id IN (SELECT id FROM t2);`,
+			"output": "",
+			//"output": `SELECT DISTINCT t1.* FROM t1, t2 WHERE t1.id=t2.id;`,
+		},
+		{
+			"input":  `SELECT * FROM t1 WHERE id NOT IN (SELECT id FROM t2);`,
+			"output": "",
+			//"output": `SELECT table1.* FROM t1 LEFT JOIN t2 ON t1.id=t2.id WHERE t2.id IS NULL;`,
+		},
+		{
+			"input":  `SELECT * FROM t1 WHERE NOT EXISTS (SELECT id FROM t2 WHERE t1.id=t2.id);`,
+			"output": "",
+			//"output": `SELECT table1.* FROM table1 LEFT JOIN table2 ON table1.id=table2.id WHERE table2.id IS NULL;`,
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteSubQuery2Join()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteDML2Select verifies that DML statements (DELETE, UPDATE,
+// INSERT, REPLACE) are converted into equivalent read-only SELECT
+// statements, so they can be EXPLAINed safely. INSERT ... VALUES becomes
+// "select 1 from DUAL"; INSERT ... SELECT keeps its SELECT part.
+func TestRewriteDML2Select(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  "DELETE city, country FROM city INNER JOIN country using (country_id) WHERE city.city_id = 1;",
+			"output": "select * from city join country using (country_id) where city.city_id = 1",
+		}, {
+			"input":  "DELETE city FROM city LEFT JOIN country ON city.country_id = country.country_id WHERE country.country IS NULL;",
+			"output": "select * from city left join country on city.country_id = country.country_id where country.country is null",
+		}, {
+			"input":  "DELETE a1, a2 FROM city AS a1 INNER JOIN country AS a2 WHERE a1.country_id=a2.country_id",
+			"output": "select * from city as a1 join country as a2 where a1.country_id = a2.country_id",
+		}, {
+			"input":  "DELETE FROM a1, a2 USING city AS a1 INNER JOIN country AS a2 WHERE a1.country_id=a2.country_id",
+			"output": "select * from city as a1 join country as a2 where a1.country_id = a2.country_id",
+		}, {
+			"input":  "DELETE FROM film WHERE length > 100;",
+			"output": "select * from film where length > 100",
+		}, {
+			"input":  "UPDATE city INNER JOIN country USING(country_id) SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.city_id=10;",
+			"output": "select * from city join country using (country_id) where city.city_id = 10",
+		}, {
+			"input":  "UPDATE city INNER JOIN country ON city.country_id = country.country_id INNER JOIN address ON city.city_id = address.city_id SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.city_id=10;",
+			"output": "select * from city join country on city.country_id = country.country_id join address on city.city_id = address.city_id where city.city_id = 10",
+		}, {
+			"input":  "UPDATE city, country SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.country_id = country.country_id AND city.city_id=10;",
+			"output": "select * from city, country where city.country_id = country.country_id and city.city_id = 10",
+		}, {
+			"input":  "UPDATE film SET length = 10 WHERE language_id = 20;",
+			"output": "select * from film where language_id = 20",
+		}, {
+			"input":  "INSERT INTO city (country_id) SELECT country_id FROM country;",
+			"output": "select country_id from country",
+		}, {
+			"input":  "INSERT INTO city (country_id) VALUES (1),(2),(3);",
+			"output": "select 1 from DUAL",
+		}, {
+			"input":  "INSERT INTO city (country_id) VALUES (10);",
+			"output": "select 1 from DUAL",
+		}, {
+			"input":  "INSERT INTO city (country_id) SELECT 10 FROM DUAL;",
+			"output": "select 10 from dual",
+		}, {
+			"input":  "replace INTO city (country_id) SELECT 10 FROM DUAL;",
+			"output": "select 10 from dual",
+		},
+	}
+
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteDML2Select()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteDistinctStar verifies that DISTINCT is dropped from
+// "DISTINCT *"-style selects over a single table (where it is meaningless
+// on a table with a primary key), while multi-table queries and
+// column-level DISTINCT are left unchanged.
+func TestRewriteDistinctStar(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  `SELECT DISTINCT * FROM film;`,
+			"output": "SELECT * FROM film;",
+		},
+		{
+			"input":  `SELECT COUNT(DISTINCT *) FROM film;`,
+			"output": "SELECT COUNT(*) FROM film;",
+		},
+		{
+			"input":  `SELECT DISTINCT film.* FROM film;`,
+			"output": "SELECT * FROM film;",
+		},
+		{
+			// DISTINCT on a specific column is meaningful — keep it
+			"input":  "SELECT DISTINCT col FROM film;",
+			"output": "SELECT DISTINCT col FROM film;",
+		},
+		{
+			// more than one table — DISTINCT may still deduplicate, keep it
+			"input":  "SELECT DISTINCT film.* FROM film, tbl;",
+			"output": "SELECT DISTINCT film.* FROM film, tbl;",
+		},
+		{
+
+			"input":  "SELECT DISTINCT * FROM film, tbl;",
+			"output": "SELECT DISTINCT * FROM film, tbl;",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteDistinctStar()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestMergeAlterTables exercises MergeAlterTables with a variety of DDL
+// statements (index add/drop, column add/drop, renames, MODIFY/CHANGE).
+// NOTE(review): this test only prints the result and asserts nothing; a
+// golden-file comparison (as in TestListRewriteRules) would make it a real
+// regression test.
+func TestMergeAlterTables(t *testing.T) {
+	sqls := []string{
+		// ADD|DROP INDEX
+		// TODO: PRIMARY KEY, [UNIQUE|FULLTEXT|SPATIAL] INDEX
+		"CREATE INDEX part_of_name ON customer (name(10));",
+		"alter table `sakila`.`t1` add index `idx_col`(`col`)",
+		"alter table `sakila`.`t1` add UNIQUE index `idx_col`(`col`)",
+		"alter table `sakila`.`t1` add index `idx_ID`(`ID`)",
+
+		// ADD|DROP COLUMN
+		"ALTER TABLE t2 DROP COLUMN c, DROP COLUMN d;",
+		"ALTER TABLE T2 ADD COLUMN C int;",
+		"ALTER TABLE T2 ADD COLUMN D int FIRST;",
+		"ALTER TABLE T2 ADD COLUMN E int AFTER D;",
+
+		// RENAME COLUMN
+		"ALTER TABLE t1 RENAME COLUMN a TO b",
+
+		// RENAME INDEX
+		"ALTER TABLE t1 RENAME INDEX idx_a TO idx_b",
+		"ALTER TABLE t1 RENAME KEY idx_a TO idx_b",
+
+		// RENAME TABLE
+		"ALTER TABLE db.old_table RENAME new_table;",
+		"ALTER TABLE old_table RENAME TO new_table;",
+		"ALTER TABLE old_table RENAME AS new_table;",
+
+		// MODIFY & CHANGE
+		"ALTER TABLE t1 MODIFY col1 BIGINT UNSIGNED DEFAULT 1 COMMENT 'my column';",
+		"ALTER TABLE t1 CHANGE b a INT NOT NULL;",
+	}
+	fmt.Println(MergeAlterTables(sqls...))
+}
+
+// TestRewriteUnionAll verifies that UNION is rewritten to UNION ALL
+// (avoiding the implicit deduplication sort when duplicates are acceptable).
+func TestRewriteUnionAll(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  `select country_id from city union select country_id from country;`,
+			"output": "select country_id from city union all select country_id from country",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteUnionAll()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+// TestRewriteTruncate verifies that a DELETE without a WHERE clause is
+// rewritten to TRUNCATE TABLE.
+func TestRewriteTruncate(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  `delete from tbl;`,
+			"output": "truncate table tbl",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteTruncate()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRewriteOr2In verifies that OR chains of equality conditions on the
+// same column are rewritten as IN lists, including grouping per column and
+// handling of parenthesized subexpressions; != and IS NULL conditions are
+// not converted.
+func TestRewriteOr2In(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  `select country_id from city where country_id = 1 or country_id = 2 or country_id = 3;`,
+			"output": "select country_id from city where country_id in (1, 2, 3)",
+		},
+		// TODO: always-true conditions inside OR
+		{
+			"input":  `select country_id from city where country_id != 1 or country_id != 2 or country_id = 3;`,
+			"output": "select country_id from city where country_id != 1 or country_id != 2 or country_id = 3",
+		},
+		// col = 1 OR col IS NULL cannot be converted to IN
+		{
+			"input":  `select country_id from city where col = 1 or col is null;`,
+			"output": "select country_id from city where col = 1 or col is null",
+		},
+		{
+			"input":  `select country_id from city where col1 = 1 or col2 = 1 or col2 = 2;`,
+			"output": "select country_id from city where col1 = 1 or col2 in (1, 2)",
+		},
+		{
+			"input":  `select country_id from city where col1 = 1 or col2 = 1 or col2 = 2 or col1 = 3;`,
+			"output": "select country_id from city where col2 in (1, 2) or col1 in (1, 3)",
+		},
+		{
+			// conditions inside parentheses are merged only within the group
+			"input":  `select country_id from city where (col1 = 1 or col2 = 1 or col2 = 2 ) or col1 = 3;`,
+			"output": "select country_id from city where (col1 = 1 or col2 in (1, 2)) or col1 = 3",
+		},
+		{
+			"input":  `select country_id from city where col1 = 1 or (col2 = 1 or col2 = 2 ) or col1 = 3;`,
+			"output": "select country_id from city where (col2 in (1, 2)) or col1 in (1, 3)",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteOr2In()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestRmParenthesis verifies that redundant parentheses around single
+// conditions in WHERE clauses are removed.
+func TestRmParenthesis(t *testing.T) {
+	testSQL := []map[string]string{
+		{
+			"input":  `select country_id from city where (country_id = 1);`,
+			"output": "select country_id from city where country_id = 1",
+		},
+		{
+			"input":  `select * from city where a = 1 and (country_id = 1);`,
+			"output": "select * from city where a = 1 and country_id = 1",
+		},
+		{
+			"input":  `select country_id from city where (country_id = 1) or country_id = 1 ;`,
+			"output": "select country_id from city where country_id = 1 or country_id = 1",
+		},
+		{
+			"input":  `select country_id from city where col = 1 or (country_id = 1) or country_id = 1 ;`,
+			"output": "select country_id from city where col = 1 or country_id = 1 or country_id = 1",
+		},
+	}
+	for _, sql := range testSQL {
+		rw := NewRewrite(sql["input"]).RewriteRmParenthesis()
+		if rw.NewSQL != sql["output"] {
+			t.Errorf("want: %s\ngot: %s", sql["output"], rw.NewSQL)
+		}
+	}
+}
+
+// TestListRewriteRules compares the output of ListRewriteRules against the
+// golden file testdata/TestListRewriteRules.golden via common.GoldenDiff.
+// The package-level `update` flag presumably regenerates the golden file —
+// see GoldenDiff for the exact semantics.
+func TestListRewriteRules(t *testing.T) {
+	err := common.GoldenDiff(func() {
+		ListRewriteRules(RewriteRules)
+	}, t.Name(), update)
+	if err != nil {
+		t.Error(err)
+	}
+}
diff --git a/ast/testdata/TestListRewriteRules.golden b/ast/testdata/TestListRewriteRules.golden
new file mode 100644
index 00000000..68a821d5
--- /dev/null
+++ b/ast/testdata/TestListRewriteRules.golden
@@ -0,0 +1,272 @@
+# 重写规则
+
+[toc]
+
+## dml2select
+* **Description**:将数据库更新请求转换为只读查询请求,便于执行EXPLAIN
+
+* **Original**:
+
+```sql
+DELETE FROM film WHERE length > 100
+```
+
+* **Suggest**:
+
+```sql
+select * from film where length > 100
+```
+## star2columns
+* **Description**:为SELECT *补全表的列信息
+
+* **Original**:
+
+```sql
+SELECT * FROM film
+```
+
+* **Suggest**:
+
+```sql
+select film.film_id, film.title from film
+```
+## insertcolumns
+* **Description**:为INSERT补全表的列信息
+
+* **Original**:
+
+```sql
+insert into film values(1,2,3,4,5)
+```
+
+* **Suggest**:
+
+```sql
+insert into film(film_id, title, description, release_year, language_id) values (1, 2, 3, 4, 5)
+```
+## having
+* **Description**:将查询的HAVING子句改写为WHERE中的查询条件
+
+* **Original**:
+
+```sql
+SELECT state, COUNT(*) FROM Drivers GROUP BY state HAVING state IN ('GA', 'TX') ORDER BY state
+```
+
+* **Suggest**:
+
+```sql
+select state, COUNT(*) from Drivers where state in ('GA', 'TX') group by state order by state asc
+```
+## orderbynull
+* **Description**:如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加ORDER BY NULL
+
+* **Original**:
+
+```sql
+SELECT sum(col1) FROM tbl GROUP BY col
+```
+
+* **Suggest**:
+
+```sql
+select sum(col1) from tbl group by col order by null
+```
+## unionall
+* **Description**:可以接受重复的时间,使用UNION ALL替代UNION以提高查询效率
+
+* **Original**:
+
+```sql
+select country_id from city union select country_id from country
+```
+
+* **Suggest**:
+
+```sql
+select country_id from city union all select country_id from country
+```
+## or2in
+* **Description**:将同一列不同条件的OR查询转写为IN查询
+
+* **Original**:
+
+```sql
+select country_id from city where col1 = 1 or (col2 = 1 or col2 = 2 ) or col1 = 3;
+```
+
+* **Suggest**:
+
+```sql
+select country_id from city where (col2 in (1, 2)) or col1 in (1, 3);
+```
+## dmlorderby
+* **Description**:删除DML更新操作中无意义的ORDER BY
+
+* **Original**:
+
+```sql
+DELETE FROM tbl WHERE col1=1 ORDER BY col
+```
+
+* **Suggest**:
+
+```sql
+delete from tbl where col1 = 1
+```
+## distinctstar
+* **Description**:DISTINCT *对有主键的表没有意义,可以将DISTINCT删掉
+
+* **Original**:
+
+```sql
+SELECT DISTINCT * FROM film;
+```
+
+* **Suggest**:
+
+```sql
+SELECT * FROM film
+```
+## standard
+* **Description**:SQL标准化,如:关键字转换为小写
+
+* **Original**:
+
+```sql
+SELECT sum(col1) FROM tbl GROUP BY 1;
+```
+
+* **Suggest**:
+
+```sql
+select sum(col1) from tbl group by 1
+```
+## mergealter
+* **Description**:合并同一张表的多条ALTER语句
+
+* **Original**:
+
+```sql
+ALTER TABLE t2 DROP COLUMN c;ALTER TABLE t2 DROP COLUMN d;
+```
+
+* **Suggest**:
+
+```sql
+ALTER TABLE t2 DROP COLUMN c, DROP COLUMN d;
+```
+## alwaystrue
+* **Description**:删除无用的恒真判断条件
+
+* **Original**:
+
+```sql
+SELECT count(col) FROM tbl where 'a'= 'a' or ('b' = 'b' and a = 'b');
+```
+
+* **Suggest**:
+
+```sql
+select count(col) from tbl where (a = 'b');
+```
+## countstar
+* **Description**:不建议使用COUNT(col)或COUNT(常量),建议改写为COUNT(*)
+
+* **Original**:
+
+```sql
+SELECT count(col) FROM tbl GROUP BY 1;
+```
+
+* **Suggest**:
+
+```sql
+SELECT count(*) FROM tbl GROUP BY 1;
+```
+## innodb
+* **Description**:建表时建议使用InnoDB引擎,非InnoDB引擎表自动转InnoDB
+
+* **Original**:
+
+```sql
+CREATE TABLE t1(id bigint(20) NOT NULL AUTO_INCREMENT);
+```
+
+* **Suggest**:
+
+```sql
+create table t1 (
+	id bigint(20) not null auto_increment
+) ENGINE=InnoDB;
+```
+## autoincrement
+* **Description**:将autoincrement初始化为1
+
+* **Original**:
+
+```sql
+CREATE TABLE t1(id bigint(20) NOT NULL AUTO_INCREMENT) ENGINE=InnoDB AUTO_INCREMENT=123802;
+```
+
+* **Suggest**:
+
+```sql
+create table t1(id bigint(20) not null auto_increment) ENGINE=InnoDB auto_increment=1;
+```
+## intwidth
+* **Description**:整型数据类型修改默认显示宽度
+
+* **Original**:
+
+```sql
+create table t1 (id int(20) not null auto_increment) ENGINE=InnoDB;
+```
+
+* **Suggest**:
+
+```sql
+create table t1 (id int(10) not null auto_increment) ENGINE=InnoDB;
+```
+## truncate
+* **Description**:不带WHERE条件的DELETE操作建议修改为TRUNCATE
+
+* **Original**:
+
+```sql
+DELETE FROM tbl
+```
+
+* **Suggest**:
+
+```sql
+truncate table tbl
+```
+## rmparenthesis
+* **Description**:去除没有意义的括号
+
+* **Original**:
+
+```sql
+select col from table where (col = 1);
+```
+
+* **Suggest**:
+
+```sql
+select col from table where col = 1;
+```
+## delimiter
+* **Description**:补全DELIMITER
+
+* **Original**:
+
+```sql
+use sakila
+```
+
+* **Suggest**:
+
+```sql
+use sakila;
+```
diff --git a/ast/testdata/TestPretty.golden b/ast/testdata/TestPretty.golden
new file mode 100644
index 00000000..b333aeae
--- /dev/null
+++ b/ast/testdata/TestPretty.golden
@@ -0,0 +1,1470 @@
+select sourcetable, if(f.lastcontent = ?, f.lastupdate, f.lastcontent) as lastactivity, f.totalcount as activity, type.class as type, (f.nodeoptions & ?) as nounsubscribe from node as f inner join contenttype as type on type.contenttypeid = f.contenttypeid inner join subscribed as sd on sd.did = f.nodeid and sd.userid = ? union all select f.name as title, f.userid as keyval, ? as sourcetable, ifnull(f.lastpost, f.joindate) as lastactivity, f.posts as activity, ? as type, ? as nounsubscribe from user as f inner join userlist as ul on ul.relationid = f.userid and ul.userid = ? where ul.type = ? and ul.aq = ? order by title limit ?
+
+SELECT  
+  sourcetable, IF( f. lastcontent  = ?, f. lastupdate, f. lastcontent) as  lastactivity, f. totalcount  as  activity, type. class  as  type, (f. nodeoptions  & ?) as  nounsubscribe  
+FROM  
+  node  as  f  
+  INNER JOIN  contenttype  as  type  on  type. contenttypeid  = f. contenttypeid  
+  INNER JOIN  subscribed  as  sd  on  sd. did  = f. nodeid  
+  AND  sd. userid  = ?  
+UNION ALL  
+SELECT  
+  f. name  as  title, f. userid  as  keyval, ?  as  sourcetable, IFNULL( f. lastpost, f. joindate) as  lastactivity, f. posts  as  activity, ?  as  type, ?  as  nounsubscribe  
+FROM  
+  USER  as  f  
+  INNER JOIN  userlist  as  ul  on  ul. relationid  = f. userid  
+  AND  ul. userid  = ?  
+WHERE  
+  ul. type  = ?  
+  AND  ul. aq  = ?  
+ORDER BY  
+  title  
+LIMIT  
+  ?
+administrator command: Init DB
+administrator  command: Init  DB
+CALL foo(1, 2, 3)
+CALL  foo( 1, 2, 3)
+### Channels ###
+					SELECT sourcetable, IF(f.lastcontent = 0, f.lastupdate, f.lastcontent) AS lastactivity,
+					f.totalcount AS activity, type.class AS type,
+					(f.nodeoptions & 512) AS noUnsubscribe
+					FROM node AS f
+					INNER JOIN contenttype AS type ON type.contenttypeid = f.contenttypeid 
+
+					INNER JOIN subscribed AS sd ON sd.did = f.nodeid AND sd.userid = 15965
+ UNION  ALL 
+
+					### Users ###
+					SELECT f.name AS title, f.userid AS keyval, 'user' AS sourcetable, IFNULL(f.lastpost, f.joindate) AS lastactivity,
+					f.posts as activity, 'Member' AS type,
+					0 AS noUnsubscribe
+					FROM user AS f
+					INNER JOIN userlist AS ul ON ul.relationid = f.userid AND ul.userid = 15965
+					WHERE ul.type = 'f' AND ul.aq = 'yes'
+ ORDER BY title ASC LIMIT 100
+### Channels ###
+SELECT  
+  sourcetable, IF( f. lastcontent  = 0, f. lastupdate, f. lastcontent) AS  lastactivity, f. totalcount  AS  activity, type. class  AS  type, (f. nodeoptions  & 512) AS  noUnsubscribe
+ 
+FROM  
+  node  AS  f
+ 
+  INNER JOIN  contenttype  AS  type  ON  type. contenttypeid  = f. contenttypeid  
+  INNER JOIN  subscribed  AS  sd  ON  sd. did  = f. nodeid  
+  AND  sd. userid  = 15965
+ 
+UNION  
+  ALL  ### Users ###
+SELECT  
+  f. name  AS  title, f. userid  AS  keyval, 'user' AS  sourcetable, IFNULL( f. lastpost, f. joindate) AS  lastactivity, f. posts  as  activity, 'Member' AS  type, 0  AS  noUnsubscribe
+ 
+FROM  
+  USER  AS  f
+ 
+  INNER JOIN  userlist  AS  ul  ON  ul. relationid  = f. userid  
+  AND  ul. userid  = 15965
+ 
+WHERE  
+  ul. type  = 'f' 
+  AND  ul. aq  = 'yes' 
+ORDER BY  
+  title  ASC  
+LIMIT  
+  100
+CREATE DATABASE org235_percona345 COLLATE 'utf8_general_ci'
+CREATE  DATABASE  org235_percona345  COLLATE  'utf8_general_ci'
+insert into abtemp.coxed select foo.bar from foo
+INSERT  into  abtemp. coxed  
+SELECT  
+  foo. bar  
+FROM  
+  foo
+insert into foo(a, b, c) value(2, 4, 5)
+INSERT  into  foo( a, b, c) value( 2, 4, 5)
+insert into foo(a, b, c) values(2, 4, 5)
+INSERT  into  foo( a, b, c) 
+VALUES( 
+  2, 4, 5)
+insert into foo(a, b, c) values(2, 4, 5) , (2,4,5)
+INSERT  into  foo( a, b, c) 
+VALUES( 
+  2, 4, 5), 
+  (2, 4, 5)
+insert into foo values (1, '(2)', 'This is a trick: ). More values.', 4)
+INSERT  into  foo  
+VALUES  
+  (1, '(2)', 
+  'This is a trick: ). More values.', 
+  4)
+insert into tb values (1)
+INSERT  into  tb  
+VALUES  
+  (1)
+INSERT INTO t (ts) VALUES ('()', '\(', '\)')
+INSERT  INTO  t  (ts) 
+VALUES  
+  (
+    '()', '\(', '\)')
+INSERT INTO t (ts) VALUES (NOW())
+INSERT  INTO  t  (ts) 
+VALUES  
+  (
+    NOW()
+  )
+INSERT INTO t () VALUES ()
+INSERT  INTO  t  (
+) 
+  VALUES  
+    (
+)
+insert into t values (1), (2), (3)
+
+	on duplicate key update query_count=1
+INSERT  into  t  
+VALUES  
+  (1), 
+  (2), 
+  (3) on  duplicate  key  
+UPDATE  
+  query_count= 1
+insert into t values (1) on duplicate key update query_count=COALESCE(query_count, 0) + VALUES(query_count)
+INSERT  into  t  
+VALUES  
+  (1) on  duplicate  key  
+UPDATE  
+  query_count= COALESCE( query_count, 0) + 
+VALUES( 
+  query_count)
+LOAD DATA INFILE '/tmp/foo.txt' INTO db.tbl
+LOAD  DATA  INFILE  '/tmp/foo.txt' INTO  db. tbl
+select 0e0, +6e-30, -6.00 from foo where a = 5.5 or b=0.5 or c=.5
+
+SELECT  
+  0e0, + 6e- 30, - 6.00  
+FROM  
+  foo  
+WHERE  
+  a  = 5.5  
+  OR  b= 0.5  
+  OR  c=.5
+select 0x0, x'123', 0b1010, b'10101' from foo
+
+SELECT  
+  0x0, x' 123', 
+  0b1010, b' 10101' 
+FROM  
+  foo
+select 123_foo from 123_foo
+
+SELECT  
+  123_foo  
+FROM  
+  123_foo
+select 123foo from 123foo
+
+SELECT  
+  123foo  
+FROM  
+  123foo
+SELECT 	1 AS one FROM calls USE INDEX(index_name)
+
+SELECT  
+  1  AS  one  
+FROM  
+  calls  USE  INDEX( index_name)
+SELECT /*!40001 SQL_NO_CACHE */ * FROM `film`
+
+SELECT  
+  */ * 
+FROM  
+  `film`
+SELECT 'a' 'b' 'c' 'd' FROM kamil
+
+SELECT  
+  'a' 'b' 'c' 'd' 
+FROM  
+  kamil
+SELECT BENCHMARK(100000000, pow(rand(), rand())), 1 FROM `-hj-7d6-shdj5-7jd-kf-g988h-`.`-aaahj-7d6-shdj5-7&^%$jd-kf-g988h-9+4-5*6ab-`
+
+SELECT  
+  BENCHMARK( 100000000, POW( RAND(
+), 
+RAND(
+)
+)
+), 
+1  
+FROM  
+  `-hj-7d6-shdj5-7jd-kf-g988h-`.`-aaahj-7d6-shdj5-7&^%$jd-kf-g988h-9+4-5*6ab-`
+SELECT c FROM org235.t WHERE id=0xdeadbeaf
+
+SELECT  
+  c  
+FROM  
+  org235. t  
+WHERE  
+  id= 0xdeadbeaf
+select c from t where i=1 order by c asc
+
+SELECT  
+  c  
+FROM  
+  t  
+WHERE  
+  i= 1  
+ORDER BY  
+  c  asc
+SELECT c FROM t WHERE id=0xdeadbeaf
+
+SELECT  
+  c  
+FROM  
+  t  
+WHERE  
+  id= 0xdeadbeaf
+SELECT c FROM t WHERE id=1
+
+SELECT  
+  c  
+FROM  
+  t  
+WHERE  
+  id= 1
+select `col` from `table-1` where `id` = 5
+
+SELECT  
+  `col` 
+FROM  
+  `table-1` 
+WHERE  
+  `id` = 5
+SELECT `db`.*, (CASE WHEN (`date_start` <=  '2014-09-10 09:17:59' AND `date_end` >=  '2014-09-10 09:17:59') THEN 'open' WHEN (`date_start` >  '2014-09-10 09:17:59' AND `date_end` >  '2014-09-10 09:17:59') THEN 'tbd' ELSE 'none' END) AS `status` FROM `foo` AS `db` WHERE (a_b in ('1', '10101'))
+
+SELECT  
+  `db`.*, 
+  (CASE  WHEN  (`date_start` <= '2014-09-10 09:17:59' 
+  AND  `date_end` >= '2014-09-10 09:17:59'
+) THEN  'open' WHEN  (`date_start` > '2014-09-10 09:17:59' 
+AND  `date_end` > '2014-09-10 09:17:59'
+) THEN  'tbd' ELSE  'none' END) AS  `status` 
+FROM  
+  `foo` AS  `db` 
+WHERE  
+  (a_b  in  (
+    '1', '10101')
+  )
+select field from `-master-db-1`.`-table-1-` order by id, ?;
+
+SELECT  
+  FIELD  
+FROM  
+  `-master-db-1`.`-table-1-` 
+ORDER BY  
+  id, ?;
+select   foo
+
+SELECT  
+  foo
+select foo_1 from foo_2_3
+
+SELECT  
+  foo_1  
+FROM  
+  foo_2_3
+select foo -- bar
+
+
+SELECT  
+  foo  -- bar
+select foo-- bar
+,foo
+
+SELECT  
+  foo- - bar
+, 
+  foo
+select '\\' from foo
+
+SELECT  
+  '\\' 
+FROM  
+  foo
+select * from foo limit 5
+
+SELECT  
+  * 
+FROM  
+  foo  
+LIMIT  
+  5
+select * from foo limit 5, 10
+
+SELECT  
+  * 
+FROM  
+  foo  
+LIMIT  
+  5, 10
+select * from foo limit 5 offset 10
+
+SELECT  
+  * 
+FROM  
+  foo  
+LIMIT  
+  5  offset  10
+SELECT * from foo where a = 5
+
+SELECT  
+  * 
+FROM  
+  foo  
+WHERE  
+  a  = 5
+select * from foo where a in (5) and b in (5, 8,9 ,9 , 10)
+
+SELECT  
+  * 
+FROM  
+  foo  
+WHERE  
+  a  in  (5) 
+  AND  b  in  (5, 8, 9, 
+  9, 
+  10)
+SELECT '' '' '' FROM kamil
+
+SELECT  
+  '' '' '' 
+FROM  
+  kamil
+ select  * from
+foo where a = 5
+
+SELECT  
+  * 
+FROM  
+  foo  
+WHERE  
+  a  = 5
+SELECT * FROM prices.rt_5min where id=1
+
+SELECT  
+  * 
+FROM  
+  prices. rt_5min  
+WHERE  
+  id= 1
+SELECT * FROM table WHERE field = 'value' /*arbitrary/31*/ 
+
+SELECT  
+  * 
+FROM  
+  table  
+WHERE  
+  FIELD  = 'value' */
+SELECT * FROM table WHERE field = 'value' /*arbitrary31*/ 
+
+SELECT  
+  * 
+FROM  
+  table  
+WHERE  
+  FIELD  = 'value' */
+SELECT *    FROM t WHERE 1=1 AND id=1
+
+SELECT  
+  * 
+FROM  
+  t  
+WHERE  
+  1= 1  
+  AND  id= 1
+select * from t where (base.nid IN  ('1412', '1410', '1411'))
+
+SELECT  
+  * 
+FROM  
+  t  
+WHERE  
+  (base. nid  IN  (
+    '1412', '1410', '1411')
+  )
+select * from t where i=1      order            by
+             a,  b          ASC, d    DESC,
+
+                                    e asc
+
+SELECT  
+  * 
+FROM  
+  t  
+WHERE  
+  i= 1  order  by
+ a, b  ASC, d  DESC, e  asc
+select * from t where i=1 order by a, b ASC, d DESC, e asc
+
+SELECT  
+  * 
+FROM  
+  t  
+WHERE  
+  i= 1  
+ORDER BY  
+  a, b  ASC, d  DESC, e  asc
+select 'hello'
+
+
+SELECT  
+  'hello'
+select 'hello', '
+hello
+', "hello", '\'' from foo
+
+SELECT  
+  'hello', 
+  '
+hello
+', 
+  "hello", 
+  '\'' 
+FROM  
+  foo
+SELECT ID, name, parent, type FROM posts WHERE _name IN ('perf','caching') AND (type = 'page' OR type = 'attachment')
+
+SELECT  
+  ID, name, parent, type  
+FROM  
+  posts  
+WHERE  
+  _name  IN  (
+    'perf', 'caching') 
+    AND  (type  = 'page' 
+    OR  type  = 'attachment'
+  )
+SELECT name, value FROM variable
+
+SELECT  
+  name, value  
+FROM  
+  variable
+select 
+-- bar
+ foo
+
+SELECT  
+  -- bar
+  foo
+select null, 5.001, 5001. from foo
+
+SELECT  
+  null, 5.001, 5001. 
+FROM  
+  foo
+select sleep(2) from test.n
+
+SELECT  
+  SLEEP( 2) 
+FROM  
+  test. n
+SELECT t FROM field WHERE  (entity_type = 'node') AND (entity_id IN  ('609')) AND (language IN  ('und')) AND (deleted = '0') ORDER BY delta ASC
+
+SELECT  
+  t  
+FROM  
+  FIELD  
+WHERE  
+  (
+    entity_type  = 'node') 
+    AND  (entity_id  IN  (
+      '609')
+    ) 
+    AND  (language  IN  (
+      'und')
+    ) 
+    AND  (
+      deleted  = '0') 
+      ORDER BY  
+        delta  ASC
+select  t.table_schema,t.table_name,engine  from information_schema.tables t  inner join information_schema.columns c  on t.table_schema=c.table_schema and t.table_name=c.table_name group by t.table_schema,t.table_name having  sum(if(column_key in ('PRI','UNI'),1,0))=0
+
+SELECT  
+  t. table_schema, t. table_name, engine  
+FROM  
+  information_schema. tables  t  
+  INNER JOIN  information_schema. columns  c  on  t. table_schema= c. table_schema  
+  AND  t. table_name= c. table_name  
+GROUP BY  
+  t. table_schema, t. table_name  
+HAVING  
+  SUM( IF( column_key  in  (
+    'PRI', 'UNI'), 
+    1, 0)
+  )= 0
+/* -- S++ SU ABORTABLE -- spd_user: rspadim */SELECT SQL_SMALL_RESULT SQL_CACHE DISTINCT centro_atividade FROM est_dia WHERE unidade_id=1001 AND item_id=67 AND item_id_red=573
+*/ 
+SELECT  
+  SQL_SMALL_RESULT  SQL_CACHE  DISTINCT  centro_atividade  
+FROM  
+  est_dia  
+WHERE  
+  unidade_id= 1001  
+  AND  item_id= 67  
+  AND  item_id_red= 573
+UPDATE groups_search SET  charter = '   -------3\'\' XXXXXXXXX.\n    \n    -----------------------------------------------------', show_in_list = 'Y' WHERE group_id='aaaaaaaa'
+
+UPDATE  
+  groups_search  
+SET  
+  charter  = '   -------3\'\' XXXXXXXXX.\n    \n    -----------------------------------------------------', 
+  show_in_list  = 'Y' 
+WHERE  
+  group_id= 'aaaaaaaa'
+use `foo`
+use  `foo`
+select sourcetable, if(f.lastcontent = ?, f.lastupdate, f.lastcontent) as lastactivity, f.totalcount as activity, type.class as type, (f.nodeoptions & ?) as nounsubscribe from node as f inner join contenttype as type on type.contenttypeid = f.contenttypeid inner join subscribed as sd on sd.did = f.nodeid and sd.userid = ? union all select f.name as title, f.userid as keyval, ? as sourcetable, ifnull(f.lastpost, f.joindate) as lastactivity, f.posts as activity, ? as type, ? as nounsubscribe from user as f inner join userlist as ul on ul.relationid = f.userid and ul.userid = ? where ul.type = ? and ul.aq = ? order by title limit ?
+
+SELECT  
+  sourcetable, IF( f. lastcontent  = ?, f. lastupdate, f. lastcontent) as  lastactivity, f. totalcount  as  activity, type. class  as  type, (f. nodeoptions  & ?) as  nounsubscribe  
+FROM  
+  node  as  f  
+  INNER JOIN  contenttype  as  type  on  type. contenttypeid  = f. contenttypeid  
+  INNER JOIN  subscribed  as  sd  on  sd. did  = f. nodeid  
+  AND  sd. userid  = ?  
+UNION ALL  
+SELECT  
+  f. name  as  title, f. userid  as  keyval, ?  as  sourcetable, IFNULL( f. lastpost, f. joindate) as  lastactivity, f. posts  as  activity, ?  as  type, ?  as  nounsubscribe  
+FROM  
+  USER  as  f  
+  INNER JOIN  userlist  as  ul  on  ul. relationid  = f. userid  
+  AND  ul. userid  = ?  
+WHERE  
+  ul. type  = ?  
+  AND  ul. aq  = ?  
+ORDER BY  
+  title  
+LIMIT  
+  ?
+CREATE INDEX part_of_name ON customer (name(10));
+CREATE  INDEX  part_of_name  ON  customer  (
+  name( 10));
+alter table `sakila`.`t1` add index `idx_col`(`col`)
+
+ALTER TABLE  
+  `sakila`.`t1` 
+ADD  
+  index  `idx_col` (
+    `col`)
+alter table `sakila`.`t1` add UNIQUE index `idx_col`(`col`)
+
+ALTER TABLE  
+  `sakila`.`t1` 
+ADD  
+  UNIQUE  index  `idx_col` (
+    `col`)
+alter table `sakila`.`t1` add index `idx_ID`(`ID`)
+
+ALTER TABLE  
+  `sakila`.`t1` 
+ADD  
+  index  `idx_ID` (
+    `ID`)
+ALTER TABLE t2 DROP COLUMN c, DROP COLUMN d;
+
+ALTER TABLE  
+  t2  
+DROP  
+  COLUMN  c, 
+DROP  
+  COLUMN  d;
+ALTER TABLE T2 ADD COLUMN C int;
+
+ALTER TABLE  
+  T2  
+ADD  
+  COLUMN  C  int;
+ALTER TABLE T2 ADD COLUMN D int FIRST;
+
+ALTER TABLE  
+  T2  
+ADD  
+  COLUMN  D  int  FIRST;
+ALTER TABLE T2 ADD COLUMN E int AFTER D;
+
+ALTER TABLE  
+  T2  
+ADD  
+  COLUMN  E  int  
+AFTER  
+  D;
+ALTER TABLE t1 RENAME COLUMN a TO b
+
+ALTER TABLE  
+  t1  RENAME  COLUMN  a  TO  b
+ALTER TABLE t1 RENAME INDEX idx_a TO idx_b
+
+ALTER TABLE  
+  t1  RENAME  INDEX  idx_a  TO  idx_b
+ALTER TABLE t1 RENAME KEY idx_a TO idx_b
+
+ALTER TABLE  
+  t1  RENAME  KEY  idx_a  TO  idx_b
+ALTER TABLE db.old_table RENAME new_table;
+
+ALTER TABLE  
+  db. old_table  RENAME  new_table;
+ALTER TABLE old_table RENAME TO new_table;
+
+ALTER TABLE  
+  old_table  RENAME  TO  new_table;
+ALTER TABLE old_table RENAME AS new_table;
+
+ALTER TABLE  
+  old_table  RENAME  AS  new_table;
+ALTER TABLE t1 MODIFY col1 BIGINT UNSIGNED DEFAULT 1 COMMENT 'my column';
+
+ALTER TABLE  
+  t1  MODIFY  col1  BIGINT  UNSIGNED  DEFAULT  1  COMMENT  'my column';
+ALTER TABLE t1 CHANGE b a INT NOT NULL;
+
+ALTER TABLE  
+  t1  CHANGE  b  a  INT  NOT  NULL;
+SELECT * FROM film WHERE length = 86;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  = 86;
+SELECT * FROM film WHERE length IS NULL;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  IS  NULL;
+SELECT * FROM film HAVING title = 'abc';
+
+SELECT  
+  * 
+FROM  
+  film  
+HAVING  
+  title  = 'abc';
+SELECT * FROM sakila.film WHERE length >= 60;
+
+SELECT  
+  * 
+FROM  
+  sakila. film  
+WHERE  
+  LENGTH  >= 60;
+SELECT * FROM sakila.film WHERE length >= '60';
+
+SELECT  
+  * 
+FROM  
+  sakila. film  
+WHERE  
+  LENGTH  >= '60';
+SELECT * FROM film WHERE length BETWEEN 60 AND 84;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  BETWEEN  60  
+  AND  84;
+SELECT * FROM film WHERE title LIKE 'AIR%';
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  title  LIKE  'AIR%';
+SELECT * FROM film WHERE title IS NOT NULL;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  title  IS  NOT  NULL;
+SELECT * FROM film WHERE length = 114 and title = 'ALABAMA DEVIL';
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  = 114  
+  AND  title  = 'ALABAMA DEVIL';
+SELECT * FROM film WHERE length > 100 and title = 'ALABAMA DEVIL';
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  > 100  
+  AND  title  = 'ALABAMA DEVIL';
+SELECT * FROM film WHERE length > 100 and language_id < 10 and title = 'xyz';
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  > 100  
+  AND  language_id  < 10  
+  AND  title  = 'xyz';
+SELECT * FROM film WHERE length > 100 and language_id < 10;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  > 100  
+  AND  language_id  < 10;
+SELECT release_year, sum(length) FROM film WHERE length = 123 AND language_id = 1 GROUP BY release_year;
+
+SELECT  
+  release_year, SUM( LENGTH) 
+FROM  
+  film  
+WHERE  
+  LENGTH  = 123  
+  AND  language_id  = 1  
+GROUP BY  
+  release_year;
+SELECT release_year, sum(length) FROM film WHERE length >= 123 GROUP BY release_year;
+
+SELECT  
+  release_year, SUM( LENGTH) 
+FROM  
+  film  
+WHERE  
+  LENGTH  >= 123  
+GROUP BY  
+  release_year;
+SELECT release_year, language_id, sum(length) FROM film GROUP BY release_year, language_id;
+
+SELECT  
+  release_year, language_id, SUM( LENGTH) 
+FROM  
+  film  
+GROUP BY  
+  release_year, language_id;
+SELECT release_year, sum(length) FROM film WHERE length = 123 GROUP BY release_year,(length+language_id);
+
+SELECT  
+  release_year, SUM( LENGTH) 
+FROM  
+  film  
+WHERE  
+  LENGTH  = 123  
+GROUP BY  
+  release_year, (LENGTH+ language_id);
+SELECT release_year, sum(film_id) FROM film GROUP BY release_year;
+
+SELECT  
+  release_year, SUM( film_id) 
+FROM  
+  film  
+GROUP BY  
+  release_year;
+SELECT * FROM address GROUP BY address,district;
+
+SELECT  
+  * 
+FROM  
+  address  
+GROUP BY  
+  address, district;
+SELECT title FROM film WHERE ABS(language_id) = 3 GROUP BY title;
+
+SELECT  
+  title  
+FROM  
+  film  
+WHERE  
+  ABS( language_id) = 3  
+GROUP BY  
+  title;
+SELECT language_id FROM film WHERE length = 123 GROUP BY release_year ORDER BY language_id;
+
+SELECT  
+  language_id  
+FROM  
+  film  
+WHERE  
+  LENGTH  = 123  
+GROUP BY  
+  release_year  
+ORDER BY  
+  language_id;
+SELECT release_year FROM film WHERE length = 123 GROUP BY release_year ORDER BY release_year;
+
+SELECT  
+  release_year  
+FROM  
+  film  
+WHERE  
+  LENGTH  = 123  
+GROUP BY  
+  release_year  
+ORDER BY  
+  release_year;
+SELECT * FROM film WHERE length = 123 ORDER BY release_year ASC, language_id DESC;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  = 123  
+ORDER BY  
+  release_year  ASC, language_id  DESC;
+SELECT release_year FROM film WHERE length = 123 GROUP BY release_year ORDER BY release_year LIMIT 10;
+
+SELECT  
+  release_year  
+FROM  
+  film  
+WHERE  
+  LENGTH  = 123  
+GROUP BY  
+  release_year  
+ORDER BY  
+  release_year  
+LIMIT  
+  10;
+SELECT * FROM film WHERE length = 123 ORDER BY release_year LIMIT 10;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  = 123  
+ORDER BY  
+  release_year  
+LIMIT  
+  10;
+SELECT * FROM film ORDER BY release_year LIMIT 10;
+
+SELECT  
+  * 
+FROM  
+  film  
+ORDER BY  
+  release_year  
+LIMIT  
+  10;
+SELECT * FROM film WHERE length > 100 ORDER BY length LIMIT 10;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  > 100  
+ORDER BY  
+  LENGTH  
+LIMIT  
+  10;
+SELECT * FROM film WHERE length < 100 ORDER BY length LIMIT 10;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  < 100  
+ORDER BY  
+  LENGTH  
+LIMIT  
+  10;
+SELECT * FROM customer WHERE address_id in (224,510) ORDER BY last_name;
+
+SELECT  
+  * 
+FROM  
+  customer  
+WHERE  
+  address_id  in  (224, 510) 
+ORDER BY  
+  last_name;
+SELECT * FROM film WHERE release_year = 2016 AND length != 1 ORDER BY title;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  release_year  = 2016  
+  AND  LENGTH  != 1  
+ORDER BY  
+  title;
+SELECT title FROM film WHERE release_year = 1995;
+
+SELECT  
+  title  
+FROM  
+  film  
+WHERE  
+  release_year  = 1995;
+SELECT title, replacement_cost FROM film WHERE language_id = 5 AND length = 70;
+
+SELECT  
+  title, replacement_cost  
+FROM  
+  film  
+WHERE  
+  language_id  = 5  
+  AND  LENGTH  = 70;
+SELECT title FROM film WHERE language_id > 5 AND length > 70;
+
+SELECT  
+  title  
+FROM  
+  film  
+WHERE  
+  language_id  > 5  
+  AND  LENGTH  > 70;
+SELECT * FROM film WHERE length = 100 and title = 'xyz' ORDER BY release_year;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  = 100  
+  AND  title  = 'xyz' 
+ORDER BY  
+  release_year;
+SELECT * FROM film WHERE length > 100 and title = 'xyz' ORDER BY release_year;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  > 100  
+  AND  title  = 'xyz' 
+ORDER BY  
+  release_year;
+SELECT * FROM film WHERE length > 100 ORDER BY release_year;
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  LENGTH  > 100  
+ORDER BY  
+  release_year;
+SELECT * FROM city a INNER JOIN country b ON a.country_id=b.country_id;
+
+SELECT  
+  * 
+FROM  
+  city  a  
+  INNER JOIN  country  b  ON  a. country_id= b. country_id;
+SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id;
+
+SELECT  
+  * 
+FROM  
+  city  a  
+  LEFT JOIN  country  b  ON  a. country_id= b. country_id;
+SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id;
+
+SELECT  
+  * 
+FROM  
+  city  a  
+  RIGHT JOIN  country  b  ON  a. country_id= b. country_id;
+SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id WHERE b.last_update IS NULL;
+
+SELECT  
+  * 
+FROM  
+  city  a  
+  LEFT JOIN  country  b  ON  a. country_id= b. country_id  
+WHERE  
+  b. last_update  IS  NULL;
+SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id WHERE a.last_update IS NULL;
+
+SELECT  
+  * 
+FROM  
+  city  a  
+  RIGHT JOIN  country  b  ON  a. country_id= b. country_id  
+WHERE  
+  a. last_update  IS  NULL;
+SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id UNION SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id;
+
+SELECT  
+  * 
+FROM  
+  city  a  
+  LEFT JOIN  country  b  ON  a. country_id= b. country_id  
+UNION  
+SELECT  
+  * 
+FROM  
+  city  a  
+  RIGHT JOIN  country  b  ON  a. country_id= b. country_id;
+SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id WHERE a.last_update IS NULL UNION SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id WHERE b.last_update IS NULL;
+
+SELECT  
+  * 
+FROM  
+  city  a  
+  RIGHT JOIN  country  b  ON  a. country_id= b. country_id  
+WHERE  
+  a. last_update  IS  NULL  
+UNION  
+SELECT  
+  * 
+FROM  
+  city  a  
+  LEFT JOIN  country  b  ON  a. country_id= b. country_id  
+WHERE  
+  b. last_update  IS  NULL;
+SELECT country_id, last_update FROM city NATURAL JOIN country;
+
+SELECT  
+  country_id, last_update  
+FROM  
+  city  NATURAL  
+  JOIN  country;
+SELECT country_id, last_update FROM city NATURAL LEFT JOIN country;
+
+SELECT  
+  country_id, last_update  
+FROM  
+  city  NATURAL  
+  LEFT JOIN  country;
+SELECT country_id, last_update FROM city NATURAL RIGHT JOIN country;
+
+SELECT  
+  country_id, last_update  
+FROM  
+  city  NATURAL  
+  RIGHT JOIN  country;
+SELECT a.country_id, a.last_update FROM city a STRAIGHT_JOIN country b ON a.country_id=b.country_id;
+
+SELECT  
+  a. country_id, a. last_update  
+FROM  
+  city  a  STRAIGHT_JOIN  country  b  ON  a. country_id= b. country_id;
+SELECT d.deptno,d.dname,d.loc FROM scott.dept d WHERE d.deptno IN  (SELECT e.deptno FROM scott.emp e);
+
+SELECT  
+  d. deptno, d. dname, d. loc  
+FROM  
+  scott. dept  d  
+WHERE  
+  d. deptno  IN  (
+SELECT  
+  e. deptno  
+FROM  
+  scott. emp  e);
+SELECT visitor_id, url FROM (SELECT id FROM log WHERE ip="123.45.67.89" order by tsdesc limit 50, 10) I JOIN log ON (I.id=log.id) JOIN url ON (url.id=log.url_id) order by TS desc;
+
+SELECT  
+  visitor_id, url  
+FROM  
+  (
+SELECT  
+  id  
+FROM  
+  LOG  
+WHERE  
+  ip= "123.45.67.89" 
+ORDER BY  
+  tsdesc  
+LIMIT  
+  50, 10) I  
+  JOIN  LOG  ON  (I. id= LOG. id) 
+  JOIN  url  ON  (url. id= LOG. url_id) 
+ORDER BY  
+  TS  desc;
+DELETE city, country FROM city INNER JOIN country using (country_id) WHERE city.city_id = 1;
+DELETE  city, country  
+FROM  
+  city  
+  INNER JOIN  country  using  (country_id) 
+WHERE  
+  city. city_id  = 1;
+DELETE city FROM city LEFT JOIN country ON city.country_id = country.country_id WHERE country.country IS NULL;
+DELETE  city  
+FROM  
+  city  
+  LEFT JOIN  country  ON  city. country_id  = country. country_id  
+WHERE  
+  country. country  IS  NULL;
+DELETE a1, a2 FROM city AS a1 INNER JOIN country AS a2 WHERE a1.country_id=a2.country_id;
+DELETE  a1, a2  
+FROM  
+  city  AS  a1  
+  INNER JOIN  country  AS  a2  
+WHERE  
+  a1. country_id= a2. country_id;
+DELETE FROM a1, a2 USING city AS a1 INNER JOIN country AS a2 WHERE a1.country_id=a2.country_id;
+
+DELETE FROM  
+  a1, a2  USING  city  AS  a1  
+  INNER JOIN  country  AS  a2  
+WHERE  
+  a1. country_id= a2. country_id;
+DELETE FROM film WHERE length > 100;
+
+DELETE FROM  
+  film  
+WHERE  
+  LENGTH  > 100;
+UPDATE city INNER JOIN country USING(country_id) SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.city_id=10;
+
+UPDATE  
+  city  
+  INNER JOIN  country  USING( country_id) 
+SET  
+  city. city  = 'Abha', 
+  city. last_update  = '2006-02-15 04:45:25', 
+  country. country  = 'Afghanistan' 
+WHERE  
+  city. city_id= 10;
+UPDATE city INNER JOIN country ON city.country_id = country.country_id INNER JOIN address ON city.city_id = address.city_id SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.city_id=10;
+
+UPDATE  
+  city  
+  INNER JOIN  country  ON  city. country_id  = country. country_id  
+  INNER JOIN  address  ON  city. city_id  = address. city_id  
+SET  
+  city. city  = 'Abha', 
+  city. last_update  = '2006-02-15 04:45:25', 
+  country. country  = 'Afghanistan' 
+WHERE  
+  city. city_id= 10;
+UPDATE city, country SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.country_id = country.country_id AND city.city_id=10;
+
+UPDATE  
+  city, country  
+SET  
+  city. city  = 'Abha', 
+  city. last_update  = '2006-02-15 04:45:25', 
+  country. country  = 'Afghanistan' 
+WHERE  
+  city. country_id  = country. country_id  
+  AND  city. city_id= 10;
+UPDATE film SET length = 10 WHERE language_id = 20;
+
+UPDATE  
+  film  
+SET  
+  LENGTH  = 10  
+WHERE  
+  language_id  = 20;
+INSERT INTO city (country_id) SELECT country_id FROM country;
+INSERT  INTO  city  (country_id) 
+SELECT  
+  country_id  
+FROM  
+  country;
+INSERT INTO city (country_id) VALUES (1),(2),(3);
+INSERT  INTO  city  (country_id) 
+VALUES  
+  (1), 
+  (2), 
+  (3);
+INSERT INTO city (country_id) VALUES (10);
+INSERT  INTO  city  (country_id) 
+VALUES  
+  (10);
+INSERT INTO city (country_id) SELECT 10 FROM DUAL;
+INSERT  INTO  city  (country_id) 
+SELECT  
+  10  
+FROM  
+  DUAL;
+REPLACE INTO city (country_id) SELECT country_id FROM country;
+REPLACE  INTO  city  (country_id) 
+SELECT  
+  country_id  
+FROM  
+  country;
+REPLACE INTO city (country_id) VALUES (1),(2),(3);
+REPLACE  INTO  city  (country_id) 
+VALUES  
+  (1), 
+  (2), 
+  (3);
+REPLACE INTO city (country_id) VALUES (10);
+REPLACE  INTO  city  (country_id) 
+VALUES  
+  (10);
+REPLACE INTO city (country_id) SELECT 10 FROM DUAL;
+REPLACE  INTO  city  (country_id) 
+SELECT  
+  10  
+FROM  
+  DUAL;
+SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM  film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film;
+
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  (
+SELECT  
+  film_id  
+FROM  
+  film
+) film
+) film
+) film
+) film
+) film
+) film
+) film
+) film
+) film
+) film
+) film
+) film
+) film
+) film
+) film
+) film;
+SELECT * FROM film WHERE language_id = (SELECT language_id FROM language LIMIT 1);
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  language_id  = (
+SELECT  
+  language_id  
+FROM  
+  language  
+LIMIT  
+  1);
+SELECT * FROM city i left JOIN country o ON i.city_id=o.country_id union SELECT * FROM city i right JOIN country o ON i.city_id=o.country_id;
+
+SELECT  
+  * 
+FROM  
+  city  i  
+  LEFT JOIN  country  o  ON  i. city_id= o. country_id  
+UNION  
+SELECT  
+  * 
+FROM  
+  city  i  
+  RIGHT JOIN  country  o  ON  i. city_id= o. country_id;
+SELECT * FROM (SELECT * FROM actor WHERE last_update='2006-02-15 04:34:33' and last_name='CHASE') t WHERE last_update='2006-02-15 04:34:33' and last_name='CHASE' GROUP BY first_name;
+
+SELECT  
+  * 
+FROM  
+  (
+SELECT  
+  * 
+FROM  
+  actor  
+WHERE  
+  last_update= '2006-02-15 04:34:33' 
+  AND  last_name= 'CHASE'
+) t  
+WHERE  
+  last_update= '2006-02-15 04:34:33' 
+  AND  last_name= 'CHASE' 
+GROUP BY  
+  first_name;
+SELECT * FROM city i left JOIN country o ON i.city_id=o.country_id union SELECT * FROM city i right JOIN country o ON i.city_id=o.country_id;
+
+SELECT  
+  * 
+FROM  
+  city  i  
+  LEFT JOIN  country  o  ON  i. city_id= o. country_id  
+UNION  
+SELECT  
+  * 
+FROM  
+  city  i  
+  RIGHT JOIN  country  o  ON  i. city_id= o. country_id;
+SELECT * FROM city i left JOIN country o ON i.city_id=o.country_id WHERE o.country_id is null union SELECT * FROM city i right JOIN country o ON i.city_id=o.country_id WHERE i.city_id is null;
+
+SELECT  
+  * 
+FROM  
+  city  i  
+  LEFT JOIN  country  o  ON  i. city_id= o. country_id  
+WHERE  
+  o. country_id  is  null  
+UNION  
+SELECT  
+  * 
+FROM  
+  city  i  
+  RIGHT JOIN  country  o  ON  i. city_id= o. country_id  
+WHERE  
+  i. city_id  is  null;
+SELECT first_name,last_name,email FROM customer STRAIGHT_JOIN address ON customer.address_id=address.address_id;
+
+SELECT  
+  first_name, last_name, email  
+FROM  
+  customer  STRAIGHT_JOIN  address  ON  customer. address_id= address. address_id;
+SELECT ID,name FROM (SELECT address FROM customer_list WHERE SID=1 order by phone limit 50,10) a JOIN customer_list l ON (a.address=l.address) JOIN city c ON (c.city=l.city) order by phone desc;
+
+SELECT  
+  ID, name  
+FROM  
+  (
+SELECT  
+  address  
+FROM  
+  customer_list  
+WHERE  
+  SID= 1  
+ORDER BY  
+  phone  
+LIMIT  
+  50, 10) a  
+  JOIN  customer_list  l  ON  (a. address= l. address) 
+  JOIN  city  c  ON  (c. city= l. city) 
+ORDER BY  
+  phone  desc;
+SELECT * FROM film WHERE date(last_update)='2006-02-15';
+
+SELECT  
+  * 
+FROM  
+  film  
+WHERE  
+  DATE( last_update) = '2006-02-15';
+SELECT last_update FROM film GROUP BY date(last_update);
+
+SELECT  
+  last_update  
+FROM  
+  film  
+GROUP BY  
+  DATE( last_update);
+SELECT last_update FROM film order by date(last_update);
+
+SELECT  
+  last_update  
+FROM  
+  film  
+ORDER BY  
+  DATE( last_update);
+SELECT description FROM film WHERE description IN('NEWS','asd') GROUP BY description;
+
+SELECT  
+  description  
+FROM  
+  film  
+WHERE  
+  description  IN( 'NEWS', 
+  'asd'
+) 
+GROUP BY  
+  description;
+alter table address add index idx_city_id(city_id);
+
+ALTER TABLE  
+  address  
+ADD  
+  index  idx_city_id( city_id);
+alter table inventory add index `idx_store_film` (`store_id`,`film_id`);
+
+ALTER TABLE  
+  inventory  
+ADD  
+  index  `idx_store_film` (
+    `store_id`, `film_id`);
+alter table inventory add index `idx_store_film` (`store_id`,`film_id`),add index `idx_store_film` (`store_id`,`film_id`),add index `idx_store_film` (`store_id`,`film_id`);
+
+ALTER TABLE  
+  inventory  
+ADD  
+  index  `idx_store_film` (
+    `store_id`, `film_id`), 
+      ADD  
+      index  `idx_store_film` (
+        `store_id`, `film_id`), 
+              ADD  
+          index  `idx_store_film` (
+            `store_id`, `film_id`);
diff --git a/ast/tidb.go b/ast/tidb.go
new file mode 100644
index 00000000..d93e23b6
--- /dev/null
+++ b/ast/tidb.go
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ast
+
+import (
+	"github.com/XiaoMi/soar/common"
+
+	"github.com/kr/pretty"
+	"github.com/pingcap/tidb/ast"
+	"github.com/pingcap/tidb/parser"
+)
+
+// TiParse TiDB 语法解析
+func TiParse(sql, charset, collation string) ([]ast.StmtNode, error) {
+	p := parser.New()
+	return p.Parse(sql, charset, collation)
+}
+
+// PrintPrettyStmtNode 打印TiParse语法树
+func PrintPrettyStmtNode(sql, charset, collation string) {
+	tree, err := TiParse(sql, charset, collation)
+	if err != nil {
+		common.Log.Warning(err.Error())
+	} else {
+		_, err = pretty.Println(tree)
+		common.LogIfWarn(err, "")
+	}
+}
+
// TiVisitor adapts a pair of callback functions to the TiDB ast.Visitor
// interface, letting callers walk a parse tree without defining a new
// visitor type.
type TiVisitor struct {
	// EnterFunc is invoked for each node before its children; its result
	// is returned as Enter's skip flag.
	EnterFunc func(node ast.Node) bool
	// LeaveFunc is invoked for each node after its children; its result
	// is returned as Leave's ok flag.
	LeaveFunc func(node ast.Node) bool
}
+
+// Enter TODO
+func (visitor *TiVisitor) Enter(n ast.Node) (node ast.Node, skip bool) {
+	skip = visitor.EnterFunc(n)
+	return
+}
+
+// Leave TODO
+func (visitor *TiVisitor) Leave(n ast.Node) (node ast.Node, ok bool) {
+	ok = visitor.LeaveFunc(n)
+	return
+}
diff --git a/ast/token.go b/ast/token.go
new file mode 100644
index 00000000..43aa7c45
--- /dev/null
+++ b/ast/token.go
@@ -0,0 +1,1009 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ast
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode"
+
+	"vitess.io/vitess/go/vt/sqlparser"
+)
+
// Token type identifiers, one per lexical class produced by the
// tokenizer. Values are sequential starting at 0 and must not be
// reordered: they match the explicit numbering of the original list.
const (
	TokenTypeWhitespace = iota // 0
	TokenTypeWord              // 1
	TokenTypeQuote             // 2
	TokenTypeBacktickQuote     // 3
	TokenTypeReserved          // 4
	TokenTypeReservedToplevel  // 5
	TokenTypeReservedNewline   // 6
	TokenTypeBoundary          // 7
	TokenTypeComment           // 8
	TokenTypeBlockComment      // 9
	TokenTypeNumber            // 10
	TokenTypeError             // 11
	TokenTypeVariable          // 12
)
+
+var maxCachekeySize = 15
+var cacheHits int
+var cacheMisses int
+var tokenCache map[string]Token
+
// tokenBoudaries lists the single-character punctuation/operator tokens
// used to build regBoundariesString in init.
// NOTE(review): name is a typo for "tokenBoundaries"; kept as-is since it
// may be referenced elsewhere in the package.
var tokenBoudaries = []string{",", ";", ":", ")", "(", ".", "=", "<", ">", "+", "-", "*", "/", "!", "^", "%", "|", "&", "#"}
+
// tokenReserved lists MySQL reserved words; it is used to build
// regReservedString in init. Keywords that start a clause or force a
// newline live in tokenReservedTopLevel / tokenReservedNewLine instead.
var tokenReserved = []string{
	"ACCESSIBLE", "ACTION", "AGAINST", "AGGREGATE", "ALGORITHM", "ALL", "ALTER", "ANALYSE", "ANALYZE", "AS", "ASC",
	"AUTOCOMMIT", "AUTO_INCREMENT", "BACKUP", "BEGIN", "BETWEEN", "BINLOG", "BOTH", "CASCADE", "CASE", "CHANGE", "CHANGED", "CHARACTER SET",
	"CHARSET", "CHECK", "CHECKSUM", "COLLATE", "COLLATION", "COLUMN", "COLUMNS", "COMMENT", "COMMIT", "COMMITTED", "COMPRESSED", "CONCURRENT",
	"CONSTRAINT", "CONTAINS", "CONVERT", "CREATE", "CROSS", "CURRENT_TIMESTAMP", "DATABASE", "DATABASES", "DAY", "DAY_HOUR", "DAY_MINUTE",
	"DAY_SECOND", "DEFAULT", "DEFINER", "DELAYED", "DELETE", "DESC", "DESCRIBE", "DETERMINISTIC", "DISTINCT", "DISTINCTROW", "DIV",
	"DO", "DUMPFILE", "DUPLICATE", "DYNAMIC", "ELSE", "ENCLOSED", "END", "ENGINE", "ENGINE_TYPE", "ENGINES", "ESCAPE", "ESCAPED", "EVENTS", "EXEC",
	"EXECUTE", "EXISTS", "EXPLAIN", "EXTENDED", "FAST", "FIELDS", "FILE", "FIRST", "FIXED", "FLUSH", "FOR", "FORCE", "FOREIGN", "FULL", "FULLTEXT",
	"FUNCTION", "GLOBAL", "GRANT", "GRANTS", "GROUP_CONCAT", "HEAP", "HIGH_PRIORITY", "HOSTS", "HOUR", "HOUR_MINUTE",
	"HOUR_SECOND", "IDENTIFIED", "IF", "IFNULL", "IGNORE", "IN", "INDEX", "INDEXES", "INFILE", "INSERT", "INSERT_ID", "INSERT_METHOD", "INTERVAL",
	"INTO", "INVOKER", "IS", "ISOLATION", "KEY", "KEYS", "KILL", "LAST_INSERT_ID", "LEADING", "LEVEL", "LIKE", "LINEAR",
	"LINES", "LOAD", "LOCAL", "LOCK", "LOCKS", "LOGS", "LOW_PRIORITY", "MARIA", "MASTER", "MASTER_CONNECT_RETRY", "MASTER_HOST", "MASTER_LOG_FILE",
	"MATCH", "MAX_CONNECTIONS_PER_HOUR", "MAX_QUERIES_PER_HOUR", "MAX_ROWS", "MAX_UPDATES_PER_HOUR", "MAX_USER_CONNECTIONS",
	"MEDIUM", "MERGE", "MINUTE", "MINUTE_SECOND", "MIN_ROWS", "MODE", "MODIFY",
	"MONTH", "MRG_MYISAM", "MYISAM", "NAMES", "NATURAL", "NOT", "NOW()", "NULL", "OFFSET", "ON", "OPEN", "OPTIMIZE", "OPTION", "OPTIONALLY",
	"ON UPDATE", "ON DELETE", "OUTFILE", "PACK_KEYS", "PAGE", "PARTIAL", "PARTITION", "PARTITIONS", "PASSWORD", "PRIMARY", "PRIVILEGES", "PROCEDURE",
	"PROCESS", "PROCESSLIST", "PURGE", "QUICK", "RANGE", "RAID0", "RAID_CHUNKS", "RAID_CHUNKSIZE", "RAID_TYPE", "READ", "READ_ONLY",
	"READ_WRITE", "REFERENCES", "REGEXP", "RELOAD", "RENAME", "REPAIR", "REPEATABLE", "REPLACE", "REPLICATION", "RESET", "RESTORE", "RESTRICT",
	"RETURN", "RETURNS", "REVOKE", "RLIKE", "ROLLBACK", "ROW", "ROWS", "ROW_FORMAT", "SECOND", "SECURITY", "SEPARATOR",
	"SERIALIZABLE", "SESSION", "SHARE", "SHOW", "SHUTDOWN", "SLAVE", "SONAME", "SOUNDS", "SQL", "SQL_AUTO_IS_NULL", "SQL_BIG_RESULT",
	"SQL_BIG_SELECTS", "SQL_BIG_TABLES", "SQL_BUFFER_RESULT", "SQL_CALC_FOUND_ROWS", "SQL_LOG_BIN", "SQL_LOG_OFF", "SQL_LOG_UPDATE",
	"SQL_LOW_PRIORITY_UPDATES", "SQL_MAX_JOIN_SIZE", "SQL_QUOTE_SHOW_CREATE", "SQL_SAFE_UPDATES", "SQL_SELECT_LIMIT", "SQL_SLAVE_SKIP_COUNTER",
	"SQL_SMALL_RESULT", "SQL_WARNINGS", "SQL_CACHE", "SQL_NO_CACHE", "START", "STARTING", "STATUS", "STOP", "STORAGE",
	"STRAIGHT_JOIN", "STRING", "STRIPED", "SUPER", "TABLE", "TABLES", "TEMPORARY", "TERMINATED", "THEN", "TO", "TRAILING", "TRANSACTIONAL", "TRUE",
	"TRUNCATE", "TYPE", "TYPES", "UNCOMMITTED", "UNIQUE", "UNLOCK", "UNSIGNED", "USAGE", "USE", "USING", "VARIABLES",
	"VIEW", "WHEN", "WITH", "WORK", "WRITE", "YEAR_MONTH",
}
+
// tokenReservedTopLevel lists keywords that open a top-level clause
// (SELECT, FROM, WHERE, ...); it is used to build regResrvedToplevelString
// in init. Note "UNION ALL" precedes "UNION" so the longer alternative
// wins in the regexp alternation.
var tokenReservedTopLevel = []string{
	"SELECT", "FROM", "WHERE", "SET", "ORDER BY", "GROUP BY", "LIMIT", "DROP",
	"VALUES", "UPDATE", "HAVING", "ADD", "AFTER", "ALTER TABLE", "DELETE FROM", "UNION ALL", "UNION", "EXCEPT", "INTERSECT",
}
+
// tokenFunction lists MySQL built-in function names; it is used to build
// regFunctionString in init.
var tokenFunction = []string{
	"ABS", "ACOS", "ADDDATE", "ADDTIME", "AES_DECRYPT", "AES_ENCRYPT", "AREA", "ASBINARY", "ASCII", "ASIN", "ASTEXT", "ATAN", "ATAN2",
	"AVG", "BDMPOLYFROMTEXT", "BDMPOLYFROMWKB", "BDPOLYFROMTEXT", "BDPOLYFROMWKB", "BENCHMARK", "BIN", "BIT_AND", "BIT_COUNT", "BIT_LENGTH",
	"BIT_OR", "BIT_XOR", "BOUNDARY", "BUFFER", "CAST", "CEIL", "CEILING", "CENTROID", "CHAR", "CHARACTER_LENGTH", "CHARSET", "CHAR_LENGTH",
	"COALESCE", "COERCIBILITY", "COLLATION", "COMPRESS", "CONCAT", "CONCAT_WS", "CONNECTION_ID", "CONTAINS", "CONV", "CONVERT", "CONVERT_TZ",
	"CONVEXHULL", "COS", "COT", "COUNT", "CRC32", "CROSSES", "CURDATE", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER",
	"CURTIME", "DATABASE", "DATE", "DATEDIFF", "DATE_ADD", "DATE_DIFF", "DATE_FORMAT", "DATE_SUB", "DAY", "DAYNAME", "DAYOFMONTH", "DAYOFWEEK",
	"DAYOFYEAR", "DECODE", "DEFAULT", "DEGREES", "DES_DECRYPT", "DES_ENCRYPT", "DIFFERENCE", "DIMENSION", "DISJOINT", "DISTANCE", "ELT", "ENCODE",
	"ENCRYPT", "ENDPOINT", "ENVELOPE", "EQUALS", "EXP", "EXPORT_SET", "EXTERIORRING", "EXTRACT", "EXTRACTVALUE", "FIELD", "FIND_IN_SET", "FLOOR",
	"FORMAT", "FOUND_ROWS", "FROM_DAYS", "FROM_UNIXTIME", "GEOMCOLLFROMTEXT", "GEOMCOLLFROMWKB", "GEOMETRYCOLLECTION", "GEOMETRYCOLLECTIONFROMTEXT",
	"GEOMETRYCOLLECTIONFROMWKB", "GEOMETRYFROMTEXT", "GEOMETRYFROMWKB", "GEOMETRYN", "GEOMETRYTYPE", "GEOMFROMTEXT", "GEOMFROMWKB", "GET_FORMAT",
	"GET_LOCK", "GLENGTH", "GREATEST", "GROUP_CONCAT", "GROUP_UNIQUE_USERS", "HEX", "HOUR", "IF", "IFNULL", "INET_ATON", "INET_NTOA", "INSERT", "INSTR",
	"INTERIORRINGN", "INTERSECTION", "INTERSECTS", "INTERVAL", "ISCLOSED", "ISEMPTY", "ISNULL", "ISRING", "ISSIMPLE", "IS_FREE_LOCK", "IS_USED_LOCK",
	"LAST_DAY", "LAST_INSERT_ID", "LCASE", "LEAST", "LEFT", "LENGTH", "LINEFROMTEXT", "LINEFROMWKB", "LINESTRING", "LINESTRINGFROMTEXT", "LINESTRINGFROMWKB",
	"LN", "LOAD_FILE", "LOCALTIME", "LOCALTIMESTAMP", "LOCATE", "LOG", "LOG10", "LOG2", "LOWER", "LPAD", "LTRIM", "MAKEDATE", "MAKETIME", "MAKE_SET",
	"MASTER_POS_WAIT", "MAX", "MBRCONTAINS", "MBRDISJOINT", "MBREQUAL", "MBRINTERSECTS", "MBROVERLAPS", "MBRTOUCHES", "MBRWITHIN", "MD5", "MICROSECOND",
	"MID", "MIN", "MINUTE", "MLINEFROMTEXT", "MLINEFROMWKB", "MOD", "MONTH", "MONTHNAME", "MPOINTFROMTEXT", "MPOINTFROMWKB", "MPOLYFROMTEXT", "MPOLYFROMWKB",
	"MULTILINESTRING", "MULTILINESTRINGFROMTEXT", "MULTILINESTRINGFROMWKB", "MULTIPOINT", "MULTIPOINTFROMTEXT", "MULTIPOINTFROMWKB", "MULTIPOLYGON",
	"MULTIPOLYGONFROMTEXT", "MULTIPOLYGONFROMWKB", "NAME_CONST", "NULLIF", "NUMGEOMETRIES", "NUMINTERIORRINGS", "NUMPOINTS", "OCT", "OCTET_LENGTH",
	"OLD_PASSWORD", "ORD", "OVERLAPS", "PASSWORD", "PERIOD_ADD", "PERIOD_DIFF", "PI", "POINT", "POINTFROMTEXT", "POINTFROMWKB", "POINTN", "POINTONSURFACE",
	"POLYFROMTEXT", "POLYFROMWKB", "POLYGON", "POLYGONFROMTEXT", "POLYGONFROMWKB", "POSITION", "POW", "POWER", "QUARTER", "QUOTE", "RADIANS", "RAND",
	"RELATED", "RELEASE_LOCK", "REPEAT", "REPLACE", "REVERSE", "RIGHT", "ROUND", "ROW_COUNT", "RPAD", "RTRIM", "SCHEMA", "SECOND", "SEC_TO_TIME",
	"SESSION_USER", "SHA", "SHA1", "SIGN", "SIN", "SLEEP", "SOUNDEX", "SPACE", "SQRT", "SRID", "STARTPOINT", "STD", "STDDEV", "STDDEV_POP", "STDDEV_SAMP",
	"STRCMP", "STR_TO_DATE", "SUBDATE", "SUBSTR", "SUBSTRING", "SUBSTRING_INDEX", "SUBTIME", "SUM", "SYMDIFFERENCE", "SYSDATE", "SYSTEM_USER", "TAN",
	"TIME", "TIMEDIFF", "TIMESTAMP", "TIMESTAMPADD", "TIMESTAMPDIFF", "TIME_FORMAT", "TIME_TO_SEC", "TOUCHES", "TO_DAYS", "TRIM", "TRUNCATE", "UCASE",
	"UNCOMPRESS", "UNCOMPRESSED_LENGTH", "UNHEX", "UNIQUE_USERS", "UNIX_TIMESTAMP", "UPDATEXML", "UPPER", "USER", "UTC_DATE", "UTC_TIME", "UTC_TIMESTAMP",
	"UUID", "VARIANCE", "VAR_POP", "VAR_SAMP", "VERSION", "WEEK", "WEEKDAY", "WEEKOFYEAR", "WITHIN", "X", "Y", "YEAR", "YEARWEEK",
}
+
// tokenReservedNewLine lists keywords that are placed on their own line
// when formatting (joins and boolean connectives); it is used to build
// regReservedNewlineString in init. Longer join variants come first so
// they win in the regexp alternation.
var tokenReservedNewLine = []string{
	"LEFT OUTER JOIN", "RIGHT OUTER JOIN", "LEFT JOIN", "RIGHT JOIN", "OUTER JOIN", "INNER JOIN", "JOIN", "XOR", "OR", "AND",
}
+
// Regexp alternation fragments, one per token class. Each is populated
// exactly once by init as "(tok1|tok2|...)" with every token
// regexp-quoted, and is empty until init runs.
var (
	regBoundariesString      string
	regResrvedToplevelString string
	regReservedNewlineString string
	regReservedString        string
	regFunctionString        string
)
+
+func init() {
+	var regs []string
+	for _, reg := range tokenBoudaries {
+		regs = append(regs, regexp.QuoteMeta(reg))
+	}
+	regBoundariesString = "(" + strings.Join(regs, "|") + ")"
+
+	regs = make([]string, 0)
+	for _, reg := range tokenReservedTopLevel {
+		regs = append(regs, regexp.QuoteMeta(reg))
+	}
+	regResrvedToplevelString = "(" + strings.Join(regs, "|") + ")"
+
+	regs = make([]string, 0)
+	for _, reg := range tokenReservedNewLine {
+		regs = append(regs, regexp.QuoteMeta(reg))
+	}
+	regReservedNewlineString = "(" + strings.Join(regs, "|") + ")"
+
+	regs = make([]string, 0)
+	for _, reg := range tokenReserved {
+		regs = append(regs, regexp.QuoteMeta(reg))
+	}
+	regReservedString = "(" + strings.Join(regs, "|") + ")"
+
+	regs = make([]string, 0)
+	for _, reg := range tokenFunction {
+		regs = append(regs, regexp.QuoteMeta(reg))
+	}
+	regFunctionString = "(" + strings.Join(regs, "|") + ")"
+}
+
+// TokenString maps sqlparser token ids to their printable SQL text.
+// Tokens without a stable textual form (LEX_ERROR, COMMENT, UNUSED) map to "".
+// NOTE: LE is "<=", DOUBLE is "double", and the JSON path operators are
+// "->" (JSON_EXTRACT_OP) and "->>" (JSON_UNQUOTE_EXTRACT_OP), matching the
+// vitess lexer.
+var TokenString = map[int]string{
+	sqlparser.LEX_ERROR:               "",
+	sqlparser.UNION:                   "union",
+	sqlparser.SELECT:                  "select",
+	sqlparser.STREAM:                  "stream",
+	sqlparser.INSERT:                  "insert",
+	sqlparser.UPDATE:                  "update",
+	sqlparser.DELETE:                  "delete",
+	sqlparser.FROM:                    "from",
+	sqlparser.WHERE:                   "where",
+	sqlparser.GROUP:                   "group",
+	sqlparser.HAVING:                  "having",
+	sqlparser.ORDER:                   "order",
+	sqlparser.BY:                      "by",
+	sqlparser.LIMIT:                   "limit",
+	sqlparser.OFFSET:                  "offset",
+	sqlparser.FOR:                     "for",
+	sqlparser.ALL:                     "all",
+	sqlparser.DISTINCT:                "distinct",
+	sqlparser.AS:                      "as",
+	sqlparser.EXISTS:                  "exists",
+	sqlparser.ASC:                     "asc",
+	sqlparser.DESC:                    "desc",
+	sqlparser.INTO:                    "into",
+	sqlparser.DUPLICATE:               "duplicate",
+	sqlparser.KEY:                     "key",
+	sqlparser.DEFAULT:                 "default",
+	sqlparser.SET:                     "set",
+	sqlparser.LOCK:                    "lock",
+	sqlparser.KEYS:                    "keys",
+	sqlparser.VALUES:                  "values",
+	sqlparser.LAST_INSERT_ID:          "last_insert_id",
+	sqlparser.NEXT:                    "next",
+	sqlparser.VALUE:                   "value",
+	sqlparser.SHARE:                   "share",
+	sqlparser.MODE:                    "mode",
+	sqlparser.SQL_NO_CACHE:            "sql_no_cache",
+	sqlparser.SQL_CACHE:               "sql_cache",
+	sqlparser.JOIN:                    "join",
+	sqlparser.STRAIGHT_JOIN:           "straight_join",
+	sqlparser.LEFT:                    "left",
+	sqlparser.RIGHT:                   "right",
+	sqlparser.INNER:                   "inner",
+	sqlparser.OUTER:                   "outer",
+	sqlparser.CROSS:                   "cross",
+	sqlparser.NATURAL:                 "natural",
+	sqlparser.USE:                     "use",
+	sqlparser.FORCE:                   "force",
+	sqlparser.ON:                      "on",
+	sqlparser.USING:                   "using",
+	sqlparser.ID:                      "id",
+	sqlparser.HEX:                     "hex",
+	sqlparser.STRING:                  "string",
+	sqlparser.INTEGRAL:                "integral",
+	sqlparser.FLOAT:                   "float",
+	sqlparser.HEXNUM:                  "hexnum",
+	sqlparser.VALUE_ARG:               "?",
+	sqlparser.LIST_ARG:                ":",
+	sqlparser.COMMENT:                 "",
+	sqlparser.COMMENT_KEYWORD:         "comment",
+	sqlparser.BIT_LITERAL:             "bit_literal",
+	sqlparser.NULL:                    "null",
+	sqlparser.TRUE:                    "true",
+	sqlparser.FALSE:                   "false",
+	sqlparser.OR:                      "||",
+	sqlparser.AND:                     "&&",
+	sqlparser.NOT:                     "not",
+	sqlparser.BETWEEN:                 "between",
+	sqlparser.CASE:                    "case",
+	sqlparser.WHEN:                    "when",
+	sqlparser.THEN:                    "then",
+	sqlparser.ELSE:                    "else",
+	sqlparser.END:                     "end",
+	sqlparser.LE:                      "<=",
+	sqlparser.GE:                      ">=",
+	sqlparser.NE:                      "<>",
+	sqlparser.NULL_SAFE_EQUAL:         "<=>",
+	sqlparser.IS:                      "is",
+	sqlparser.LIKE:                    "like",
+	sqlparser.REGEXP:                  "regexp",
+	sqlparser.IN:                      "in",
+	sqlparser.SHIFT_LEFT:              "<<",
+	sqlparser.SHIFT_RIGHT:             ">>",
+	sqlparser.DIV:                     "div",
+	sqlparser.MOD:                     "mod",
+	sqlparser.UNARY:                   "unary",
+	sqlparser.COLLATE:                 "collate",
+	sqlparser.BINARY:                  "binary",
+	sqlparser.UNDERSCORE_BINARY:       "_binary",
+	sqlparser.INTERVAL:                "interval",
+	sqlparser.JSON_EXTRACT_OP:         "->",
+	sqlparser.JSON_UNQUOTE_EXTRACT_OP: "->>",
+	sqlparser.CREATE:                  "create",
+	sqlparser.ALTER:                   "alter",
+	sqlparser.DROP:                    "drop",
+	sqlparser.RENAME:                  "rename",
+	sqlparser.ANALYZE:                 "analyze",
+	sqlparser.ADD:                     "add",
+	sqlparser.SCHEMA:                  "schema",
+	sqlparser.TABLE:                   "table",
+	sqlparser.INDEX:                   "index",
+	sqlparser.VIEW:                    "view",
+	sqlparser.TO:                      "to",
+	sqlparser.IGNORE:                  "ignore",
+	sqlparser.IF:                      "if",
+	sqlparser.UNIQUE:                  "unique",
+	sqlparser.PRIMARY:                 "primary",
+	sqlparser.COLUMN:                  "column",
+	sqlparser.CONSTRAINT:              "constraint",
+	sqlparser.SPATIAL:                 "spatial",
+	sqlparser.FULLTEXT:                "fulltext",
+	sqlparser.FOREIGN:                 "foreign",
+	sqlparser.SHOW:                    "show",
+	sqlparser.DESCRIBE:                "describe",
+	sqlparser.EXPLAIN:                 "explain",
+	sqlparser.DATE:                    "date",
+	sqlparser.ESCAPE:                  "escape",
+	sqlparser.REPAIR:                  "repair",
+	sqlparser.OPTIMIZE:                "optimize",
+	sqlparser.TRUNCATE:                "truncate",
+	sqlparser.MAXVALUE:                "maxvalue",
+	sqlparser.PARTITION:               "partition",
+	sqlparser.REORGANIZE:              "reorganize",
+	sqlparser.LESS:                    "less",
+	sqlparser.THAN:                    "than",
+	sqlparser.PROCEDURE:               "procedure",
+	sqlparser.TRIGGER:                 "trigger",
+	sqlparser.VINDEX:                  "vindex",
+	sqlparser.VINDEXES:                "vindexes",
+	sqlparser.STATUS:                  "status",
+	sqlparser.VARIABLES:               "variables",
+	sqlparser.BEGIN:                   "begin",
+	sqlparser.START:                   "start",
+	sqlparser.TRANSACTION:             "transaction",
+	sqlparser.COMMIT:                  "commit",
+	sqlparser.ROLLBACK:                "rollback",
+	sqlparser.BIT:                     "bit",
+	sqlparser.TINYINT:                 "tinyint",
+	sqlparser.SMALLINT:                "smallint",
+	sqlparser.MEDIUMINT:               "mediumint",
+	sqlparser.INT:                     "int",
+	sqlparser.INTEGER:                 "integer",
+	sqlparser.BIGINT:                  "bigint",
+	sqlparser.INTNUM:                  "intnum",
+	sqlparser.REAL:                    "real",
+	sqlparser.DOUBLE:                  "double",
+	sqlparser.FLOAT_TYPE:              "float_type",
+	sqlparser.DECIMAL:                 "decimal",
+	sqlparser.NUMERIC:                 "numeric",
+	sqlparser.TIME:                    "time",
+	sqlparser.TIMESTAMP:               "timestamp",
+	sqlparser.DATETIME:                "datetime",
+	sqlparser.YEAR:                    "year",
+	sqlparser.CHAR:                    "char",
+	sqlparser.VARCHAR:                 "varchar",
+	sqlparser.BOOL:                    "bool",
+	sqlparser.CHARACTER:               "character",
+	sqlparser.VARBINARY:               "varbinary",
+	sqlparser.NCHAR:                   "nchar",
+	sqlparser.TEXT:                    "text",
+	sqlparser.TINYTEXT:                "tinytext",
+	sqlparser.MEDIUMTEXT:              "mediumtext",
+	sqlparser.LONGTEXT:                "longtext",
+	sqlparser.BLOB:                    "blob",
+	sqlparser.TINYBLOB:                "tinyblob",
+	sqlparser.MEDIUMBLOB:              "mediumblob",
+	sqlparser.LONGBLOB:                "longblob",
+	sqlparser.JSON:                    "json",
+	sqlparser.ENUM:                    "enum",
+	sqlparser.GEOMETRY:                "geometry",
+	sqlparser.POINT:                   "point",
+	sqlparser.LINESTRING:              "linestring",
+	sqlparser.POLYGON:                 "polygon",
+	sqlparser.GEOMETRYCOLLECTION:      "geometrycollection",
+	sqlparser.MULTIPOINT:              "multipoint",
+	sqlparser.MULTILINESTRING:         "multilinestring",
+	sqlparser.MULTIPOLYGON:            "multipolygon",
+	sqlparser.NULLX:                   "nullx",
+	sqlparser.AUTO_INCREMENT:          "auto_increment",
+	sqlparser.APPROXNUM:               "approxnum",
+	sqlparser.SIGNED:                  "signed",
+	sqlparser.UNSIGNED:                "unsigned",
+	sqlparser.ZEROFILL:                "zerofill",
+	sqlparser.DATABASES:               "databases",
+	sqlparser.TABLES:                  "tables",
+	sqlparser.VITESS_KEYSPACES:        "vitess_keyspaces",
+	sqlparser.VITESS_SHARDS:           "vitess_shards",
+	sqlparser.VITESS_TABLETS:          "vitess_tablets",
+	sqlparser.VSCHEMA_TABLES:          "vschema_tables",
+	sqlparser.NAMES:                   "names",
+	sqlparser.CHARSET:                 "charset",
+	sqlparser.GLOBAL:                  "global",
+	sqlparser.SESSION:                 "session",
+	sqlparser.CURRENT_TIMESTAMP:       "current_timestamp",
+	sqlparser.DATABASE:                "database",
+	sqlparser.CURRENT_DATE:            "current_date",
+	sqlparser.CURRENT_TIME:            "current_time",
+	sqlparser.LOCALTIME:               "localtime",
+	sqlparser.LOCALTIMESTAMP:          "localtimestamp",
+	sqlparser.UTC_DATE:                "utc_date",
+	sqlparser.UTC_TIME:                "utc_time",
+	sqlparser.UTC_TIMESTAMP:           "utc_timestamp",
+	sqlparser.REPLACE:                 "replace",
+	sqlparser.CONVERT:                 "convert",
+	sqlparser.CAST:                    "cast",
+	sqlparser.SUBSTR:                  "substr",
+	sqlparser.SUBSTRING:               "substring",
+	sqlparser.GROUP_CONCAT:            "group_concat",
+	sqlparser.SEPARATOR:               "separator",
+	sqlparser.MATCH:                   "match",
+	sqlparser.AGAINST:                 "against",
+	sqlparser.BOOLEAN:                 "boolean",
+	sqlparser.LANGUAGE:                "language",
+	sqlparser.WITH:                    "with",
+	sqlparser.QUERY:                   "query",
+	sqlparser.EXPANSION:               "expansion",
+	sqlparser.UNUSED:                  "",
+}
+
+// mySQLKeywords maps lowercase MySQL keywords to their vitess token names.
+// This table is synced from vitess: when vitess adds a new keyword it must
+// also be added here.
+var mySQLKeywords = map[string]string{
+	"add":                "ADD",
+	"against":            "AGAINST",
+	"all":                "ALL",
+	"alter":              "ALTER",
+	"analyze":            "ANALYZE",
+	"and":                "AND",
+	"as":                 "AS",
+	"asc":                "ASC",
+	"auto_increment":     "AUTO_INCREMENT",
+	"begin":              "BEGIN",
+	"between":            "BETWEEN",
+	"bigint":             "BIGINT",
+	"binary":             "BINARY",
+	"_binary":            "UNDERSCORE_BINARY",
+	"bit":                "BIT",
+	"blob":               "BLOB",
+	"bool":               "BOOL",
+	"boolean":            "BOOLEAN",
+	"by":                 "BY",
+	"case":               "CASE",
+	"cast":               "CAST",
+	"char":               "CHAR",
+	"character":          "CHARACTER",
+	"charset":            "CHARSET",
+	"collate":            "COLLATE",
+	"column":             "COLUMN",
+	"comment":            "COMMENT_KEYWORD",
+	"commit":             "COMMIT",
+	"constraint":         "CONSTRAINT",
+	"convert":            "CONVERT",
+	"substr":             "SUBSTR",
+	"substring":          "SUBSTRING",
+	"create":             "CREATE",
+	"cross":              "CROSS",
+	"current_date":       "CURRENT_DATE",
+	"current_time":       "CURRENT_TIME",
+	"current_timestamp":  "CURRENT_TIMESTAMP",
+	"database":           "DATABASE",
+	"databases":          "DATABASES",
+	"date":               "DATE",
+	"datetime":           "DATETIME",
+	"decimal":            "DECIMAL",
+	"default":            "DEFAULT",
+	"delete":             "DELETE",
+	"desc":               "DESC",
+	"describe":           "DESCRIBE",
+	"distinct":           "DISTINCT",
+	"div":                "DIV",
+	"double":             "DOUBLE",
+	"drop":               "DROP",
+	"duplicate":          "DUPLICATE",
+	"else":               "ELSE",
+	"end":                "END",
+	"enum":               "ENUM",
+	"escape":             "ESCAPE",
+	"exists":             "EXISTS",
+	"explain":            "EXPLAIN",
+	"expansion":          "EXPANSION",
+	"false":              "FALSE",
+	"float":              "FLOAT_TYPE",
+	"for":                "FOR",
+	"force":              "FORCE",
+	"foreign":            "FOREIGN",
+	"from":               "FROM",
+	"fulltext":           "FULLTEXT",
+	"geometry":           "GEOMETRY",
+	"geometrycollection": "GEOMETRYCOLLECTION",
+	"global":             "GLOBAL",
+	"group":              "GROUP",
+	"group_concat":       "GROUP_CONCAT",
+	"having":             "HAVING",
+	"if":                 "IF",
+	"ignore":             "IGNORE",
+	"in":                 "IN",
+	"index":              "INDEX",
+	"inner":              "INNER",
+	"insert":             "INSERT",
+	"int":                "INT",
+	"integer":            "INTEGER",
+	"interval":           "INTERVAL",
+	"into":               "INTO",
+	"is":                 "IS",
+	"join":               "JOIN",
+	"json":               "JSON",
+	"key":                "KEY",
+	"keys":               "KEYS",
+	"key_block_size":     "KEY_BLOCK_SIZE",
+	"language":           "LANGUAGE",
+	"last_insert_id":     "LAST_INSERT_ID",
+	"left":               "LEFT",
+	"less":               "LESS",
+	"like":               "LIKE",
+	"limit":              "LIMIT",
+	"linestring":         "LINESTRING",
+	"localtime":          "LOCALTIME",
+	"localtimestamp":     "LOCALTIMESTAMP",
+	"lock":               "LOCK",
+	"longblob":           "LONGBLOB",
+	"longtext":           "LONGTEXT",
+	"match":              "MATCH",
+	"maxvalue":           "MAXVALUE",
+	"mediumblob":         "MEDIUMBLOB",
+	"mediumint":          "MEDIUMINT",
+	"mediumtext":         "MEDIUMTEXT",
+	"mod":                "MOD",
+	"mode":               "MODE",
+	"multilinestring":    "MULTILINESTRING",
+	"multipoint":         "MULTIPOINT",
+	"multipolygon":       "MULTIPOLYGON",
+	"names":              "NAMES",
+	"natural":            "NATURAL",
+	"nchar":              "NCHAR",
+	"next":               "NEXT",
+	"not":                "NOT",
+	"null":               "NULL",
+	"numeric":            "NUMERIC",
+	"offset":             "OFFSET",
+	"on":                 "ON",
+	"optimize":           "OPTIMIZE",
+	"or":                 "OR",
+	"order":              "ORDER",
+	"outer":              "OUTER",
+	"partition":          "PARTITION",
+	"point":              "POINT",
+	"polygon":            "POLYGON",
+	"primary":            "PRIMARY",
+	"procedure":          "PROCEDURE",
+	"query":              "QUERY",
+	"real":               "REAL",
+	"regexp":             "REGEXP",
+	"rename":             "RENAME",
+	"reorganize":         "REORGANIZE",
+	"repair":             "REPAIR",
+	"replace":            "REPLACE",
+	"right":              "RIGHT",
+	"rlike":              "REGEXP",
+	"rollback":           "ROLLBACK",
+	"schema":             "SCHEMA",
+	"select":             "SELECT",
+	"separator":          "SEPARATOR",
+	"session":            "SESSION",
+	"set":                "SET",
+	"share":              "SHARE",
+	"show":               "SHOW",
+	"signed":             "SIGNED",
+	"smallint":           "SMALLINT",
+	"spatial":            "SPATIAL",
+	"sql_cache":          "SQL_CACHE",
+	"sql_no_cache":       "SQL_NO_CACHE",
+	"start":              "START",
+	"status":             "STATUS",
+	"straight_join":      "STRAIGHT_JOIN",
+	"stream":             "STREAM",
+	"table":              "TABLE",
+	"tables":             "TABLES",
+	"text":               "TEXT",
+	"than":               "THAN",
+	"then":               "THEN",
+	"time":               "TIME",
+	"timestamp":          "TIMESTAMP",
+	"tinyblob":           "TINYBLOB",
+	"tinyint":            "TINYINT",
+	"tinytext":           "TINYTEXT",
+	"to":                 "TO",
+	"transaction":        "TRANSACTION",
+	"trigger":            "TRIGGER",
+	"true":               "TRUE",
+	"truncate":           "TRUNCATE",
+	"union":              "UNION",
+	"unique":             "UNIQUE",
+	"unsigned":           "UNSIGNED",
+	"update":             "UPDATE",
+	"use":                "USE",
+	"using":              "USING",
+	"utc_date":           "UTC_DATE",
+	"utc_time":           "UTC_TIME",
+	"utc_timestamp":      "UTC_TIMESTAMP",
+	"values":             "VALUES",
+	"variables":          "VARIABLES",
+	"varbinary":          "VARBINARY",
+	"varchar":            "VARCHAR",
+	"vindex":             "VINDEX",
+	"vindexes":           "VINDEXES",
+	"view":               "VIEW",
+	"vitess_keyspaces":   "VITESS_KEYSPACES",
+	"vitess_shards":      "VITESS_SHARDS",
+	"vitess_tablets":     "VITESS_TABLETS",
+	"vschema_tables":     "VSCHEMA_TABLES",
+	"when":               "WHEN",
+	"where":              "WHERE",
+	"with":               "WITH",
+	"year":               "YEAR",
+	"zerofill":           "ZEROFILL",
+}
+
+// Token is a single lexical unit of a SQL statement.
+type Token struct {
+	Type int    // token type: a sqlparser token id (Tokenizer) or a TokenType* constant (Tokenize)
+	Val  string // literal text of the token
+	i    int    // NOTE(review): not referenced by the code visible here — confirm purpose before removing
+}
+
+// Tokenizer scans sql with the vitess sqlparser lexer and returns the
+// resulting token stream. Tokens the lexer reports without a value get
+// their text from TokenString (keyword tokens, id > 255) or from the
+// single-character token id itself.
+func Tokenizer(sql string) []Token {
+	var result []Token
+	scanner := sqlparser.NewStringTokenizer(sql)
+	for {
+		typ, val := scanner.Scan()
+		if typ == 0 {
+			break
+		}
+		switch {
+		case val != nil:
+			result = append(result, Token{Type: typ, Val: string(val)})
+		case typ > 255:
+			// keyword/operator token: look up its printable form,
+			// falling back to "" for unknown ids
+			name, known := TokenString[typ]
+			if !known {
+				name = ""
+			}
+			result = append(result, Token{Type: typ, Val: name})
+		default:
+			// single-character token: the id is the character itself
+			result = append(result, Token{Type: typ, Val: fmt.Sprintf("%c", typ)})
+		}
+	}
+	return result
+}
+
+// MysqlEscapeString mimics mysql_real_escape_string: it backslash-escapes
+// the characters MySQL treats specially inside quoted strings
+// (NUL, \n, \r, \, ', " and Ctrl-Z).
+// An empty source is rejected with an error, preserving the original contract.
+// https://github.com/liule/golang_escape
+func MysqlEscapeString(source string) (string, error) {
+	if len(source) == 0 {
+		return "", errors.New("source is null")
+	}
+	// worst case every byte is escaped, so reserve twice the input length
+	dest := make([]byte, 0, len(source)*2)
+	for i := 0; i < len(source); i++ {
+		c := source[i]
+		// escape holds the byte written after the backslash; 0 means "copy as-is"
+		var escape byte
+		switch c {
+		case 0: // NUL must become \0 — it would otherwise truncate the string
+			escape = '0'
+		case '\n': // newline becomes the two characters \n, not a raw line break
+			escape = 'n'
+		case '\r': // carriage return becomes \r
+			escape = 'r'
+		case '\\':
+			escape = '\\'
+		case '\'':
+			escape = '\''
+		case '"':
+			escape = '"'
+		case '\032': // Ctrl-Z (EOF on Windows) becomes \Z
+			escape = 'Z'
+		}
+		if escape != 0 {
+			dest = append(dest, '\\', escape)
+		} else {
+			dest = append(dest, c)
+		}
+	}
+	return string(dest), nil
+}
+
+// IsMysqlKeyword reports whether name — compared case-insensitively and
+// with surrounding whitespace ignored — is a MySQL keyword known to the
+// vitess grammar.
+func IsMysqlKeyword(name string) bool {
+	normalized := strings.ToLower(strings.TrimSpace(name))
+	_, found := mySQLKeywords[normalized]
+	return found
+}
+
+// getNextToken scans the next token from the head of buf.
+// previous is the most recently emitted token; a reserved word directly
+// after "." (as in "mytable.from") is treated as a plain word, not a keyword.
+func getNextToken(buf string, previous Token) Token {
+	// Whitespace. Val is a single space, so the caller advances one
+	// character per whitespace token and runs collapse naturally.
+	whiteSpaceReg := regexp.MustCompile(`^\s+`)
+	if whiteSpaceReg.MatchString(buf) {
+		return Token{
+			Type: TokenTypeWhitespace,
+			Val:  " ",
+		}
+	}
+
+	// Comment (#, --, /**/)
+	// The length guards keep a lone trailing "-" or "/" from panicking.
+	if buf[0] == '#' ||
+		(len(buf) > 1 && buf[0] == '-' && buf[1] == '-') ||
+		(len(buf) > 1 && buf[0] == '/' && buf[1] == '*') {
+		var last int
+		typ := TokenTypeComment
+		if buf[0] == '-' || buf[0] == '#' {
+			// Comment until end of line
+			last = strings.Index(buf, "\n")
+		} else {
+			// Comment until the closing tag, which belongs to the token.
+			// NOTE(review): both comment styles share TokenTypeComment here —
+			// confirm whether a dedicated block-comment type is expected.
+			last = strings.Index(buf[2:], "*/")
+			if last >= 0 {
+				last += 4 // 2 for the "/*" search offset, 2 to include "*/"
+			}
+		}
+		if last < 0 {
+			// Unterminated comment: consume everything that is left.
+			last = len(buf)
+		}
+		return Token{
+			Type: typ,
+			Val:  buf[:last],
+		}
+	}
+
+	// Quoted String
+	if buf[0] == '"' || buf[0] == '\'' || buf[0] == '`' || buf[0] == '[' {
+		var typ int
+		switch buf[0] {
+		case '`', '[':
+			typ = TokenTypeBacktickQuote
+		default:
+			typ = TokenTypeQuote
+		}
+		return Token{
+			Type: typ,
+			Val:  getQuotedString(buf),
+		}
+	}
+
+	// User-defined Variable (@name, @'name', :name ...)
+	if (buf[0] == '@' || buf[0] == ':') && len(buf) > 1 {
+		ret := Token{
+			Type: TokenTypeVariable,
+			Val:  "",
+		}
+
+		if buf[1] == '"' || buf[1] == '\'' || buf[1] == '`' {
+			// If the variable name is quoted
+			ret.Val = string(buf[0]) + getQuotedString(buf[1:])
+		} else {
+			// Non-quoted variable name
+			varReg := regexp.MustCompile(`^(` + string(buf[0]) + `[a-zA-Z0-9\._\$]+)`)
+			if varReg.MatchString(buf) {
+				ret.Val = varReg.FindString(buf)
+			}
+		}
+
+		if ret.Val != "" {
+			return ret
+		}
+	}
+
+	// Number(decimal, binary, hex...)
+	numReg := regexp.MustCompile(`^([0-9]+(\.[0-9]+)?|0x[0-9a-fA-F]+|0b[01]+)($|\s|"'` + "`|" + regBoundariesString + ")")
+	if numReg.MatchString(buf) {
+		return Token{
+			Type: TokenTypeNumber,
+			Val:  numReg.FindString(buf),
+		}
+	}
+
+	// Boundary Character(punctuation and symbols)
+	boundaryReg := regexp.MustCompile(`^(` + regBoundariesString + `)`)
+	if boundaryReg.MatchString(buf) {
+		return Token{
+			Type: TokenTypeBoundary,
+			Val:  boundaryReg.FindString(buf),
+		}
+	}
+	sqlUpper := strings.ToUpper(buf)
+	// A reserved word cannot be preceded by a '.'
+	// this makes it so in "mytable.from", "from" is not considered a reserved word
+	if previous.Val != "." {
+		// Top Level Reserved Word
+		reservedToplevelReg := regexp.MustCompile(`^(` + regResrvedToplevelString + `)($|\s|` + regBoundariesString + `)`)
+		if reservedToplevelReg.MatchString(sqlUpper) {
+			return Token{
+				Type: TokenTypeReservedToplevel,
+				Val:  reservedToplevelReg.FindString(sqlUpper),
+			}
+		}
+
+		// Newline Reserved Word
+		reservedNewlineReg := regexp.MustCompile(`^(` + regReservedNewlineString + `)($|\s|` + regBoundariesString + `)`)
+		if reservedNewlineReg.MatchString(sqlUpper) {
+			return Token{
+				Type: TokenTypeReservedNewline,
+				Val:  reservedNewlineReg.FindString(sqlUpper),
+			}
+		}
+
+		// Other Reserved Word
+		// BUG FIX: the original tested reservedNewlineReg here by mistake,
+		// so ordinary reserved words could never match this branch.
+		reservedReg := regexp.MustCompile(`^(` + regReservedString + `)($|\s|` + regBoundariesString + `)`)
+		if reservedReg.MatchString(sqlUpper) {
+			return Token{
+				Type: TokenTypeReserved,
+				Val:  reservedReg.FindString(sqlUpper),
+			}
+		}
+
+	}
+
+	// function
+	// A function must be succeeded by '('
+	// this makes it so "count(" is considered a function, but "count" alone is not
+	functionReg := regexp.MustCompile(`^(` + regFunctionString + `)($|\s|` + regBoundariesString + `)`)
+	if functionReg.MatchString(sqlUpper) {
+		return Token{
+			Type: TokenTypeReserved,
+			Val:  functionReg.FindString(sqlUpper),
+		}
+	}
+
+	// Non reserved word
+	noReservedReg := regexp.MustCompile(`(.*?)($|\s|["'` + "`]|" + regBoundariesString + `)`)
+	if noReservedReg.MatchString(buf) {
+		return Token{
+			Type: TokenTypeWord,
+			Val:  noReservedReg.FindString(buf),
+		}
+	}
+	return Token{}
+}
+
+// getQuotedString returns the quoted string at the head of buf.
+// It recognizes the following patterns:
+//  1. backtick quoted string using `` to escape
+//  2. double quoted string using "" or \" to escape
+//  3. single quoted string using '' or \' to escape
+// An empty string is returned when buf does not start with a supported
+// quote character or the quote cannot be matched.
+func getQuotedString(buf string) string {
+	switch buf[0] {
+	case '"', '`', '\'':
+		quote := string(buf[0])
+		pattern := fmt.Sprintf(`(^%s[^%s\\]*(?:\\.[^%s\\]*)*(%s|$))+`, quote, quote, quote, quote)
+		return regexp.MustCompile(pattern).FindString(buf)
+	default:
+		return ""
+	}
+}
+
+// Tokenize breaks sql into a stream of Tokens, caching token lookups
+// keyed on the first maxCachekeySize bytes of the remaining input.
+func Tokenize(sql string) []Token {
+	var (
+		current Token
+		tokens  []Token
+	)
+	tokenCache = make(map[string]Token)
+
+	// length of the remaining input on the previous pass;
+	// seeded one larger so the first iteration passes the check
+	prevLen := len(sql) + 1
+
+	for remaining := len(sql); remaining > 0; {
+		// a pass that consumed nothing would loop forever —
+		// report the rest of the input as a single error token
+		if prevLen <= remaining {
+			return []Token{
+				{
+					Type: TokenTypeError,
+					Val:  sql,
+				},
+			}
+		}
+		prevLen = remaining
+
+		// caching is only attempted when enough input is left for a full key
+		var cacheKey string
+		if remaining >= maxCachekeySize {
+			cacheKey = sql[:maxCachekeySize]
+		}
+
+		if cached, ok := tokenCache[cacheKey]; ok {
+			// cache hit: reuse the previously scanned token
+			current = cached
+			cacheHits++
+		} else {
+			current = getNextToken(sql, current)
+			cacheMisses++
+			// only tokens shorter than the key length are safe to cache
+			if cacheKey != "" && len(current.Val) < maxCachekeySize {
+				tokenCache[cacheKey] = current
+			}
+		}
+
+		tokens = append(tokens, current)
+
+		// advance past the consumed token
+		sql = sql[len(current.Val):]
+		remaining -= len(current.Val)
+	}
+	return tokens
+}
+
+// Compress flattens sql onto a single line: newlines and tabs become
+// spaces, and runs of whitespace collapse to one space.
+// this method is inspired by eversql.com
+func Compress(sql string) string {
+	flattened := regexp.MustCompile(`(?i)([\n\t])`).ReplaceAllString(sql, " ")
+	return regexp.MustCompile(`\s\s+`).ReplaceAllString(flattened, " ")
+}
+
+// SplitStatement splits the first SQL statement off buf at delimiter,
+// honoring quoted strings, single-line comments (`-- `, `#`) and block
+// comments. It returns the statement without its trailing delimiter and
+// the unconsumed remainder of buf.
+func SplitStatement(buf []byte, delimiter []byte) (string, []byte) {
+	var singleLineComment bool
+	var multiLineComment bool
+	var quoted bool
+	var quoteRune byte
+	var sql string
+
+	for i := 0; i < len(buf); i++ {
+		b := buf[i]
+		// single line comment "-- "
+		if b == '-' {
+			if i+2 < len(buf) && buf[i+1] == '-' && buf[i+2] == ' ' {
+				singleLineComment = true
+				i = i + 2
+				continue
+			}
+			// a leading line holding only "--" is returned as-is
+			if i+2 < len(buf) && i == 0 && buf[i+1] == '-' && (buf[i+2] == '\n' || buf[i+2] == '\r') {
+				sql = "--\n"
+				break
+			}
+		}
+
+		// '#' opens a single line comment unless inside a string or comment
+		if b == '#' {
+			if !multiLineComment && !quoted && !singleLineComment {
+				singleLineComment = true
+				continue
+			}
+		}
+
+		// new line ends a single line comment
+		if singleLineComment {
+			if b == '\r' || b == '\n' {
+				sql = string(buf[:i])
+				break
+			}
+		}
+
+		// multi line comment open; "/*!" version comments stay executable SQL.
+		// The i+2 bound check prevents the out-of-range read the original had
+		// when buf ended with "/*".
+		if b == '/' && i+1 < len(buf) && buf[i+1] == '*' {
+			if !multiLineComment && !singleLineComment && !quoted &&
+				i+2 < len(buf) && buf[i+2] != '!' {
+				// step over '*' only, so the byte after "/*" is still scanned
+				i = i + 1
+				multiLineComment = true
+				continue
+			}
+		}
+
+		// multi line comment close
+		if b == '*' && i+1 < len(buf) && buf[i+1] == '/' {
+			if multiLineComment && !quoted && !singleLineComment {
+				// step over '/' only, so the byte after "*/" (possibly a
+				// delimiter) is still scanned — the original skipped it
+				i = i + 1
+				multiLineComment = false
+				continue
+			}
+		}
+
+		// quoted string open/close; a backslash-escaped quote does not toggle
+		if b == '`' || b == '\'' || b == '"' {
+			if i == 0 || buf[i-1] != '\\' {
+				if quoted && b == quoteRune {
+					quoted = false
+				} else if !quoted {
+					// only an un-quoted quote char opens a string, so a '"'
+					// inside a '...' string no longer re-keys quoteRune
+					quoted = true
+					quoteRune = b
+				}
+			}
+		}
+
+		// delimiter match outside strings and comments; a partial delimiter
+		// at the end of buf is NOT a match (the original sliced past the end)
+		if !quoted && !singleLineComment && !multiLineComment {
+			matched := true
+			for k, c := range delimiter {
+				if i+k >= len(buf) || buf[i+k] != c {
+					matched = false
+					break
+				}
+			}
+			if matched {
+				i = i + len(delimiter)
+				sql = string(buf[:i])
+				break
+			}
+		}
+
+		// end of buf without a delimiter: the whole buffer is one statement
+		if i == len(buf)-1 {
+			sql = string(buf)
+		}
+	}
+	buf = buf[len(sql):]
+	return strings.TrimSuffix(sql, string(delimiter)), buf
+}
+
+// LeftNewLines counts the newline characters in the run of leading
+// whitespace at the start of buf; counting stops at the first
+// non-whitespace byte.
+func LeftNewLines(buf []byte) int {
+	count := 0
+	for _, c := range buf {
+		if !unicode.IsSpace(rune(c)) {
+			return count
+		}
+		if c == '\n' {
+			count++
+		}
+	}
+	return count
+}
+
+// NewLines counts every newline character in buf.
+func NewLines(buf []byte) int {
+	total := 0
+	for i := range buf {
+		if buf[i] == '\n' {
+			total++
+		}
+	}
+	return total
+}
diff --git a/ast/token_test.go b/ast/token_test.go
new file mode 100644
index 00000000..08ba3ef8
--- /dev/null
+++ b/ast/token_test.go
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ast
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/XiaoMi/soar/common"
+
+	"github.com/kr/pretty"
+)
+
+// TestTokenizer pretty-prints the sqlparser-based token stream for a set of
+// representative statements; it is a smoke test with no assertions.
+func TestTokenizer(_ *testing.T) {
+	sqls := []string{
+		"select c1,c2,c3 from t1,t2 join t3 on t1.c1=t2.c1 and t1.c3=t3.c1 where id>1000",
+		"select sourcetable, if(f.lastcontent = ?, f.lastupdate, f.lastcontent) as lastactivity, f.totalcount as activity, type.class as type, (f.nodeoptions & ?) as nounsubscribe from node as f inner join contenttype as type on type.contenttypeid = f.contenttypeid inner join subscribed as sd on sd.did = f.nodeid and sd.userid = ? union all select f.name as title, f.userid as keyval, ? as sourcetable, ifnull(f.lastpost, f.joindate) as lastactivity, f.posts as activity, ? as type, ? as nounsubscribe from user as f inner join userlist as ul on ul.relationid = f.userid and ul.userid = ? where ul.type = ? and ul.aq = ? order by title limit ?",
+		"select c1 from t1 where id>=1000", // test ">="
+		"select SQL_CALC_FOUND_ROWS col from tbl where id>1000",
+		"SELECT * FROM tb WHERE id=?;",
+		"SELECT * FROM tb WHERE id is null;",
+		"SELECT * FROM tb WHERE id is not null;",
+		"SELECT * FROM tb WHERE id between 1 and 3;",
+		// NOTE(review): the opening backtick before idx_store_film is missing —
+		// this appears to exercise tokenizing of malformed SQL; confirm intent.
+		"alter table inventory add index idx_store_film` (`store_id`,`film_id`);",
+	}
+	for _, sql := range sqls {
+		pretty.Println(Tokenizer(sql))
+	}
+}
+
+// TestGetQuotedString feeds representative quoted and unquoted inputs to
+// getQuotedString and prints what it extracts, for manual inspection.
+// FIX: the output label was misspelled "orignal".
+func TestGetQuotedString(t *testing.T) {
+	var str = []string{
+		`"hello world"`,
+		"`hello world`",
+		`'hello world'`,
+		"hello world",
+		`'hello \'world'`,
+		`"hello \"wor\"ld"`,
+		`"hello \"world"`,
+		`""`,
+		`''`,
+		"``",
+		`'hello 'world'`,
+		`"hello "world"`,
+	}
+	for _, s := range str {
+		fmt.Printf("original: %s\nquoted: %s\n", s, getQuotedString(s))
+	}
+}
+
+// TestTokenizer2 prints the Tokenize output for every shared test SQL;
+// it is a smoke test with no assertions.
+func TestTokenizer2(t *testing.T) {
+	for i := range common.TestSQLs {
+		stmt := common.TestSQLs[i]
+		fmt.Println(stmt)
+		fmt.Println(Tokenize(stmt))
+	}
+}
+
+// TestCompress prints the compressed form of every shared test SQL;
+// it is a smoke test with no assertions.
+func TestCompress(t *testing.T) {
+	for i := range common.TestSQLs {
+		stmt := common.TestSQLs[i]
+		fmt.Println(stmt)
+		fmt.Println(Compress(stmt))
+	}
+}
+
+// TestFormat prints the formatted form of every shared test SQL;
+// it is a smoke test with no assertions.
+func TestFormat(t *testing.T) {
+	for i := range common.TestSQLs {
+		stmt := common.TestSQLs[i]
+		fmt.Println(stmt)
+		fmt.Println(format(stmt))
+	}
+}
+
+// TestSplitStatement prints the split result for statements containing
+// comments and quoted strings, first with the configured delimiter and
+// then with the "\G" delimiter; it has no assertions.
+func TestSplitStatement(t *testing.T) {
+	bufs := [][]byte{
+		[]byte("select * from test;hello"),
+		[]byte("select 'asd;fas', col from test;hello"),
+		[]byte("-- select * from test;hello"),
+		[]byte("#select * from test;hello"),
+		[]byte("select * /*comment*/from test;hello"),
+		[]byte("select * /*comment;*/from test;hello"),
+		[]byte(`select * /*comment
+        ;*/
+        from test;hello`),
+		[]byte(`select * from test`),
+	}
+	for _, buf := range bufs {
+		fmt.Println(SplitStatement(buf, []byte(common.Config.Delimiter)))
+	}
+	// NOTE(review): inside these backquoted raw strings `\\G` is literally
+	// two backslashes followed by G, unlike the two-byte "\G" delimiter
+	// below — confirm this is the intended fixture.
+	buf2s := [][]byte{
+		[]byte("select * from test\\Ghello"),
+		[]byte("select 'asd\\Gfas', col from test\\Ghello"),
+		[]byte("-- select * from test\\Ghello"),
+		[]byte("#select * from test\\Ghello"),
+		[]byte("select * /*comment*/from test\\Ghello"),
+		[]byte("select * /*comment;*/from test\\Ghello"),
+		[]byte(`select * /*comment
+        \\G*/
+        from test\\Ghello`),
+	}
+	for _, buf := range buf2s {
+		fmt.Println(SplitStatement(buf, []byte("\\G")))
+	}
+}
+
+// TestLeftNewLines prints the leading-newline count for a few fixtures;
+// it is a smoke test with no assertions.
+func TestLeftNewLines(t *testing.T) {
+	cases := [][]byte{
+		[]byte(`
+		select * from test;hello`),
+		[]byte(`select * /*comment
+        ;*/
+        from test;hello`),
+		[]byte(`select * from test`),
+	}
+	for i := range cases {
+		fmt.Println(LeftNewLines(cases[i]))
+	}
+}
+
+// TestNewLines prints the total newline count for a few fixtures;
+// it is a smoke test with no assertions.
+func TestNewLines(t *testing.T) {
+	cases := [][]byte{
+		[]byte(`
+		select * from test;hello`),
+		[]byte(`select * /*comment
+        ;*/
+        from test;hello`),
+		[]byte(`select * from test`),
+	}
+	for i := range cases {
+		fmt.Println(NewLines(cases[i]))
+	}
+}
diff --git a/common/cases.go b/common/cases.go
new file mode 100644
index 00000000..6775f4c0
--- /dev/null
+++ b/common/cases.go
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+// TestSQLs is the shared collection of SQL statements used across the tests;
+// it is populated in init below.
+var TestSQLs []string
+
+func init() {
+	// 所有的SQL都要以分号结尾,-list-test-sqls参数会打印这个list,以分号结尾可方便测试
+	// 如:./soar -list-test-sql | ./soar
+	TestSQLs = []string{
+		//  single equality
+		"SELECT * FROM film WHERE length = 86;",    // index(length)
+		"SELECT * FROM film WHERE length IS NULL;", // index(length)
+		"SELECT * FROM film HAVING title = 'abc';", // 无法使用索引
+
+		//single inequality
+		"SELECT * FROM sakila.film WHERE length >= 60;",   // any of <, <=, >=, >; but not <>, !=, IS NOT NULL"
+		"SELECT * FROM sakila.film WHERE length >= '60';", // Implicit Conversion
+		"SELECT * FROM film WHERE length BETWEEN 60 AND 84;",
+		"SELECT * FROM film WHERE title LIKE 'AIR%';", // but not LIKE '%blah'",
+		"SELECT * FROM film WHERE title IS NOT NULL;",
+
+		// multiple equalities
+		"SELECT * FROM film WHERE length = 114 and title = 'ALABAMA DEVIL';", // index(title,length) or index(length,title)",
+
+		// equality and inequality
+		"SELECT * FROM film WHERE length > 100 and title = 'ALABAMA DEVIL';", // index(title, length)",
+
+		// multiple inequality
+		"SELECT * FROM film WHERE length > 100 and language_id < 10 and title = 'xyz';", // index(d, b) or index(d, c) 依赖数据",
+		"SELECT * FROM film WHERE length > 100 and language_id < 10;",                   // index(b) or index(c)",
+
+		// GROUP BY
+		"SELECT release_year, sum(length) FROM film WHERE length = 123 AND language_id = 1 GROUP BY release_year;",  // INDEX(length, language_id, release_year) or INDEX(language_id, length, release_year)",
+		"SELECT release_year, sum(length) FROM film WHERE length >= 123 GROUP BY release_year;",                     // INDEX(length)",
+		"SELECT release_year, language_id, sum(length) FROM film GROUP BY release_year, language_id;",               // INDEX(release_year, language_id) (no WHERE)",
+		"SELECT release_year, sum(length) FROM film WHERE length = 123 GROUP BY release_year,(length+language_id);", // INDEX(length) expression in GROUP BY, so no use including even release_year.",
+		"SELECT release_year, sum(film_id) FROM film GROUP BY release_year;",                                        // INDEX(`release_year`)
+		"SELECT * FROM address GROUP BY address,district;",                                                          // INDEX(address, district)
+		"SELECT title FROM film WHERE ABS(language_id) = 3 GROUP BY title;",                                         // 无法使用索引
+
+		// ORDER BY
+		"SELECT language_id FROM film WHERE length = 123 GROUP BY release_year ORDER BY language_id;",            //  INDEX(length, release_year) should have stopped with Step 2b",
+		"SELECT release_year FROM film WHERE length = 123 GROUP BY release_year ORDER BY release_year;",          //  INDEX(length, release_year) the release_year will be used for both GROUP BY and ORDER BY",
+		"SELECT * FROM film WHERE length = 123 ORDER BY release_year ASC, language_id DESC;",                     //  INDEX(length) mixture of ASC and DESC.",
+		"SELECT release_year FROM film WHERE length = 123 GROUP BY release_year ORDER BY release_year LIMIT 10;", //  INDEX(length, release_year)",
+		"SELECT * FROM film WHERE length = 123 ORDER BY release_year LIMIT 10;",                                  //  INDEX(length, release_year)",
+		"SELECT * FROM film ORDER BY release_year LIMIT 10;",                                                     //  INDEX(release_year)",
+		"SELECT * FROM film WHERE length > 100 ORDER BY length LIMIT 10;",                                        //  INDEX(length) This "range" is compatible with ORDER BY
+		"SELECT * FROM film WHERE length < 100 ORDER BY length LIMIT 10;",                                        //  INDEX(length) also works
+		"SELECT * FROM customer WHERE address_id in (224,510) ORDER BY last_name;",                               // INDEX(address_id)
+		"SELECT * FROM film WHERE release_year = 2016 AND length != 1 ORDER BY title;",                           // INDEX(`release_year`, `length`, `title`)
+
+		//"Covering" IdxRows
+		"SELECT title FROM film WHERE release_year = 1995;",                               //  INDEX(release_year, title)",
+		"SELECT title, replacement_cost FROM film WHERE language_id = 5 AND length = 70;", //  INDEX(language_id, length, title, replacement_cost), title, replacement_cost顺序无关,language_id, length顺序视散粒度情况.
+		"SELECT title FROM film WHERE language_id > 5 AND length > 70;",                   //  INDEX(language_id, length, title) language_id or length first (that's as far as the Algorithm goes), then the other two fields afterwards.
+
+		// equalities and sort
+		"SELECT * FROM film WHERE length = 100 and title = 'xyz' ORDER BY release_year;", // 依赖数据特征,index(length, title, release_year) or index(title, length, release_year)需要评估
+
+		// inequality and sort
+		"SELECT * FROM film WHERE length > 100 and title = 'xyz' ORDER BY release_year;", // 依赖数据特征, index(title, release_year),index(title, length)需要评估
+		"SELECT * FROM film WHERE length > 100 ORDER BY release_year;",                   // 依赖数据特征, index(length),index(release_year)需要评估
+
+		// Join
+		// 内连接 INNER JOIN
+		// 在mysql中,inner join...on , join...on , 逗号...WHERE ,cross join...on是一样的含义。
+		// 但是在标准SQL中,它们并不等价,标准SQL中INNER JOIN与ON共同使用, CROSS JOIN用于其他情况。
+		// 逗号不支持on和using语法, 逗号的优先级要低于INNER JOIN, CROSS JOIN, LEFT JOIN
+		// ON子句的语法格式为:tb1.col1 = tb2.col2列名可以不同,筛选连接后的结果,两表的对应列值相同才在结果集中。
+		// 当模式设计对联接表的列采用了相同的命名样式时,就可以使用 USING 语法来简化 ON 语法
+
+		// join, inner join, cross join等价,优先选择小结果集条件表为驱动表
+		// left [outer] join左表为驱动表
+		// right [outer] join右表为驱动表
+		// 驱动表连接列如果没其他条件可以不考虑加索引,反正是需要foreach
+		// 被驱动表连接列需要加索引。即:left [outer] join的右表连接列需要加索引,right [outer] join的左表连接列需要加索引,inner join结果集较大表的连接列需要加索引
+		// 其他索引添加算法与单表索引优化算法相同
+		// 总结:被驱动表列需要添加索引
+		// 建议:将无索引的表通常作为驱动表
+
+		"SELECT * FROM city a INNER JOIN country b ON a.country_id=b.country_id;",
+
+		// 左外连接 LEFT [OUTER] JOIN
+		"SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id;",
+
+		// 右外连接 RIGHT [OUTER] JOIN
+		"SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id;",
+
+		// 左连接
+		"SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id WHERE b.last_update IS NULL;",
+
+		// 右连接
+		"SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id WHERE a.last_update IS NULL;",
+
+		// 全连接 FULL JOIN 因为在mysql中并不支持,所以我们用union实现
+		"SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id " +
+			"UNION " +
+			"SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id;",
+
+		// 两张表中不共同满足的数据集
+		"SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id WHERE a.last_update IS NULL " +
+			"UNION " +
+			"SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id WHERE b.last_update IS NULL;",
+
+		// NATURAL JOIN 默认是同名字段完全匹配的INNER JOIN
+		"SELECT country_id, last_update FROM city NATURAL JOIN country;",
+
+		// NATURAL LEFT JOIN
+		"SELECT country_id, last_update FROM city NATURAL LEFT JOIN country;",
+
+		// NATURAL RIGHT JOIN
+		"SELECT country_id, last_update FROM city NATURAL RIGHT JOIN country;",
+
+		// STRAIGHT_JOIN 实际上与内连接 INNER JOIN 表现完全一致,
+		// 不同的是使用了 STRAIGHT_JOIN后指定表载入的顺序,city先于country载入
+		"SELECT a.country_id, a.last_update FROM city a STRAIGHT_JOIN country b ON a.country_id=b.country_id;",
+
+		// SEMI JOIN
+		// 半连接: 当一张表在另一张表找到匹配的记录之后,半连接(semi-join)返回第一张表中的记录。
+		// 与条件连接相反,即使在右节点中找到几条匹配的记录,左节点的表也只会返回一条记录。
+		// 另外,右节点的表一条记录也不会返回。半连接通常使用IN  或 EXISTS 作为连接条件
+		"SELECT d.deptno,d.dname,d.loc FROM scott.dept d WHERE d.deptno IN  (SELECT e.deptno FROM scott.emp e);",
+
+		// Delayed Join
+		// https://www.percona.com/blog/2007/04/06/using-delayed-join-to-optimize-count-and-limit-queries/
+		`SELECT visitor_id, url FROM (SELECT id FROM log WHERE ip="123.45.67.89" order by tsdesc limit 50, 10) I JOIN log ON (I.id=log.id) JOIN url ON (url.id=log.url_id) order by TS desc;`,
+
+		// DELETE
+		"DELETE city, country FROM city INNER JOIN country using (country_id) WHERE city.city_id = 1;",
+		"DELETE city FROM city LEFT JOIN country ON city.country_id = country.country_id WHERE country.country IS NULL;",
+		"DELETE a1, a2 FROM city AS a1 INNER JOIN country AS a2 WHERE a1.country_id=a2.country_id;",
+		"DELETE FROM a1, a2 USING city AS a1 INNER JOIN country AS a2 WHERE a1.country_id=a2.country_id;",
+		"DELETE FROM film WHERE length > 100;",
+
+		// UPDATE
+		"UPDATE city INNER JOIN country USING(country_id) SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.city_id=10;",
+		"UPDATE city INNER JOIN country ON city.country_id = country.country_id INNER JOIN address ON city.city_id = address.city_id SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.city_id=10;",
+		"UPDATE city, country SET city.city = 'Abha', city.last_update = '2006-02-15 04:45:25', country.country = 'Afghanistan' WHERE city.country_id = country.country_id AND city.city_id=10;",
+		"UPDATE film SET length = 10 WHERE language_id = 20;",
+
+		// INSERT
+		"INSERT INTO city (country_id) SELECT country_id FROM country;",
+		"INSERT INTO city (country_id) VALUES (1),(2),(3);",
+		"INSERT INTO city (country_id) VALUES (10);",
+		"INSERT INTO city (country_id) SELECT 10 FROM DUAL;",
+
+		// REPLACE
+		"REPLACE INTO city (country_id) SELECT country_id FROM country;",
+		"REPLACE INTO city (country_id) VALUES (1),(2),(3);",
+		"REPLACE INTO city (country_id) VALUES (10);",
+		"REPLACE INTO city (country_id) SELECT 10 FROM DUAL;",
+
+		// DEPTH
+		"SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM ( SELECT film_id FROM  film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film ) film;",
+
+		// SUBQUERY
+		"SELECT * FROM film WHERE language_id = (SELECT language_id FROM language LIMIT 1);",
+		//"SELECT COUNT(*) /* no hint */ FROM t2 WHERE NOT EXISTS (SELECT * FROM t3 WHERE ROW(5 * t2.s1, 77) = (SELECT 50, 11 * s1 FROM t4 UNION SELECT 50, 77 FROM (SELECT * FROM t5) AS t5 ) ) ;",
+		"SELECT * FROM city i left JOIN country o ON i.city_id=o.country_id union SELECT * FROM city i right JOIN country o ON i.city_id=o.country_id;",
+		"SELECT * FROM (SELECT * FROM actor WHERE last_update='2006-02-15 04:34:33' and last_name='CHASE') t WHERE last_update='2006-02-15 04:34:33' and last_name='CHASE' GROUP BY first_name;",
+		"SELECT * FROM city i left JOIN country o ON i.city_id=o.country_id union SELECT * FROM city i right JOIN country o ON i.city_id=o.country_id;",
+		"SELECT * FROM city i left JOIN country o ON i.city_id=o.country_id WHERE o.country_id is null union SELECT * FROM city i right JOIN country o ON i.city_id=o.country_id WHERE i.city_id is null;",
+		"SELECT first_name,last_name,email FROM customer STRAIGHT_JOIN address ON customer.address_id=address.address_id;",
+		"SELECT ID,name FROM (SELECT address FROM customer_list WHERE SID=1 order by phone limit 50,10) a JOIN customer_list l ON (a.address=l.address) JOIN city c ON (c.city=l.city) order by phone desc;",
+
+		// function in conditions
+		"SELECT * FROM film WHERE date(last_update)='2006-02-15';",
+		"SELECT last_update FROM film GROUP BY date(last_update);",
+		"SELECT last_update FROM film order by date(last_update);",
+
+		// CLA.004
+		"SELECT description FROM film WHERE description IN('NEWS','asd') GROUP BY description;",
+
+		// ALTER TABLE ADD INDEX
+		// 已经存在索引的列应该提醒索引已存在
+		"alter table address add index idx_city_id(city_id);",
+		"alter table inventory add index `idx_store_film` (`store_id`,`film_id`);",
+		"alter table inventory add index `idx_store_film` (`store_id`,`film_id`),add index `idx_store_film` (`store_id`,`film_id`),add index `idx_store_film` (`store_id`,`film_id`);",
+	}
+}
diff --git a/common/config.go b/common/config.go
new file mode 100644
index 00000000..212bab4c
--- /dev/null
+++ b/common/config.go
@@ -0,0 +1,822 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"regexp"
+	"strings"
+
+	"gopkg.in/yaml.v2"
+)
+
+// BlackList 黑名单中的SQL不会被评审
+var BlackList []string
+var hasParsed bool
+
+// Configration 配置文件定义结构体
+type Configration struct {
+	// +++++++++++++++测试环境+++++++++++++++++
+	OnlineDSN               *dsn   `yaml:"online-dsn"`                // 线上环境数据库配置
+	TestDSN                 *dsn   `yaml:"test-dsn"`                  // 测试环境数据库配置
+	AllowOnlineAsTest       bool   `yaml:"allow-online-as-test"`      // 允许Online环境也可以当作Test环境
+	DropTestTemporary       bool   `yaml:"drop-test-temporary"`       // 是否清理Test环境产生的临时库表
+	OnlySyntaxCheck         bool   `yaml:"only-syntax-check"`         // 只做语法检查不输出优化建议
+	SamplingStatisticTarget int    `yaml:"sampling-statistic-target"` // 数据采样因子,对应postgres的default_statistics_target
+	Sampling                bool   `yaml:"sampling"`                  // 数据采样开关
+	Profiling               bool   `yaml:"profiling"`                 // 在开启数据采样的情况下,在测试环境执行进行profile
+	Trace                   bool   `yaml:"trace"`                     // 在开启数据采样的情况下,在测试环境执行进行Trace
+	Explain                 bool   `yaml:"explain"`                   // Explain开关
+	ConnTimeOut             int    `yaml:"conn-time-out"`             // 数据库连接超时时间,单位秒
+	QueryTimeOut            int    `yaml:"query-time-out"`            // 数据库SQL执行超时时间,单位秒
+	Delimiter               string `yaml:"delimiter"`                 // SQL分隔符
+
+	// +++++++++++++++日志相关+++++++++++++++++
+	// 日志级别,这里使用了beego的log包
+	// [0:Emergency, 1:Alert, 2:Critical, 3:Error, 4:Warning, 5:Notice, 6:Informational, 7:Debug]
+	LogLevel int `yaml:"log-level"`
+	// 日志输出位置,默认日志输出到控制台
+	// 目前只支持['console', 'file']两种形式,如非console形式这里需要指定文件的路径,可以是相对路径
+	LogOutput string `yaml:"log-output"`
+	// 优化建议输出格式,目前支持: json, text, markdown格式,如指定其他格式会给pretty.Println的输出
+	ReportType string `yaml:"report-type"`
+	// 当ReportType为html格式时使用的css风格,如不指定会提供一个默认风格。CSS可以是本地文件,也可以是一个URL
+	ReportCSS string `yaml:"report-css"`
+	// 当ReportType为html格式时使用的javascript脚本,如不指定默认会加载SQL pretty使用的javascript。像CSS一样可以是本地文件,也可以是一个URL
+	ReportJavascript string `yaml:"report-javascript"`
+	// 当ReportType为html格式时,HTML的title
+	ReportTitle string `yaml:"report-title"`
+	// blackfriday markdown2html config
+	MarkdownExtensions int `yaml:"markdown-extensions"` // markdown转html支持的扩展包, 参考blackfriday
+	MarkdownHTMLFlags  int `yaml:"markdown-html-flags"` // markdown转html支持的flag, 参考blackfriday, default 0
+
+	// ++++++++++++++优化建议相关++++++++++++++
+	IgnoreRules          []string `yaml:"ignore-rules"`              // 忽略的优化建议规则
+	RewriteRules         []string `yaml:"rewrite-rules"`             // 生效的重写规则
+	BlackList            string   `yaml:"blacklist"`                 // blacklist中的SQL不会被评审,可以是指纹,也可以是正则
+	MaxJoinTableCount    int      `yaml:"max-join-table-count"`      // 单条SQL中JOIN表的最大数量
+	MaxGroupByColsCount  int      `yaml:"max-group-by-cols-count"`   // 单条SQL中GroupBy包含列的最大数量
+	MaxDistinctCount     int      `yaml:"max-distinct-count"`        // 单条SQL中Distinct的最大数量
+	MaxIdxColsCount      int      `yaml:"max-index-cols-count"`      // 复合索引中包含列的最大数量
+	MaxTotalRows         int64    `yaml:"max-total-rows"`            // 计算散粒度时,当数据行数大于 MaxTotalRows即开启数据库保护模式,散粒度返回结果可信度下降
+	MaxQueryCost         int64    `yaml:"max-query-cost"`            // last_query_cost 超过该值时将给予警告
+	SpaghettiQueryLength int      `yaml:"spaghetti-query-length"`    // SQL最大长度警告,超过该长度会给警告
+	AllowDropIndex       bool     `yaml:"allow-drop-index"`          // 允许输出删除重复索引的建议
+	MaxInCount           int      `yaml:"max-in-count"`              // IN()最大数量
+	MaxIdxBytesPerColumn int      `yaml:"max-index-bytes-percolumn"` // 索引中单列最大字节数,默认767
+	MaxIdxBytes          int      `yaml:"max-index-bytes"`           // 索引总长度限制,默认3072
+	TableAllowCharsets   []string `yaml:"table-allow-charsets"`      // Table允许使用的DEFAULT CHARSET
+	TableAllowEngines    []string `yaml:"table-allow-engines"`       // Table允许使用的Engine
+	MaxIdxCount          int      `yaml:"max-index-count"`           // 单张表允许最多索引数
+	MaxColCount          int      `yaml:"max-column-count"`          // 单张表允许最大列数
+	IdxPrefix            string   `yaml:"index-prefix"`              // 普通索引建议使用的前缀
+	UkPrefix             string   `yaml:"unique-key-prefix"`         // 唯一键建议使用的前缀
+	MaxSubqueryDepth     int      `yaml:"max-subquery-depth"`        // 子查询最大深度
+	MaxVarcharLength     int      `yaml:"max-varchar-length"`        // varchar最大长度
+
+	// ++++++++++++++EXPLAIN检查项+++++++++++++
+	ExplainSQLReportType   string   `yaml:"explain-sql-report-type"`  // EXPLAIN markdown格式输出SQL样式,支持sample, fingerprint, pretty
+	ExplainType            string   `yaml:"explain-type"`             // EXPLAIN方式 [traditional, extended, partitions]
+	ExplainFormat          string   `yaml:"explain-format"`           // FORMAT=[json, traditional]
+	ExplainWarnSelectType  []string `yaml:"explain-warn-select-type"` // 哪些select_type不建议使用
+	ExplainWarnAccessType  []string `yaml:"explain-warn-access-type"` // 哪些access type不建议使用
+	ExplainMaxKeyLength    int      `yaml:"explain-max-keys"`         // 最大key_len
+	ExplainMinPossibleKeys int      `yaml:"explain-min-keys"`         // 最小possible_keys警告
+	ExplainMaxRows         int      `yaml:"explain-max-rows"`         // 最大扫描行数警告
+	ExplainWarnExtra       []string `yaml:"explain-warn-extra"`       // 哪些extra信息会给警告
+	ExplainMaxFiltered     float64  `yaml:"explain-max-filtered"`     // filtered大于该配置给出警告
+	ExplainWarnScalability []string `yaml:"explain-warn-scalability"` // 复杂度警告名单
+	ShowWarnings           bool     `yaml:"show-warnings"`            // explain extended with show warnings
+	ShowLastQueryCost      bool     `yaml:"show-last-query-cost"`     // switch with show status like 'last_query_cost'
+	// ++++++++++++++其他配置项+++++++++++++++
+	Query              string `yaml:"query"`                 // 需要进行调优的SQL
+	ListHeuristicRules bool   `yaml:"list-heuristic-rules"`  // 打印支持的评审规则列表
+	ListRewriteRules   bool   `yaml:"list-rewrite-rules"`    // 打印重写规则
+	ListTestSqls       bool   `yaml:"list-test-sqls"`        // 打印测试case用于测试
+	ListReportTypes    bool   `yaml:"list-report-types"`     // 打印支持的报告输出类型
+	Verbose            bool   `yaml:"verbose"`               // verbose模式,会多输出一些信息
+	DryRun             bool   `yaml:"dry-run"`               // 是否在预演环境执行
+	MaxPrettySQLLength int    `yaml:"max-pretty-sql-length"` // 超出该长度的SQL会转换成指纹输出
+}
+
+// Config 默认设置
+var Config = &Configration{
+	OnlineDSN: &dsn{
+		Schema:  "information_schema",
+		Charset: "utf8mb4",
+		Disable: true,
+		Version: 999,
+	},
+	TestDSN: &dsn{
+		Schema:  "information_schema",
+		Charset: "utf8mb4",
+		Disable: true,
+		Version: 999,
+	},
+	AllowOnlineAsTest:       false,
+	DropTestTemporary:       true,
+	DryRun:                  true,
+	OnlySyntaxCheck:         false,
+	SamplingStatisticTarget: 100,
+	Sampling:                false,
+	Profiling:               false,
+	Trace:                   false,
+	Explain:                 true,
+	ConnTimeOut:             3,
+	QueryTimeOut:            30,
+	Delimiter:               ";",
+
+	MaxJoinTableCount:    5,
+	MaxGroupByColsCount:  5,
+	MaxDistinctCount:     5,
+	MaxIdxColsCount:      5,
+	MaxIdxBytesPerColumn: 767,
+	MaxIdxBytes:          3072,
+	MaxTotalRows:         9999999,
+	MaxQueryCost:         9999,
+	SpaghettiQueryLength: 2048,
+	AllowDropIndex:       false,
+	LogLevel:             3,
+	LogOutput:            "/dev/stderr",
+	ReportType:           "markdown",
+	ReportCSS:            "",
+	ReportJavascript:     "",
+	ReportTitle:          "SQL优化分析报告",
+	BlackList:            "",
+	TableAllowCharsets:   []string{"utf8", "utf8mb4"},
+	TableAllowEngines:    []string{"innodb"},
+	MaxIdxCount:          10,
+	MaxColCount:          40,
+	MaxInCount:           10,
+	IdxPrefix:            "idx_",
+	UkPrefix:             "uk_",
+	MaxSubqueryDepth:     5,
+	MaxVarcharLength:     1024,
+
+	MarkdownExtensions: 94,
+	MarkdownHTMLFlags:  0,
+
+	ExplainSQLReportType:   "pretty",
+	ExplainType:            "extended",
+	ExplainFormat:          "traditional",
+	ExplainWarnSelectType:  []string{""},
+	ExplainWarnAccessType:  []string{"ALL"},
+	ExplainMaxKeyLength:    3,
+	ExplainMinPossibleKeys: 0,
+	ExplainMaxRows:         10000,
+	ExplainWarnExtra:       []string{"Using temporary", "Using filesort"},
+	ExplainMaxFiltered:     100.0,
+	ExplainWarnScalability: []string{"O(n)"},
+	ShowWarnings:           false,
+	ShowLastQueryCost:      false,
+
+	IgnoreRules: []string{
+		"COL.011",
+	},
+	RewriteRules: []string{
+		"delimiter",
+		"orderbynull",
+		"groupbyconst",
+		"dmlorderby",
+		"having",
+		"star2columns",
+		"insertcolumns",
+		"distinctstar",
+	},
+
+	ListHeuristicRules: false,
+	ListRewriteRules:   false,
+	ListTestSqls:       false,
+	ListReportTypes:    false,
+	MaxPrettySQLLength: 1024,
+}
+
+type dsn struct {
+	Addr   string `yaml:"addr"`
+	Schema string `yaml:"schema"`
+
+	// 数据库用户名和密码可以通过系统环境变量的形式赋值
+	User     string `yaml:"user"`
+	Password string `yaml:"password"`
+	Charset  string `yaml:"charset"`
+	Disable  bool   `yaml:"disable"`
+
+	Version int `yaml:"-"` // 版本自动检查,不可配置
+}
+
+// 解析命令行DSN输入
+func parseDSN(odbc string, d *dsn) *dsn {
+	var addr, user, password, schema, charset string
+	if d != nil {
+		addr = d.Addr
+		user = d.User
+		password = d.Password
+		schema = d.Schema
+		charset = d.Charset
+	}
+
+	// 设置为空表示禁用环境
+	odbc = strings.TrimSpace(odbc)
+	if odbc == "" {
+		return &dsn{Disable: true}
+	}
+
+	// username:password@ip:port/dbname
+	l1 := strings.Split(odbc, "@")
+	if len(l1) < 2 {
+		if strings.HasPrefix(l1[0], ":") {
+			// ":port/database"
+			l2 := strings.Split(strings.TrimLeft(l1[0], ":"), "/")
+			if l2[0] == "" {
+				addr = strings.Split(addr, ":")[0] + ":3306"
+				if len(l2) > 1 {
+					schema = strings.Split(l2[1], "?")[0]
+				}
+			} else {
+				addr = strings.Split(addr, ":")[0] + ":" + l2[0]
+				if len(l2) > 1 {
+					schema = strings.Split(l2[1], "?")[0]
+				}
+			}
+		} else if strings.HasPrefix(l1[0], "/") {
+			// "/database"
+			l2 := strings.TrimLeft(l1[0], "/")
+			schema = l2
+		} else {
+			// ip:port/dbname
+			l2 := strings.Split(l1[0], "/")
+			if len(l2) == 2 {
+				addr = l2[0]
+				schema = strings.Split(l2[1], "?")[0]
+			} else {
+				addr = l2[0]
+			}
+		}
+	} else {
+		// user:password
+		l2 := strings.Split(l1[0], ":")
+		if len(l2) == 2 {
+			user = l2[0]
+			password = l2[1]
+		} else {
+			user = l2[0]
+		}
+		// ip:port/dbname
+		l3 := strings.Split(l1[1], "/")
+		if len(l3) == 2 {
+			addr = l3[0]
+			schema = strings.Split(l3[1], "?")[0]
+		} else {
+			addr = l3[0]
+		}
+	}
+
+	// 其他flag参数,目前只支持charset :(
+	if len(strings.Split(odbc, "?")) > 1 {
+		flags := strings.Split(strings.Split(odbc, "?")[1], "&")
+		for _, f := range flags {
+			attr := strings.Split(f, "=")
+			if len(attr) > 1 {
+				arg := strings.TrimSpace(attr[0])
+				val := strings.TrimSpace(attr[1])
+				switch arg {
+				case "charset":
+					charset = val
+				default:
+				}
+			}
+		}
+	}
+
+	// 自动补端口
+	if !strings.Contains(addr, ":") {
+		addr = addr + ":3306"
+	} else {
+		if strings.HasSuffix(addr, ":") {
+			addr = addr + "3306"
+		}
+	}
+
+	// 默认走127.0.0.1
+	if strings.HasPrefix(addr, ":") {
+		addr = "127.0.0.1" + addr
+	}
+
+	// 默认用information_schema库
+	if schema == "" {
+		schema = "information_schema"
+	}
+
+	// 默认utf8mb4使用字符集
+	if charset == "" {
+		charset = "utf8mb4"
+	}
+
+	dsn := &dsn{
+		Addr:     addr,
+		User:     user,
+		Password: password,
+		Schema:   schema,
+		Charset:  charset,
+		Disable:  false,
+		Version:  999,
+	}
+	return dsn
+}
+
+// FormatDSN 格式化打印DSN
+func FormatDSN(env *dsn) string {
+	if env.Disable {
+		return ""
+	}
+	// username:password@ip:port/schema?charset=xxx
+	return fmt.Sprintf("%s:%s@%s/%s?charset=%s", env.User, env.Password, env.Addr, env.Schema, env.Charset)
+}
+
+func version() {
+	fmt.Println("Version:", Version)
+	fmt.Println("Branch:", Branch)
+	fmt.Println("Compile:", Compile)
+	fmt.Println("GitDirty:", GitDirty)
+}
+
+// 因为vitess sqlparser使用了glog中也会使用flag,为了不让用户困扰我们单独写一个usage
+func usage() {
+	regPwd := regexp.MustCompile(`:.*@`)
+	vitessHelp := []string{
+		"-alsologtostderr",
+		"log to standard error as well as files",
+		"-log_backtrace_at value",
+		"when logging hits line file:N, emit a stack trace",
+		"-log_dir string",
+		"If non-empty, write log files in this directory",
+		"-logtostderr",
+		"log to standard error instead of files",
+		"-sql-max-length-errors int",
+		"truncate queries in error logs to the given length (default unlimited)",
+		"-sql-max-length-ui int",
+		"truncate queries in debug UIs to the given length (default 512) (default 512)",
+		"-stderrthreshold value",
+		"logs at or above this threshold go to stderr",
+		"-v value",
+		"log level for V logs",
+		"-vmodule value",
+		"comma-separated list of pattern=N settings for file-filtered logging",
+	}
+
+	// io redirect
+	restoreStdout := os.Stdout
+	restoreStderr := os.Stderr
+	stdin, stdout, _ := os.Pipe()
+	os.Stderr = stdout
+	os.Stdout = stdout
+
+	flag.PrintDefaults()
+
+	// copy the output in a separate goroutine so printing can't block indefinitely
+	outC := make(chan string)
+	go func() {
+		var buf bytes.Buffer
+		_, err := io.Copy(&buf, stdin)
+		if err != nil {
+			fmt.Println(err.Error())
+		}
+		outC <- buf.String()
+	}()
+
+	// back to normal state
+	stdout.Close()
+	os.Stdout = restoreStdout // restoring the real stdout
+	os.Stderr = restoreStderr
+
+	fmt.Printf("Usage of %s:\n", os.Args[0])
+	// reading our temp stdout
+	out := <-outC
+	for _, line := range strings.Split(out, "\n") {
+		found := false
+		for _, ignore := range vitessHelp {
+			if strings.TrimSpace(line) == strings.TrimSpace(ignore) {
+				found = true
+			}
+			if regPwd.MatchString(line) && !Config.Verbose {
+				line = regPwd.ReplaceAllString(line, ":********@")
+			}
+		}
+		if !found {
+			fmt.Println(line)
+		}
+	}
+}
+
+// 加载配置文件
+func (conf *Configration) readConfigFile(path string) error {
+	configFile, err := os.Open(path)
+	if err != nil {
+		Log.Warning("readConfigFile(%s) os.Open failed: %v", path, err)
+		return err
+	}
+	defer configFile.Close()
+
+	content, err := ioutil.ReadAll(configFile)
+	if err != nil {
+		Log.Warning("readConfigFile(%s) ioutil.ReadAll failed: %v", path, err)
+		return err
+	}
+
+	err = yaml.Unmarshal(content, Config)
+	if err != nil {
+		Log.Warning("readConfigFile(%s) yaml.Unmarshal failed: %v", path, err)
+		return err
+	}
+	return nil
+}
+
+// 从命令行参数读配置
+func readCmdFlags() error {
+	if hasParsed {
+		Log.Debug("Skip read cmd flags.")
+		return nil
+	}
+
+	config := flag.String("config", "", "Config file path")
+	// +++++++++++++++测试环境+++++++++++++++++
+	onlineDSN := flag.String("online-dsn", FormatDSN(Config.OnlineDSN), "OnlineDSN, 线上环境数据库配置, username:password@ip:port/schema")
+	testDSN := flag.String("test-dsn", FormatDSN(Config.TestDSN), "TestDSN, 测试环境数据库配置, username:password@ip:port/schema")
+	allowOnlineAsTest := flag.Bool("allow-online-as-test", Config.AllowOnlineAsTest, "AllowOnlineAsTest, 允许线上环境也可以当作测试环境")
+	dropTestTemporary := flag.Bool("drop-test-temporary", Config.DropTestTemporary, "DropTestTemporary, 是否清理测试环境产生的临时库表")
+	onlySyntaxCheck := flag.Bool("only-syntax-check", Config.OnlySyntaxCheck, "OnlySyntaxCheck, 只做语法检查不输出优化建议")
+	profiling := flag.Bool("profiling", Config.Profiling, "Profiling, 开启数据采样的情况下在测试环境执行Profile")
+	trace := flag.Bool("trace", Config.Trace, "Trace, 开启数据采样的情况下在测试环境执行Trace")
+	explain := flag.Bool("explain", Config.Explain, "Explain, 是否开启Explain执行计划分析")
+	sampling := flag.Bool("sampling", Config.Sampling, "Sampling, 数据采样开关")
+	samplingStatisticTarget := flag.Int("sampling-statistic-target", Config.SamplingStatisticTarget, "SamplingStatisticTarget, 数据采样因子,对应postgres的default_statistics_target")
+	connTimeOut := flag.Int("conn-time-out", Config.ConnTimeOut, "ConnTimeOut, 数据库连接超时时间,单位秒")
+	queryTimeOut := flag.Int("query-time-out", Config.QueryTimeOut, "QueryTimeOut, 数据库SQL执行超时时间,单位秒")
+	delimiter := flag.String("delimiter", Config.Delimiter, "Delimiter, SQL分隔符")
+	// +++++++++++++++日志相关+++++++++++++++++
+	logLevel := flag.Int("log-level", Config.LogLevel, "LogLevel, 日志级别, [0:Emergency, 1:Alert, 2:Critical, 3:Error, 4:Warning, 5:Notice, 6:Informational, 7:Debug]")
+	logOutput := flag.String("log-output", Config.LogOutput, "LogOutput, 日志输出位置")
+	reportType := flag.String("report-type", Config.ReportType, "ReportType, 优化建议输出格式,目前支持: json, text, markdown, html等")
+	reportCSS := flag.String("report-css", Config.ReportCSS, "ReportCSS, 当ReportType为html格式时使用的css风格,如不指定会提供一个默认风格。CSS可以是本地文件,也可以是一个URL")
+	reportJavascript := flag.String("report-javascript", Config.ReportJavascript, "ReportJavascript, 当ReportType为html格式时使用的javascript脚本,如不指定默认会加载SQL pretty使用的javascript。像CSS一样可以是本地文件,也可以是一个URL")
+	reportTitle := flag.String("report-title", Config.ReportTitle, "ReportTitle, 当ReportType为html格式时,HTML的title")
+	// +++++++++++++++markdown+++++++++++++++++
+	markdownExtensions := flag.Int("markdown-extensions", Config.MarkdownExtensions, "MarkdownExtensions, markdown转html支持的扩展包, 参考blackfriday")
+	markdownHTMLFlags := flag.Int("markdown-html-flags", Config.MarkdownHTMLFlags, "MarkdownHTMLFlags, markdown转html支持的flag, 参考blackfriday")
+	// ++++++++++++++优化建议相关++++++++++++++
+	ignoreRules := flag.String("ignore-rules", strings.Join(Config.IgnoreRules, ","), "IgnoreRules, 忽略的优化建议规则")
+	rewriteRules := flag.String("rewrite-rules", strings.Join(Config.RewriteRules, ","), "RewriteRules, 生效的重写规则")
+	blackList := flag.String("blacklist", Config.BlackList, "blacklist中的SQL不会被评审,可以是指纹,也可以是正则")
+	maxJoinTableCount := flag.Int("max-join-table-count", Config.MaxJoinTableCount, "MaxJoinTableCount, 单条SQL中JOIN表的最大数量")
+	maxGroupByColsCount := flag.Int("max-group-by-cols-count", Config.MaxGroupByColsCount, "MaxGroupByColsCount, 单条SQL中GroupBy包含列的最大数量")
+	maxDistinctCount := flag.Int("max-distinct-count", Config.MaxDistinctCount, "MaxDistinctCount, 单条SQL中Distinct的最大数量")
+	maxIdxColsCount := flag.Int("max-index-cols-count", Config.MaxIdxColsCount, "MaxIdxColsCount, 复合索引中包含列的最大数量")
+	maxTotalRows := flag.Int64("max-total-rows", Config.MaxTotalRows, "MaxTotalRows, 计算散粒度时,当数据行数大于MaxTotalRows即开启数据库保护模式,不计算散粒度")
+	maxQueryCost := flag.Int64("max-query-cost", Config.MaxQueryCost, "MaxQueryCost, last_query_cost 超过该值时将给予警告")
+	spaghettiQueryLength := flag.Int("spaghetti-query-length", Config.SpaghettiQueryLength, "SpaghettiQueryLength, SQL最大长度警告,超过该长度会给警告")
+	allowDropIdx := flag.Bool("allow-drop-index", Config.AllowDropIndex, "AllowDropIndex, 允许输出删除重复索引的建议")
+	maxInCount := flag.Int("max-in-count", Config.MaxInCount, "MaxInCount, IN()最大数量")
+	maxIdxBytesPerColumn := flag.Int("max-index-bytes-percolumn", Config.MaxIdxBytesPerColumn, "MaxIdxBytesPerColumn, 索引中单列最大字节数")
+	maxIdxBytes := flag.Int("max-index-bytes", Config.MaxIdxBytes, "MaxIdxBytes, 索引总长度限制")
+	tableAllowCharsets := flag.String("table-allow-charsets", strings.ToLower(strings.Join(Config.TableAllowCharsets, ",")), "TableAllowCharsets")
+	tableAllowEngines := flag.String("table-allow-engines", strings.ToLower(strings.Join(Config.TableAllowEngines, ",")), "TableAllowEngines")
+	maxIdxCount := flag.Int("max-index-count", Config.MaxIdxCount, "MaxIdxCount, 单表最大索引个数")
+	maxColCount := flag.Int("max-column-count", Config.MaxColCount, "MaxColCount, 单表允许的最大列数")
+	idxPrefix := flag.String("index-prefix", Config.IdxPrefix, "IdxPrefix")
+	ukPrefix := flag.String("unique-key-prefix", Config.UkPrefix, "UkPrefix")
+	maxSubqueryDepth := flag.Int("max-subquery-depth", Config.MaxSubqueryDepth, "MaxSubqueryDepth")
+	maxVarcharLength := flag.Int("max-varchar-length", Config.MaxVarcharLength, "MaxVarcharLength")
+	// ++++++++++++++EXPLAIN检查项+++++++++++++
+	explainSQLReportType := flag.String("explain-sql-report-type", strings.ToLower(Config.ExplainSQLReportType), "ExplainSQLReportType [pretty, sample, fingerprint]")
+	explainType := flag.String("explain-type", strings.ToLower(Config.ExplainType), "ExplainType [extended, partitions, traditional]")
+	explainFormat := flag.String("explain-format", strings.ToLower(Config.ExplainFormat), "ExplainFormat [json, traditional]")
+	explainWarnSelectType := flag.String("explain-warn-select-type", strings.Join(Config.ExplainWarnSelectType, ","), "ExplainWarnSelectType, 哪些select_type不建议使用")
+	explainWarnAccessType := flag.String("explain-warn-access-type", strings.Join(Config.ExplainWarnAccessType, ","), "ExplainWarnAccessType, 哪些access type不建议使用")
+	explainMaxKeyLength := flag.Int("explain-max-keys", Config.ExplainMaxKeyLength, "ExplainMaxKeyLength, 最大key_len")
+	explainMinPossibleKeys := flag.Int("explain-min-keys", Config.ExplainMinPossibleKeys, "ExplainMinPossibleKeys, 最小possible_keys警告")
+	explainMaxRows := flag.Int("explain-max-rows", Config.ExplainMaxRows, "ExplainMaxRows, 最大扫描行数警告")
+	explainWarnExtra := flag.String("explain-warn-extra", strings.Join(Config.ExplainWarnExtra, ","), "ExplainWarnExtra, 哪些extra信息会给警告")
+	explainMaxFiltered := flag.Float64("explain-max-filtered", Config.ExplainMaxFiltered, "ExplainMaxFiltered, filtered大于该配置给出警告")
+	explainWarnScalability := flag.String("explain-warn-scalability", strings.Join(Config.ExplainWarnScalability, ","), "ExplainWarnScalability, 复杂度警告名单, 支持O(n),O(log n),O(1),O(?)")
+	showWarnings := flag.Bool("show-warnings", Config.ShowWarnings, "ShowWarnings")
+	showLastQueryCost := flag.Bool("show-last-query-cost", Config.ShowLastQueryCost, "ShowLastQueryCost")
+	// +++++++++++++++++其他+++++++++++++++++++
+	printConfig := flag.Bool("print-config", false, "Print configs")
+	ver := flag.Bool("version", false, "Print version info")
+	query := flag.String("query", Config.Query, "Queries for analyzing")
+	listHeuristicRules := flag.Bool("list-heuristic-rules", Config.ListHeuristicRules, "ListHeuristicRules, 打印支持的评审规则列表")
+	listRewriteRules := flag.Bool("list-rewrite-rules", Config.ListRewriteRules, "ListRewriteRules, 打印支持的重写规则列表")
+	listTestSQLs := flag.Bool("list-test-sqls", Config.ListTestSqls, "ListTestSqls, 打印测试case用于测试")
+	listReportTypes := flag.Bool("list-report-types", Config.ListReportTypes, "ListReportTypes, 打印支持的报告输出类型")
+	verbose := flag.Bool("verbose", Config.Verbose, "Verbose")
+	dryrun := flag.Bool("dry-run", Config.DryRun, "是否在预演环境执行")
+	maxPrettySQLLength := flag.Int("max-pretty-sql-length", Config.MaxPrettySQLLength, "MaxPrettySQLLength, 超出该长度的SQL会转换成指纹输出")
+	// 一个不存在log-level,用于更新usage。
+	// 因为vitess里面也用了flag,这些vitess的参数我们不需要关注
+	if !Config.Verbose {
+		flag.Usage = usage
+	}
+	flag.Parse()
+
+	if *config != "" {
+		err := Config.readConfigFile(*config)
+		if err != nil {
+			fmt.Println(err.Error())
+		}
+	}
+
+	Config.OnlineDSN = parseDSN(*onlineDSN, Config.OnlineDSN)
+	Config.TestDSN = parseDSN(*testDSN, Config.OnlineDSN)
+	Config.AllowOnlineAsTest = *allowOnlineAsTest
+	Config.DropTestTemporary = *dropTestTemporary
+	Config.OnlySyntaxCheck = *onlySyntaxCheck
+	Config.Profiling = *profiling
+	Config.Trace = *trace
+	Config.Explain = *explain
+	Config.Sampling = *sampling
+	Config.SamplingStatisticTarget = *samplingStatisticTarget
+	Config.ConnTimeOut = *connTimeOut
+	Config.QueryTimeOut = *queryTimeOut
+
+	Config.LogLevel = *logLevel
+	if strings.HasPrefix(*logOutput, "/") {
+		Config.LogOutput = *logOutput
+	} else {
+		if BaseDir == "" {
+			Config.LogOutput = *logOutput
+		} else {
+			Config.LogOutput = BaseDir + "/" + *logOutput
+		}
+	}
+	Config.ReportType = strings.ToLower(*reportType)
+	Config.ReportCSS = *reportCSS
+	Config.ReportJavascript = *reportJavascript
+	Config.ReportTitle = *reportTitle
+	Config.MarkdownExtensions = *markdownExtensions
+	Config.MarkdownHTMLFlags = *markdownHTMLFlags
+	Config.IgnoreRules = strings.Split(*ignoreRules, ",")
+	Config.RewriteRules = strings.Split(*rewriteRules, ",")
+	*blackList = strings.TrimSpace(*blackList)
+	if strings.HasPrefix(*blackList, "/") || *blackList == "" {
+		Config.BlackList = *blackList
+	} else {
+		Config.BlackList = BaseDir + "/" + *blackList
+	}
+	Config.MaxJoinTableCount = *maxJoinTableCount
+	Config.MaxGroupByColsCount = *maxGroupByColsCount
+	Config.MaxDistinctCount = *maxDistinctCount
+
+	if *maxIdxColsCount < 16 {
+		Config.MaxIdxColsCount = *maxIdxColsCount
+	} else {
+		Config.MaxIdxColsCount = 16
+	}
+
+	Config.MaxIdxBytesPerColumn = *maxIdxBytesPerColumn
+	Config.MaxIdxBytes = *maxIdxBytes
+	Config.TableAllowCharsets = strings.Split(strings.ToLower(*tableAllowCharsets), ",")
+	Config.TableAllowEngines = strings.Split(strings.ToLower(*tableAllowEngines), ",")
+	Config.MaxIdxCount = *maxIdxCount
+	Config.MaxColCount = *maxColCount
+	Config.IdxPrefix = *idxPrefix
+	Config.UkPrefix = *ukPrefix
+	Config.MaxSubqueryDepth = *maxSubqueryDepth
+	Config.MaxTotalRows = *maxTotalRows
+	Config.MaxQueryCost = *maxQueryCost
+	Config.AllowDropIndex = *allowDropIdx
+	Config.MaxInCount = *maxInCount
+	Config.SpaghettiQueryLength = *spaghettiQueryLength
+	Config.Query = *query
+	Config.Delimiter = *delimiter
+
+	Config.ExplainSQLReportType = strings.ToLower(*explainSQLReportType)
+	Config.ExplainType = strings.ToLower(*explainType)
+	Config.ExplainFormat = strings.ToLower(*explainFormat)
+	Config.ExplainWarnSelectType = strings.Split(*explainWarnSelectType, ",")
+	Config.ExplainWarnAccessType = strings.Split(*explainWarnAccessType, ",")
+	Config.ExplainMaxKeyLength = *explainMaxKeyLength
+	Config.ExplainMinPossibleKeys = *explainMinPossibleKeys
+	Config.ExplainMaxRows = *explainMaxRows
+	Config.ExplainWarnExtra = strings.Split(*explainWarnExtra, ",")
+	Config.ExplainMaxFiltered = *explainMaxFiltered
+	Config.ExplainWarnScalability = strings.Split(*explainWarnScalability, ",")
+	Config.ShowWarnings = *showWarnings
+	Config.ShowLastQueryCost = *showLastQueryCost
+	Config.ListHeuristicRules = *listHeuristicRules
+	Config.ListRewriteRules = *listRewriteRules
+	Config.ListTestSqls = *listTestSQLs
+	Config.ListReportTypes = *listReportTypes
+	Config.Verbose = *verbose
+	Config.DryRun = *dryrun
+	Config.MaxPrettySQLLength = *maxPrettySQLLength
+	Config.MaxVarcharLength = *maxVarcharLength
+
+	if *ver {
+		version()
+		os.Exit(0)
+	}
+
+	if *printConfig {
+		// 打印配置的时候密码不显示
+		if !Config.Verbose {
+			Config.OnlineDSN.Password = "********"
+			Config.TestDSN.Password = "********"
+		}
+		data, _ := yaml.Marshal(Config)
+		fmt.Print(string(data))
+		os.Exit(0)
+	}
+
+	hasParsed = true
+	return nil
+}
+
+// ParseConfig 加载配置文件和命令行参数
+func ParseConfig(configFile string) error {
+	var err error
+	var configs []string
+	// 指定了配置文件优先读配置文件,未指定配置文件按如下顺序加载,先找到哪个加载哪个
+	if configFile == "" {
+		configs = []string{
+			"/etc/soar.yaml",
+			BaseDir + "/etc/soar.yaml",
+			BaseDir + "/soar.yaml",
+		}
+	} else {
+		configs = []string{
+			configFile,
+		}
+	}
+
+	for _, config := range configs {
+		if _, err = os.Stat(config); err == nil {
+			err = Config.readConfigFile(config)
+			if err != nil {
+				Log.Error("ParseConfig Config.readConfigFile Error: %v", err)
+			}
+			break
+		}
+	}
+
+	err = readCmdFlags()
+	if err != nil {
+		Log.Error("ParseConfig readCmdFlags Error: %v", err)
+	}
+
+	// parse blacklist & ignore blacklist file parse error
+	if _, e := os.Stat(Config.BlackList); e == nil {
+		var blFd *os.File
+		blFd, err = os.Open(Config.BlackList)
+		if err == nil {
+			bl := bufio.NewReader(blFd)
+			for {
+				rule, e := bl.ReadString('\n')
+				if e != nil {
+					break
+				}
+				rule = strings.TrimSpace(rule)
+				if strings.HasPrefix(rule, "#") || rule == "" {
+					continue
+				}
+				BlackList = append(BlackList, rule)
+			}
+		}
+		defer blFd.Close()
+	}
+	LoggerInit()
+	return err
+}
+
// ReportType describes one output format supported by the -report-type
// command line flag; the full catalogue is printed by ListReportTypes.
type ReportType struct {
	Name        string `json:"Name"`        // value accepted by -report-type
	Description string `json:"Description"` // human-readable summary
	Example     string `json:"Example"`     // shell snippet demonstrating usage
}
+
// ReportTypes enumerates every mode accepted by the -report-type command
// line flag, with a description and a runnable example for each.
var ReportTypes = []ReportType{
	{
		Name:        "lint",
		Description: "参考sqlint格式,以插件形式集成到代码编辑器,显示输出更加友好",
		Example:     `soar -report-type lint -query test.sql`,
	},
	{
		Name:        "markdown",
		Description: "该格式为默认输出格式,以markdown格式展现,可以用网页浏览器插件直接打开,也可以用markdown编辑器打开",
		Example:     `echo "select * from film" | soar`,
	},
	{
		Name:        "rewrite",
		Description: "SQL重写功能,配合-rewrite-rules参数一起使用,可以通过-list-rewrite-rules查看所有支持的SQL重写规则",
		Example:     `echo "select * from film" | soar -rewrite-rules star2columns,delimiter -report-type rewrite`,
	},
	{
		Name:        "ast",
		Description: "输出SQL的抽象语法树,主要用于测试",
		Example:     `echo "select * from film" | soar -report-type ast`,
	},
	{
		Name:        "tiast",
		Description: "输出SQL的TiDB抽象语法树,主要用于测试",
		Example:     `echo "select * from film" | soar -report-type tiast`,
	},
	{
		Name:        "fingerprint",
		Description: "输出SQL的指纹",
		Example:     `echo "select * from film where language_id=1" | soar -report-type fingerprint`,
	},
	{
		Name:        "md2html",
		Description: "markdown格式转html格式小工具",
		Example:     `soar -list-heuristic-rules | soar -report-type md2html > heuristic_rules.html`,
	},
	{
		Name:        "explain-digest",
		Description: "输入为EXPLAIN的表格,JSON或Vertical格式,对其进行分析,给出分析结果",
		Example: `soar -report-type explain-digest << EOF
+----+-------------+-------+------+---------------+------+---------+------+------+-------+
| id | select_type | table | type | possible_keys | key  | key_len | ref  | rows | Extra |
+----+-------------+-------+------+---------------+------+---------+------+------+-------+
|  1 | SIMPLE      | film  | ALL  | NULL          | NULL | NULL    | NULL | 1131 |       |
+----+-------------+-------+------+---------------+------+---------+------+------+-------+
EOF`,
	},
	{
		Name:        "duplicate-key-checker",
		Description: "对OnlineDsn中指定的DB进行索引重复检查",
		Example:     `soar -report-type duplicate-key-checker -online-dsn user:passwd@127.0.0.1:3306/db`,
	},
	{
		Name:        "html",
		Description: "以HTML格式输出报表",
		Example:     `echo "select * from film" | soar -report-type html`,
	},
	{
		Name:        "json",
		Description: "输出JSON格式报表,方便应用程序处理",
		Example:     `echo "select * from film" | soar -report-type json`,
	},
	{
		Name:        "tokenize",
		Description: "对SQL进行切词,主要用于测试",
		Example:     `echo "select * from film" | soar -report-type tokenize`,
	},
	{
		Name:        "compress",
		Description: "SQL压缩小工具,使用内置SQL压缩逻辑,测试中的功能",
		Example: `echo "select
*
from
  film" | soar -report-type compress`,
	},
	{
		Name:        "pretty",
		Description: "使用kr/pretty打印报告,主要用于测试",
		Example:     `echo "select * from film" | soar -report-type pretty`,
	},
	{
		Name:        "remove-comment",
		Description: "去除SQL语句中的注释,支持单行多行注释的去除",
		Example:     `echo "select/*comment*/ * from film" | soar -report-type remove-comment`,
	},
}
+
+// ListReportTypes 查看所有支持的report-type
+func ListReportTypes() {
+	switch Config.ReportType {
+	case "json":
+		js, err := json.MarshalIndent(ReportTypes, "", "  ")
+		if err == nil {
+			fmt.Println(string(js))
+		}
+	default:
+		fmt.Print("# 支持的报告类型\n\n[toc]\n\n")
+		for _, r := range ReportTypes {
+			fmt.Print("## ", MarkdownEscape(r.Name),
+				"\n* **Description**:", r.Description+"\n",
+				"\n* **Example**:\n\n```bash\n", r.Example, "\n```\n")
+		}
+	}
+}
diff --git a/common/config_test.go b/common/config_test.go
new file mode 100644
index 00000000..80dcf7d7
--- /dev/null
+++ b/common/config_test.go
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"flag"
+	"testing"
+
+	"github.com/kr/pretty"
+)
+
+var update = flag.Bool("update", false, "update .golden files")
+
+func TestParseConfig(t *testing.T) {
+	err := ParseConfig("")
+	if err != nil {
+		t.Error("sqlparser.Parse Error:", err)
+	}
+}
+
+func TestReadConfigFile(t *testing.T) {
+	if Config == nil {
+		Config = new(Configration)
+	}
+	Config.readConfigFile("../soar.yaml")
+}
+
+func TestParseDSN(t *testing.T) {
+	var dsns = []string{
+		"",
+		"user:password@hostname:3307/database",
+		"user:password@hostname:3307",
+		"user:password@hostname:/database",
+		"user:password@:3307/database",
+		"user:password@",
+		"hostname:3307/database",
+		"@hostname:3307/database",
+		"@hostname",
+		"hostname",
+		"@/database",
+		"@hostname:3307",
+		"@:3307/database",
+		":3307/database",
+		"/database",
+	}
+
+	GoldenDiff(func() {
+		for _, dsn := range dsns {
+			pretty.Println(parseDSN(dsn, nil))
+		}
+	}, t.Name(), update)
+}
+
+func TestListReportTypes(t *testing.T) {
+	err := GoldenDiff(func() { ListReportTypes() }, t.Name(), update)
+	if nil != err {
+		t.Fatal(err)
+	}
+}
diff --git a/common/doc.go b/common/doc.go
new file mode 100644
index 00000000..95b0973c
--- /dev/null
+++ b/common/doc.go
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package common contain many useful functions for logging, formatting and so on.
+package common
diff --git a/common/example_test.go b/common/example_test.go
new file mode 100644
index 00000000..8baeb710
--- /dev/null
+++ b/common/example_test.go
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import "fmt"
+
+func ExampleFormatDSN() {
+	dsxExp := &dsn{
+		Addr:     "127.0.0.1:3306",
+		Schema:   "mysql",
+		User:     "root",
+		Password: "1t'sB1g3rt",
+		Charset:  "utf8mb4",
+		Disable:  false,
+	}
+
+	// 根据 &dsn 生成 dsnStr
+	fmt.Println(FormatDSN(dsxExp))
+
+	// Output: root:1t'sB1g3rt@127.0.0.1:3306/mysql?charset=utf8mb4
+}
+
+func ExampleIsColsPart() {
+	// IsColsPart() 会 按照顺序 检查两个Column队列是否是包含(或相等)关系。
+	a := []*Column{{Name: "1"}, {Name: "2"}, {Name: "3"}}
+	b := []*Column{{Name: "1"}, {Name: "2"}}
+	c := []*Column{{Name: "1"}, {Name: "3"}}
+	d := []*Column{{Name: "1"}, {Name: "2"}, {Name: "3"}, {Name: "4"}}
+
+	ab := IsColsPart(a, b)
+	ac := IsColsPart(a, c)
+	ad := IsColsPart(a, d)
+
+	fmt.Println(ab, ac, ad)
+	// Output: true false true
+}
+
+func ExampleSortedKey() {
+	ages := map[string]int{
+		"a": 1,
+		"c": 3,
+		"d": 4,
+		"b": 2,
+	}
+	for _, name := range SortedKey(ages) {
+		fmt.Print(ages[name])
+	}
+	// Output: 1234
+}
diff --git a/common/logger.go b/common/logger.go
new file mode 100644
index 00000000..2d71f856
--- /dev/null
+++ b/common/logger.go
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"fmt"
+	"regexp"
+	"runtime"
+	"strings"
+
+	"github.com/astaxie/beego/logs"
+)
+
// Log is the package-wide logger, backed by beego's log library.
var Log *logs.BeeLogger

// BaseDir is the root path of the binary; relative log output and
// blacklist paths are resolved against it.
var BaseDir string

// init pre-creates the logger so Log is usable before LoggerInit runs,
// and enables per-call file/line reporting.
func init() {
	Log = logs.NewLogger(0)
	Log.EnableFuncCallDepth(true)
}
+
+// LoggerInit Log配置初始化
+func LoggerInit() {
+	Log.SetLevel(Config.LogLevel)
+	if Config.LogOutput == "console" {
+		err := Log.SetLogger("console")
+		if err != nil {
+			fmt.Println(err.Error())
+		}
+	} else {
+		err := Log.SetLogger("file", fmt.Sprintf(`{"filename":"%s","level":7,"maxlines":0,"maxsize":0,"daily":false,"maxdays":0}`, Config.LogOutput))
+		if err != nil {
+			fmt.Println(err.Error())
+		}
+	}
+}
+
// Caller returns the fully qualified name of the function that called
// the function which invoked Caller, or "n/a" when the stack cannot be
// resolved.
// https://stackoverflow.com/questions/35212985/is-it-possible-get-information-about-caller-function-in-golang
func Caller() string {
	// One program counter is enough: we only want a single frame.
	pcs := make([]uintptr, 1)

	// Skip 3 frames (runtime.Callers, Caller, and Caller's caller) so the
	// captured PC belongs to the caller of whoever called Caller().
	if runtime.Callers(3, pcs) == 0 {
		return "n/a" // a proper error would be better
	}

	// Resolve the function behind the captured program counter.
	f := runtime.FuncForPC(pcs[0] - 1)
	if f == nil {
		return "n/a"
	}

	return f.Name()
}
+
// extractFnName strips the package path, keeping only the text after the
// final '.' of a fully qualified function name.
var extractFnName = regexp.MustCompile(`^.*\.(.*)$`)

// GetFunctionName returns the bare name (without module/package path) of
// the function that called it.
func GetFunctionName() string {
	// Skip this function and fetch the PC of its caller.
	pc, _, _, _ := runtime.Caller(1)
	// Retrieve the Function object for that caller.
	functionObject := runtime.FuncForPC(pc)
	// The regexp is now compiled once at package init instead of on every
	// call, which this hot helper previously did.
	return extractFnName.ReplaceAllString(functionObject.Name(), "$1")
}
+
// fileName returns the final slash-separated element of original; when
// the string contains no '/', it is returned unchanged.
func fileName(original string) string {
	if idx := strings.LastIndex(original, "/"); idx >= 0 {
		return original[idx+1:]
	}
	return original
}
+
+// LogIfError 简化if err != nil打Error日志代码长度
+func LogIfError(err error, format string, v ...interface{}) {
+	if err != nil {
+		_, fn, line, _ := runtime.Caller(1)
+		if format == "" {
+			format = "[%s:%d] %s"
+			Log.Error(format, fileName(fn), line, err.Error())
+		} else {
+			format = "[%s:%d] " + format + " Error: %s"
+			Log.Error(format, fileName(fn), line, v, err.Error())
+		}
+	}
+}
+
+// LogIfWarn 简化if err != nil打Warn日志代码长度
+func LogIfWarn(err error, format string, v ...interface{}) {
+	if err != nil {
+		_, fn, line, _ := runtime.Caller(1)
+		if format == "" {
+			format = "[%s:%d] %s"
+			Log.Warn(format, fileName(fn), line, err.Error())
+		} else {
+			format = "[%s:%d] " + format + " Error: %s"
+			Log.Warn(format, fileName(fn), line, v, err.Error())
+		}
+	}
+}
diff --git a/common/logger_test.go b/common/logger_test.go
new file mode 100644
index 00000000..d2e3a122
--- /dev/null
+++ b/common/logger_test.go
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"errors"
+	"testing"
+)
+
// init points BaseDir at DevPath (declared elsewhere in the package) so
// test runs resolve relative paths against the development tree.
func init() {
	BaseDir = DevPath
}
+
+func TestLogger(t *testing.T) {
+	Log.Info("info")
+	Log.Debug("debug")
+	Log.Warning("warning")
+	Log.Error("error")
+}
+
+func TestCaller(t *testing.T) {
+	caller := Caller()
+	if caller != "testing.tRunner" {
+		t.Error("get caller failer")
+	}
+}
+
+func TestGetFunctionName(t *testing.T) {
+	f := GetFunctionName()
+	if f != "TestGetFunctionName" {
+		t.Error("get functionname failer")
+	}
+}
+
+func TestIfError(t *testing.T) {
+	err := errors.New("test")
+	LogIfError(err, "")
+	LogIfError(err, "func %s", "func_test")
+}
+
+func TestIfWarn(t *testing.T) {
+	err := errors.New("test")
+	LogIfWarn(err, "")
+	LogIfWarn(err, "func %s", "func_test")
+}
diff --git a/common/markdown.go b/common/markdown.go
new file mode 100644
index 00000000..dd107f8c
--- /dev/null
+++ b/common/markdown.go
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+
+	"github.com/russross/blackfriday"
+)
+
// BuiltinCSS is the built-in HTML style: a minified stylesheet embedded
// into HTML report output.
var BuiltinCSS = `
a:link,a:visited{text-decoration:none}h3,h4{margin-top:2em}h5,h6{margin-top:20px}h3,h4,h5,h6{margin-bottom:.5em;color:#000}body,h1,h2,h3,h4,h5,h6{color:#000}ol,ul{margin:0 0 0 30px;padding:0 0 12px 6px}ol,ol ol{list-style-position:outside}table td p,table th p{margin-bottom:0}input,select{vertical-align:middle;padding:0}h5,h6,input,select{padding:0}hr,table,textarea{width:100%}body{margin:20px auto;width:800px;background-color:#fff;font:13px "Myriad Pro","Lucida Grande",Lucida,Verdana,sans-serif}h1,table th p{font-weight:700}a:link{color:#00f}a:visited{color:#00a}a:active,a:hover{color:#f60;text-decoration:underline}* html code,* html pre{font-size:101%}code,pre{font-size:11px;font-family:monaco,courier,consolas,monospace}pre{border:1px solid #c7cfd5;background:#f1f5f9;margin:20px 0;padding:8px;text-align:left}hr{color:#919699;size:1;noshade:"noshade"}h1,h2,h3,h4,h5,h6{font-family:"Myriad Pro","Lucida Grande",Lucida,Verdana,sans-serif;font-weight:700}h1{margin-top:1em;margin-bottom:25px;font-size:30px}h2{margin-top:2.5em;font-size:24px;padding-bottom:2px;border-bottom:1px solid #919699}h3{font-size:17px}h4{font-size:15px}h5{font-size:13px}h6{font-size:11px}table td,table th{font-size:12px;border-bottom:1px solid #919699;border-right:1px solid #919699}p{margin-top:0;margin-bottom:10px}ul{list-style:square}li{margin-top:7px}ol{list-style-type:decimal}ol ol{list-style-type:lower-alpha;margin:7px 0 0 30px;padding:0 0 0 10px}ul ul{margin-left:40px;padding:0 0 0 6px}li>p{display:inline}li>a+p,li>p+p{display:block}table{border-top:1px solid #919699;border-left:1px solid #919699;border-spacing:0}table th{padding:4px 8px;background:#E2E2E2}table td{padding:8px;vertical-align:top}table td p+p,table td p+p+p{margin-top:5px}form{margin:0}button{margin:3px 0 10px}input{margin:0 0 5px}select{margin:0 0 3px}textarea{margin:0 0 10px}
`
+
+// BuiltinJavascript 内置SQL美化Javascript脚本
+var BuiltinJavascript = `!function(e,E){"object"==typeof exports&&"object"==typeof module?module.exports=E():"function"==typeof define&&define.amd?define([],E):"object"==typeof exports?exports.sqlFormatter=E():e.sqlFormatter=E()}(this,function(){return function(e){function E(n){if(t[n])return t[n].exports;var r=t[n]={exports:{},id:n,loaded:!1};return e[n].call(r.exports,r,r.exports,E),r.loaded=!0,r.exports}var t={};return E.m=e,E.c=t,E.p="",E(0)}([function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var r=t(18),T=n(r),R=t(19),o=n(R),N=t(20),A=n(N),I=t(21),O=n(I);E["default"]={format:function(e,E){switch(E=E||{},E.language){case"db2":return new T["default"](E).format(e);case"n1ql":return new o["default"](E).format(e);case"pl/sql":return new A["default"](E).format(e);case"sql":case void 0:return new O["default"](E).format(e);default:throw Error("Unsupported SQL dialect: "+E.language)}}},e.exports=E["default"]},function(e,E){"use strict";E.__esModule=!0,E["default"]=function(e,E){if(!(e instanceof E))throw new TypeError("Cannot call a class as a function")}},function(e,E,t){var n=t(39),r="object"==typeof self&&self&&self.Object===Object&&self,T=n||r||Function("return this")();e.exports=T},function(e,E,t){function n(e,E){var t=T(e,E);return r(t)?t:void 0}var r=t(33),T=t(41);e.exports=n},function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var r=t(1),T=n(r),R=t(66),o=n(R),N=t(7),A=n(N),I=t(15),O=n(I),i=t(16),S=n(i),u=t(17),L=n(u),C=function(){function e(E,t){(0,T["default"])(this,e),this.cfg=E||{},this.indentation=new O["default"](this.cfg.indent),this.inlineBlock=new S["default"],this.params=new L["default"](this.cfg.params),this.tokenizer=t,this.previousReservedWord={}}return e.prototype.format=function(e){var E=this.tokenizer.tokenize(e),t=this.getFormattedQueryFromTokens(E);return t.trim()},e.prototype.getFormattedQueryFromTokens=function(e){var E=this,t="";return 
e.forEach(function(n,r){n.type!==A["default"].WHITESPACE&&(n.type===A["default"].LINE_COMMENT?t=E.formatLineComment(n,t):n.type===A["default"].BLOCK_COMMENT?t=E.formatBlockComment(n,t):n.type===A["default"].RESERVED_TOPLEVEL?(t=E.formatToplevelReservedWord(n,t),E.previousReservedWord=n):n.type===A["default"].RESERVED_NEWLINE?(t=E.formatNewlineReservedWord(n,t),E.previousReservedWord=n):n.type===A["default"].RESERVED?(t=E.formatWithSpaces(n,t),E.previousReservedWord=n):t=n.type===A["default"].OPEN_PAREN?E.formatOpeningParentheses(e,r,t):n.type===A["default"].CLOSE_PAREN?E.formatClosingParentheses(n,t):n.type===A["default"].PLACEHOLDER?E.formatPlaceholder(n,t):","===n.value?E.formatComma(n,t):":"===n.value?E.formatWithSpaceAfter(n,t):"."===n.value||";"===n.value?E.formatWithoutSpaces(n,t):E.formatWithSpaces(n,t))}),t},e.prototype.formatLineComment=function(e,E){return this.addNewline(E+e.value)},e.prototype.formatBlockComment=function(e,E){return this.addNewline(this.addNewline(E)+this.indentComment(e.value))},e.prototype.indentComment=function(e){return e.replace(/\n/g,"\n"+this.indentation.getIndent())},e.prototype.formatToplevelReservedWord=function(e,E){return this.indentation.decreaseTopLevel(),E=this.addNewline(E),this.indentation.increaseToplevel(),E+=this.equalizeWhitespace(e.value),this.addNewline(E)},e.prototype.formatNewlineReservedWord=function(e,E){return this.addNewline(E)+this.equalizeWhitespace(e.value)+" "},e.prototype.equalizeWhitespace=function(e){return e.replace(/\s+/g," ")},e.prototype.formatOpeningParentheses=function(e,E,t){var n=e[E-1];return n&&n.type!==A["default"].WHITESPACE&&n.type!==A["default"].OPEN_PAREN&&(t=(0,o["default"])(t)),t+=e[E].value,this.inlineBlock.beginIfPossible(e,E),this.inlineBlock.isActive()||(this.indentation.increaseBlockLevel(),t=this.addNewline(t)),t},e.prototype.formatClosingParentheses=function(e,E){return 
this.inlineBlock.isActive()?(this.inlineBlock.end(),this.formatWithSpaceAfter(e,E)):(this.indentation.decreaseBlockLevel(),this.formatWithSpaces(e,this.addNewline(E)))},e.prototype.formatPlaceholder=function(e,E){return E+this.params.get(e)+" "},e.prototype.formatComma=function(e,E){return E=(0,o["default"])(E)+e.value+" ",this.inlineBlock.isActive()?E:/^LIMIT$/i.test(this.previousReservedWord.value)?E:this.addNewline(E)},e.prototype.formatWithSpaceAfter=function(e,E){return(0,o["default"])(E)+e.value+" "},e.prototype.formatWithoutSpaces=function(e,E){return(0,o["default"])(E)+e.value},e.prototype.formatWithSpaces=function(e,E){return E+e.value+" "},e.prototype.addNewline=function(e){return(0,o["default"])(e)+"\n"+this.indentation.getIndent()},e}();E["default"]=C,e.exports=E["default"]},function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var r=t(1),T=n(r),R=t(58),o=n(R),N=t(53),A=n(N),I=t(7),O=n(I),i=function(){function e(E){(0,T["default"])(this,e),this.WHITESPACE_REGEX=/^(\s+)/,this.NUMBER_REGEX=/^((-\s*)?[0-9]+(\.[0-9]+)?|0x[0-9a-fA-F]+|0b[01]+)\b/,this.OPERATOR_REGEX=/^(!=|<>|==|<=|>=|!<|!>|\|\||::|->>|->|~~\*|~~|!~~\*|!~~|~\*|!~\*|!~|.)/,this.BLOCK_COMMENT_REGEX=/^(\/\*[^]*?(?:\*\/|$))/,this.LINE_COMMENT_REGEX=this.createLineCommentRegex(E.lineCommentTypes),this.RESERVED_TOPLEVEL_REGEX=this.createReservedWordRegex(E.reservedToplevelWords),this.RESERVED_NEWLINE_REGEX=this.createReservedWordRegex(E.reservedNewlineWords),this.RESERVED_PLAIN_REGEX=this.createReservedWordRegex(E.reservedWords),this.WORD_REGEX=this.createWordRegex(E.specialWordChars),this.STRING_REGEX=this.createStringRegex(E.stringTypes),this.OPEN_PAREN_REGEX=this.createParenRegex(E.openParens),this.CLOSE_PAREN_REGEX=this.createParenRegex(E.closeParens),this.INDEXED_PLACEHOLDER_REGEX=this.createPlaceholderRegex(E.indexedPlaceholderTypes,"[0-9]*"),this.IDENT_NAMED_PLACEHOLDER_REGEX=this.createPlaceholderRegex(E.namedPlaceholderTypes,"[a-zA-Z0-9._$]+"),thi
s.STRING_NAMED_PLACEHOLDER_REGEX=this.createPlaceholderRegex(E.namedPlaceholderTypes,this.createStringPattern(E.stringTypes))}return e.prototype.createLineCommentRegex=function(e){return RegExp("^((?:"+e.map(function(e){return(0,A["default"])(e)}).join("|")+").*?(?:\n|$))")},e.prototype.createReservedWordRegex=function(e){var E=e.join("|").replace(/ /g,"\\s+");return RegExp("^("+E+")\\b","i")},e.prototype.createWordRegex=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];return RegExp("^([\\w"+e.join("")+"]+)")},e.prototype.createStringRegex=function(e){return RegExp("^("+this.createStringPattern(e)+")")},e.prototype.createStringPattern=function(e){var E={"``":"((`[^`]*($|`))+)","[]":"((\\[[^\\]]*($|\\]))(\\][^\\]]*($|\\]))*)",'""':'(("[^"\\\\]*(?:\\\\.[^"\\\\]*)*("|$))+)',"''":"(('[^'\\\\]*(?:\\\\.[^'\\\\]*)*('|$))+)","N''":"((N'[^N'\\\\]*(?:\\\\.[^N'\\\\]*)*('|$))+)"};return e.map(function(e){return E[e]}).join("|")},e.prototype.createParenRegex=function(e){var E=this;return RegExp("^("+e.map(function(e){return E.escapeParen(e)}).join("|")+")","i")},e.prototype.escapeParen=function(e){return 1===e.length?(0,A["default"])(e):"\\b"+e+"\\b"},e.prototype.createPlaceholderRegex=function(e,E){if((0,o["default"])(e))return!1;var t=e.map(A["default"]).join("|");return RegExp("^((?:"+t+")(?:"+E+"))")},e.prototype.tokenize=function(e){for(var E=[],t=void 0;e.length;)t=this.getNextToken(e,t),e=e.substring(t.value.length),E.push(t);return E},e.prototype.getNextToken=function(e,E){return this.getWhitespaceToken(e)||this.getCommentToken(e)||this.getStringToken(e)||this.getOpenParenToken(e)||this.getCloseParenToken(e)||this.getPlaceholderToken(e)||this.getNumberToken(e)||this.getReservedWordToken(e,E)||this.getWordToken(e)||this.getOperatorToken(e)},e.prototype.getWhitespaceToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].WHITESPACE,regex:this.WHITESPACE_REGEX})},e.prototype.getCommentToken=function(e){return 
this.getLineCommentToken(e)||this.getBlockCommentToken(e)},e.prototype.getLineCommentToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].LINE_COMMENT,regex:this.LINE_COMMENT_REGEX})},e.prototype.getBlockCommentToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].BLOCK_COMMENT,regex:this.BLOCK_COMMENT_REGEX})},e.prototype.getStringToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].STRING,regex:this.STRING_REGEX})},e.prototype.getOpenParenToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].OPEN_PAREN,regex:this.OPEN_PAREN_REGEX})},e.prototype.getCloseParenToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].CLOSE_PAREN,regex:this.CLOSE_PAREN_REGEX})},e.prototype.getPlaceholderToken=function(e){return this.getIdentNamedPlaceholderToken(e)||this.getStringNamedPlaceholderToken(e)||this.getIndexedPlaceholderToken(e)},e.prototype.getIdentNamedPlaceholderToken=function(e){return this.getPlaceholderTokenWithKey({input:e,regex:this.IDENT_NAMED_PLACEHOLDER_REGEX,parseKey:function(e){return e.slice(1)}})},e.prototype.getStringNamedPlaceholderToken=function(e){var E=this;return this.getPlaceholderTokenWithKey({input:e,regex:this.STRING_NAMED_PLACEHOLDER_REGEX,parseKey:function(e){return E.getEscapedPlaceholderKey({key:e.slice(2,-1),quoteChar:e.slice(-1)})}})},e.prototype.getIndexedPlaceholderToken=function(e){return this.getPlaceholderTokenWithKey({input:e,regex:this.INDEXED_PLACEHOLDER_REGEX,parseKey:function(e){return e.slice(1)}})},e.prototype.getPlaceholderTokenWithKey=function(e){var E=e.input,t=e.regex,n=e.parseKey,r=this.getTokenOnFirstMatch({input:E,regex:t,type:O["default"].PLACEHOLDER});return r&&(r.key=n(r.value)),r},e.prototype.getEscapedPlaceholderKey=function(e){var E=e.key,t=e.quoteChar;return E.replace(RegExp((0,A["default"])("\\")+t,"g"),t)},e.prototype.getNumberToken=function(e){return 
this.getTokenOnFirstMatch({input:e,type:O["default"].NUMBER,regex:this.NUMBER_REGEX})},e.prototype.getOperatorToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].OPERATOR,regex:this.OPERATOR_REGEX})},e.prototype.getReservedWordToken=function(e,E){if(!E||!E.value||"."!==E.value)return this.getToplevelReservedToken(e)||this.getNewlineReservedToken(e)||this.getPlainReservedToken(e)},e.prototype.getToplevelReservedToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].RESERVED_TOPLEVEL,regex:this.RESERVED_TOPLEVEL_REGEX})},e.prototype.getNewlineReservedToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].RESERVED_NEWLINE,regex:this.RESERVED_NEWLINE_REGEX})},e.prototype.getPlainReservedToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].RESERVED,regex:this.RESERVED_PLAIN_REGEX})},e.prototype.getWordToken=function(e){return this.getTokenOnFirstMatch({input:e,type:O["default"].WORD,regex:this.WORD_REGEX})},e.prototype.getTokenOnFirstMatch=function(e){var E=e.input,t=e.type,n=e.regex,r=E.match(n);if(r)return{type:t,value:r[1]}},e}();E["default"]=i,e.exports=E["default"]},function(e,E){function t(e){var E=typeof e;return null!=e&&("object"==E||"function"==E)}e.exports=t},function(e,E){"use strict";E.__esModule=!0,E["default"]={WHITESPACE:"whitespace",WORD:"word",STRING:"string",RESERVED:"reserved",RESERVED_TOPLEVEL:"reserved-toplevel",RESERVED_NEWLINE:"reserved-newline",OPERATOR:"operator",OPEN_PAREN:"open-paren",CLOSE_PAREN:"close-paren",LINE_COMMENT:"line-comment",BLOCK_COMMENT:"block-comment",NUMBER:"number",PLACEHOLDER:"placeholder"},e.exports=E["default"]},function(e,E,t){function n(e){return null!=e&&T(e.length)&&!r(e)}var r=t(12),T=t(59);e.exports=n},function(e,E,t){function n(e){return null==e?"":r(e)}var r=t(10);e.exports=n},function(e,E,t){function n(e){if("string"==typeof e)return e;if(T(e))return N?N.call(e):"";var E=e+"";return"0"==E&&1/e==-R?"-0":E}var 
r=t(26),T=t(14),R=1/0,o=r?r.prototype:void 0,N=o?o.toString:void 0;e.exports=n},function(e,E){function t(e){if(null!=e){try{return r.call(e)}catch(E){}try{return e+""}catch(E){}}return""}var n=Function.prototype,r=n.toString;e.exports=t},function(e,E,t){function n(e){var E=r(e)?N.call(e):"";return E==T||E==R}var r=t(6),T="[object Function]",R="[object GeneratorFunction]",o=Object.prototype,N=o.toString;e.exports=n},function(e,E){function t(e){return null!=e&&"object"==typeof e}e.exports=t},function(e,E,t){function n(e){return"symbol"==typeof e||r(e)&&o.call(e)==T}var r=t(13),T="[object Symbol]",R=Object.prototype,o=R.toString;e.exports=n},function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var r=t(1),T=n(r),R=t(61),o=n(R),N=t(60),A=n(N),I="top-level",O="block-level",i=function(){function e(E){(0,T["default"])(this,e),this.indent=E||"  ",this.indentTypes=[]}return e.prototype.getIndent=function(){return(0,o["default"])(this.indent,this.indentTypes.length)},e.prototype.increaseToplevel=function(){this.indentTypes.push(I)},e.prototype.increaseBlockLevel=function(){this.indentTypes.push(O)},e.prototype.decreaseTopLevel=function(){(0,A["default"])(this.indentTypes)===I&&this.indentTypes.pop()},e.prototype.decreaseBlockLevel=function(){for(;this.indentTypes.length>0;){var e=this.indentTypes.pop();if(e!==I)break}},e}();E["default"]=i,e.exports=E["default"]},function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var r=t(1),T=n(r),R=t(7),o=n(R),N=50,A=function(){function e(){(0,T["default"])(this,e),this.level=0}return e.prototype.beginIfPossible=function(e,E){0===this.level&&this.isInlineBlock(e,E)?this.level=1:this.level>0?this.level++:this.level=0},e.prototype.end=function(){this.level--},e.prototype.isActive=function(){return this.level>0},e.prototype.isInlineBlock=function(e,E){for(var t=0,n=0,r=E;e.length>r;r++){var 
T=e[r];if(t+=T.value.length,t>N)return!1;if(T.type===o["default"].OPEN_PAREN)n++;else if(T.type===o["default"].CLOSE_PAREN&&(n--,0===n))return!0;if(this.isForbiddenToken(T))return!1}return!1},e.prototype.isForbiddenToken=function(e){var E=e.type,t=e.value;return E===o["default"].RESERVED_TOPLEVEL||E===o["default"].RESERVED_NEWLINE||E===o["default"].COMMENT||E===o["default"].BLOCK_COMMENT||";"===t},e}();E["default"]=A,e.exports=E["default"]},function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var r=t(1),T=n(r),R=function(){function e(E){(0,T["default"])(this,e),this.params=E,this.index=0}return e.prototype.get=function(e){var E=e.key,t=e.value;return this.params?E?this.params[E]:this.params[this.index++]:t},e}();E["default"]=R,e.exports=E["default"]},function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var r=t(1),T=n(r),R=t(4),o=n(R),N=t(5),A=n(N),I=["ABS","ACTIVATE","ALIAS","ALL","ALLOCATE","ALLOW","ALTER","ANY","ARE","ARRAY","AS","ASC","ASENSITIVE","ASSOCIATE","ASUTIME","ASYMMETRIC","AT","ATOMIC","ATTRIBUTES","AUDIT","AUTHORIZATION","AUX","AUXILIARY","AVG","BEFORE","BEGIN","BETWEEN","BIGINT","BINARY","BLOB","BOOLEAN","BOTH","BUFFERPOOL","BY","CACHE","CALL","CALLED","CAPTURE","CARDINALITY","CASCADED","CASE","CAST","CCSID","CEIL","CEILING","CHAR","CHARACTER","CHARACTER_LENGTH","CHAR_LENGTH","CHECK","CLOB","CLONE","CLOSE","CLUSTER","COALESCE","COLLATE","COLLECT","COLLECTION","COLLID","COLUMN","COMMENT","COMMIT","CONCAT","CONDITION","CONNECT","CONNECTION","CONSTRAINT","CONTAINS","CONTINUE","CONVERT","CORR","CORRESPONDING","COUNT","COUNT_BIG","COVAR_POP","COVAR_SAMP","CREATE","CROSS","CUBE","CUME_DIST","CURRENT","CURRENT_DATE","CURRENT_DEFAULT_TRANSFORM_GROUP","CURRENT_LC_CTYPE","CURRENT_PATH","CURRENT_ROLE","CURRENT_SCHEMA","CURRENT_SERVER","CURRENT_TIME","CURRENT_TIMESTAMP","CURRENT_TIMEZONE","CURRENT_TRANSFORM_GROUP_FOR_TYPE","CURRENT_USER","CURSOR","CYCLE","DATA","DATABASE","D
ATAPARTITIONNAME","DATAPARTITIONNUM","DATE","DAY","DAYS","DB2GENERAL","DB2GENRL","DB2SQL","DBINFO","DBPARTITIONNAME","DBPARTITIONNUM","DEALLOCATE","DEC","DECIMAL","DECLARE","DEFAULT","DEFAULTS","DEFINITION","DELETE","DENSERANK","DENSE_RANK","DEREF","DESCRIBE","DESCRIPTOR","DETERMINISTIC","DIAGNOSTICS","DISABLE","DISALLOW","DISCONNECT","DISTINCT","DO","DOCUMENT","DOUBLE","DROP","DSSIZE","DYNAMIC","EACH","EDITPROC","ELEMENT","ELSE","ELSEIF","ENABLE","ENCODING","ENCRYPTION","END","END-EXEC","ENDING","ERASE","ESCAPE","EVERY","EXCEPTION","EXCLUDING","EXCLUSIVE","EXEC","EXECUTE","EXISTS","EXIT","EXP","EXPLAIN","EXTENDED","EXTERNAL","EXTRACT","FALSE","FENCED","FETCH","FIELDPROC","FILE","FILTER","FINAL","FIRST","FLOAT","FLOOR","FOR","FOREIGN","FREE","FULL","FUNCTION","FUSION","GENERAL","GENERATED","GET","GLOBAL","GOTO","GRANT","GRAPHIC","GROUP","GROUPING","HANDLER","HASH","HASHED_VALUE","HINT","HOLD","HOUR","HOURS","IDENTITY","IF","IMMEDIATE","IN","INCLUDING","INCLUSIVE","INCREMENT","INDEX","INDICATOR","INDICATORS","INF","INFINITY","INHERIT","INNER","INOUT","INSENSITIVE","INSERT","INT","INTEGER","INTEGRITY","INTERSECTION","INTERVAL","INTO","IS","ISOBID","ISOLATION","ITERATE","JAR","JAVA","KEEP","KEY","LABEL","LANGUAGE","LARGE","LATERAL","LC_CTYPE","LEADING","LEAVE","LEFT","LIKE","LINKTYPE","LN","LOCAL","LOCALDATE","LOCALE","LOCALTIME","LOCALTIMESTAMP","LOCATOR","LOCATORS","LOCK","LOCKMAX","LOCKSIZE","LONG","LOOP","LOWER","MAINTAINED","MATCH","MATERIALIZED","MAX","MAXVALUE","MEMBER","MERGE","METHOD","MICROSECOND","MICROSECONDS","MIN","MINUTE","MINUTES","MINVALUE","MOD","MODE","MODIFIES","MODULE","MONTH","MONTHS","MULTISET","NAN","NATIONAL","NATURAL","NCHAR","NCLOB","NEW","NEW_TABLE","NEXTVAL","NO","NOCACHE","NOCYCLE","NODENAME","NODENUMBER","NOMAXVALUE","NOMINVALUE","NONE","NOORDER","NORMALIZE","NORMALIZED","NOT","NULL","NULLIF","NULLS","NUMERIC","NUMPARTS","OBID","OCTET_LENGTH","OF","OFFSET","OLD","OLD_TABLE","ON","ONLY","OPEN","OPTIMIZATION","OPTIMIZE","OPTION","ORDER","OU
T","OUTER","OVER","OVERLAPS","OVERLAY","OVERRIDING","PACKAGE","PADDED","PAGESIZE","PARAMETER","PART","PARTITION","PARTITIONED","PARTITIONING","PARTITIONS","PASSWORD","PATH","PERCENTILE_CONT","PERCENTILE_DISC","PERCENT_RANK","PIECESIZE","PLAN","POSITION","POWER","PRECISION","PREPARE","PREVVAL","PRIMARY","PRIQTY","PRIVILEGES","PROCEDURE","PROGRAM","PSID","PUBLIC","QUERY","QUERYNO","RANGE","RANK","READ","READS","REAL","RECOVERY","RECURSIVE","REF","REFERENCES","REFERENCING","REFRESH","REGR_AVGX","REGR_AVGY","REGR_COUNT","REGR_INTERCEPT","REGR_R2","REGR_SLOPE","REGR_SXX","REGR_SXY","REGR_SYY","RELEASE","RENAME","REPEAT","RESET","RESIGNAL","RESTART","RESTRICT","RESULT","RESULT_SET_LOCATOR","RETURN","RETURNS","REVOKE","RIGHT","ROLE","ROLLBACK","ROLLUP","ROUND_CEILING","ROUND_DOWN","ROUND_FLOOR","ROUND_HALF_DOWN","ROUND_HALF_EVEN","ROUND_HALF_UP","ROUND_UP","ROUTINE","ROW","ROWNUMBER","ROWS","ROWSET","ROW_NUMBER","RRN","RUN","SAVEPOINT","SCHEMA","SCOPE","SCRATCHPAD","SCROLL","SEARCH","SECOND","SECONDS","SECQTY","SECURITY","SENSITIVE","SEQUENCE","SESSION","SESSION_USER","SIGNAL","SIMILAR","SIMPLE","SMALLINT","SNAN","SOME","SOURCE","SPECIFIC","SPECIFICTYPE","SQL","SQLEXCEPTION","SQLID","SQLSTATE","SQLWARNING","SQRT","STACKED","STANDARD","START","STARTING","STATEMENT","STATIC","STATMENT","STAY","STDDEV_POP","STDDEV_SAMP","STOGROUP","STORES","STYLE","SUBMULTISET","SUBSTRING","SUM","SUMMARY","SYMMETRIC","SYNONYM","SYSFUN","SYSIBM","SYSPROC","SYSTEM","SYSTEM_USER","TABLE","TABLESAMPLE","TABLESPACE","THEN","TIME","TIMESTAMP","TIMEZONE_HOUR","TIMEZONE_MINUTE","TO","TRAILING","TRANSACTION","TRANSLATE","TRANSLATION","TREAT","TRIGGER","TRIM","TRUE","TRUNCATE","TYPE","UESCAPE","UNDO","UNIQUE","UNKNOWN","UNNEST","UNTIL","UPPER","USAGE","USER","USING","VALIDPROC","VALUE","VARCHAR","VARIABLE","VARIANT","VARYING","VAR_POP","VAR_SAMP","VCAT","VERSION","VIEW","VOLATILE","VOLUMES","WHEN","WHENEVER","WHILE","WIDTH_BUCKET","WINDOW","WITH","WITHIN","WITHOUT","WLM","WRITE","XMLELEMENT","XMLEXISTS
","XMLNAMESPACES","YEAR","YEARS"],O=["ADD","AFTER","ALTER COLUMN","ALTER TABLE","DELETE FROM","EXCEPT","FETCH FIRST","FROM","GROUP BY","GO","HAVING","INSERT INTO","INTERSECT","LIMIT","ORDER BY","SELECT","SET CURRENT SCHEMA","SET SCHEMA","SET","UNION ALL","UPDATE","VALUES","WHERE"],i=["AND","CROSS JOIN","INNER JOIN","JOIN","LEFT JOIN","LEFT OUTER JOIN","OR","OUTER JOIN","RIGHT JOIN","RIGHT OUTER JOIN"],S=void 0,u=function(){function e(E){(0,T["default"])(this,e),this.cfg=E}return e.prototype.format=function(e){return S||(S=new A["default"]({reservedWords:I,reservedToplevelWords:O,reservedNewlineWords:i,stringTypes:['""',"''","``","[]"],openParens:["("],closeParens:[")"],indexedPlaceholderTypes:["?"],namedPlaceholderTypes:[":"],lineCommentTypes:["--"],specialWordChars:["#","@"]})),new o["default"](this.cfg,S).format(e)},e}();E["default"]=u,e.exports=E["default"]},function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var r=t(1),T=n(r),R=t(4),o=n(R),N=t(5),A=n(N),I=["ALL","ALTER","ANALYZE","AND","ANY","ARRAY","AS","ASC","BEGIN","BETWEEN","BINARY","BOOLEAN","BREAK","BUCKET","BUILD","BY","CALL","CASE","CAST","CLUSTER","COLLATE","COLLECTION","COMMIT","CONNECT","CONTINUE","CORRELATE","COVER","CREATE","DATABASE","DATASET","DATASTORE","DECLARE","DECREMENT","DELETE","DERIVED","DESC","DESCRIBE","DISTINCT","DO","DROP","EACH","ELEMENT","ELSE","END","EVERY","EXCEPT","EXCLUDE","EXECUTE","EXISTS","EXPLAIN","FALSE","FETCH","FIRST","FLATTEN","FOR","FORCE","FROM","FUNCTION","GRANT","GROUP","GSI","HAVING","IF","IGNORE","ILIKE","IN","INCLUDE","INCREMENT","INDEX","INFER","INLINE","INNER","INSERT","INTERSECT","INTO","IS","JOIN","KEY","KEYS","KEYSPACE","KNOWN","LAST","LEFT","LET","LETTING","LIKE","LIMIT","LSM","MAP","MAPPING","MATCHED","MATERIALIZED","MERGE","MINUS","MISSING","NAMESPACE","NEST","NOT","NULL","NUMBER","OBJECT","OFFSET","ON","OPTION","OR","ORDER","OUTER","OVER","PARSE","PARTITION","PASSWORD","PATH","POOL","PREPARE","PRIMARY","PRIVATE
","PRIVILEGE","PROCEDURE","PUBLIC","RAW","REALM","REDUCE","RENAME","RETURN","RETURNING","REVOKE","RIGHT","ROLE","ROLLBACK","SATISFIES","SCHEMA","SELECT","SELF","SEMI","SET","SHOW","SOME","START","STATISTICS","STRING","SYSTEM","THEN","TO","TRANSACTION","TRIGGER","TRUE","TRUNCATE","UNDER","UNION","UNIQUE","UNKNOWN","UNNEST","UNSET","UPDATE","UPSERT","USE","USER","USING","VALIDATE","VALUE","VALUED","VALUES","VIA","VIEW","WHEN","WHERE","WHILE","WITH","WITHIN","WORK","XOR"],O=["DELETE FROM","EXCEPT ALL","EXCEPT","EXPLAIN DELETE FROM","EXPLAIN UPDATE","EXPLAIN UPSERT","FROM","GROUP BY","HAVING","INFER","INSERT INTO","INTERSECT ALL","INTERSECT","LET","LIMIT","MERGE","NEST","ORDER BY","PREPARE","SELECT","SET CURRENT SCHEMA","SET SCHEMA","SET","UNION ALL","UNION","UNNEST","UPDATE","UPSERT","USE KEYS","VALUES","WHERE"],i=["AND","INNER JOIN","JOIN","LEFT JOIN","LEFT OUTER JOIN","OR","OUTER JOIN","RIGHT JOIN","RIGHT OUTER JOIN","XOR"],S=void 0,u=function(){function e(E){(0,T["default"])(this,e),this.cfg=E}return e.prototype.format=function(e){return S||(S=new A["default"]({reservedWords:I,reservedToplevelWords:O,reservedNewlineWords:i,stringTypes:['""',"''","``"],openParens:["(","[","{"],closeParens:[")","]","}"],namedPlaceholderTypes:["$"],lineCommentTypes:["#","--"]})),new o["default"](this.cfg,S).format(e)},e}();E["default"]=u,e.exports=E["default"]},function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var 
r=t(1),T=n(r),R=t(4),o=n(R),N=t(5),A=n(N),I=["A","ACCESSIBLE","AGENT","AGGREGATE","ALL","ALTER","ANY","ARRAY","AS","ASC","AT","ATTRIBUTE","AUTHID","AVG","BETWEEN","BFILE_BASE","BINARY_INTEGER","BINARY","BLOB_BASE","BLOCK","BODY","BOOLEAN","BOTH","BOUND","BULK","BY","BYTE","C","CALL","CALLING","CASCADE","CASE","CHAR_BASE","CHAR","CHARACTER","CHARSET","CHARSETFORM","CHARSETID","CHECK","CLOB_BASE","CLONE","CLOSE","CLUSTER","CLUSTERS","COALESCE","COLAUTH","COLLECT","COLUMNS","COMMENT","COMMIT","COMMITTED","COMPILED","COMPRESS","CONNECT","CONSTANT","CONSTRUCTOR","CONTEXT","CONTINUE","CONVERT","COUNT","CRASH","CREATE","CREDENTIAL","CURRENT","CURRVAL","CURSOR","CUSTOMDATUM","DANGLING","DATA","DATE_BASE","DATE","DAY","DECIMAL","DEFAULT","DEFINE","DELETE","DESC","DETERMINISTIC","DIRECTORY","DISTINCT","DO","DOUBLE","DROP","DURATION","ELEMENT","ELSIF","EMPTY","ESCAPE","EXCEPTIONS","EXCLUSIVE","EXECUTE","EXISTS","EXIT","EXTENDS","EXTERNAL","EXTRACT","FALSE","FETCH","FINAL","FIRST","FIXED","FLOAT","FOR","FORALL","FORCE","FROM","FUNCTION","GENERAL","GOTO","GRANT","GROUP","HASH","HEAP","HIDDEN","HOUR","IDENTIFIED","IF","IMMEDIATE","IN","INCLUDING","INDEX","INDEXES","INDICATOR","INDICES","INFINITE","INSTANTIABLE","INT","INTEGER","INTERFACE","INTERVAL","INTO","INVALIDATE","IS","ISOLATION","JAVA","LANGUAGE","LARGE","LEADING","LENGTH","LEVEL","LIBRARY","LIKE","LIKE2","LIKE4","LIKEC","LIMITED","LOCAL","LOCK","LONG","MAP","MAX","MAXLEN","MEMBER","MERGE","MIN","MINUS","MINUTE","MLSLABEL","MOD","MODE","MONTH","MULTISET","NAME","NAN","NATIONAL","NATIVE","NATURAL","NATURALN","NCHAR","NEW","NEXTVAL","NOCOMPRESS","NOCOPY","NOT","NOWAIT","NULL","NULLIF","NUMBER_BASE","NUMBER","OBJECT","OCICOLL","OCIDATE","OCIDATETIME","OCIDURATION","OCIINTERVAL","OCILOBLOCATOR","OCINUMBER","OCIRAW","OCIREF","OCIREFCURSOR","OCIROWID","OCISTRING","OCITYPE","OF","OLD","ON","ONLY","OPAQUE","OPEN","OPERATOR","OPTION","ORACLE","ORADATA","ORDER","ORGANIZATION","ORLANY","ORLVARY","OTHERS","OUT","OVERLAPS","OVERRIDING"
,"PACKAGE","PARALLEL_ENABLE","PARAMETER","PARAMETERS","PARENT","PARTITION","PASCAL","PCTFREE","PIPE","PIPELINED","PLS_INTEGER","PLUGGABLE","POSITIVE","POSITIVEN","PRAGMA","PRECISION","PRIOR","PRIVATE","PROCEDURE","PUBLIC","RAISE","RANGE","RAW","READ","REAL","RECORD","REF","REFERENCE","RELEASE","RELIES_ON","REM","REMAINDER","RENAME","RESOURCE","RESULT_CACHE","RESULT","RETURN","RETURNING","REVERSE","REVOKE","ROLLBACK","ROW","ROWID","ROWNUM","ROWTYPE","SAMPLE","SAVE","SAVEPOINT","SB1","SB2","SB4","SECOND","SEGMENT","SELF","SEPARATE","SEQUENCE","SERIALIZABLE","SHARE","SHORT","SIZE_T","SIZE","SMALLINT","SOME","SPACE","SPARSE","SQL","SQLCODE","SQLDATA","SQLERRM","SQLNAME","SQLSTATE","STANDARD","START","STATIC","STDDEV","STORED","STRING","STRUCT","STYLE","SUBMULTISET","SUBPARTITION","SUBSTITUTABLE","SUBTYPE","SUCCESSFUL","SUM","SYNONYM","SYSDATE","TABAUTH","TABLE","TDO","THE","THEN","TIME","TIMESTAMP","TIMEZONE_ABBR","TIMEZONE_HOUR","TIMEZONE_MINUTE","TIMEZONE_REGION","TO","TRAILING","TRANSACTION","TRANSACTIONAL","TRIGGER","TRUE","TRUSTED","TYPE","UB1","UB2","UB4","UID","UNDER","UNIQUE","UNPLUG","UNSIGNED","UNTRUSTED","USE","USER","USING","VALIDATE","VALIST","VALUE","VARCHAR","VARCHAR2","VARIABLE","VARIANCE","VARRAY","VARYING","VIEW","VIEWS","VOID","WHENEVER","WHILE","WITH","WORK","WRAPPED","WRITE","YEAR","ZONE"],O=["ADD","ALTER COLUMN","ALTER TABLE","BEGIN","CONNECT BY","DECLARE","DELETE FROM","DELETE","END","EXCEPT","EXCEPTION","FETCH FIRST","FROM","GROUP BY","HAVING","INSERT INTO","INSERT","INTERSECT","LIMIT","LOOP","MODIFY","ORDER BY","SELECT","SET CURRENT SCHEMA","SET SCHEMA","SET","START WITH","UNION ALL","UNION","UPDATE","VALUES","WHERE"],i=["AND","CROSS APPLY","CROSS JOIN","ELSE","END","INNER JOIN","JOIN","LEFT JOIN","LEFT OUTER JOIN","OR","OUTER APPLY","OUTER JOIN","RIGHT JOIN","RIGHT OUTER JOIN","WHEN","XOR"],S=void 0,u=function(){function e(E){(0,T["default"])(this,e),this.cfg=E}return e.prototype.format=function(e){return S||(S=new 
A["default"]({reservedWords:I,reservedToplevelWords:O,reservedNewlineWords:i,stringTypes:['""',"N''","''","``"],openParens:["(","CASE"],closeParens:[")","END"],indexedPlaceholderTypes:["?"],namedPlaceholderTypes:[":"],lineCommentTypes:["--"],specialWordChars:["_","$","#",".","@"]})),new o["default"](this.cfg,S).format(e)},e}();E["default"]=u,e.exports=E["default"]},function(e,E,t){"use strict";function n(e){return e&&e.__esModule?e:{"default":e}}E.__esModule=!0;var r=t(1),T=n(r),R=t(4),o=n(R),N=t(5),A=n(N),I=["ACCESSIBLE","ACTION","AGAINST","AGGREGATE","ALGORITHM","ALL","ALTER","ANALYSE","ANALYZE","AS","ASC","AUTOCOMMIT","AUTO_INCREMENT","BACKUP","BEGIN","BETWEEN","BINLOG","BOTH","CASCADE","CASE","CHANGE","CHANGED","CHARACTER SET","CHARSET","CHECK","CHECKSUM","COLLATE","COLLATION","COLUMN","COLUMNS","COMMENT","COMMIT","COMMITTED","COMPRESSED","CONCURRENT","CONSTRAINT","CONTAINS","CONVERT","CREATE","CROSS","CURRENT_TIMESTAMP","DATABASE","DATABASES","DAY","DAY_HOUR","DAY_MINUTE","DAY_SECOND","DEFAULT","DEFINER","DELAYED","DELETE","DESC","DESCRIBE","DETERMINISTIC","DISTINCT","DISTINCTROW","DIV","DO","DROP","DUMPFILE","DUPLICATE","DYNAMIC","ELSE","ENCLOSED","END","ENGINE","ENGINES","ENGINE_TYPE","ESCAPE","ESCAPED","EVENTS","EXEC","EXECUTE","EXISTS","EXPLAIN","EXTENDED","FAST","FETCH","FIELDS","FILE","FIRST","FIXED","FLUSH","FOR","FORCE","FOREIGN","FULL","FULLTEXT","FUNCTION","GLOBAL","GRANT","GRANTS","GROUP_CONCAT","HEAP","HIGH_PRIORITY","HOSTS","HOUR","HOUR_MINUTE","HOUR_SECOND","IDENTIFIED","IF","IFNULL","IGNORE","IN","INDEX","INDEXES","INFILE","INSERT","INSERT_ID","INSERT_METHOD","INTERVAL","INTO","INVOKER","IS","ISOLATION","KEY","KEYS","KILL","LAST_INSERT_ID","LEADING","LEVEL","LIKE","LINEAR","LINES","LOAD","LOCAL","LOCK","LOCKS","LOGS","LOW_PRIORITY","MARIA","MASTER","MASTER_CONNECT_RETRY","MASTER_HOST","MASTER_LOG_FILE","MATCH","MAX_CONNECTIONS_PER_HOUR","MAX_QUERIES_PER_HOUR","MAX_ROWS","MAX_UPDATES_PER_HOUR","MAX_USER_CONNECTIONS","MEDIUM","MERGE","MINUTE","MINU
TE_SECOND","MIN_ROWS","MODE","MODIFY","MONTH","MRG_MYISAM","MYISAM","NAMES","NATURAL","NOT","NOW()","NULL","OFFSET","ON DELETE","ON UPDATE","ON","ONLY","OPEN","OPTIMIZE","OPTION","OPTIONALLY","OUTFILE","PACK_KEYS","PAGE","PARTIAL","PARTITION","PARTITIONS","PASSWORD","PRIMARY","PRIVILEGES","PROCEDURE","PROCESS","PROCESSLIST","PURGE","QUICK","RAID0","RAID_CHUNKS","RAID_CHUNKSIZE","RAID_TYPE","RANGE","READ","READ_ONLY","READ_WRITE","REFERENCES","REGEXP","RELOAD","RENAME","REPAIR","REPEATABLE","REPLACE","REPLICATION","RESET","RESTORE","RESTRICT","RETURN","RETURNS","REVOKE","RLIKE","ROLLBACK","ROW","ROWS","ROW_FORMAT","SECOND","SECURITY","SEPARATOR","SERIALIZABLE","SESSION","SHARE","SHOW","SHUTDOWN","SLAVE","SONAME","SOUNDS","SQL","SQL_AUTO_IS_NULL","SQL_BIG_RESULT","SQL_BIG_SELECTS","SQL_BIG_TABLES","SQL_BUFFER_RESULT","SQL_CACHE","SQL_CALC_FOUND_ROWS","SQL_LOG_BIN","SQL_LOG_OFF","SQL_LOG_UPDATE","SQL_LOW_PRIORITY_UPDATES","SQL_MAX_JOIN_SIZE","SQL_NO_CACHE","SQL_QUOTE_SHOW_CREATE","SQL_SAFE_UPDATES","SQL_SELECT_LIMIT","SQL_SLAVE_SKIP_COUNTER","SQL_SMALL_RESULT","SQL_WARNINGS","START","STARTING","STATUS","STOP","STORAGE","STRAIGHT_JOIN","STRING","STRIPED","SUPER","TABLE","TABLES","TEMPORARY","TERMINATED","THEN","TO","TRAILING","TRANSACTIONAL","TRUE","TRUNCATE","TYPE","TYPES","UNCOMMITTED","UNIQUE","UNLOCK","UNSIGNED","USAGE","USE","USING","VARIABLES","VIEW","WHEN","WITH","WORK","WRITE","YEAR_MONTH"],O=["ADD","AFTER","ALTER COLUMN","ALTER TABLE","DELETE FROM","EXCEPT","FETCH FIRST","FROM","GROUP BY","GO","HAVING","INSERT INTO","INSERT","INTERSECT","LIMIT","MODIFY","ORDER BY","SELECT","SET CURRENT SCHEMA","SET SCHEMA","SET","UNION ALL","UNION","UPDATE","VALUES","WHERE"],i=["AND","CROSS APPLY","CROSS JOIN","ELSE","INNER JOIN","JOIN","LEFT JOIN","LEFT OUTER JOIN","OR","OUTER APPLY","OUTER JOIN","RIGHT JOIN","RIGHT OUTER JOIN","WHEN","XOR"],S=void 0,u=function(){function e(E){(0,T["default"])(this,e),this.cfg=E}return e.prototype.format=function(e){return S||(S=new 
A["default"]({reservedWords:I,reservedToplevelWords:O,reservedNewlineWords:i,stringTypes:['""',"N''","''","``","[]"],openParens:["(","CASE"],closeParens:[")","END"],indexedPlaceholderTypes:["?"],namedPlaceholderTypes:["@",":"],lineCommentTypes:["#","--"]})),new o["default"](this.cfg,S).format(e)},e}();E["default"]=u,e.exports=E["default"]},function(e,E,t){var n=t(3),r=t(2),T=n(r,"DataView");e.exports=T},function(e,E,t){var n=t(3),r=t(2),T=n(r,"Map");e.exports=T},function(e,E,t){var n=t(3),r=t(2),T=n(r,"Promise");e.exports=T},function(e,E,t){var n=t(3),r=t(2),T=n(r,"Set");e.exports=T},function(e,E,t){var n=t(2),r=n.Symbol;e.exports=r},function(e,E,t){var n=t(3),r=t(2),T=n(r,"WeakMap");e.exports=T},function(e,E){function t(e){return e.split("")}e.exports=t},function(e,E){function t(e,E,t,n){for(var r=e.length,T=t+(n?1:-1);n?T--:++T<r;)if(E(e[T],T,e))return T;
return-1}e.exports=t},function(e,E){function t(e){return r.call(e)}var n=Object.prototype,r=n.toString;e.exports=t},function(e,E,t){function n(e,E,t){return E===E?R(e,E,t):r(e,T,t)}var r=t(29),T=t(32),R=t(49);e.exports=n},function(e,E){function t(e){return e!==e}e.exports=t},function(e,E,t){function n(e){if(!R(e)||T(e))return!1;var E=r(e)?u:A;return E.test(o(e))}var r=t(12),T=t(45),R=t(6),o=t(11),N=/[\\^$.*+?()[\]{}|]/g,A=/^\[object .+?Constructor\]$/,I=Function.prototype,O=Object.prototype,i=I.toString,S=O.hasOwnProperty,u=RegExp("^"+i.call(S).replace(N,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");e.exports=n},function(e,E){function t(e,E){var t="";if(!e||1>E||E>n)return t;do E%2&&(t+=e),E=r(E/2),E&&(e+=e);while(E);return t}var n=9007199254740991,r=Math.floor;e.exports=t},function(e,E){function t(e,E,t){var n=-1,r=e.length;0>E&&(E=-E>r?0:r+E),t=t>r?r:t,0>t&&(t+=r),r=E>t?0:t-E>>>0,E>>>=0;for(var T=Array(r);++n<r;)T[n]=e[n+E];return T}e.exports=t},function(e,E,t){function n(e,E,t){var n=e.length;return t=void 0===t?n:t,E||n>t?r(e,E,t):e}var r=t(35);e.exports=n},function(e,E,t){function n(e,E){for(var t=e.length;t--&&r(E,e[t],0)>-1;);return t}var r=t(31);e.exports=n},function(e,E,t){var n=t(2),r=n["__core-js_shared__"];e.exports=r},function(e,E){(function(E){var t="object"==typeof E&&E&&E.Object===Object&&E;e.exports=t}).call(E,function(){return this}())},function(e,E,t){var n=t(22),r=t(23),T=t(24),R=t(25),o=t(27),N=t(30),A=t(11),I="[object Map]",O="[object Object]",i="[object Promise]",S="[object Set]",u="[object WeakMap]",L="[object DataView]",C=Object.prototype,s=C.toString,a=A(n),f=A(r),c=A(T),p=A(R),l=A(o),D=N;(n&&D(new n(new ArrayBuffer(1)))!=L||r&&D(new r)!=I||T&&D(T.resolve())!=i||R&&D(new R)!=S||o&&D(new o)!=u)&&(D=function(e){var E=s.call(e),t=E==O?e.constructor:void 0,n=t?A(t):void 0;if(n)switch(n){case a:return L;case f:return I;case c:return i;case p:return S;case l:return u}return 
E}),e.exports=D},function(e,E){function t(e,E){return null==e?void 0:e[E]}e.exports=t},function(e,E){function t(e){return N.test(e)}var n="\\ud800-\\udfff",r="\\u0300-\\u036f\\ufe20-\\ufe23",T="\\u20d0-\\u20f0",R="\\ufe0e\\ufe0f",o="\\u200d",N=RegExp("["+o+n+r+T+R+"]");e.exports=t},function(e,E){function t(e,E){return E=null==E?n:E,!!E&&("number"==typeof e||r.test(e))&&e>-1&&e%1==0&&E>e}var n=9007199254740991,r=/^(?:0|[1-9]\d*)$/;e.exports=t},function(e,E,t){function n(e,E,t){if(!o(t))return!1;var n=typeof E;return!!("number"==n?T(t)&&R(E,t.length):"string"==n&&E in t)&&r(t[E],e)}var r=t(52),T=t(8),R=t(43),o=t(6);e.exports=n},function(e,E,t){function n(e){return!!T&&T in e}var r=t(38),T=function(){var e=/[^.]+$/.exec(r&&r.keys&&r.keys.IE_PROTO||"");return e?"Symbol(src)_1."+e:""}();e.exports=n},function(e,E){function t(e){var E=e&&e.constructor,t="function"==typeof E&&E.prototype||n;return e===t}var n=Object.prototype;e.exports=t},function(e,E,t){var n=t(48),r=n(Object.keys,Object);e.exports=r},function(e,E){function t(e,E){return function(t){return e(E(t))}}e.exports=t},function(e,E){function t(e,E,t){for(var n=t-1,r=e.length;++n<r;)if(e[n]===E)return n;return-1}e.exports=t},function(e,E,t){function n(e){return T(e)?R(e):r(e)}var r=t(28),T=t(42),R=t(51);e.exports=n},function(e,E){function t(e){return e.match(c)||[]}var n="\\ud800-\\udfff",r="\\u0300-\\u036f\\ufe20-\\ufe23",T="\\u20d0-\\u20f0",R="\\ufe0e\\ufe0f",o="["+n+"]",N="["+r+T+"]",A="\\ud83c[\\udffb-\\udfff]",I="(?:"+N+"|"+A+")",O="[^"+n+"]",i="(?:\\ud83c[\\udde6-\\uddff]){2}",S="[\\ud800-\\udbff][\\udc00-\\udfff]",u="\\u200d",L=I+"?",C="["+R+"]?",s="(?:"+u+"(?:"+[O,i,S].join("|")+")"+C+L+")*",a=C+L+s,f="(?:"+[O+N+"?",N,i,S,o].join("|")+")",c=RegExp(A+"(?="+A+")|"+f+a,"g");e.exports=t},function(e,E){function t(e,E){return e===E||e!==e&&E!==E}e.exports=t},function(e,E,t){function n(e){return e=r(e),e&&R.test(e)?e.replace(T,"\\$&"):e}var 
r=t(9),T=/[\\^$.*+?()[\]{}|]/g,R=RegExp(T.source);e.exports=n},function(e,E,t){function n(e){return r(e)&&o.call(e,"callee")&&(!A.call(e,"callee")||N.call(e)==T)}var r=t(56),T="[object Arguments]",R=Object.prototype,o=R.hasOwnProperty,N=R.toString,A=R.propertyIsEnumerable;e.exports=n},function(e,E){var t=Array.isArray;e.exports=t},function(e,E,t){function n(e){return T(e)&&r(e)}var r=t(8),T=t(13);e.exports=n},function(e,E,t){(function(e){var n=t(2),r=t(62),T="object"==typeof E&&E&&!E.nodeType&&E,R=T&&"object"==typeof e&&e&&!e.nodeType&&e,o=R&&R.exports===T,N=o?n.Buffer:void 0,A=N?N.isBuffer:void 0,I=A||r;e.exports=I}).call(E,t(67)(e))},function(e,E,t){function n(e){if(o(e)&&(R(e)||"string"==typeof e||"function"==typeof e.splice||N(e)||T(e)))return!e.length;var E=r(e);if(E==O||E==i)return!e.size;if(A(e))return!I(e).length;for(var t in e)if(u.call(e,t))return!1;return!0}var r=t(40),T=t(54),R=t(55),o=t(8),N=t(57),A=t(46),I=t(47),O="[object Map]",i="[object Set]",S=Object.prototype,u=S.hasOwnProperty;e.exports=n},function(e,E){function t(e){return"number"==typeof e&&e>-1&&e%1==0&&n>=e}var n=9007199254740991;e.exports=t},function(e,E){function t(e){var E=e?e.length:0;return E?e[E-1]:void 0}e.exports=t},function(e,E,t){function n(e,E,t){return E=(t?T(e,E,t):void 0===E)?1:R(E),r(o(e),E)}var r=t(34),T=t(44),R=t(64),o=t(9);e.exports=n},function(e,E){function t(){return!1}e.exports=t},function(e,E,t){function n(e){if(!e)return 0===e?e:0;if(e=r(e),e===T||e===-T){var E=0>e?-1:1;return E*R}return e===e?e:0}var r=t(65),T=1/0,R=1.7976931348623157e308;e.exports=n},function(e,E,t){function n(e){var E=r(e),t=E%1;return E===E?t?E-t:E:0}var r=t(63);e.exports=n},function(e,E,t){function n(e){if("number"==typeof e)return e;if(T(e))return R;if(r(e)){var E="function"==typeof e.valueOf?e.valueOf():e;e=r(E)?E+"":E}if("string"!=typeof e)return 0===e?e:+e;e=e.replace(o,"");var t=A.test(e);return t||I.test(e)?O(e.slice(2),t?2:8):N.test(e)?R:+e}var 
r=t(6),T=t(14),R=NaN,o=/^\s+|\s+$/g,N=/^[-+]0x[0-9a-f]+$/i,A=/^0b[01]+$/i,I=/^0o[0-7]+$/i,O=parseInt;e.exports=n},function(e,E,t){function n(e,E,t){if(e=N(e),e&&(t||void 0===E))return e.replace(A,"");if(!e||!(E=r(E)))return e;var n=o(e),I=R(n,o(E))+1;return T(n,0,I).join("")}var r=t(10),T=t(36),R=t(37),o=t(50),N=t(9),A=/\s+$/;e.exports=n},function(e,E){e.exports=function(e){return e.webpackPolyfill||(e.deprecate=function(){},e.paths=[],e.children=[],e.webpackPolyfill=1),e}}])});

		function escape2Html(str) {
    	    var arrEntities = {'lt': '<', 'gt': '>', 'nbsp': '', 'amp': '&', 'quot': '"'};
    	    return str.replace(/&(lt|gt|nbsp|amp|quot);/ig, function (all, t) {
    	        return arrEntities[t];
    	    });
    	}
	
    	function load() {
    	    let codeList = document.getElementsByClassName('language-sql');
	
    	    for (let i = 0 ;i<codeList.length;i++) {
    	        codeList[i].innerHTML = window.sqlFormatter.format(escape2Html(codeList[i].innerHTML))
    	    }
    	};

+`
+
// MarkdownEscape escapes the Markdown control characters '_', '`' and '*'
// in str with a backslash so the text is rendered literally instead of as
// emphasis / code markup.
func MarkdownEscape(str string) string {
	escaper := strings.NewReplacer(
		"_", "\\_",
		"`", "\\`",
		"*", "\\*",
	)
	return escaper.Replace(str)
}
+
+//
+func loadExternalResource(resource string) string {
+	var content string
+	var body []byte
+	if strings.HasPrefix(resource, "http") {
+		resp, err := http.Get(resource)
+		if err == nil {
+			body, err = ioutil.ReadAll(resp.Body)
+			if err == nil {
+				content = string(body)
+			} else {
+				Log.Debug("ioutil.ReadAll %s Error: %v", resource, err)
+			}
+		} else {
+			Log.Debug("http.Get %s Error: %v", resource, err)
+		}
+		defer resp.Body.Close()
+	} else {
+		fd, err := os.Open(resource)
+		defer func() {
+			err = fd.Close()
+			if err != nil {
+				Log.Error("loadExternalResource(%s) fd.Close failed: %s", resource, err.Error())
+			}
+		}()
+		if err == nil {
+			body, err = ioutil.ReadAll(fd)
+			if err != nil {
+				Log.Debug("ioutil.ReadAll %s Error: %v", resource, err)
+			} else {
+				content = string(body)
+			}
+		} else {
+			Log.Debug("os.Open %s Error: %v", resource, err)
+		}
+	}
+	return content
+}
+
// MarkdownHTMLHeader builds the HTML header (title plus CSS/javascript
// assets) prepended to a report when markdown is rendered as HTML.
func MarkdownHTMLHeader() string {
	// load css: fall back to the built-in stylesheet unless an external
	// one is configured via Config.ReportCSS (URL or file path).
	var css string
	if Config.ReportCSS == "" {
		css = BuiltinCSS
	} else {
		css = loadExternalResource(Config.ReportCSS)
	}

	// load javascript: the built-in script is stored base64-encoded; the
	// decode error is deliberately ignored (built-in data is trusted).
	var js string
	if Config.ReportJavascript == "" {
		decode, _ := base64.StdEncoding.DecodeString(BuiltinJavascript)
		js = string(decode)
	} else {
		js = loadExternalResource(Config.ReportJavascript)
	}

	// NOTE(review): only Config.ReportTitle is interpolated below; css and
	// js are computed but not visibly referenced — the HTML markup lines of
	// this template appear to have been stripped in this rendering. Confirm
	// against the repository that the template embeds css and js.
	header := `

` + Config.ReportTitle + `




`
	return header
}
+
+// Markdown2HTML markdown转HTML输出
+func Markdown2HTML(buf string) string {
+	// extensions default: 94
+	// extensions |= blackfriday.EXTENSION_TABLES
+	// extensions |= blackfriday.EXTENSION_FENCED_CODE
+	// extensions |= blackfriday.EXTENSION_AUTOLINK
+	// extensions |= blackfriday.EXTENSION_STRIKETHROUGH
+	// extensions |= blackfriday.EXTENSION_SPACE_HEADERS
+	extensions := Config.MarkdownExtensions
+
+	// htmlFlags
+	htmlFlags := Config.MarkdownHTMLFlags
+
+	renderer := blackfriday.HtmlRenderer(htmlFlags, "", "")
+	buf = string(blackfriday.Markdown([]byte(buf), renderer, extensions))
+	return buf
+}
+
+// Score SQL评审打分
+func Score(score int) string {
+	// 不需要打分的功能
+	switch Config.ReportType {
+	case "duplicate-key-checker", "explain-digest":
+		return ""
+	}
+	s1, s2 := "★ ", "☆ "
+	if score > 100 {
+		score = 100
+		Log.Debug("Score Error: score larger than 100, %d", score)
+	}
+	if score < 0 {
+		score = 0
+		Log.Debug("Score Warn: score less than 0, %d", score)
+	}
+	s1Count := score / 20
+	s2Count := 5 - s1Count
+	str := fmt.Sprintf("%s %d分", strings.TrimSpace(strings.Repeat(s1, s1Count)+strings.Repeat(s2, s2Count)), score)
+	return str
+}
diff --git a/common/markdown_test.go b/common/markdown_test.go
new file mode 100644
index 00000000..b5fd65bc
--- /dev/null
+++ b/common/markdown_test.go
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+func TestMarkdownEscape(_ *testing.T) {
+	var strs = []string{
+		"a`bc",
+		"abc",
+		"a'bc",
+		"a\"bc",
+	}
+	for _, str := range strs {
+		fmt.Println(MarkdownEscape(str))
+	}
+}
+
+func TestMarkdown2Html(t *testing.T) {
+	md := filepath.Join("testdata", t.Name()+".md")
+	buf, err := ioutil.ReadFile(md)
+	if err != nil {
+		t.Error(err.Error())
+	}
+	err = GoldenDiff(func() {
+		fmt.Println(Markdown2HTML(string(buf)))
+	}, t.Name(), update)
+	if nil != err {
+		t.Fatal(err)
+	}
+
+	// golden文件拷贝成html文件,这步是给人看的
+	gd, err := os.OpenFile("testdata/"+t.Name()+".golden", os.O_RDONLY, 0666)
+	if nil != err {
+		t.Fatal(err)
+	}
+	html, err := os.OpenFile("testdata/"+t.Name()+".html", os.O_CREATE|os.O_RDWR, 0666)
+	if nil != err {
+		t.Fatal(err)
+	}
+	io.Copy(html, gd)
+}
+
+func TestScore(t *testing.T) {
+	score := Score(50)
+	if score != "★ ★ ☆ ☆ ☆ 50分" {
+		t.Error(score)
+	}
+}
+
+func TestLoadExternalResource(t *testing.T) {
+	buf := loadExternalResource("../doc/themes/github.css")
+	if buf == "" {
+		t.Error("loadExternalResource local error")
+	}
+	buf = loadExternalResource("http://www.baidu.com")
+	if buf == "" {
+		t.Error("loadExternalResource http error")
+	}
+}
+
+func TestMarkdownHTMLHeader(t *testing.T) {
+	err := GoldenDiff(func() {
+		MarkdownHTMLHeader()
+	}, t.Name(), update)
+	if err != nil {
+		t.Error(err)
+	}
+}
diff --git a/common/meta.go b/common/meta.go
new file mode 100644
index 00000000..bfa18e9f
--- /dev/null
+++ b/common/meta.go
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
import (
	"sort"
	"strconv"
	"strings"
)
+
// Meta is schema metadata keyed by database name, organized db -> table -> column.
type Meta map[string]*DB

// DB describes one database and the tables it contains.
type DB struct {
	Name  string
	Table map[string]*Table // tables keyed by table name
}
+
+// NewDB 用于初始化*DB
+func NewDB(db string) *DB {
+	return &DB{
+		Name:  db,
+		Table: make(map[string]*Table),
+	}
+}
+
// Table holds a table's name, the aliases it appears under in SQL, and its columns.
type Table struct {
	TableName    string
	TableAliases []string
	Column       map[string]*Column // columns keyed by column name
}
+
+// NewTable 初始化*Table
+func NewTable(tb string) *Table {
+	return &Table{
+		TableName:    tb,
+		TableAliases: make([]string, 0),
+		Column:       make(map[string]*Column),
+	}
+}
+
// KeyType tags the category of a key.
type KeyType int

// Column describes one table column.
type Column struct {
	Name        string   `json:"col_name"`    // column name
	Alias       []string `json:"alias"`       // aliases
	Table       string   `json:"tb_name"`     // table name
	DB          string   `json:"db_name"`     // database name
	DataType    string   `json:"data_type"`   // data type
	Character   string   `json:"character"`   // character set
	Collation   string   `json:"collation"`   // collation
	Cardinality float64  `json:"cardinality"` // cardinality / selectivity
	Null        string   `json:"null"`        // nullable: YES/NO
	Key         string   `json:"key"`         // key type
	Default     string   `json:"default"`     // default value
	Extra       string   `json:"extra"`       // extra attributes
	Comment     string   `json:"comment"`     // column comment
	Privileges  string   `json:"privileges"`  // privileges
}

// TableColumns maps db -> table -> ordered column list (element order matters).
type TableColumns map[string]map[string][]*Column
+
+// Equal 判断两个column是否相等
+func (col *Column) Equal(column *Column) bool {
+	return col.Name == column.Name &&
+		col.Table == column.Table &&
+		col.DB == column.DB
+}
+
+// IsColsPart 判断两个column队列是否是包含关系(包括相等)
+func IsColsPart(a, b []*Column) bool {
+	times := len(a)
+	if len(b) < times {
+		times = len(b)
+	}
+
+	for i := 0; i < times; i++ {
+		if a[i].DB != b[i].DB || a[i].Table != b[i].Table || a[i].Name != b[i].Name {
+			return false
+		}
+	}
+
+	return true
+}
+
+// JoinColumnsName 将所有的列合并
+func JoinColumnsName(cols []*Column, sep string) string {
+	name := ""
+	for _, col := range cols {
+		name += col.Name + sep
+	}
+	return strings.Trim(name, sep)
+}
+
+// Tables 获取Meta中指定db的所有表名
+// Input:数据库名
+// Output:表名组成的list
+func (b Meta) Tables(db string) []string {
+	var result []string
+	if b[db] != nil {
+		for tb := range b[db].Table {
+			result = append(result, tb)
+		}
+
+	}
+	return result
+}
+
+// SetDefault 设置默认值
+func (b Meta) SetDefault(defaultDB string) Meta {
+	if defaultDB == "" {
+		return b
+	}
+
+	for db := range b {
+		if db == "" {
+			// 当获取到的join中的DB为空的时候,说明SQL未显示的指定DB,即使用的是rEnv默认DB,需要将表合并到原DB中
+			if _, ok := b[defaultDB]; ok {
+				for tbName, table := range b[""].Table {
+					if _, ok := b[defaultDB].Table[tbName]; ok {
+						b[defaultDB].Table[tbName].TableAliases = append(
+							b[defaultDB].Table[tbName].TableAliases,
+							table.TableAliases...,
+						)
+						continue
+					}
+					b[defaultDB].Table[tbName] = table
+				}
+				delete(b, "")
+			}
+
+			// 如果没有出现DB指定不一致的情况,直接进行合并
+			b[defaultDB] = b[""]
+			delete(b, "")
+		}
+	}
+
+	return b
+}
+
+// MergeColumn 将使用到的列按db->table组织去重
+// 注意:Column中的db, table信息可能为空,需要提前通过env环境补齐再调用该函数。
+// @input: 目标列list, 源列list(可以将多个源合并到一个目标列list)
+// @output: 合并后的列list
+func MergeColumn(dst []*Column, src ...*Column) []*Column {
+	var tmp []*Column
+	for _, newCol := range src {
+		if len(dst) == 0 {
+			tmp = append(tmp, newCol)
+			continue
+		}
+
+		has := false
+		for _, oldCol := range dst {
+			if (newCol.Name == oldCol.Name) && (newCol.Table == oldCol.Table) && (newCol.DB == oldCol.DB) {
+				has = true
+			}
+		}
+
+		if !has {
+			tmp = append(tmp, newCol)
+		}
+
+	}
+	return append(dst, tmp...)
+}
+
+// ColumnSort 通过散粒度对 colList 进行排序, 散粒度排序由大到小
+func ColumnSort(colList []*Column) []*Column {
+	// 使用冒泡排序保持相等情况下左右两边顺序不变
+	if len(colList) < 2 {
+		return colList
+	}
+
+	for i := 0; i < len(colList)-1; i++ {
+		for j := i + 1; j < len(colList); j++ {
+			if colList[i].Cardinality < colList[j].Cardinality {
+				colList[i], colList[j] = colList[j], colList[i]
+			}
+		}
+	}
+
+	return colList
+}
+
// GetDataTypeBase strips the "(length)" suffix from a column type
// definition, e.g. "varchar(20)" -> "varchar". Definitions without a
// length (or starting with "(") are returned unchanged.
func GetDataTypeBase(dataType string) string {
	pos := strings.IndexByte(dataType, '(')
	if pos <= 0 {
		return dataType
	}
	return dataType[:pos]
}
+
+// GetDataTypeLength 获取dataType中的数据类型长度
+func GetDataTypeLength(dataType string) []int {
+	var length []int
+	if si := strings.Index(dataType, "("); si > 0 {
+		dataLength := dataType[si+1:]
+		if ei := strings.Index(dataLength, ")"); ei > 0 {
+			dataLength = dataLength[:ei]
+			for _, l := range strings.Split(dataLength, ",") {
+				v, err := strconv.Atoi(l)
+				if err != nil {
+					Log.Debug("GetDataTypeLength() Error: %v", err)
+					return []int{-1}
+				}
+				length = append(length, v)
+			}
+		}
+	}
+
+	if len(length) == 0 {
+		length = []int{-1}
+	}
+
+	return length
+}
+
+// GetDataBytes 计算数据类型字节数
+// https://dev.mysql.com/doc/refman/8.0/en/storage-requirements.html
+// return -1 表示该列无法计算数据大小
+func (col *Column) GetDataBytes(dbVersion int) int {
+	if col.DataType == "" {
+		Log.Warning("Can't get %s.%s data type", col.Table, col.Name)
+		return -1
+	}
+	switch strings.ToLower(GetDataTypeBase(col.DataType)) {
+	case "tinyint", "smallint", "mediumint",
+		"int", "integer", "bigint",
+		"double", "real", "float", "decimal",
+		"numeric", "bit":
+		// numeric
+		return numericStorageReq(col.DataType)
+
+	case "year", "date", "time", "datetime", "timestamp":
+		// date & time
+		return timeStorageReq(col.DataType, dbVersion)
+
+	case "char", "binary", "varchar", "varbinary", "enum", "set":
+		// string
+		return StringStorageReq(col.DataType, col.Character)
+	case "tinyblob", "tinytext", "blob", "text", "mediumblob", "mediumtext",
+		"longblob", "longtext":
+		// strings length depend on it's values
+		// 这些字段为不定长字段,添加索引时必须指定前缀,索引前缀与字符集相关
+		return Config.MaxIdxBytesPerColumn + 1
+	default:
+		Log.Warning("Type %s not support:", col.DataType)
+		return -1
+	}
+}
+
+// Numeric Type Storage Requirements
+// return bytes count
+func numericStorageReq(dataType string) int {
+	typeLength := GetDataTypeLength(dataType)
+	baseType := strings.ToLower(GetDataTypeBase(dataType))
+
+	switch baseType {
+	case "tinyint":
+		return 1
+	case "smallint":
+		return 2
+	case "mediumint":
+		return 3
+	case "int", "integer":
+		return 4
+	case "bigint", "double", "real":
+		return 8
+	case "float":
+		if typeLength[0] == -1 || typeLength[0] >= 0 && typeLength[0] <= 24 {
+			// 4 bytes if 0 <= p <= 24
+			return 4
+		}
+		// 8 bytes if no p || 25 <= p <= 53
+		return 8
+	case "decimal", "numeric":
+		// Values for DECIMAL (and NUMERIC) columns are represented using a binary format
+		// that packs nine decimal (base 10) digits into four bytes. Storage for the integer
+		// and fractional parts of each value are determined separately. Each multiple of nine
+		// digits requires four bytes, and the “leftover” digits require some fraction of four bytes.
+
+		if typeLength[0] == -1 {
+			return 4
+		}
+
+		leftover := func(leftover int) int {
+			if leftover > 0 && leftover <= 2 {
+				return 1
+			} else if leftover > 2 && leftover <= 4 {
+				return 2
+			} else if leftover > 4 && leftover <= 6 {
+				return 3
+			} else if leftover > 6 && leftover <= 8 {
+				return 4
+			} else {
+				return 4
+			}
+		}
+
+		integer := typeLength[0]/9*4 + leftover(typeLength[0]%9)
+		fractional := typeLength[1]/9*4 + leftover(typeLength[1]%9)
+
+		return integer + fractional
+
+	case "bit":
+		// approximately (M+7)/8 bytes
+		if typeLength[0] == -1 {
+			return 1
+		}
+		return (typeLength[0] + 7) / 8
+
+	default:
+		Log.Error("No such numeric type: %s", baseType)
+		return 8
+	}
+}
+
// timeStorageReq returns the storage size in bytes of a date/time column
// type. Storage changed in MySQL 5.6.4; version encodes the server version
// as e.g. 564 for 5.6.4.
//
//	Type      | before 5.6.4 | 5.6.4 and later
//	----------|--------------|-------------------------------------
//	YEAR      | 1 byte       | 1 byte
//	DATE      | 3 bytes      | 3 bytes
//	TIME      | 3 bytes      | 3 bytes + fractional-seconds storage
//	DATETIME  | 8 bytes      | 5 bytes + fractional-seconds storage
//	TIMESTAMP | 4 bytes      | 4 bytes + fractional-seconds storage
//
// Fractional seconds precision -> extra bytes: 0 -> 0, 1-2 -> 1,
// 3-4 -> 2, 5-6 -> 3.
// https://dev.mysql.com/doc/refman/8.0/en/storage-requirements.html
func timeStorageReq(dataType string, version int) int {
	typeLength := GetDataTypeLength(dataType)

	// extra bytes for the fractional seconds precision; -1 (no precision
	// declared) and 0 both add nothing
	extr := func(length int) int {
		if length > 0 && length <= 2 {
			return 1
		} else if length > 2 && length <= 4 {
			return 2
		} else if length > 4 && length <= 6 || length > 6 {
			return 3
		}
		return 0
	}

	switch strings.ToLower(GetDataTypeBase(dataType)) {
	case "year":
		return 1
	case "date":
		return 3
	case "time":
		if version < 564 {
			return 3
		}
		// 3 bytes + fractional seconds storage
		return 3 + extr(typeLength[0])
	case "datetime":
		if version < 564 {
			return 8
		}
		// 5 bytes + fractional seconds storage
		return 5 + extr(typeLength[0])
	case "timestamp":
		if version < 564 {
			return 4
		}
		// 4 bytes + fractional seconds storage
		return 4 + extr(typeLength[0])
	default:
		return 8
	}
}
+
// Values taken from `SHOW CHARACTER SET` (the Maxlen column).

// CharSets maps a MySQL character set name to its maximum bytes per character.
var CharSets = map[string]int{
	"armscii8": 1,
	"ascii":    1,
	"big5":     2,
	"binary":   1,
	"cp1250":   1,
	"cp1251":   1,
	"cp1256":   1,
	"cp1257":   1,
	"cp850":    1,
	"cp852":    1,
	"cp866":    1,
	"cp932":    2,
	"dec8":     1,
	"eucjpms":  3,
	"euckr":    2,
	"gb18030":  4,
	"gb2312":   2,
	"gbk":      2,
	"geostd8":  1,
	"greek":    1,
	"hebrew":   1,
	"hp8":      1,
	"keybcs2":  1,
	"koi8r":    1,
	"koi8u":    1,
	"latin1":   1,
	"latin2":   1,
	"latin5":   1,
	"latin7":   1,
	"macce":    1,
	"macroman": 1,
	"sjis":     2,
	"swe7":     1,
	"tis620":   1,
	"ucs2":     2,
	"ujis":     3,
	"utf16":    4,
	"utf16le":  4,
	"utf32":    4,
	"utf8":     3,
	"utf8mb4":  4,
}
+
+// StringStorageReq String Type Storage Requirements return bytes count
+func StringStorageReq(dataType string, charset string) int {
+	// get bytes per character, default 1
+	bysPerChar := 1
+	if _, ok := CharSets[strings.ToLower(charset)]; ok {
+		bysPerChar = CharSets[strings.ToLower(charset)]
+	}
+
+	// get length
+	typeLength := GetDataTypeLength(dataType)
+	if typeLength[0] == -1 {
+		return 0
+	}
+
+	// get type
+	baseType := strings.ToLower(GetDataTypeBase(dataType))
+
+	switch baseType {
+	case "char":
+		// Otherwise, M × w bytes, <= M <= 255,
+		// where w is the number of bytes required for the maximum-length character in the character set.
+		if typeLength[0] > 255 {
+			typeLength[0] = 255
+		}
+		return typeLength[0] * bysPerChar
+	case "binary":
+		// M bytes, 0 <= M <= 255
+		if typeLength[0] > 255 {
+			typeLength[0] = 255
+		}
+		return typeLength[0]
+	case "varchar", "varbinary":
+		if typeLength[0] < 255 {
+			return typeLength[0]*bysPerChar + 1
+		}
+		return typeLength[0]*bysPerChar + 2
+
+	case "enum":
+		// 1 or 2 bytes, depending on the number of enumeration values (65,535 values maximum)
+		return 2
+	case "set":
+		// 1, 2, 3, 4, or 8 bytes, depending on the number of set members (64 members maximum)
+		return 8
+	default:
+		return 0
+	}
+}
diff --git a/common/meta_test.go b/common/meta_test.go
new file mode 100644
index 00000000..e1114ed4
--- /dev/null
+++ b/common/meta_test.go
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2018 Xiaomi, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package common
+
+import (
+	"testing"
+)
+
+func TestGetDataTypeLength(t *testing.T) {
+	typeList := map[string][]int{
+		"varchar(20)":  {20},
+		"int(2)":       {2},
+		"int(2000000)": {2000000},
+		"DECIMAL(1,2)": {1, 2},
+		"int":          {-1},
+	}
+
+	for typ, want := range typeList {
+		got := GetDataTypeLength(typ)
+		for i := 0; i < len(want); i++ {
+			if want[i] != got[i] {
+				t.Errorf("Not match, want %v, got %v", want, got)
+			}
+		}
+	}
+
+}
+
+func TestGetDataTypeBase(t *testing.T) {
+	typeList := map[string]string{
+		"varchar(20)":  "varchar",
+		"int(2)":       "int",
+		"int(2000000)": "int",
+	}
+
+	for typ := range typeList {
+		if got := GetDataTypeBase(typ); got != typeList[typ] {
+			t.Errorf("Not match, want %s, got %s", typeList[typ], got)
+		}
+	}
+
+}
+
// TestGetDataBytes checks GetDataBytes for two server versions: 564 (5.6.4,
// temporal types gained fractional-seconds storage) and 550 (pre-5.6.4
// fixed-size temporal types). Numeric and string sizes are version-independent.
// NOTE(review): "col013" is used for two different entries in each table and
// col014 duplicates the BIT(32) case — harmless (map keys are distinct
// pointers) but the names make failure messages ambiguous; consider renaming.
func TestGetDataBytes(t *testing.T) {
	cols564 := map[*Column]int{
		// numeric type
		{Name: "col000", DataType: "tinyint", Character: "utf8"}:        1,
		{Name: "col001", DataType: "SMALLINT", Character: "utf8"}:       2,
		{Name: "col002", DataType: "MEDIUMINT", Character: "utf8"}:      3,
		{Name: "col003", DataType: "int(32)", Character: "utf8"}:        4,
		{Name: "col004", DataType: "integer(32)", Character: "utf8"}:    4,
		{Name: "col005", DataType: "bigint(10)", Character: "utf8"}:     8,
		{Name: "col006", DataType: "float(12)", Character: "utf8"}:      4,
		{Name: "col007", DataType: "float(50)", Character: "utf8"}:      8,
		{Name: "col008", DataType: "float(100)", Character: "utf8"}:     8,
		{Name: "col009", DataType: "float", Character: "utf8"}:          4,
		{Name: "col010", DataType: "double", Character: "utf8"}:         8,
		{Name: "col011", DataType: "real", Character: "utf8"}:           8,
		{Name: "col012", DataType: "BIT(32)", Character: "utf8"}:        4,
		{Name: "col013", DataType: "numeric(32,32)", Character: "utf8"}: 30,
		{Name: "col013", DataType: "decimal(2,32)", Character: "utf8"}:  16,
		{Name: "col014", DataType: "BIT(32)", Character: "utf8"}:        4,

		// date & time (5.6.4+: base size + fractional-seconds storage)
		{Name: "col015", DataType: "year(32)", Character: "utf8mb4"}:      1,
		{Name: "col016", DataType: "date", Character: "utf8mb4"}:          3,
		{Name: "col017", DataType: "time", Character: "utf8mb4"}:          3,
		{Name: "col018", DataType: "time(0)", Character: "utf8mb4"}:       3,
		{Name: "col019", DataType: "time(2)", Character: "utf8mb4"}:       4,
		{Name: "col020", DataType: "time(4)", Character: "utf8mb4"}:       5,
		{Name: "col021", DataType: "time(6)", Character: "utf8mb4"}:       6,
		{Name: "col022", DataType: "datetime", Character: "utf8mb4"}:      5,
		{Name: "col023", DataType: "timestamp(32)", Character: "utf8mb4"}: 7,

		// string (bytes-per-char from charset, plus varchar length prefix)
		{Name: "col024", DataType: "varchar(255)", Character: "utf8"}:    767,
		{Name: "col025", DataType: "varchar(191)", Character: "utf8mb4"}: 765,
	}

	for col, bytes := range cols564 {
		if got := col.GetDataBytes(564); got != bytes {
			t.Errorf("Version 564, %s Not match, want %d, got %d", col.Name, bytes, got)
		}
	}

	cols550 := map[*Column]int{
		// numeric type (identical to 5.6.4: storage did not change)
		{Name: "col000", DataType: "tinyint", Character: "utf8"}:        1,
		{Name: "col001", DataType: "SMALLINT", Character: "utf8"}:       2,
		{Name: "col002", DataType: "MEDIUMINT", Character: "utf8"}:      3,
		{Name: "col003", DataType: "int(32)", Character: "utf8"}:        4,
		{Name: "col004", DataType: "integer(32)", Character: "utf8"}:    4,
		{Name: "col005", DataType: "bigint(10)", Character: "utf8"}:     8,
		{Name: "col006", DataType: "float(12)", Character: "utf8"}:      4,
		{Name: "col007", DataType: "float(50)", Character: "utf8"}:      8,
		{Name: "col008", DataType: "float(100)", Character: "utf8"}:     8,
		{Name: "col009", DataType: "float", Character: "utf8"}:          4,
		{Name: "col010", DataType: "double", Character: "utf8"}:         8,
		{Name: "col011", DataType: "real", Character: "utf8"}:           8,
		{Name: "col012", DataType: "BIT(32)", Character: "utf8"}:        4,
		{Name: "col013", DataType: "numeric(32,32)", Character: "utf8"}: 30,
		{Name: "col013", DataType: "decimal(2,32)", Character: "utf8"}:  16,
		{Name: "col014", DataType: "BIT(32)", Character: "utf8"}:        4,

		// date & time (pre-5.6.4: fixed sizes, precision ignored)
		{Name: "col015", DataType: "year(32)", Character: "utf8mb4"}:      1,
		{Name: "col016", DataType: "date", Character: "utf8mb4"}:          3,
		{Name: "col017", DataType: "time", Character: "utf8mb4"}:          3,
		{Name: "col018", DataType: "time(0)", Character: "utf8mb4"}:       3,
		{Name: "col019", DataType: "time(2)", Character: "utf8mb4"}:       3,
		{Name: "col020", DataType: "time(4)", Character: "utf8mb4"}:       3,
		{Name: "col021", DataType: "time(6)", Character: "utf8mb4"}:       3,
		{Name: "col022", DataType: "datetime", Character: "utf8mb4"}:      8,
		{Name: "col023", DataType: "timestamp(32)", Character: "utf8mb4"}: 4,

		// string (version-independent)
		{Name: "col024", DataType: "varchar(255)", Character: "utf8"}:    767,
		{Name: "col025", DataType: "varchar(191)", Character: "utf8mb4"}: 765,
	}

	for col, bytes := range cols550 {
		if got := col.GetDataBytes(550); got != bytes {
			t.Errorf("Version: 550, %s Not match, want %d, got %d", col.Name, bytes, got)
		}
	}
}
diff --git a/common/testdata/TestListReportTypes.golden b/common/testdata/TestListReportTypes.golden
new file mode 100644
index 00000000..86c81bb6
--- /dev/null
+++ b/common/testdata/TestListReportTypes.golden
@@ -0,0 +1,133 @@
+# 支持的报告类型
+
+[toc]
+
+## lint
+* **Description**:参考sqlint格式,以插件形式集成到代码编辑器,显示输出更加友好
+
+* **Example**:
+
+```bash
+soar -report-type lint -query test.sql
+```
+## markdown
+* **Description**:该格式为默认输出格式,以markdown格式展现,可以用网页浏览器插件直接打开,也可以用markdown编辑器打开
+
+* **Example**:
+
+```bash
+echo "select * from film" | soar
+```
+## rewrite
+* **Description**:SQL重写功能,配合-rewrite-rules参数一起使用,可以通过-list-rewrite-rules查看所有支持的SQL重写规则
+
+* **Example**:
+
+```bash
+echo "select * from film" | soar -rewrite-rules star2columns,delimiter -report-type rewrite
+```
+## ast
+* **Description**:输出SQL的抽象语法树,主要用于测试
+
+* **Example**:
+
+```bash
+echo "select * from film" | soar -report-type ast
+```
+## tiast
+* **Description**:输出SQL的TiDB抽象语法树,主要用于测试
+
+* **Example**:
+
+```bash
+echo "select * from film" | soar -report-type tiast
+```
+## fingerprint
+* **Description**:输出SQL的指纹
+
+* **Example**:
+
+```bash
+echo "select * from film where language_id=1" | soar -report-type fingerprint
+```
+## md2html
+* **Description**:markdown格式转html格式小工具
+
+* **Example**:
+
+```bash
+soar -list-heuristic-rules | soar -report-type md2html > heuristic_rules.html
+```
+## explain-digest
+* **Description**:输入为EXPLAIN的表格,JSON或Vertical格式,对其进行分析,给出分析结果
+
+* **Example**:
+
+```bash
+soar -report-type explain-digest << EOF
++----+-------------+-------+------+---------------+------+---------+------+------+-------+
+| id | select_type | table | type | possible_keys | key  | key_len | ref  | rows | Extra |
++----+-------------+-------+------+---------------+------+---------+------+------+-------+
+|  1 | SIMPLE      | film  | ALL  | NULL          | NULL | NULL    | NULL | 1131 |       |
++----+-------------+-------+------+---------------+------+---------+------+------+-------+
+EOF
+```
+## duplicate-key-checker
+* **Description**:对OnlineDsn中指定的DB进行索引重复检查
+
+* **Example**:
+
+```bash
+soar -report-type duplicate-key-checker -online-dsn user:passwd@127.0.0.1:3306/db
+```
+## html
+* **Description**:以HTML格式输出报表
+
+* **Example**:
+
+```bash
+echo "select * from film" | soar -report-type html
+```
+## json
+* **Description**:输出JSON格式报表,方便应用程序处理
+
+* **Example**:
+
+```bash
+echo "select * from film" | soar -report-type json
+```
+## tokenize
+* **Description**:对SQL进行切词,主要用于测试
+
+* **Example**:
+
+```bash
+echo "select * from film" | soar -report-type tokenize
+```
+## compress
+* **Description**:SQL压缩小工具,使用内置SQL压缩逻辑,测试中的功能
+
+* **Example**:
+
+```bash
+echo "select
+*
+from
+  film" | soar -report-type compress
+```
+## pretty
+* **Description**:使用kr/pretty打印报告,主要用于测试
+
+* **Example**:
+
+```bash
+echo "select * from film" | soar -report-type pretty
+```
+## remove-comment
+* **Description**:去除SQL语句中的注释,支持单行多行注释的去除
+
+* **Example**:
+
+```bash
+echo "select/*comment*/ * from film" | soar -report-type remove-comment
+```
diff --git a/common/testdata/TestMarkdown2Html.golden b/common/testdata/TestMarkdown2Html.golden
new file mode 100644
index 00000000..8e3b056b
--- /dev/null
+++ b/common/testdata/TestMarkdown2Html.golden
@@ -0,0 +1,374 @@
+

Markdown For Typora

+ +

Overview

+ +

Markdown is created by Daring Fireball, the original guideline is here. Its syntax, however, varies between different parsers or editors. Typora is using GitHub Flavored Markdown.

+ +

Please note that HTML fragments in markdown source will be recognized but not parsed or rendered. Also, there may be small reformatting on the original markdown source code after saving.

+ +

Outline

+ +

[TOC]

+ +

Block Elements

+ +

Paragraph and line breaks

+ +

A paragraph is simply one or more consecutive lines of text. In markdown source code, paragraphs are separated by more than one blank lines. In Typora, you only need to press Return to create a new paragraph.

+ +

Press Shift + Return to create a single line break. However, most markdown parser will ignore single line break, to make other markdown parsers recognize your line break, you can leave two whitespace at the end of the line, or insert <br/>.

+ +

Headers

+ +

Headers use 1-6 hash characters at the start of the line, corresponding to header levels 1-6. For example:

+ +
# This is an H1
+
+## This is an H2
+
+###### This is an H6
+
+ +

In typora, input ‘#’s followed by title content, and press Return key will create a header.

+ +

Blockquotes

+ +

Markdown uses email-style > characters for block quoting. They are presented as:

+ +
> This is a blockquote with two paragraphs. This is first paragraph.
+>
+> This is second pragraph.Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus.
+
+
+
+> This is another blockquote with one paragraph. There is three empty line to seperate two blockquote.
+
+ +

In typora, just input ‘>’ followed by quote contents a block quote is generated. Typora will insert proper ‘>’ or line break for you. Block quote inside anther block quote is allowed by adding additional levels of ‘>’.

+ +

Lists

+ +

Input * list item 1 will create an un-ordered list, the * symbol can be replace with + or -.

+ +

Input 1. list item 1 will create an ordered list, their markdown source code is like:

+ +
## un-ordered list
+*   Red
+*   Green
+*   Blue
+
+## ordered list
+1.  Red
+2. 	Green
+3.	Blue
+
+ +

Task List

+ +

Task lists are lists with items marked as either [ ] or x. For example:

+ +
- [ ] a task list item
+- [ ] list syntax required
+- [ ] normal **formatting**, @mentions, #1234 refs
+- [ ] incomplete
+- [x] completed
+
+ +

You can change the complete/incomplete state by click the checkbox before the item.

+ +

(Fenced) Code Blocks

+ +

Typora only support fences in Github Flavored Markdown. Original code blocks in markdown is not supported.

+ +

Using fences is easy: Input ``` and press return. Add an optional language identifier after ``` and we'll run it through syntax highlighting:

+ +
Here's an example:
+
+​```
+function test() {
+  console.log("notice the blank line before this function?");
+}
+​```
+
+syntax highlighting:
+​```ruby
+require 'redcarpet'
+markdown = Redcarpet.new("Hello World!")
+puts markdown.to_html
+​```
+
+ +

Math Blocks

+ +

You can render LaTeX mathematical expressions using MathJax.

+ +

Input $$, then press 'Return' key will trigger an input field which accept Tex/LaTex source. Following is an example: +$$ +\mathbf{V}1 \times \mathbf{V}2 = \begin{vmatrix} +\mathbf{i} & \mathbf{j} & \mathbf{k} \ +\frac{\partial X}{\partial u} & \frac{\partial Y}{\partial u} & 0 \ +\frac{\partial X}{\partial v} & \frac{\partial Y}{\partial v} & 0 \ +\end{vmatrix} +$$

+ +

In markdown source file, math block is LaTeX expression wrapped by ‘$$’ mark:

+ +
$$
+\mathbf{V}_1 \times \mathbf{V}_2 =  \begin{vmatrix} 
+\mathbf{i} & \mathbf{j} & \mathbf{k} \\
+\frac{\partial X}{\partial u} &  \frac{\partial Y}{\partial u} & 0 \\
+\frac{\partial X}{\partial v} &  \frac{\partial Y}{\partial v} & 0 \\
+\end{vmatrix}
+$$
+
+ +

Tables

+ +

Input | First Header | Second Header | and press return key will create a table with two column.

+ +

After table is created, focus on that table will pop up a toolbar for table, where you can resize, align, or delete table. You can also use context menu to copy and add/delete column/row.

+ +

Following descriptions can be skipped, as markdown source code for tables are generated by typora automatically.

+ +

In markdown source code, they look like:

+ +
| First Header  | Second Header |
+| ------------- | ------------- |
+| Content Cell  | Content Cell  |
+| Content Cell  | Content Cell  |
+
+ +

You can also include inline Markdown such as links, bold, italics, or strikethrough.

+ +

Finally, by including colons : within the header row, you can define text to be left-aligned, right-aligned, or center-aligned:

+ +
| Left-Aligned  | Center Aligned  | Right Aligned |
+| :------------ |:---------------:| -----:|
+| col 3 is      | some wordy text | $1600 |
+| col 2 is      | centered        |   $12 |
+| zebra stripes | are neat        |    $1 |
+
+ +

A colon on the left-most side indicates a left-aligned column; a colon on the right-most side indicates a right-aligned column; a colon on both sides indicates a center-aligned column.

+ +

Footnotes

+ +
You can create footnotes like this[^footnote].
+
+[^footnote]: Here is the *text* of the **footnote**.
+
+ +

will produce:

+ +

You can create footnotes like this[^footnote].

+ +

[^footnote]: Here is the text of the footnote.

+ +

Mouse on the ‘footnote’ superscript to see content of the footnote.

+ +

Horizontal Rules

+ +

Input *** or --- on a blank line and press return will draw a horizontal line.

+ +
+ +

YAML Front Matter

+ +

Typora support YAML Front Matter now. Input --- at the top of the article and then press Enter will introduce one. Or insert one metadata block from the menu.

+ +

Table of Contents (TOC)

+ +

Input [toc] then press Return key will create a section for “Table of Contents” extracting all headers from one’s writing, its contents will be updated automatically.

+ +

Diagrams (Sequence, Flowchart and Mermaid)

+ +

Typora supports, sequence, flowchart and mermaid, after this feature is enabled from preference panel.

+ +

See this document for detail.

+ +

Span Elements

+ +

Span elements will be parsed and rendered right after your typing. Moving cursor in middle of those span elements will expand those elements into markdown source. Following will explain the syntax of those span element.

+ +

Links

+ +

Markdown supports two style of links: inline and reference.

+ +

In both styles, the link text is delimited by [square brackets].

+ +

To create an inline link, use a set of regular parentheses immediately after the link text’s closing square bracket. Inside the parentheses, put the URL where you want the link to point, along with an optional title for the link, surrounded in quotes. For example:

+ +
This is [an example](http://example.com/ "Title") inline link.
+
+[This link](http://example.net/) has no title attribute.
+
+ +

will produce:

+ +

This is an example inline link. (<p>This is <a href="http://example.com/" title="Title">)

+ +

This link has no title attribute. (<p><a href="http://example.net/">This link</a> has no)

+ +

Internal Links

+ +

You can set the href to headers, which will create a bookmark that allow you to jump to that section after clicking. For example:

+ +

Command(on Windows: Ctrl) + Click This link will jump to header Block Elements. To see how to write that, please move cursor or click that link with key pressed to expand the element into markdown source.

+ +

Reference Links

+ +

Reference-style links use a second set of square brackets, inside which you place a label of your choosing to identify the link:

+ +
This is [an example][id] reference-style link.
+
+Then, anywhere in the document, you define your link label like this, on a line by itself:
+
+[id]: http://example.com/  "Optional Title Here"
+
+ +

In typora, they will be rendered like:

+ +

This is an example reference-style link.

+ +

The implicit link name shortcut allows you to omit the name of the link, in which case the link text itself is used as the name. Just use an empty set of square brackets — e.g., to link the word “Google” to the google.com web site, you could simply write:

+ +
[Google][]
+And then define the link:
+
+[Google]: http://google.com/
+
+ +

In typora click link will expand it for editing, command+click will open the hyperlink in web browser.

+ +

URLs

+ +

Typora allows you to insert urls as links, wrapped by <brackets>.

+ +

<i@typora.io> becomes i@typora.io.

+ +

Typora will aslo auto link standard URLs. e.g: www.google.com.

+ +

Images

+ +

Image looks similar with links, but it requires an additional ! char before the start of link. Image syntax looks like this:

+ +
![Alt text](/path/to/img.jpg)
+
+![Alt text](/path/to/img.jpg "Optional title")
+
+ +

You are able to use drag & drop to insert image from image file or we browser. And modify the markdown source code by clicking on the image. Relative path will be used if image is in same directory or sub-directory with current editing document when drag & drop.

+ +

For more tips on images, please read http://support.typora.io//Images/

+ +

Emphasis

+ +

Markdown treats asterisks (*) and underscores (_) as indicators of emphasis. Text wrapped with one * or _ will be wrapped with an HTML <em> tag. E.g:

+ +
*single asterisks*
+
+_single underscores_
+
+ +

output:

+ +

single asterisks

+ +

single underscores

+ +

GFM will ignores underscores in words, which is commonly used in code and names, like this:

+ +
+

wowgreatstuff

+ +

dothisanddothatandanother_thing.

+
+ +

To produce a literal asterisk or underscore at a position where it would otherwise be used as an emphasis delimiter, you can backslash escape it:

+ +
\*this text is surrounded by literal asterisks\*
+
+ +

Typora recommends to use * symbol.

+ +

Strong

+ +

double *’s or _’s will be wrapped with an HTML <strong> tag, e.g:

+ +
**double asterisks**
+
+__double underscores__
+
+ +

output:

+ +

double asterisks

+ +

double underscores

+ +

Typora recommends to use ** symbol.

+ +

Code

+ +

To indicate a span of code, wrap it with backtick quotes (`). Unlike a pre-formatted code block, a code span indicates code within a normal paragraph. For example:

+ +
Use the `printf()` function.
+
+ +

will produce:

+ +

Use the printf() function.

+ +

Strikethrough

+ +

GFM adds syntax to create strikethrough text, which is missing from standard Markdown.

+ +

~~Mistaken text.~~ becomes Mistaken text.

+ +

Underline

+ +

Underline is powered by raw HTML.

+ +

<u>Underline</u> becomes Underline.

+ +

Emoji :happy:

+ +

Input emoji with syntax :smile:.

+ +

User can trigger auto-complete suggestions for emoji by pressing ESC key, or trigger it automatically after enable it on preference panel. Also, input UTF8 emoji char directly from Edit -> Emoji & Symbols from menu bar is also supported.

+ +

HTML

+ +

Typora cannot render html fragments. But typora can parse and render very limited HTML fragments, as an extension of Markdown, including:

+ + + +

Most of their attributes, styles, or classes will be ignored. For other tags, typora will render them as raw HTML snippets.

+ +

But those HTML will be exported on print or export.

+ +

Inline Math

+ +

To use this feature, first, please enable it in Preference Panel -> Markdown Tab. Then use $ to wrap TeX command, for example: $\lim_{x \to \infty} \exp(-x) = 0$ will be rendered as LaTeX command.

+ +

To trigger inline preview for inline math: input “$”, then press ESC key, then input TeX command, a preview tooltip will be visible like below:

+ +

+ +

Subscript

+ +

To use this feature, first, please enable it in Preference Panel -> Markdown Tab. Then use ~ to wrap subscript content, for example: H~2~O, X~long\ text~/

+ +

Superscript

+ +

To use this feature, first, please enable it in Preference Panel -> Markdown Tab. Then use ^ to wrap superscript content, for example: X^2^.

+ +

Highlight

+ +

To use this feature, first, please enable it in Preference Panel -> Markdown Tab. Then use == to wrap superscript content, for example: ==highlight==.

+ diff --git a/common/testdata/TestMarkdown2Html.md b/common/testdata/TestMarkdown2Html.md new file mode 100644 index 00000000..654b7f3f --- /dev/null +++ b/common/testdata/TestMarkdown2Html.md @@ -0,0 +1,391 @@ +# Markdown For Typora + +## Overview + +**Markdown** is created by [Daring Fireball](http://daringfireball.net/), the original guideline is [here](http://daringfireball.net/projects/markdown/syntax). Its syntax, however, varies between different parsers or editors. **Typora** is using [GitHub Flavored Markdown][GFM]. + +Please note that HTML fragments in markdown source will be recognized but not parsed or rendered. Also, there may be small reformatting on the original markdown source code after saving. + +*Outline* + +[TOC] + +## Block Elements + +### Paragraph and line breaks + +A paragraph is simply one or more consecutive lines of text. In markdown source code, paragraphs are separated by more than one blank lines. In Typora, you only need to press `Return` to create a new paragraph. + +Press `Shift` + `Return` to create a single line break. However, most markdown parser will ignore single line break, to make other markdown parsers recognize your line break, you can leave two whitespace at the end of the line, or insert `
`. + +### Headers + +Headers use 1-6 hash characters at the start of the line, corresponding to header levels 1-6. For example: + +``` markdown +# This is an H1 + +## This is an H2 + +###### This is an H6 +``` + +In typora, input ‘#’s followed by title content, and press `Return` key will create a header. + +### Blockquotes + +Markdown uses email-style > characters for block quoting. They are presented as: + +``` markdown +> This is a blockquote with two paragraphs. This is first paragraph. +> +> This is second pragraph.Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus. + + + +> This is another blockquote with one paragraph. There is three empty line to seperate two blockquote. +``` + +In typora, just input ‘>’ followed by quote contents a block quote is generated. Typora will insert proper ‘>’ or line break for you. Block quote inside anther block quote is allowed by adding additional levels of ‘>’. + +### Lists + +Input `* list item 1` will create an un-ordered list, the `*` symbol can be replace with `+` or `-`. + +Input `1. list item 1` will create an ordered list, their markdown source code is like: + +``` markdown +## un-ordered list +* Red +* Green +* Blue + +## ordered list +1. Red +2. Green +3. Blue +``` + +### Task List + +Task lists are lists with items marked as either [ ] or [x] (incomplete or complete). For example: + +``` markdown +- [ ] a task list item +- [ ] list syntax required +- [ ] normal **formatting**, @mentions, #1234 refs +- [ ] incomplete +- [x] completed +``` + +You can change the complete/incomplete state by click the checkbox before the item. + +### (Fenced) Code Blocks + +Typora only support fences in Github Flavored Markdown. Original code blocks in markdown is not supported. + +Using fences is easy: Input \`\`\` and press `return`. 
Add an optional language identifier after \`\`\` and we'll run it through syntax highlighting: + +``` gfm +Here's an example: + +​``` +function test() { + console.log("notice the blank line before this function?"); +} +​``` + +syntax highlighting: +​```ruby +require 'redcarpet' +markdown = Redcarpet.new("Hello World!") +puts markdown.to_html +​``` +``` + +### Math Blocks + +You can render *LaTeX* mathematical expressions using **MathJax**. + +Input `$$`, then press 'Return' key will trigger an input field which accept *Tex/LaTex* source. Following is an example: +$$ +\mathbf{V}_1 \times \mathbf{V}_2 = \begin{vmatrix} +\mathbf{i} & \mathbf{j} & \mathbf{k} \\ +\frac{\partial X}{\partial u} & \frac{\partial Y}{\partial u} & 0 \\ +\frac{\partial X}{\partial v} & \frac{\partial Y}{\partial v} & 0 \\ +\end{vmatrix} +$$ + + +In markdown source file, math block is *LaTeX* expression wrapped by ‘$$’ mark: + +``` markdown +$$ +\mathbf{V}_1 \times \mathbf{V}_2 = \begin{vmatrix} +\mathbf{i} & \mathbf{j} & \mathbf{k} \\ +\frac{\partial X}{\partial u} & \frac{\partial Y}{\partial u} & 0 \\ +\frac{\partial X}{\partial v} & \frac{\partial Y}{\partial v} & 0 \\ +\end{vmatrix} +$$ +``` + +### Tables + +Input `| First Header | Second Header |` and press `return` key will create a table with two column. + +After table is created, focus on that table will pop up a toolbar for table, where you can resize, align, or delete table. You can also use context menu to copy and add/delete column/row. + +Following descriptions can be skipped, as markdown source code for tables are generated by typora automatically. + +In markdown source code, they look like: + +``` markdown +| First Header | Second Header | +| ------------- | ------------- | +| Content Cell | Content Cell | +| Content Cell | Content Cell | +``` + +You can also include inline Markdown such as links, bold, italics, or strikethrough. 
+ +Finally, by including colons : within the header row, you can define text to be left-aligned, right-aligned, or center-aligned: + +``` markdown +| Left-Aligned | Center Aligned | Right Aligned | +| :------------ |:---------------:| -----:| +| col 3 is | some wordy text | $1600 | +| col 2 is | centered | $12 | +| zebra stripes | are neat | $1 | +``` + +A colon on the left-most side indicates a left-aligned column; a colon on the right-most side indicates a right-aligned column; a colon on both sides indicates a center-aligned column. + +### Footnotes + +``` markdown +You can create footnotes like this[^footnote]. + +[^footnote]: Here is the *text* of the **footnote**. +``` + +will produce: + +You can create footnotes like this[^footnote]. + +[^footnote]: Here is the *text* of the **footnote**. + +Mouse on the ‘footnote’ superscript to see content of the footnote. + +### Horizontal Rules + +Input `***` or `---` on a blank line and press `return` will draw a horizontal line. + +------ + +### YAML Front Matter + +Typora support [YAML Front Matter](http://jekyllrb.com/docs/frontmatter/) now. Input `---` at the top of the article and then press `Enter` will introduce one. Or insert one metadata block from the menu. + +### Table of Contents (TOC) + +Input `[toc]` then press `Return` key will create a section for “Table of Contents” extracting all headers from one’s writing, its contents will be updated automatically. + +### Diagrams (Sequence, Flowchart and Mermaid) + +Typora supports, [sequence](https://bramp.github.io/js-sequence-diagrams/), [flowchart](http://flowchart.js.org/) and [mermaid](https://knsv.github.io/mermaid/#mermaid), after this feature is enabled from preference panel. + +See this [document](http://support.typora.io/Draw-Diagrams-With-Markdown/) for detail. + +## Span Elements + +Span elements will be parsed and rendered right after your typing. Moving cursor in middle of those span elements will expand those elements into markdown source. 
Following will explain the syntax of those span element. + +### Links + +Markdown supports two style of links: inline and reference. + +In both styles, the link text is delimited by [square brackets]. + +To create an inline link, use a set of regular parentheses immediately after the link text’s closing square bracket. Inside the parentheses, put the URL where you want the link to point, along with an optional title for the link, surrounded in quotes. For example: + +``` markdown +This is [an example](http://example.com/ "Title") inline link. + +[This link](http://example.net/) has no title attribute. +``` + +will produce: + +This is [an example](http://example.com/"Title") inline link. (`

This is `) + +[This link](http://example.net/) has no title attribute. (`

This link has no`) + +#### Internal Links + +**You can set the href to headers**, which will create a bookmark that allow you to jump to that section after clicking. For example: + +Command(on Windows: Ctrl) + Click [This link](#block-elements) will jump to header `Block Elements`. To see how to write that, please move cursor or click that link with `⌘` key pressed to expand the element into markdown source. + +#### Reference Links + +Reference-style links use a second set of square brackets, inside which you place a label of your choosing to identify the link: + +``` markdown +This is [an example][id] reference-style link. + +Then, anywhere in the document, you define your link label like this, on a line by itself: + +[id]: http://example.com/ "Optional Title Here" +``` + +In typora, they will be rendered like: + +This is [an example][id] reference-style link. + +[id]: http://example.com/ "Optional Title Here" + +The implicit link name shortcut allows you to omit the name of the link, in which case the link text itself is used as the name. Just use an empty set of square brackets — e.g., to link the word “Google” to the google.com web site, you could simply write: + +``` markdown +[Google][] +And then define the link: + +[Google]: http://google.com/ +``` + +In typora click link will expand it for editing, command+click will open the hyperlink in web browser. + +### URLs + +Typora allows you to insert urls as links, wrapped by `<`brackets`>`. + +`` becomes . + +Typora will aslo auto link standard URLs. e.g: www.google.com. + +### Images + +Image looks similar with links, but it requires an additional `!` char before the start of link. Image syntax looks like this: + +``` markdown +![Alt text](/path/to/img.jpg) + +![Alt text](/path/to/img.jpg "Optional title") +``` + +You are able to use drag & drop to insert image from image file or we browser. And modify the markdown source code by clicking on the image. 
Relative path will be used if image is in same directory or sub-directory with current editing document when drag & drop. + +For more tips on images, please read + +### Emphasis + +Markdown treats asterisks (`*`) and underscores (`_`) as indicators of emphasis. Text wrapped with one `*` or `_` will be wrapped with an HTML `` tag. E.g: + +``` markdown +*single asterisks* + +_single underscores_ +``` + +output: + +*single asterisks* + +_single underscores_ + +GFM will ignores underscores in words, which is commonly used in code and names, like this: + +> wow_great_stuff +> +> do_this_and_do_that_and_another_thing. + +To produce a literal asterisk or underscore at a position where it would otherwise be used as an emphasis delimiter, you can backslash escape it: + +``` markdown +\*this text is surrounded by literal asterisks\* +``` + +Typora recommends to use `*` symbol. + +### Strong + +double *’s or _’s will be wrapped with an HTML `` tag, e.g: + +``` markdown +**double asterisks** + +__double underscores__ +``` + +output: + +**double asterisks** + +__double underscores__ + +Typora recommends to use `**` symbol. + +### Code + +To indicate a span of code, wrap it with backtick quotes (`). Unlike a pre-formatted code block, a code span indicates code within a normal paragraph. For example: + +``` markdown +Use the `printf()` function. +``` + +will produce: + +Use the `printf()` function. + +### Strikethrough + +GFM adds syntax to create strikethrough text, which is missing from standard Markdown. + +`~~Mistaken text.~~` becomes ~~Mistaken text.~~ + +### Underline + +Underline is powered by raw HTML. + +`Underline` becomes Underline. + +### Emoji :happy: + +Input emoji with syntax `:smile:`. + +User can trigger auto-complete suggestions for emoji by pressing `ESC` key, or trigger it automatically after enable it on preference panel. Also, input UTF8 emoji char directly from `Edit` -> `Emoji & Symbols` from menu bar is also supported. 
+ +### HTML + +Typora cannot render html fragments. But typora can parse and render very limited HTML fragments, as an extension of Markdown, including: + +- Underline: `underline` +- Image: `` (And `width`, `height` attribute in HTML tag, and `width`, `height`, `zoom` style in `style` attribute will be applied.) +- Comments: `` +- Hyperlink: `link`. + +Most of their attributes, styles, or classes will be ignored. For other tags, typora will render them as raw HTML snippets. + +But those HTML will be exported on print or export. + +### Inline Math + +To use this feature, first, please enable it in `Preference` Panel -> `Markdown` Tab. Then use `$` to wrap TeX command, for example: `$\lim_{x \to \infty} \exp(-x) = 0$` will be rendered as LaTeX command. + +To trigger inline preview for inline math: input “$”, then press `ESC` key, then input TeX command, a preview tooltip will be visible like below: + + + +### Subscript + +To use this feature, first, please enable it in `Preference` Panel -> `Markdown` Tab. Then use `~` to wrap subscript content, for example: `H~2~O`, `X~long\ text~`/ + +### Superscript + +To use this feature, first, please enable it in `Preference` Panel -> `Markdown` Tab. Then use `^` to wrap superscript content, for example: `X^2^`. + +### Highlight + +To use this feature, first, please enable it in `Preference` Panel -> `Markdown` Tab. Then use `==` to wrap superscript content, for example: `==highlight==`. 
+ +[GFM]: https://help.github.com/articles/github-flavored-markdown/ diff --git a/common/testdata/TestMarkdownHTMLHeader.golden b/common/testdata/TestMarkdownHTMLHeader.golden new file mode 100644 index 00000000..e69de29b diff --git a/common/testdata/TestParseDSN.golden b/common/testdata/TestParseDSN.golden new file mode 100644 index 00000000..af08f5e1 --- /dev/null +++ b/common/testdata/TestParseDSN.golden @@ -0,0 +1,15 @@ +&common.dsn{Addr:"", Schema:"", User:"", Password:"", Charset:"", Disable:true, Version:0} +&common.dsn{Addr:"hostname:3307", Schema:"database", User:"user", Password:"password", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"hostname:3307", Schema:"information_schema", User:"user", Password:"password", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"hostname:3306", Schema:"database", User:"user", Password:"password", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"127.0.0.1:3307", Schema:"database", User:"user", Password:"password", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"127.0.0.1:3306", Schema:"information_schema", User:"user", Password:"password", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"hostname:3307", Schema:"database", User:"", Password:"", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"hostname:3307", Schema:"database", User:"", Password:"", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"hostname:3306", Schema:"information_schema", User:"", Password:"", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"hostname:3306", Schema:"information_schema", User:"", Password:"", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"127.0.0.1:3306", Schema:"database", User:"", Password:"", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"hostname:3307", Schema:"information_schema", User:"", Password:"", Charset:"utf8mb4", Disable:false, Version:999} 
+&common.dsn{Addr:"127.0.0.1:3307", Schema:"database", User:"", Password:"", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"127.0.0.1:3307", Schema:"database", User:"", Password:"", Charset:"utf8mb4", Disable:false, Version:999} +&common.dsn{Addr:"127.0.0.1:3306", Schema:"database", User:"", Password:"", Charset:"utf8mb4", Disable:false, Version:999} diff --git a/common/tricks.go b/common/tricks.go new file mode 100644 index 00000000..2a462482 --- /dev/null +++ b/common/tricks.go @@ -0,0 +1,109 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package common + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" +) + +// GoldenDiff 从gofmt学来的测试方法 +// https://medium.com/soon-london/testing-with-golden-files-in-go-7fccc71c43d3 +func GoldenDiff(f func(), name string, update *bool) error { + var b bytes.Buffer + w := bufio.NewWriter(&b) + str := captureOutput(f) + _, err := w.WriteString(str) + if err != nil { + Log.Warning(err.Error()) + } + err = w.Flush() + if err != nil { + Log.Warning(err.Error()) + } + + gp := filepath.Join("testdata", name+".golden") + if *update { + if err = ioutil.WriteFile(gp, b.Bytes(), 0644); err != nil { + err = fmt.Errorf("%s failed to update golden file: %s", name, err) + return err + } + } + g, err := ioutil.ReadFile(gp) + if err != nil { + err = fmt.Errorf("%s failed reading .golden: %s", name, err) + } + if !bytes.Equal(b.Bytes(), g) { + err = fmt.Errorf("%s does not match .golden file", name) + } + return err +} + +// captureOutput 获取函数标准输出 +func captureOutput(f func()) string { + // keep backup of the real stdout + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + // execute function + f() + + outC := make(chan string) + // copy the output in a separate goroutine so printing can't block indefinitely + go func() { + var buf bytes.Buffer + _, err := io.Copy(&buf, r) + if err != nil { + Log.Warning(err.Error()) + } + outC <- buf.String() + }() + + // back to normal state + err := w.Close() + if err != nil { + Log.Warning(err.Error()) + } + os.Stdout = oldStdout // restoring the real stdout + out := <-outC + os.Stdout = oldStdout + return out +} + +// SortedKey sort map[string]interface{}, use in range clause +func SortedKey(m interface{}) []string { + var keys []string + switch reflect.TypeOf(m).Kind() { + case reflect.Map: + switch reflect.TypeOf(m).Key().Kind() { + case reflect.String: + for _, k := range reflect.ValueOf(m).MapKeys() { + keys = append(keys, k.String()) + } + } + } + 
sort.Strings(keys) + return keys +} diff --git a/database/doc.go b/database/doc.go new file mode 100644 index 00000000..9deb6a09 --- /dev/null +++ b/database/doc.go @@ -0,0 +1,18 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package database will take cover of communicate with mysql database. +package database diff --git a/database/explain.go b/database/explain.go new file mode 100644 index 00000000..ecaed35f --- /dev/null +++ b/database/explain.go @@ -0,0 +1,1069 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package database + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/XiaoMi/soar/ast" + "github.com/XiaoMi/soar/common" + + "github.com/tidwall/gjson" + "vitess.io/vitess/go/vt/sqlparser" +) + +// format_type 支持的输出格式 +// https://dev.mysql.com/doc/refman/5.7/en/explain-output.html +const ( + TraditionalFormatExplain = iota // 默认输出 + JSONFormatExplain // JSON格式输出 +) + +// ExplainFormatType EXPLAIN支持的FORMAT_TYPE +var ExplainFormatType = map[string]int{ + "traditional": 0, + "json": 1, +} + +// explain_type +const ( + TraditionalExplainType = iota // 默认转出 + ExtendedExplainType // EXTENDED输出 + PartitionsExplainType // PARTITIONS输出 +) + +// ExplainType EXPLAIN命令支持的参数 +var ExplainType = map[string]int{ + "traditional": 0, + "extended": 1, + "partitions": 2, +} + +// 为TraditionalFormatExplain准备的结构体 { start + +// ExplainInfo 用于存放Explain信息 +type ExplainInfo struct { + SQL string + ExplainFormat int + ExplainRows []*ExplainRow + ExplainJSON *ExplainJSON + Warnings []*ExplainWarning + QueryCost float64 +} + +// ExplainRow 单行Explain +type ExplainRow struct { + ID int + SelectType string + TableName string + Partitions string // explain partitions + AccessType string + PossibleKeys []string + Key string + KeyLen string // 索引长度,如果发生了index_merge, KeyLen格式为N,N,所以不能定义为整型 + Ref []string + Rows int + Filtered float64 // 5.6 JSON, 5.7+, 5.5 EXTENDED + Scalability string // O(1), O(n), O(log n), O(log n)+ + Extra string +} + +// ExplainWarning explain extended后SHOW WARNINGS输出的结果 +type ExplainWarning struct { + Level string + Code int + Message string +} + +// 为TraditionalFormatExplain准备的结构体 end } + +// 为JSONFormatExplain准备的结构体 { start + +// ExplainJSONCostInfo JSON +type ExplainJSONCostInfo struct { + ReadCost string `json:"read_cost"` + EvalCost string `json:"eval_cost"` + PrefixCost string `json:"prefix_cost"` + DataReadPerJoin string `json:"data_read_per_join"` + QueryCost string `json:"query_cost"` + SortCost string 
`json:"sort_cost"` +} + +// ExplainJSONMaterializedFromSubquery JSON +type ExplainJSONMaterializedFromSubquery struct { + UsingTemporaryTable bool `json:"using_temporary_table"` + Dependent bool `json:"dependent"` + Cacheable bool `json:"cacheable"` + QueryBlock *ExplainJSONQueryBlock `json:"query_block"` +} + +// 该变量用于存放JSON到Traditional模式的所有ExplainJSONTable +var explainJSONTables []*ExplainJSONTable + +// ExplainJSONTable JSON +type ExplainJSONTable struct { + TableName string `json:"table_name"` + AccessType string `json:"access_type"` + PossibleKeys []string `json:"possible_keys"` + Key string `json:"key"` + UsedKeyParts []string `json:"used_key_parts"` + KeyLength string `json:"key_length"` + Ref []string `json:"ref"` + RowsExaminedPerScan int `json:"rows_examined_per_scan"` + RowsProducedPerJoin int `json:"rows_produced_per_join"` + Filtered string `json:"filtered"` + UsingIndex bool `json:"using_index"` + UsingIndexForGroupBy bool `json:"using_index_for_group_by"` + CostInfo ExplainJSONCostInfo `json:"cost_info"` + UsedColumns []string `json:"used_columns"` + AttachedCondition string `json:"attached_condition"` + AttachedSubqueries []ExplainJSONSubqueries `json:"attached_subqueries"` + MaterializedFromSubquery ExplainJSONMaterializedFromSubquery `json:"materialized_from_subquery"` +} + +// ExplainJSONNestedLoop JSON +type ExplainJSONNestedLoop struct { + Table ExplainJSONTable `json:"table"` +} + +// ExplainJSONBufferResult JSON +type ExplainJSONBufferResult struct { + UsingTemporaryTable bool `json:"using_temporary_table"` + NestedLoop []ExplainJSONNestedLoop `json:"nested_loop"` +} + +// ExplainJSONSubqueries JSON +type ExplainJSONSubqueries struct { + Dependent bool `json:"dependent"` + Cacheable bool `json:"cacheable"` + QueryBlock ExplainJSONQueryBlock `json:"query_block"` +} + +// ExplainJSONGroupingOperation JSON +type ExplainJSONGroupingOperation struct { + UsingTemporaryTable bool `json:"using_temporary_table"` + UsingFilesort bool 
`json:"using_filesort"` + Table ExplainJSONTable `json:"table"` + CostInfo ExplainJSONCostInfo `json:"cost_info"` + NestedLoop []ExplainJSONNestedLoop `json:"nested_loop"` + GroupBySubqueries []ExplainJSONSubqueries `json:"group_by_subqueries"` +} + +// ExplainJSONDuplicatesRemoval JSON +type ExplainJSONDuplicatesRemoval struct { + UsingTemporaryTable bool `json:"using_temporary_table"` + UsingFilesort bool `json:"using_filesort"` + BufferResult ExplainJSONBufferResult `json:"buffer_result"` + GroupingOperation ExplainJSONGroupingOperation `json:"grouping_operation"` +} + +// ExplainJSONOrderingOperation JSON +type ExplainJSONOrderingOperation struct { + UsingFilesort bool `json:"using_filesort"` + Table ExplainJSONTable `json:"table"` + DuplicatesRemoval ExplainJSONDuplicatesRemoval `json:"duplicates_removal"` + GroupingOperation ExplainJSONGroupingOperation `json:"grouping_operation"` + OderbySubqueries []ExplainJSONSubqueries `json:"order_by_subqueries"` +} + +// ExplainJSONQueryBlock JSON +type ExplainJSONQueryBlock struct { + SelectID int `json:"select_id"` + CostInfo ExplainJSONCostInfo `json:"cost_info"` + Table ExplainJSONTable `json:"table"` + NestedLoop []ExplainJSONNestedLoop `json:"nested_loop"` + OrderingOperation ExplainJSONOrderingOperation `json:"ordering_operation"` + GroupingOperation ExplainJSONGroupingOperation `json:"grouping_operation"` + OptimizedAwaySubqueries []ExplainJSONSubqueries `json:"optimized_away_subqueries"` + HavingSubqueries []ExplainJSONSubqueries `json:"having_subqueries"` + SelectListSubqueries []ExplainJSONSubqueries `json:"select_list_subqueries"` + UpdateValueSubqueries []ExplainJSONSubqueries `json:"update_value_subqueries"` + QuerySpecifications []ExplainJSONSubqueries `json:"query_specifications"` + UnionResult ExplainJSONUnionResult `json:"union_result"` + Message string `json:"message"` +} + +// ExplainJSONUnionResult JSON +type ExplainJSONUnionResult struct { + UsingTemporaryTable bool `json:"using_temporary_table"` + 
TableName string `json:"table_name"` + AccessType string `json:"access_type"` + QuerySpecifications []ExplainJSONSubqueries `json:"query_specifications"` +} + +// ExplainJSON 根结点 +type ExplainJSON struct { + QueryBlock ExplainJSONQueryBlock `json:"query_block"` +} + +// 为JSONFormatExplain准备的结构体 end } + +// ExplainKeyWords 需要解释的关键字 +var ExplainKeyWords = []string{ + "access_type", + "attached_condition", + "attached_subqueries", + "buffer_result", + "cacheable", + "cost_info", + "data_read_per_join", + "dependent", + "duplicates_removal", + "eval_cost", + "filtered", + "group_by_subqueries", + "grouping_operation", + "having_subqueries", + "key", + "key_length", + "materialized_from_subquery", + "message", + "nested_loop", + "optimized_away_subqueries", + "order_by_subqueries", + "ordering_operation", + "possible_keys", + "prefix_cost", + "query_block", + "query_cost", + "query_specifications", + "read_cost", + "ref", + "rows_examined_per_scan", + "rows_produced_per_join", + "select_id", + "select_list_subqueries", + "sort_cost", + "table", + "table_name", + "union_result", + "update_value_subqueries", + "used_columns", + "used_key_parts", + "using_filesort", + "using_index", + "using_index_for_group_by", + "using_temporary_table", +} + +// ExplainColumnIndent EXPLAIN表头 +var ExplainColumnIndent = map[string]string{ + "id": "id为SELECT的标识符. 它是在SELECT查询中的顺序编号. 如果这一行表示其他行的union结果, 这个值可以为空. 在这种情况下, table列会显示为形如, 表示它是id为M和N的查询行的联合结果.", + "select_type": "表示查询的类型. ", + "table": "输出行所引用的表.", + "type": "type显示连接使用的类型, 有关不同类型的描述, 请参见解释连接类型.", + "possible_keys": "指出MySQL能在该表中使用哪些索引有助于查询. 如果为空, 说明没有可用的索引.", + "key": "MySQL实际从possible_keys选择使用的索引. 如果为NULL, 则没有使用索引. 很少情况下, MySQL会选择优化不足的索引. 这种情况下, 可以在select语句中使用USE INDEX (indexname)来强制使用一个索引或者用IGNORE INDEX (indexname)来强制MySQL忽略索引.", + "key_len": "显示MySQL使用索引键的长度. 如果key是NULL, 则key_len为NULL. 使用的索引的长度. 
在不损失精确性的情况下, 长度越短越好.", + "ref": "显示索引的哪一列被使用了.", + "rows": "表示MySQL认为必须检查的用来返回请求数据的行数.", + "filtered": "表示返回结果的行占需要读到的行(rows列的值)的百分比.", + "Extra": "该列显示MySQL在查询过程中的一些详细信息, MySQL查询优化器执行查询的过程中对查询计划的重要补充信息.", +} + +// ExplainSelectType EXPLAIN中SELECT TYPE会出现的类型 +var ExplainSelectType = map[string]string{ + "SIMPLE": "简单SELECT(不使用UNION或子查询等).", + "PRIMARY": "最外层的select.", + "UNION": "UNION中的第二个或后面的SELECT查询, 不依赖于外部查询的结果集.", + "DEPENDENT": "UNION中的第二个或后面的SELECT查询, 依赖于外部查询的结果集.", + "UNION RESULT": "UNION查询的结果集.", + "SUBQUERY": "子查询中的第一个SELECT查询, 不依赖于外部查询的结果集.", + "DEPENDENT SUBQUERY": "子查询中的第一个SELECT查询, 依赖于外部查询的结果集.", + "DERIVED": "用于from子句里有子查询的情况. MySQL会递归执行这些子查询, 把结果放在临时表里.", + "MATERIALIZED": "Materialized subquery.", + "UNCACHEABLE SUBQUERY": "结果集不能被缓存的子查询, 必须重新为外层查询的每一行进行评估.", + "UNCACHEABLE UNION": "UNION中的第二个或后面的select查询, 属于不可缓存的子查询(类似于UNCACHEABLE SUBQUERY).", +} + +// ExplainAccessType EXPLAIN中ACCESS TYPE会出现的类型 +var ExplainAccessType = map[string]string{ + "system": "这是const连接类型的一种特例, 该表仅有一行数据(=系统表).", + "const": `const用于使用常数值比较PRIMARY KEY时, 当查询的表仅有一行时, 使用system. 例:SELECT * FROM tbl WHERE col =1.`, + "eq_ref": `除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'.`, + "ref": `连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'.`, + "fulltext": "查询时使用 FULLTEXT 索引.", + "ref_or_null": "如同ref, 但是MySQL必须在初次查找的结果里找出null条目, 然后进行二次查找.", + "index_merge": `表示使用了索引合并优化方法. 在这种情况下. key列包含了使用的索引的清单, key_len包含了使用的索引的最长的关键元素. 详情请见 8.2.1.4, “Index Merge Optimization”.`, + "unique_subquery": `在某些IN查询中使用此种类型,而不是常规的ref:'value IN (SELECT primary_key FROM single_table WHERE some_expr)'.`, + "index_subquery": "在某些IN查询中使用此种类型, 与unique_subquery类似, 但是查询的是非唯一索引性索引.", + "range": `只检索给定范围的行, 使用一个索引来选择行. key列显示使用了哪个索引. key_len包含所使用索引的最长关键元素.`, + "index": "全表扫描, 只是扫描表的时候按照索引次序进行而不是行. 
// ExplainScalability maps each access type to its asymptotic complexity.
var ExplainScalability = map[string]string{
	"ALL":             "O(n)",
	"index":           "O(n)",
	"range":           "O(log n)+",
	"index_subquery":  "O(log n)+",
	"unique_subquery": "O(log n)+",
	"index_merge":     "O(log n)+",
	"ref_or_null":     "O(log n)+",
	"fulltext":        "O(log n)+",
	"ref":             "O(log n)",
	"eq_ref":          "O(log n)",
	"const":           "O(1)",
	"system":          "O(1)",
}

// ExplainExtra documents the values that can appear in the EXPLAIN Extra column.
// https://dev.mysql.com/doc/refman/8.0/en/explain-output.html
// sql/opt_explain_traditional.cc:traditional_extra_tags
var ExplainExtra = map[string]string{
	"Using temporary":                   "表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by.",
	"Using filesort":                    "MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'.",
	"Using index condition":             "在5.6版本后加入的新特性(Index Condition Pushdown)。Using index condition 会先条件过滤索引,过滤完索引后找到所有符合索引条件的数据行,随后用 WHERE 子句中的其他条件去过滤这些数据行。",
	"Range checked for each record":     "MySQL没有发现好的可以使用的索引,但发现如果来自前面的表的列值已知,可能部分索引可以使用。",
	"Using where with pushed condition": "这是一个仅仅在NDBCluster存储引擎中才会出现的信息,打开condition pushdown优化功能才可能被使用。",
	"Using MRR":                         "使用了 MRR Optimization IO 层面进行了优化,减少 IO 方面的开销。",
	// BUG FIX: per the MySQL reference manual, "Skip_open_table" previously
	// carried Using MRR's description and "Open_frm_only" carried
	// Skip_open_table's description; both are corrected below.
	"Skip_open_table":          "Table files do not need to be opened. The information is already available from the data dictionary.",
	"Open_frm_only":            "Only the table's .frm file need be opened.",
	"Open_full_table":          "Unoptimized information lookup. Table information must be read from the data dictionary and by reading table files.",
	"Scanned":                  "This indicates how many directory scans the server performs when processing a query for INFORMATION_SCHEMA tables.",
	"Using index for group-by": "Similar to the Using index table access method, Using index for group-by indicates that MySQL found an index that can be used to retrieve all columns of a GROUP BY or DISTINCT query without any extra disk access to the actual table. Additionally, the index is used in the most efficient way so that for each group, only a few index entries are read.",
	"Start temporary":          "This indicates temporary table use for the semi-join Duplicate Weedout strategy.Start",
	"End temporary":            "This indicates temporary table use for the semi-join Duplicate Weedout strategy.End",
	"FirstMatch":               "The semi-join FirstMatch join shortcutting strategy is used for tbl_name.",
	"Materialize":              "Materialized subquery",
	"Start materialize":        "Materialized subquery Start",
	"End materialize":          "Materialized subquery End",
	"unique row not found":     "For a query such as SELECT ... FROM tbl_name, no rows satisfy the condition for a UNIQUE index or PRIMARY KEY on the table.",
	//"Scan": "",
	//"Impossible ON condition": "",
	//"Ft_hints:": "",
	//"Backward index scan": "",
	//"Recursive": "",
	//"Table function:": "",
	// NOTE(review): the text below describes NDB Condition Pushdown rather than
	// index-dive skipping — confirm the intended description for this tag.
	"Index dive skipped due to FORCE":                    "This item applies to NDB tables only. It means that MySQL Cluster is using the Condition Pushdown optimization to improve the efficiency of a direct comparison between a nonindexed column and a constant. In such cases, the condition is “pushed down” to the cluster's data nodes and is evaluated on all data nodes simultaneously. This eliminates the need to send nonmatching rows over the network, and can speed up such queries by a factor of 5 to 10 times over cases where Condition Pushdown could be but is not used.",
	"Impossible WHERE noticed after reading const tables": "查询了所有const(和system)表, 但发现WHERE查询条件不起作用.",
	"Using where":                            "WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的.",
	"Using join buffer":                      "从已有连接中找被读入缓存的数据, 并且通过缓存来完成与当前表的连接.",
	"Using index":                            "只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询.",
	"const row not found":                    "空表做类似 SELECT ... FROM tbl_name 的查询操作.",
	"Distinct":                               "MySQL is looking for distinct values, so it stops searching for more rows for the current row combination after it has found the first matching row.",
	"Full scan on NULL key":                  "子查询中的一种优化方式, 常见于无法通过索引访问null值.",
	"Impossible HAVING":                      "HAVING条件过滤没有效果, 返回已有查询的结果集.",
	"Impossible WHERE":                       "WHERE条件过滤没有效果, 最终是全表扫描.",
	"LooseScan":                              "使用半连接LooseScan策略.",
	"No matching min/max row":                "没有行满足查询的条件, 如 SELECT MIN(...) FROM ... WHERE condition.",
	"no matching row in const table":         "对于连接查询, 列未满足唯一索引的条件或表为空.",
	"No matching rows after partition pruning": "对于DELETE 或 UPDATE, 优化器在分区之后, 未发现任何要删除或更新的内容. 类似查询 Impossible WHERE.",
	"No tables used":                         "查询没有FROM子句, 或者有一个 FROM DUAL子句.",
	"Not exists":                             "MySQL能够对LEFT JOIN查询进行优化, 并且在查找到符合LEFT JOIN条件的行后, 则不再查找更多的行.",
	"Plan isn't ready yet":                   "This value occurs with EXPLAIN FOR CONNECTION when the optimizer has not finished creating the execution plan for the statement executing in the named connection. If execution plan output comprises multiple lines, any or all of them could have this Extra value, depending on the progress of the optimizer in determining the full execution plan.",
	"Using intersect":                        "开启了index merge,即:对多个索引分别进行条件扫描,然后将它们各自的结果进行合并,使用的算法为:index_merge_intersection",
	"Using union":                            "开启了index merge,即:对多个索引分别进行条件扫描,然后将它们各自的结果进行合并,使用的算法为:index_merge_union",
	"Using sort_union":                       "开启了index merge,即:对多个索引分别进行条件扫描,然后将它们各自的结果进行合并,使用的算法为:index_merge_sort_union",
}
If execution plan output comprises multiple lines, any or all of them could have this Extra value, depending on the progress of the optimizer in determining the full execution plan.", + "Using intersect": "开启了index merge,即:对多个索引分别进行条件扫描,然后将它们各自的结果进行合并,使用的算法为:index_merge_intersection", + "Using union": "开启了index merge,即:对多个索引分别进行条件扫描,然后将它们各自的结果进行合并,使用的算法为:index_merge_union", + "Using sort_union": "开启了index merge,即:对多个索引分别进行条件扫描,然后将它们各自的结果进行合并,使用的算法为:index_merge_sort_union", +} + +// 提取ExplainJSON中所有的ExplainJSONTable, 将其写入全局变量explainJSONTables +// depth只是用于debug,逻辑上并未使用 +func findTablesInJSON(explainJSON string, depth int) { + common.Log.Debug("findTablesInJSON Enter: depth(%d), json(%s)", depth, explainJSON) + // 去除注释,语法检查 + explainJSON = string(RemoveSQLComments([]byte(explainJSON))) + if !gjson.Valid(explainJSON) { + return + } + // 提取所有ExplainJSONTable struct + for _, key := range ExplainKeyWords { + result := gjson.Get(explainJSON, key) + if result.String() == "" { + continue + } + + if key == "table" { + table := new(ExplainJSONTable) + common.Log.Debug("findTablesInJSON FindTable: depth(%d), table(%s)", depth, result.String) + err := json.Unmarshal([]byte(result.Raw), table) + common.LogIfError(err, "") + if table.TableName != "" { + explainJSONTables = append(explainJSONTables, table) + } + findTablesInJSON(result.String(), depth+1) + } else { + common.Log.Debug("findTablesInJSON ScanOther: depth(%d), key(%s), array_len(%d), json(%s)", depth, key, len(result.Array()), result.String) + for _, val := range result.Array() { + if val.String() != "" { + findTablesInJSON(val.String(), depth+1) + } + } + findTablesInJSON(result.String(), depth+1) + } + } +} + +// FormatJSONIntoTraditional 将JSON形式转换为TRADITIONAL形式,方便前端展现 +func FormatJSONIntoTraditional(explainJSON string) []*ExplainRow { + // 查找JSON中的所有ExplainJSONTable + explainJSONTables = []*ExplainJSONTable{} + findTablesInJSON(explainJSON, 0) + + var explainRows []*ExplainRow + id := -1 + for _, table := range 
explainJSONTables { + keyLen := table.KeyLength + filtered, err := strconv.ParseFloat(table.Filtered, 64) + if err != nil { + filtered = 0.00 + } + if filtered > 100.00 { + filtered = 100.00 + } + explainRows = append(explainRows, &ExplainRow{ + ID: id + 1, + SelectType: "", + TableName: table.TableName, + Partitions: "NULL", + AccessType: table.AccessType, + PossibleKeys: table.PossibleKeys, + Key: table.Key, + KeyLen: keyLen, + Ref: table.Ref, + Rows: table.RowsExaminedPerScan, + Filtered: filtered, + Scalability: ExplainScalability[table.AccessType], + Extra: "", + }) + } + return explainRows +} + +// ConvertExplainJSON2Row 将JSON格式转成ROW格式,为方便统一做优化建议 +// 但是会损失一些JSON特有的分析结果 +func ConvertExplainJSON2Row(explainJSON *ExplainJSON) []*ExplainRow { + buf, err := json.Marshal(explainJSON) + if err != nil { + return nil + } + return FormatJSONIntoTraditional(string(buf)) +} + +// 用于检测MySQL版本是否低于MySQL5.6 +// 低于5.6 返回 true, 表示需要改写非SELECT的SQL --> SELECT +func (db *Connector) supportExplainWrite() (bool, error) { + defer func() { + err := recover() + if err != nil { + common.Log.Error("Recover supportExplainWrite() Error:", err) + } + }() + + // 5.6以上版本支持EXPLAIN UPDATE/DELETE等语句,但需要开启写入 + // 如开启了read_only,EXPLAIN UPDATE/DELETE也会受限制 + if common.Config.TestDSN.Version >= 560 { + readOnly, err := db.SingleIntValue("read_only") + if err != nil { + return false, err + } + superReadOnly, err := db.SingleIntValue("super_read_only") + // Percona, MariaDB 5.6就已经有super_read_only了,但社区版5.6还没有这个参数 + if strings.Contains(err.Error(), "Unknown system variable") { + superReadOnly = readOnly + } else if err != nil { + return false, err + } + + if readOnly == 1 || superReadOnly == 1 { + return true, nil + } + + return false, nil + } + + return true, nil +} + +// 将SQL语句转换为可以被Explain的语句,如:写转读 +// 当输出为空时,表示语法错误或不支持EXPLAIN +func (db *Connector) explainAbleSQL(sql string) (string, error) { + stmt, err := sqlparser.Parse(sql) + if err != nil { + common.Log.Error("explainAbleSQL sqlparser.Parse 
Error: %v", err) + return sql, err + } + + switch stmt.(type) { + case *sqlparser.Insert, *sqlparser.Update, *sqlparser.Delete: // REPLACE和INSERT的AST基本相同,只是Action不同 + // 判断Explain的SQL是否需要被改写 + need, err := db.supportExplainWrite() + if err != nil { + common.Log.Error("explainAbleSQL db.supportExplainWrite Error: %v", err) + return "", err + } + if need { + rw := ast.NewRewrite(sql) + if rw != nil { + return rw.RewriteDML2Select().NewSQL, nil + } + } + return sql, nil + + case *sqlparser.Union, *sqlparser.ParenSelect, *sqlparser.Select, sqlparser.SelectStatement: + return sql, nil + default: + } + return "", nil +} + +// 执行explain请求,返回mysql.Result执行结果 +func (db *Connector) executeExplain(sql string, explainType int, formatType int) (*QueryResult, error) { + var err error + sql, _ = db.explainAbleSQL(sql) + if sql == "" { + return nil, err + } + + // 5.6以上支持FORMAT=JSON + explainFormat := "" + switch formatType { + case JSONFormatExplain: + if common.Config.TestDSN.Version >= 560 { + explainFormat = "FORMAT=JSON" + } + } + // 执行explain + var res *QueryResult + switch explainType { + case ExtendedExplainType: + // 5.6以上extended关键字已经不推荐使用,8.0废弃了这个关键字 + if common.Config.TestDSN.Version >= 560 { + res, err = db.Query("explain %s", sql) + } else { + res, err = db.Query("explain extended %s", sql) + } + case PartitionsExplainType: + res, err = db.Query("explain partitions %s", sql) + + default: + res, err = db.Query("explain %s %s", explainFormat, sql) + } + return res, err +} + +// MySQLExplainWarnings WARNINGS信息中包含的优化器信息 +func MySQLExplainWarnings(exp *ExplainInfo) string { + content := "## MySQL优化器调优结果\n\n```sql\n" + for _, row := range exp.Warnings { + content += "\n" + row.Message + "\n" + } + content += "\n```" + return content +} + +// MySQLExplainQueryCost 将last_query_cost信息补充到评审结果中 +func MySQLExplainQueryCost(exp *ExplainInfo) string { + var content string + if exp.QueryCost > 0 { + + tmp := fmt.Sprintf("%.3f\n", exp.QueryCost) + + content = "Query cost: " + if 
exp.QueryCost > float64(common.Config.MaxQueryCost) { + content += fmt.Sprintf("☠️ **%s**", tmp) + } else { + content += tmp + } + + } + return content +} + +// ExplainInfoTranslator 将explain信息翻译成人能读懂的 +func ExplainInfoTranslator(exp *ExplainInfo) string { + var buf []string + var selectTypeBuf []string + var accessTypeBuf []string + var extraTypeBuf []string + buf = append(buf, fmt.Sprint("### Explain信息解读\n")) + rows := exp.ExplainRows + if exp.ExplainFormat == JSONFormatExplain { + // JSON形式遍历分析不方便,转成Row格式统一处理 + rows = ConvertExplainJSON2Row(exp.ExplainJSON) + } + if len(rows) == 0 { + return "" + } + + // SelectType信息解读 + explainSelectType := make(map[string]string) + for k, v := range ExplainSelectType { + explainSelectType[k] = v + } + for _, row := range rows { + if _, ok := explainSelectType[row.SelectType]; ok { + desc := fmt.Sprintf("* **%s**: %s\n", row.SelectType, explainSelectType[row.SelectType]) + selectTypeBuf = append(selectTypeBuf, desc) + delete(explainSelectType, row.SelectType) + } + } + if len(selectTypeBuf) > 0 { + buf = append(buf, fmt.Sprint("#### SelectType信息解读\n")) + buf = append(buf, strings.Join(selectTypeBuf, "\n")) + } + + // #### Type信息解读 + explainAccessType := make(map[string]string) + for k, v := range ExplainAccessType { + explainAccessType[k] = v + } + for _, row := range rows { + if _, ok := explainAccessType[row.AccessType]; ok { + var warn bool + var desc string + for _, t := range common.Config.ExplainWarnAccessType { + if row.AccessType == t { + warn = true + } + } + if warn { + desc = fmt.Sprintf("* ☠️ **%s**: %s\n", row.AccessType, explainAccessType[row.AccessType]) + } else { + desc = fmt.Sprintf("* **%s**: %s\n", row.AccessType, explainAccessType[row.AccessType]) + } + + accessTypeBuf = append(accessTypeBuf, desc) + delete(explainAccessType, row.AccessType) + } + } + if len(accessTypeBuf) > 0 { + buf = append(buf, fmt.Sprint("#### Type信息解读\n")) + buf = append(buf, strings.Join(accessTypeBuf, "\n")) + } + + // #### 
Extra信息解读 + if exp.ExplainFormat != JSONFormatExplain { + explainExtra := make(map[string]string) + for k, v := range ExplainExtra { + explainExtra[k] = v + } + for _, row := range rows { + for k, c := range explainExtra { + if strings.Contains(row.Extra, k) { + if k == "Impossible WHERE" { + if strings.Contains(row.Extra, "Impossible WHERE noticed after reading const tables") { + continue + } + } + warn := false + for _, w := range common.Config.ExplainWarnExtra { + if k == w { + warn = true + } + } + if warn { + extraTypeBuf = append(extraTypeBuf, fmt.Sprintf("* ☠️ **%s**: %s\n", k, c)) + } else { + extraTypeBuf = append(extraTypeBuf, fmt.Sprintf("* **%s**: %s\n", k, c)) + } + delete(explainExtra, k) + } + } + } + } + if len(extraTypeBuf) > 0 { + buf = append(buf, fmt.Sprint("#### Extra信息解读\n")) + buf = append(buf, strings.Join(extraTypeBuf, "\n")) + } + + return strings.Join(buf, "\n") +} + +// ParseExplainText 解析explain文本信息(很可能是用户复制粘贴得到),返回格式化数据 +func ParseExplainText(content string) (exp *ExplainInfo, err error) { + exp = &ExplainInfo{ExplainFormat: TraditionalFormatExplain} + + content = strings.TrimSpace(content) + verticalFormat := strings.HasPrefix(content, "*") + jsonFormat := strings.HasPrefix(content, "{") + traditionalFormat := strings.HasPrefix(content, "+") + + if verticalFormat && traditionalFormat && jsonFormat { + return nil, errors.New("not supported explain type") + } + + if verticalFormat { + exp.ExplainRows, err = parseVerticalExplainText(content) + } + + if jsonFormat { + exp.ExplainFormat = JSONFormatExplain + exp.ExplainJSON, err = parseJSONExplainText(content) + } + + if traditionalFormat { + exp.ExplainRows, err = parseTraditionalExplainText(content) + } + return exp, err +} + +// 解析文本形式传统形式Explain信息 +func parseTraditionalExplainText(content string) (explainRows []*ExplainRow, err error) { + LS := regexp.MustCompile(`^\+`) // 华丽的分隔线:) + + // 格式正确性检查 + lines := strings.Split(content, "\n") + if len(lines) < 3 { + return nil, 
errors.New("explain Rows less than 3") + } + + // 提取头部,用于后续list到map的转换 + var header []string + for _, h := range strings.Split(strings.Trim(lines[1], "|"), "|") { + header = append(header, strings.TrimSpace(h)) + } + colIdx := make(map[string]int) + for i, item := range header { + colIdx[strings.ToLower(item)] = i + } + + // explain format=json未把外面的框去了 + if strings.ToLower(header[0]) == "explain" { + return nil, errors.New("json format explain need remove") + } + + // 将每一列填充至ExplainRow结构体 + colsMap := make(map[string]string) + for _, l := range lines[3:] { + var keylen string + var rows int + var filtered float64 + var partitions string + // 跳过分割线 + if LS.MatchString(l) || strings.TrimSpace(l) == "" { + continue + } + + // list到map的转换 + var cols []string + for _, c := range strings.Split(strings.Trim(l, "|"), "|") { + cols = append(cols, strings.TrimSpace(c)) + } + for item, i := range colIdx { + colsMap[item] = cols[i] + } + + // 值类型转换 + id, err := strconv.Atoi(colsMap["id"]) + if err != nil { + return nil, err + } + + // 不存在字段给默认值 + if colsMap["partitions"] == "" { + partitions = "NULL" + } else { + partitions = colsMap["partitions"] + } + + keylen = colsMap["key_len"] + + rows, err = strconv.Atoi(colsMap["Rows"]) + if err != nil { + rows = 0 + } + + filtered, err = strconv.ParseFloat(colsMap["filtered"], 64) + if err != nil { + filtered = 0.00 + } + if filtered > 100.00 { + filtered = 100.00 + } + + // 拼接结构体 + explainRows = append(explainRows, &ExplainRow{ + ID: id, + SelectType: colsMap["select_type"], + TableName: colsMap["table"], + Partitions: partitions, + AccessType: colsMap["type"], + PossibleKeys: strings.Split(colsMap["possible_keys"], ","), + Key: colsMap["key"], + KeyLen: keylen, + Ref: strings.Split(colsMap["ref"], ","), + Rows: rows, + Filtered: filtered, + Scalability: ExplainScalability[colsMap["type"]], + Extra: colsMap["extra"], + }) + } + return explainRows, nil +} + +// 解析文本形式竖排版 Explain信息 +func parseVerticalExplainText(content string) 
(explainRows []*ExplainRow, err error) { + var lines []string + explainRow := &ExplainRow{ + Partitions: "NULL", + Filtered: 0.00, + } + LS := regexp.MustCompile(`^\*.*\*$`) // 华丽的分隔线:) + + // 格式正确性检查 + for _, l := range strings.Split(content, "\n") { + lines = append(lines, strings.TrimSpace(l)) + } + if len(lines) < 11 { + return nil, errors.New("explain rows less than 11") + } + + // 将每一行填充至ExplainRow结构体 + for _, l := range lines { + if LS.MatchString(l) || strings.TrimSpace(l) == "" { + continue + } + if strings.HasPrefix(l, "id:") { + id := strings.TrimPrefix(l, "id: ") + explainRow.ID, err = strconv.Atoi(id) + if err != nil { + return nil, err + } + } + if strings.HasPrefix(l, "select_type:") { + explainRow.SelectType = strings.TrimPrefix(l, "select_type: ") + } + if strings.HasPrefix(l, "table:") { + explainRow.TableName = strings.TrimPrefix(l, "table: ") + } + if strings.HasPrefix(l, "partitions:") { + explainRow.AccessType = strings.TrimPrefix(l, "partitions: ") + } + if strings.HasPrefix(l, "type:") { + explainRow.AccessType = strings.TrimPrefix(l, "type: ") + explainRow.Scalability = ExplainScalability[explainRow.AccessType] + } + if strings.HasPrefix(l, "possible_keys:") { + explainRow.PossibleKeys = strings.Split(strings.TrimPrefix(l, "possible_keys: "), ",") + } + if strings.HasPrefix(l, "key:") { + explainRow.Key = strings.TrimPrefix(l, "key: ") + } + if strings.HasPrefix(l, "key_len:") { + keyLen := strings.TrimPrefix(l, "key_len: ") + explainRow.KeyLen = keyLen + } + if strings.HasPrefix(l, "ref:") { + explainRow.Ref = strings.Split(strings.TrimPrefix(l, "ref: "), ",") + } + if strings.HasPrefix(l, "Rows:") { + rows := strings.TrimPrefix(l, "Rows: ") + explainRow.Rows, err = strconv.Atoi(rows) + if err != nil { + explainRow.Rows = 0 + } + } + if strings.HasPrefix(l, "filtered:") { + filtered := strings.TrimPrefix(l, "filtered: ") + explainRow.Filtered, err = strconv.ParseFloat(filtered, 64) + if err != nil { + return nil, err + } else if 
explainRow.Filtered > 100.00 { + explainRow.Filtered = 100.00 + } + } + if strings.HasPrefix(l, "Extra:") { + explainRow.Extra = strings.TrimPrefix(l, "Extra: ") + explainRows = append(explainRows, explainRow) + } + } + return explainRows, err +} + +// 解析文本形式JSON Explain信息 +func parseJSONExplainText(content string) (*ExplainJSON, error) { + explainJSON := new(ExplainJSON) + err := json.Unmarshal(RemoveSQLComments([]byte(content)), explainJSON) + return explainJSON, err +} + +// ParseExplainResult 分析mysql执行explain的结果,返回ExplainInfo结构化数据 +func ParseExplainResult(res *QueryResult, formatType int) (exp *ExplainInfo, err error) { + exp = &ExplainInfo{ + ExplainFormat: formatType, + } + // JSON格式直接调用文本方式解析 + if formatType == JSONFormatExplain { + exp.ExplainJSON, err = parseJSONExplainText(res.Rows[0].Str(0)) + return exp, err + } + + // 生成表头 + colIdx := make(map[int]string) + for i, f := range res.Result.Fields() { + colIdx[i] = strings.ToLower(f.Name) + } + // 补全ExplainRows + var explainrows []*ExplainRow + for _, row := range res.Rows { + expRow := &ExplainRow{Partitions: "NULL", Filtered: 0.00} + // list到map的转换 + for i := range row { + switch colIdx[i] { + case "id": + expRow.ID = row.ForceInt(i) + case "select_type": + expRow.SelectType = row.Str(i) + case "table": + expRow.TableName = row.Str(i) + if expRow.TableName == "" { + expRow.TableName = "NULL" + } + case "type": + expRow.AccessType = row.Str(i) + if expRow.AccessType == "" { + expRow.AccessType = "NULL" + } + expRow.Scalability = ExplainScalability[expRow.AccessType] + case "possible_keys": + expRow.PossibleKeys = strings.Split(row.Str(i), ",") + case "key": + expRow.Key = row.Str(i) + if expRow.Key == "" { + expRow.Key = "NULL" + } + case "key_len": + expRow.KeyLen = row.Str(i) + case "ref": + expRow.Ref = strings.Split(row.Str(i), ",") + case "rows": + expRow.Rows = row.ForceInt(i) + case "extra": + expRow.Extra = row.Str(i) + if expRow.Extra == "" { + expRow.Extra = "NULL" + } + case "filtered": + 
expRow.Filtered = row.ForceFloat(i) + // MySQL bug: https://bugs.mysql.com/bug.php?id=34124 + if expRow.Filtered > 100.00 { + expRow.Filtered = 100.00 + } + } + } + explainrows = append(explainrows, expRow) + } + exp.ExplainRows = explainrows + for _, w := range res.Warning { + // 'EXTENDED' is deprecated and will be removed in a future release. + if w.Int(1) != 1681 { + exp.Warnings = append(exp.Warnings, &ExplainWarning{Level: w.Str(0), Code: w.Int(1), Message: w.Str(2)}) + } + } + + // 添加 last_query_cost + exp.QueryCost = res.QueryCost + + return exp, err +} + +// Explain 获取SQL的explain信息 +func (db *Connector) Explain(sql string, explainType int, formatType int) (exp *ExplainInfo, err error) { + exp = &ExplainInfo{} + if explainType != TraditionalExplainType { + formatType = TraditionalFormatExplain + } + defer func() { + if e := recover(); e != nil { + const size = 4096 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + common.Log.Error("Recover Explain() Error: %v\n%v", e, string(buf)) + err = errors.New(fmt.Sprint(e)) + } + }() + + // 执行EXPLAIN请求 + res, err := db.executeExplain(sql, explainType, formatType) + if err != nil || res == nil { + return exp, err + } + + // 解析mysql结果,输出ExplainInfo + exp, err = ParseExplainResult(res, formatType) + + // 补全SQL + exp.SQL = sql + return exp, err +} + +// PrintMarkdownExplainTable 打印markdown格式的explain table +func PrintMarkdownExplainTable(exp *ExplainInfo) string { + var buf []string + rows := exp.ExplainRows + // JSON转换为TRADITIONAL格式 + if exp.ExplainFormat == JSONFormatExplain { + buf = append(buf, fmt.Sprint("以下为JSON格式转为传统格式EXPLAIN表格", "\n\n")) + rows = ConvertExplainJSON2Row(exp.ExplainJSON) + } + + // explain出错 + if len(rows) == 0 { + return "" + } + if exp.ExplainFormat == JSONFormatExplain { + buf = append(buf, fmt.Sprintln("| table | partitions | type | possible\\_keys | key | key\\_len | ref | rows | filtered | scalability | Extra |")) + buf = append(buf, 
fmt.Sprintln("|---|---|---|---|---|---|---|---|---|---|---|")) + for _, row := range rows { + buf = append(buf, fmt.Sprintln("|", row.TableName, "|", row.Partitions, "|", row.AccessType, + "|", strings.Join(row.PossibleKeys, ","), "|", row.Key, "|", row.KeyLen, "|", + strings.Join(row.Ref, ","), "|", row.Rows, "|", fmt.Sprintf("%.2f%s", row.Filtered, "%"), + "|", row.Scalability, "|", row.Extra, "|")) + } + } else { + buf = append(buf, fmt.Sprintln("| id | select\\_type | table | partitions | type | possible_keys | key | key\\_len | ref | rows | filtered | scalability | Extra |")) + buf = append(buf, fmt.Sprintln("|---|---|---|---|---|---|---|---|---|---|---|---|---|")) + for _, row := range rows { + // 加粗 + rows := fmt.Sprint(row.Rows) + if row.Rows >= common.Config.ExplainMaxRows { + rows = "☠️ **" + rows + "**" + } + filtered := fmt.Sprintf("%.2f%s", row.Filtered, "%") + if row.Filtered >= common.Config.ExplainMaxFiltered { + filtered = "☠️ **" + filtered + "**" + } + scalability := row.Scalability + for _, s := range common.Config.ExplainWarnScalability { + scalability = "☠️ **" + s + "**" + } + buf = append(buf, fmt.Sprintln("|", row.ID, " |", + common.MarkdownEscape(row.SelectType), + "| *"+common.MarkdownEscape(row.TableName)+"* |", + common.MarkdownEscape(row.Partitions), "|", + common.MarkdownEscape(row.AccessType), "|", + common.MarkdownEscape(strings.Join(row.PossibleKeys, ",
")), "|", + common.MarkdownEscape(row.Key), "|", + row.KeyLen, "|", + common.MarkdownEscape(strings.Join(row.Ref, ",
")), + "|", rows, "|", + filtered, "|", scalability, "|", + strings.Replace(common.MarkdownEscape(row.Extra), ",", ",
", -1), + "|")) + } + } + buf = append(buf, "\n") + return strings.Join(buf, "") +} diff --git a/database/explain_test.go b/database/explain_test.go new file mode 100644 index 00000000..6cd6e60b --- /dev/null +++ b/database/explain_test.go @@ -0,0 +1,2454 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package database + +import ( + "fmt" + "os" + "testing" + + "github.com/XiaoMi/soar/common" + + "github.com/kr/pretty" +) + +var connTest *Connector + +func init() { + common.BaseDir = common.DevPath + common.ParseConfig("") + connTest = &Connector{ + Addr: common.Config.OnlineDSN.Addr, + User: common.Config.OnlineDSN.User, + Pass: common.Config.OnlineDSN.Password, + Database: common.Config.OnlineDSN.Schema, + } + if _, err := connTest.Version(); err != nil { + common.Log.Critical("Test env Error: %v", err) + os.Exit(0) + } +} + +var sqls = []string{ + `select * from city where country_id = 44;`, + `select * from address where address2 is not null;`, + `select * from address where address2 is null;`, + `select * from address where address2 >= 44;`, + `select * from city where country_id between 44 and 107;`, + `select * from city where city like 'Ad%';`, + `select * from city where city = 'Aden' and country_id = 107;`, + `select * from city where country_id > 31 and city = 'Aden';`, + `select * from address where address_id > 8 and city_id < 400 and district = 'Nantou';`, + `select * from address where address_id 
> 8 and city_id < 400;`, + `select * from actor where last_update='2006-02-15 04:34:33' and last_name='CHASE' group by first_name;`, + `select * from address where last_update >='2014-09-25 22:33:47' group by district;`, + `select * from address group by address,district;`, + `select * from address where last_update='2014-09-25 22:30:27' group by district,(address_id+city_id);`, + `select * from customer where active=1 order by last_name limit 10;`, + `select * from customer order by last_name limit 10;`, + `select * from customer where address_id > 224 order by address_id limit 10;`, + `select * from customer where address_id < 224 order by address_id limit 10;`, + `select * from customer where active=1 order by last_name;`, + `select * from customer where address_id > 224 order by address_id;`, + `select * from customer where address_id in (224,510) order by last_name;`, + `select city from city where country_id = 44;`, + `select city,city_id from city where country_id = 44 and last_update='2006-02-15 04:45:25';`, + `select city from city where country_id > 44 and last_update > '2006-02-15 04:45:25';`, + `select * from city where country_id=1 and city='Kabul' order by last_update;`, + `select * from city where country_id>1 and city='Kabul' order by last_update;`, + `select * from city where city_id>251 order by last_update;`, + `select * from city i inner join country o on i.country_id=o.country_id;`, + `select * from city i left join country o on i.city_id=o.country_id;`, + `select * from city i right join country o on i.city_id=o.country_id;`, + `select * from city i left join country o on i.city_id=o.country_id where o.country_id is null;`, + `select * from city i right join country o on i.city_id=o.country_id where i.city_id is null;`, + `select * from city i left join country o on i.city_id=o.country_id union select * from city i right join country o on i.city_id=o.country_id;`, + `select * from city i left join country o on i.city_id=o.country_id where 
o.country_id is null union select * from city i right join country o on i.city_id=o.country_id where i.city_id is null;`, + `select first_name,last_name,email from customer natural left join address;`, + `select first_name,last_name,email from customer natural left join address;`, + `select first_name,last_name,email from customer natural right join address;`, + `select first_name,last_name,email from customer STRAIGHT_JOIN address on customer.address_id=address.address_id;`, + `select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;`, +} + +var exp = []string{ + `+----+-------------+---------+-------+---------------------------------------------------------+-------------------+---------+---------------------------+------+-------------+ +| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra | ++----+-------------+---------+-------+---------------------------------------------------------+-------------------+---------+---------------------------+------+-------------+ +| 1 | SIMPLE | country | index | PRIMARY,country_id | country | 152 | NULL | 109 | Using index | +| 1 | SIMPLE | city | ref | idx_fk_country_id,idx_country_id_city,idx_all,idx_other | idx_fk_country_id | 2 | sakila.country.country_id | 2 | Using index | ++----+-------------+---------+-------+---------------------------------------------------------+-------------------+---------+---------------------------+------+-------------+`, + `+----+-------------+---------+------------+-------+-------------------+-------------------+---------+---------------------------+------+----------+-------------+ +| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra | 
++----+-------------+---------+------------+-------+-------------------+-------------------+---------+---------------------------+------+----------+-------------+ +| 1 | SIMPLE | country | NULL | index | PRIMARY | PRIMARY | 2 | NULL | 109 | 100.00 | Using index | +| 1 | SIMPLE | city | NULL | ref | idx_fk_country_id | idx_fk_country_id | 2 | sakila.country.country_id | 5 | 100.00 | Using index | ++----+-------------+---------+------------+-------+-------------------+-------------------+---------+---------------------------+------+----------+-------------+`, + `*************************** 1. row *************************** + id: 1 + select_type: SIMPLE + table: country + type: index +possible_keys: PRIMARY,country_id + key: country + key_len: 152 + ref: NULL + rows: 109 + Extra: Using index +*************************** 2. row *************************** + id: 1 + select_type: SIMPLE + table: city + type: ref +possible_keys: idx_fk_country_id,idx_country_id_city,idx_all,idx_other + key: idx_fk_country_id + key_len: 2 + ref: sakila.country.country_id + rows: 2 + Extra: Using index`, + `+----+-------------+---------+------------+-------+---------------------------------------------------------+-------------------+---------+---------------------------+------+-------------+ +| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | Extra | ++----+-------------+---------+------------+-------+---------------------------------------------------------+-------------------+---------+---------------------------+------+-------------+ +| 1 | SIMPLE | country | NULL | index | PRIMARY,country_id | country | 152 | NULL | 109 | Using index | +| 1 | SIMPLE | city | NULL | ref | idx_fk_country_id,idx_country_id_city,idx_all,idx_other | idx_fk_country_id | 2 | sakila.country.country_id | 2 | Using index | 
++----+-------------+---------+------------+-------+---------------------------------------------------------+-------------------+---------+---------------------------+------+-------------+`, + `{ + "query_block": { + "select_id": 1, + "message": "No tables used" + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "message": "no matching row in const table" + } +}`, + `{ + "query_block": { + "select_id": 1, + "table": { + "insert": true, + "table_name": "t1", + "access_type": "ALL" + } /* table */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "message": "no matching row in const table" + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "message": "no matching row in const table" + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "13.50" + } /* cost_info */, + "table": { + "table_name": "a4", + "access_type": "ALL", + "rows_examined_per_scan": 14, + "rows_produced_per_join": 14, + "filtered": "100.00", + "cost_info": { + "read_cost": "10.70", + "eval_cost": "2.80", + "prefix_cost": "13.50", + "data_read_per_join": "224" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "materialized_from_subquery": { + "using_temporary_table": true, + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "13.50" + } /* cost_info */, + "table": { + "table_name": "a3", + "access_type": "ALL", + "rows_examined_per_scan": 14, + "rows_produced_per_join": 14, + "filtered": "100.00", + "cost_info": { + "read_cost": "10.70", + "eval_cost": "2.80", + "prefix_cost": "13.50", + "data_read_per_join": "224" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "materialized_from_subquery": { + "using_temporary_table": true, + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "cost_info": { + "query_cost": "13.50" + } /* cost_info */, + "table": { + 
"table_name": "a2", + "access_type": "ALL", + "rows_examined_per_scan": 14, + "rows_produced_per_join": 14, + "filtered": "100.00", + "cost_info": { + "read_cost": "10.70", + "eval_cost": "2.80", + "prefix_cost": "13.50", + "data_read_per_join": "224" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "materialized_from_subquery": { + "using_temporary_table": true, + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 4, + "cost_info": { + "query_cost": "15.55" + } /* cost_info */, + "nested_loop": [ + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */ + } /* table */ + }, + { + "table": { + "table_name": "a1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 14, + "filtered": "100.00", + "using_join_buffer": "Block Nested Loop", + "cost_info": { + "read_cost": "10.35", + "eval_cost": "2.80", + "prefix_cost": "15.55", + "data_read_per_join": "224" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "materialized_from_subquery": { + "using_temporary_table": true, + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 5, + "cost_info": { + "query_cost": "3.41" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 7, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.40", + "prefix_cost": "3.41", + "data_read_per_join": "56" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } /* materialized_from_subquery */ + } /* table */ + } + ] /* nested_loop */ + } /* query_block */ + } /* materialized_from_subquery */ + } /* table */ + } /* query_block */ + 
} /* materialized_from_subquery */ + } /* table */ + } /* query_block */ + } /* materialized_from_subquery */ + } /* table */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "5.81" + } /* cost_info */, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 0, + "filtered": "14.29", + "cost_info": { + "read_cost": "3.21", + "eval_cost": "0.20", + "prefix_cost": "3.41", + "data_read_per_join": "7" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "attached_condition": "(test.t1.i = 10)" + } /* table */ + }, + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 0, + "filtered": "50.00", + "first_match": "t1", + "using_join_buffer": "Block Nested Loop", + "cost_info": { + "read_cost": "2.20", + "eval_cost": "0.20", + "prefix_cost": "5.82", + "data_read_per_join": "7" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "attached_condition": "(test.t2.i = 10)" + } /* table */ + } + ] /* nested_loop */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "3.41" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 7, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.40", + "prefix_cost": "3.41", + "data_read_per_join": "56" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "attached_condition": "((test.t1.i ,(/* select#2 */ select 1 from test.t2 where ((test.t1.i = 10) and ((test.t1.i) = test.t2.i)))) or (test.t1.i.test.t1.i in ( (/* select#3 */ select NULL from test.t4 where 1 ), (test.t1.i in on where ((test.t1.i = materialized-subquery.i))))))", + "attached_subqueries": [ + { + "table": { + "table_name": "", + "access_type": "eq_ref", + 
"key": "", + "key_length": "5", + "rows_examined_per_scan": 1, + "materialized_from_subquery": { + "using_temporary_table": true, + "dependent": true, + "cacheable": false, + "query_block": { + "select_id": 3, + "message": "no matching row in const table" + } /* query_block */ + } /* materialized_from_subquery */ + } /* table */ + }, + { + "dependent": true, + "cacheable": false, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 1, + "filtered": "50.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.20", + "prefix_cost": "2.40", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "attached_condition": "((test.t1.i = 10) and ((test.t1.i) = test.t2.i))" + } /* table */ + } /* query_block */ + } + ] /* attached_subqueries */ + } /* table */ + } /* query_block */ +}`, + `{ + "query_block": { + "union_result": { + "using_temporary_table": true, + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "3.41" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 7, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.40", + "prefix_cost": "3.41", + "data_read_per_join": "56" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + }, + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": 
"2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + }, + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "message": "no matching row in const table" + } /* query_block */ + } + ] /* query_specifications */ + } /* union_result */ + } /* query_block */ +}`, + `{ + "query_block": { + "union_result": { + "using_temporary_table": false, + "query_specifications": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "7.21" + } /* cost_info */, + "nested_loop": [ + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */ + } /* table */ + }, + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 14, + "filtered": "100.00", + "using_join_buffer": "Block Nested Loop", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "2.80", + "prefix_cost": "7.22", + "data_read_per_join": "112" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } + ] /* nested_loop */ + } /* query_block */ + }, + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "message": "no matching row in const table" + } /* query_block */ + } + ] /* query_specifications */ + } /* union_result */ + } /* query_block */ +}`, + `{ + "query_block": { + "ordering_operation": { + "using_filesort": true, + "union_result": { + "using_temporary_table": true, + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 1, + "cost_info": 
{ + "query_cost": "3.41" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 7, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.40", + "prefix_cost": "3.41", + "data_read_per_join": "56" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + }, + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* query_specifications */ + } /* union_result */, + "order_by_subqueries": [ + { + "dependent": true, + "cacheable": false, + "query_block": { + "select_id": 3, + "message": "No tables used" + } /* query_block */ + } + ] /* order_by_subqueries */ + } /* ordering_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "3.41" + } /* cost_info */, + "ordering_operation": { + "using_filesort": false, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 7, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.40", + "prefix_cost": "3.41", + "data_read_per_join": "56" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */, + "optimized_away_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + 
"rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */ + } /* table */ + } /* query_block */ + } + ] /* optimized_away_subqueries */ + } /* ordering_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "3.41" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 7, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.40", + "prefix_cost": "3.41", + "data_read_per_join": "56" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */, + "having_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + }, + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* having_subqueries */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + 
"query_cost": "10.41" + } /* cost_info */, + "grouping_operation": { + "using_temporary_table": true, + "using_filesort": true, + "cost_info": { + "sort_cost": "7.00" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 7, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.40", + "prefix_cost": "3.41", + "data_read_per_join": "56" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */, + "group_by_subqueries": [ + { + "dependent": true, + "cacheable": false, + "query_block": { + "select_id": 3, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "attached_condition": "(outer_field_is_not_null, (((test.t1.i) >= test.t2.i) or isnull(test.t2.i)), true)" + } /* table */ + } /* query_block */ + }, + { + "dependent": true, + "cacheable": false, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */, + "attached_condition": "(outer_field_is_not_null, (((test.t1.i) <= test.t2.i) or isnull(test.t2.i)), true)" + } /* table */ + } /* query_block */ + } + ] /* group_by_subqueries */ + } /* grouping_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "3.41" + } /* 
cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 7, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.40", + "prefix_cost": "3.41", + "data_read_per_join": "56" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */, + "select_list_subqueries": [ + { + "dependent": false, + "cacheable": false, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "3.41" + } /* cost_info */, + "ordering_operation": { + "using_temporary_table": true, + "using_filesort": true, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 7, + "rows_produced_per_join": 7, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.40", + "prefix_cost": "3.41", + "data_read_per_join": "56" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* ordering_operation */ + } /* query_block */ + } + ] /* select_list_subqueries */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "message": "no matching row in const table" + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "5.21" + } /* cost_info */, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "32" + } /* cost_info */, + "used_columns": [ + "a", + "b" + ] /* used_columns */, + "attached_condition": "(((/* select#3 */ select test.t3.e from test.t3),(/* select#4 */ select 1 from test.t3 where (test.t1.b and (outer_field_is_not_null, ((((/* select#3 */ select test.t3.e from test.t3)) < test.t3.e) or isnull(test.t3.e)), true)) having (outer_field_is_not_null, (test.t3.e), true))))", + 
"attached_subqueries": [ + { + "dependent": true, + "cacheable": false, + "query_block": { + "select_id": 4, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t3", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "e" + ] /* used_columns */, + "attached_condition": "(test.t1.b and (outer_field_is_not_null, ((((/* select#3 */ select test.t3.e from test.t3)) < test.t3.e) or isnull(test.t3.e)), true))" + } /* table */ + } /* query_block */ + }, + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t3", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "e" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* attached_subqueries */ + } /* table */ + }, + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "50.00", + "first_match": "t1", + "using_join_buffer": "Block Nested Loop", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "5.21", + "data_read_per_join": "32" + } /* cost_info */, + "used_columns": [ + "c" + ] /* used_columns */, + "attached_condition": "(test.t2.c = test.t1.a)" + } /* table */ + } + ] /* nested_loop */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "35.44" + } /* cost_info */, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + 
"rows_examined_per_scan": 12, + "rows_produced_per_join": 12, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.02", + "eval_cost": "2.40", + "prefix_cost": "4.42", + "data_read_per_join": "96" + } /* cost_info */, + "used_columns": [ + "a" + ] /* used_columns */, + "attached_condition": "((test.t1.a is not null) and (test.t1.a is not null))" + } /* table */ + }, + { + "table": { + "table_name": "", + "access_type": "eq_ref", + "key": "", + "key_length": "5", + "ref": [ + "test.t1.a" + ] /* ref */, + "rows_examined_per_scan": 1, + "materialized_from_subquery": { + "using_temporary_table": true, + "query_block": { + "nested_loop": [ + { + "table": { + "table_name": "t4", + "access_type": "ALL", + "rows_examined_per_scan": 12, + "rows_produced_per_join": 3, + "filtered": "33.33", + "cost_info": { + "read_cost": "3.62", + "eval_cost": "0.80", + "prefix_cost": "4.42", + "data_read_per_join": "31" + } /* cost_info */, + "used_columns": [ + "a" + ] /* used_columns */, + "attached_condition": "(test.t4.a > 0)" + } /* table */ + }, + { + "table": { + "table_name": "t3", + "access_type": "ALL", + "rows_examined_per_scan": 12, + "rows_produced_per_join": 4, + "filtered": "10.00", + "using_join_buffer": "Block Nested Loop", + "cost_info": { + "read_cost": "2.02", + "eval_cost": "0.96", + "prefix_cost": "16.04", + "data_read_per_join": "38" + } /* cost_info */, + "used_columns": [ + "a" + ] /* used_columns */, + "attached_condition": "(test.t3.a = test.t4.a)" + } /* table */ + } + ] /* nested_loop */ + } /* query_block */ + } /* materialized_from_subquery */ + } /* table */ + }, + { + "table": { + "table_name": "", + "access_type": "eq_ref", + "key": "", + "key_length": "5", + "ref": [ + "test.t1.a" + ] /* ref */, + "rows_examined_per_scan": 1, + "materialized_from_subquery": { + "using_temporary_table": true, + "query_block": { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 12, + "rows_produced_per_join": 3, + "filtered": 
"33.33", + "cost_info": { + "read_cost": "3.62", + "eval_cost": "0.80", + "prefix_cost": "4.42", + "data_read_per_join": "31" + } /* cost_info */, + "used_columns": [ + "a" + ] /* used_columns */, + "attached_condition": "(test.t2.a > 0)" + } /* table */ + } /* query_block */ + } /* materialized_from_subquery */ + } /* table */ + } + ] /* nested_loop */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "1.20" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "1.00", + "eval_cost": "0.20", + "prefix_cost": "1.20", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i1", + "c1" + ] /* used_columns */, + "attached_condition": "exists(/* select#2 */ select test.t2.c1 from test.t2 join test.t3 where ((test.t2.c1 = test.t3.c1) and (test.t2.c2 = (/* select#3 */ select min(test.t3.c1) from test.t3)) and ((/* select#3 */ select min(test.t3.c1) from test.t3) <> test.t1.c1)))", + "attached_subqueries": [ + { + "dependent": true, + "cacheable": false, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "nested_loop": [ + { + "table": { + "table_name": "t3", + "access_type": "ALL", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "1.00", + "eval_cost": "0.20", + "prefix_cost": "1.20", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "c1" + ] /* used_columns */, + "attached_condition": "((/* select#3 */ select min(test.t3.c1) from test.t3) <> test.t1.c1)", + "attached_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "cost_info": { + "query_cost": "1.20" + } /* cost_info */, + "table": { + "table_name": "t3", + "access_type": "ALL", + "rows_examined_per_scan": 1, + 
"rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "1.00", + "eval_cost": "0.20", + "prefix_cost": "1.20", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "c1" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* attached_subqueries */ + } /* table */ + }, + { + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": [ + "c1" + ] /* possible_keys */, + "key": "c1", + "used_key_parts": [ + "c1" + ] /* used_key_parts */, + "key_length": "3", + "ref": [ + "test.t3.c1" + ] /* ref */, + "rows_examined_per_scan": 1, + "rows_produced_per_join": 0, + "filtered": "50.00", + "cost_info": { + "read_cost": "1.00", + "eval_cost": "0.10", + "prefix_cost": "2.40", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "c1", + "c2" + ] /* used_columns */, + "attached_condition": "(test.t2.c2 = (/* select#3 */ select min(test.t3.c1) from test.t3))", + "attached_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "cost_info": { + "query_cost": "1.20" + } /* cost_info */, + "table": { + "table_name": "t3", + "access_type": "ALL", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "1.00", + "eval_cost": "0.20", + "prefix_cost": "1.20", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "c1" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* attached_subqueries */ + } /* table */ + } + ] /* nested_loop */ + } /* query_block */ + } + ] /* attached_subqueries */ + } /* table */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "20.82" + } /* cost_info */, + "duplicates_removal": { + "using_temporary_table": true, + "nested_loop": [ + { + "table": { + "table_name": "t5", + "access_type": "ALL", + "rows_examined_per_scan": 3, + "rows_produced_per_join": 3, + "filtered": 
"100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "0.60", + "prefix_cost": "2.61", + "data_read_per_join": "24" + } /* cost_info */, + "used_columns": [ + "c" + ] /* used_columns */ + } /* table */ + }, + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 3, + "rows_produced_per_join": 3, + "filtered": "33.33", + "using_join_buffer": "Block Nested Loop", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "0.60", + "prefix_cost": "6.41", + "data_read_per_join": "48" + } /* cost_info */, + "used_columns": [ + "c", + "c_key" + ] /* used_columns */, + "attached_condition": "(test.t2.c = test.t5.c)" + } /* table */ + }, + { + "table": { + "table_name": "t1", + "access_type": "index", + "possible_keys": [ + "c_key" + ] /* possible_keys */, + "key": "c_key", + "used_key_parts": [ + "c_key" + ] /* used_key_parts */, + "key_length": "5", + "rows_examined_per_scan": 3, + "rows_produced_per_join": 3, + "filtered": "33.33", + "using_index": true, + "using_join_buffer": "Block Nested Loop", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "0.60", + "prefix_cost": "12.22", + "data_read_per_join": "24" + } /* cost_info */, + "used_columns": [ + "c_key" + ] /* used_columns */, + "attached_condition": "(test.t1.c_key = test.t2.c_key)" + } /* table */ + }, + { + "table": { + "table_name": "t4", + "access_type": "ALL", + "rows_examined_per_scan": 3, + "rows_produced_per_join": 3, + "filtered": "33.33", + "using_join_buffer": "Block Nested Loop", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "0.60", + "prefix_cost": "16.02", + "data_read_per_join": "48" + } /* cost_info */, + "used_columns": [ + "c", + "c_key" + ] /* used_columns */, + "attached_condition": "((test.t4.c = test.t5.c) and (test.t4.c_key is not null))" + } /* table */ + }, + { + "table": { + "table_name": "t3", + "access_type": "ref", + "possible_keys": [ + "c_key" + ] /* possible_keys */, + "key": "c_key", + "used_key_parts": [ + "c_key" + ] /* 
used_key_parts */, + "key_length": "5", + "ref": [ + "test.t4.c_key" + ] /* ref */, + "rows_examined_per_scan": 1, + "rows_produced_per_join": 3, + "filtered": "100.00", + "using_index": true, + "cost_info": { + "read_cost": "3.00", + "eval_cost": "0.60", + "prefix_cost": "20.82", + "data_read_per_join": "24" + } /* cost_info */, + "used_columns": [ + "c_key" + ] /* used_columns */ + } /* table */ + } + ] /* nested_loop */ + } /* duplicates_removal */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "table": { + "update": true, + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 1, + "filtered": "100.00" + } /* table */, + "update_value_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "1.00" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* update_value_subqueries */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "1.00" + } /* cost_info */, + "nested_loop": [ + { + "table": { + "update": true, + "table_name": "t1", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + }, + { + "table": { + "table_name": "t2", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", 
+ "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */ + } /* table */ + } + ] /* nested_loop */, + "update_value_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "1.00" + } /* cost_info */, + "table": { + "table_name": "t3", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* update_value_subqueries */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "1.00" + } /* cost_info */, + "table": { + "insert": true, + "table_name": "t1", + "access_type": "ALL" + } /* table */, + "insert_from": { + "table": { + "table_name": "t2", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* insert_from */, + "update_value_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "1.00" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* update_value_subqueries */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "table": { + "insert": 
true, + "table_name": "t1", + "access_type": "ALL" + } /* table */, + "update_value_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "1.00" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* update_value_subqueries */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "table": { + "insert": true, + "table_name": "t3", + "access_type": "ALL" + } /* table */, + "optimized_away_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "cost_info": { + "query_cost": "1.00" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + }, + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "1.00" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */ + } /* query_block */ + } + ] /* optimized_away_subqueries */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + 
"cost_info": { + "query_cost": "10.50" + } /* cost_info */, + "ordering_operation": { + "using_filesort": true, + "grouping_operation": { + "using_temporary_table": true, + "using_filesort": false, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "10.10", + "eval_cost": "0.40", + "prefix_cost": "10.50", + "data_read_per_join": "48" + } /* cost_info */, + "used_columns": [ + "a", + "b" + ] /* used_columns */, + "materialized_from_subquery": { + "using_temporary_table": true, + "dependent": false, + "cacheable": true, + "query_block": { + "union_result": { + "using_temporary_table": false, + "query_specifications": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "message": "No tables used" + } /* query_block */ + }, + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "message": "No tables used" + } /* query_block */ + } + ] /* query_specifications */ + } /* union_result */ + } /* query_block */ + } /* materialized_from_subquery */ + } /* table */ + } /* grouping_operation */ + } /* ordering_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "4.40" + } /* cost_info */, + "grouping_operation": { + "using_temporary_table": true, + "using_filesort": true, + "cost_info": { + "sort_cost": "2.00" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "32" + } /* cost_info */, + "used_columns": [ + "a" + ] /* used_columns */ + } /* table */, + "group_by_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "1.00" + } 
/* cost_info */, + "table": { + "table_name": "d", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "b" + ] /* used_columns */, + "materialized_from_subquery": { + "using_temporary_table": true, + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "cost_info": { + "query_cost": "5.21" + } /* cost_info */, + "ordering_operation": { + "using_temporary_table": true, + "using_filesort": true, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "32" + } /* cost_info */, + "used_columns": [ + "a", + "b" + ] /* used_columns */ + } /* table */ + }, + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 4, + "filtered": "100.00", + "using_join_buffer": "Block Nested Loop", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.80", + "prefix_cost": "5.21", + "data_read_per_join": "64" + } /* cost_info */ + } /* table */ + } + ] /* nested_loop */ + } /* ordering_operation */ + } /* query_block */ + } /* materialized_from_subquery */ + } /* table */ + } /* query_block */ + } + ] /* group_by_subqueries */ + } /* grouping_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info 
*/ + } /* table */, + "optimized_away_subqueries": [ + { + "dependent": false, + "cacheable": true, + "query_block": { + "select_id": 3, + "cost_info": { + "query_cost": "4.40" + } /* cost_info */, + "grouping_operation": { + "using_temporary_table": true, + "using_filesort": true, + "cost_info": { + "sort_cost": "2.00" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "f1" + ] /* used_columns */ + } /* table */ + } /* grouping_operation */ + } /* query_block */ + } + ] /* optimized_away_subqueries */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "4.02" + } /* cost_info */, + "ordering_operation": { + "using_filesort": true, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 10, + "rows_produced_per_join": 10, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.02", + "eval_cost": "2.00", + "prefix_cost": "4.02", + "data_read_per_join": "80" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */, + "order_by_subqueries": [ + { + "dependent": true, + "cacheable": false, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "4.02" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 10, + "rows_produced_per_join": 1, + "filtered": "10.00", + "cost_info": { + "read_cost": "2.02", + "eval_cost": "0.20", + "prefix_cost": "4.02", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "i", + "j" + ] /* used_columns */, + "attached_condition": "(test.t2.i = test.t1.i)" + } /* table */ + } /* query_block */ + } + ] /* order_by_subqueries */ + } /* ordering_operation */ + } /* query_block 
*/ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "4.02" + } /* cost_info */, + "grouping_operation": { + "using_temporary_table": true, + "using_filesort": true, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 10, + "rows_produced_per_join": 10, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.02", + "eval_cost": "2.00", + "prefix_cost": "4.02", + "data_read_per_join": "80" + } /* cost_info */, + "used_columns": [ + "i" + ] /* used_columns */ + } /* table */, + "group_by_subqueries": [ + { + "dependent": true, + "cacheable": false, + "query_block": { + "select_id": 2, + "cost_info": { + "query_cost": "4.02" + } /* cost_info */, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows_examined_per_scan": 10, + "rows_produced_per_join": 1, + "filtered": "10.00", + "cost_info": { + "read_cost": "2.02", + "eval_cost": "0.20", + "prefix_cost": "4.02", + "data_read_per_join": "16" + } /* cost_info */, + "used_columns": [ + "i", + "j" + ] /* used_columns */, + "attached_condition": "(test.t2.i = test.t1.i)" + } /* table */ + } /* query_block */ + } + ] /* group_by_subqueries */ + } /* grouping_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "6.50" + } /* cost_info */, + "ordering_operation": { + "using_temporary_table": true, + "using_filesort": true, + "grouping_operation": { + "using_filesort": false, + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": [ + "k1" + ] /* possible_keys */, + "key": "k1", + "used_key_parts": [ + "a" + ] /* used_key_parts */, + "key_length": "4", + "rows_examined_per_scan": 11, + "rows_produced_per_join": 11, + "filtered": "100.00", + "using_index_for_group_by": true, + "cost_info": { + "read_cost": "4.30", + "eval_cost": "2.20", + "prefix_cost": "6.50", + "data_read_per_join": "176" + } /* cost_info */, + "used_columns": [ + "a", + "b" + ] /* used_columns 
*/ + } /* table */ + } /* grouping_operation */ + } /* ordering_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "6.20" + } /* cost_info */, + "ordering_operation": { + "using_temporary_table": true, + "using_filesort": true, + "grouping_operation": { + "using_filesort": true, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "possible_keys": [ + "PRIMARY" + ] /* possible_keys */, + "rows_examined_per_scan": 3, + "rows_produced_per_join": 3, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "0.60", + "prefix_cost": "2.61", + "data_read_per_join": "48" + } /* cost_info */, + "used_columns": [ + "a", + "b" + ] /* used_columns */ + } /* table */ + }, + { + "table": { + "table_name": "t2", + "access_type": "ref", + "possible_keys": [ + "PRIMARY" + ] /* possible_keys */, + "key": "PRIMARY", + "used_key_parts": [ + "a" + ] /* used_key_parts */, + "key_length": "4", + "ref": [ + "test.t1.a" + ] /* ref */, + "rows_examined_per_scan": 1, + "rows_produced_per_join": 3, + "filtered": "100.00", + "using_index": true, + "cost_info": { + "read_cost": "3.00", + "eval_cost": "0.60", + "prefix_cost": "6.21", + "data_read_per_join": "48" + } /* cost_info */, + "used_columns": [ + "a", + "b" + ] /* used_columns */ + } /* table */ + } + ] /* nested_loop */ + } /* grouping_operation */ + } /* ordering_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "12.82" + } /* cost_info */, + "grouping_operation": { + "using_filesort": true, + "cost_info": { + "sort_cost": "9.00" + } /* cost_info */, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 9, + "rows_produced_per_join": 9, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.02", + "eval_cost": "1.80", + "prefix_cost": "3.82", + "data_read_per_join": "144" + } /* cost_info */, + "used_columns": [ + "a", + 
"b" + ] /* used_columns */ + } /* table */ + } /* grouping_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "3.01" + } /* cost_info */, + "ordering_operation": { + "using_filesort": true, + "duplicates_removal": { + "using_temporary_table": true, + "using_filesort": false, + "grouping_operation": { + "using_temporary_table": true, + "using_filesort": false, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows_examined_per_scan": 5, + "rows_produced_per_join": 5, + "filtered": "100.00", + "cost_info": { + "read_cost": "2.01", + "eval_cost": "1.00", + "prefix_cost": "3.01", + "data_read_per_join": "80" + } /* cost_info */, + "used_columns": [ + "a", + "b" + ] /* used_columns */ + } /* table */ + } /* grouping_operation */ + } /* duplicates_removal */ + } /* ordering_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "2.40" + } /* cost_info */, + "ordering_operation": { + "using_filesort": false, + "duplicates_removal": { + "using_temporary_table": true, + "using_filesort": false, + "buffer_result": { + "using_temporary_table": true, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "system", + "rows_examined_per_scan": 1, + "rows_produced_per_join": 1, + "filtered": "100.00", + "cost_info": { + "read_cost": "0.00", + "eval_cost": "0.20", + "prefix_cost": "0.00", + "data_read_per_join": "8" + } /* cost_info */, + "used_columns": [ + "a" + ] /* used_columns */ + } /* table */ + }, + { + "table": { + "table_name": "t2", + "access_type": "index", + "key": "PRIMARY", + "used_key_parts": [ + "a" + ] /* used_key_parts */, + "key_length": "4", + "rows_examined_per_scan": 2, + "rows_produced_per_join": 2, + "filtered": "100.00", + "using_index": true, + "distinct": true, + "cost_info": { + "read_cost": "2.00", + "eval_cost": "0.40", + "prefix_cost": "2.40", + "data_read_per_join": "16" + } /* cost_info */, + 
"used_columns": [ + "a" + ] /* used_columns */ + } /* table */ + } + ] /* nested_loop */ + } /* buffer_result */ + } /* duplicates_removal */ + } /* ordering_operation */ + } /* query_block */ +}`, + `{ + "query_block": { + "select_id": 1, + "cost_info": { + "query_cost": "6.41" + } /* cost_info */, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "possible_keys": [ + "PRIMARY" + ] /* possible_keys */, + "rows_examined_per_scan": 4, + "rows_produced_per_join": 3, + "filtered": "75.00", + "cost_info": { + "read_cost": "2.21", + "eval_cost": "0.60", + "prefix_cost": "2.81", + "data_read_per_join": "48" + } /* cost_info */, + "used_columns": [ + "a", + "b" + ] /* used_columns */, + "attached_condition": "(test.t1.b <> 30)" + } /* table */ + }, + { + "table": { + "table_name": "t2", + "access_type": "eq_ref", + "possible_keys": [ + "PRIMARY" + ] /* possible_keys */, + "key": "PRIMARY", + "used_key_parts": [ + "a" + ] /* used_key_parts */, + "key_length": "4", + "ref": [ + "test.t1.a" + ] /* ref */, + "rows_examined_per_scan": 1, + "rows_produced_per_join": 3, + "filtered": "100.00", + "using_index": true, + "cost_info": { + "read_cost": "3.00", + "eval_cost": "0.60", + "prefix_cost": "6.41", + "data_read_per_join": "24" + } /* cost_info */, + "used_columns": [ + "a" + ] /* used_columns */ + } /* table */ + } + ] /* nested_loop */ + } /* query_block */ +}`, +} + +func TestExplain(t *testing.T) { + for _, sql := range sqls { + exp, err := connTest.Explain(sql, TraditionalExplainType, TraditionalFormatExplain) + //exp, err := conn.Explain(sql, TraditionalExplainType, JSONFormatExplain) + fmt.Println("Old: ", sql) + fmt.Println("New: ", exp.SQL) + if err != nil { + fmt.Println(err) + } + pretty.Println(exp) + fmt.Println() + } +} + +func TestParseExplainText(t *testing.T) { + for _, content := range exp { + pretty.Println(string(RemoveSQLComments([]byte(content)))) + pretty.Println(ParseExplainText(content)) + } + /* + //length := 
len(exp) + pretty.Println(string(RemoveSQLComments([]byte(exp[9])))) + explainInfo, err := ParseExplainText(exp[9]) + pretty.Println(explainInfo) + fmt.Println(err) + */ +} + +func TestFindTablesInJson(t *testing.T) { + idx := 9 + for _, j := range exp[idx : idx+1] { + pretty.Println(j) + findTablesInJSON(j, 0) + } + pretty.Println(len(explainJSONTables), explainJSONTables) +} + +func TestFormatJsonIntoTraditional(t *testing.T) { + idx := 11 + for _, j := range exp[idx : idx+1] { + pretty.Println(j) + pretty.Println(FormatJSONIntoTraditional(j)) + } +} + +func TestPrintMarkdownExplainTable(t *testing.T) { + expInfo, err := connTest.Explain("select 1", TraditionalExplainType, TraditionalFormatExplain) + if err != nil { + t.Error(err) + } + err = common.GoldenDiff(func() { + PrintMarkdownExplainTable(expInfo) + }, t.Name(), update) + if err != nil { + t.Error(err) + } +} + +func TestExplainInfoTranslator(t *testing.T) { + expInfo, err := connTest.Explain("select 1", TraditionalExplainType, TraditionalFormatExplain) + if err != nil { + t.Error(err) + } + err = common.GoldenDiff(func() { + ExplainInfoTranslator(expInfo) + }, t.Name(), update) + if err != nil { + t.Error(err) + } +} + +func TestMySQLExplainWarnings(t *testing.T) { + expInfo, err := connTest.Explain("select 1", TraditionalExplainType, TraditionalFormatExplain) + if err != nil { + t.Error(err) + } + err = common.GoldenDiff(func() { + MySQLExplainWarnings(expInfo) + }, t.Name(), update) + if err != nil { + t.Error(err) + } +} + +func TestMySQLExplainQueryCost(t *testing.T) { + expInfo, err := connTest.Explain("select 1", TraditionalExplainType, TraditionalFormatExplain) + if err != nil { + t.Error(err) + } + err = common.GoldenDiff(func() { + MySQLExplainQueryCost(expInfo) + }, t.Name(), update) + if err != nil { + t.Error(err) + } +} + +func TestSupportExplainWrite(t *testing.T) { + _, err := connTest.supportExplainWrite() + if err != nil { + t.Error(err) + } +} diff --git a/database/mysql.go 
b/database/mysql.go new file mode 100644 index 00000000..8695453d --- /dev/null +++ b/database/mysql.go @@ -0,0 +1,310 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package database + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "regexp" + "strconv" + "strings" + "time" + + "github.com/XiaoMi/soar/ast" + "github.com/XiaoMi/soar/common" + + "github.com/ziutek/mymysql/mysql" + // mymysql driver + _ "github.com/ziutek/mymysql/native" + "vitess.io/vitess/go/vt/sqlparser" +) + +// Connector 数据库连接基本对象 +type Connector struct { + Addr string + User string + Pass string + Database string + Charset string +} + +// QueryResult 数据库查询返回值 +type QueryResult struct { + Rows []mysql.Row + Result mysql.Result + Error error + Warning []mysql.Row + QueryCost float64 +} + +// NewConnection 创建新连接 +func (db *Connector) NewConnection() mysql.Conn { + return mysql.New("tcp", "", db.Addr, db.User, db.Pass, db.Database) +} + +// Query 执行SQL +func (db *Connector) Query(sql string, params ...interface{}) (*QueryResult, error) { + // 测试环境如果检查是关闭的,则SQL不会被执行 + if common.Config.TestDSN.Disable { + return nil, errors.New("TestDsn Disable") + } + + // 数据库安全性检查:如果Connector的IP端口与TEST环境不一致,则启用SQL白名单 + // 不在白名单中的SQL不允许执行 + // 执行环境与test环境不相同 + if db.Addr != common.Config.TestDSN.Addr && db.dangerousQuery(sql) { + return nil, fmt.Errorf("query execution deny: execute SQL with DSN(%s/%s) '%s'", + db.Addr, db.Database, fmt.Sprintf(sql, 
params...)) + } + + common.Log.Debug("Execute SQL with DSN(%s/%s) : %s", db.Addr, db.Database, fmt.Sprintf(sql, params...)) + conn := db.NewConnection() + + // 设置SQL连接超时时间 + conn.SetTimeout(time.Duration(common.Config.ConnTimeOut) * time.Second) + defer conn.Close() + err := conn.Connect() + if err != nil { + return nil, err + } + + // 添加SQL执行超时限制 + ch := make(chan QueryResult, 1) + go func() { + res := QueryResult{} + res.Rows, res.Result, res.Error = conn.Query(sql, params...) + + if common.Config.ShowWarnings { + warning, _, err := conn.Query("SHOW WARNINGS") + if err == nil { + res.Warning = warning + } + } + + // SHOW WARNINGS并不会影响last_query_cost + if common.Config.ShowLastQueryCost { + cost, _, err := conn.Query("SHOW SESSION STATUS LIKE 'last_query_cost'") + if err == nil { + if len(cost) > 0 { + res.QueryCost = cost[0].Float(1) + } + } + } + + ch <- res + }() + + select { + case res := <-ch: + return &res, res.Error + case <-time.After(time.Duration(common.Config.QueryTimeOut) * time.Second): + return nil, errors.New("query execution timeout") + } + +} + +// Version 获取MySQL数据库版本 +func (db *Connector) Version() (int, error) { + // 从数据库中获取版本信息 + res, err := db.Query("select @@version") + if err != nil { + common.Log.Warn("(db *Connector) Version() Error: %v", err) + return -1, err + } + + // 从MySQL版本中获取版本号 + var reg *regexp.Regexp + var v int + reg, err = regexp.Compile(`[^0-9]+`) + if err != nil { + // 如果获取不到version信息,则以最新版本为准 + v = 999 + return v, err + } + version := reg.ReplaceAllString(res.Rows[0].Str(0), "")[:3] + v, err = strconv.Atoi(version) + if err != nil { + // 如果获取不到version信息,则以最新版本为准 + v = 999 + } + return v, err +} + +// Source execute sql from file +func (db *Connector) Source(file string) ([]*QueryResult, error) { + var sqlCounter int // SQL 计数器 + var result []*QueryResult + + fd, err := os.Open(file) + defer func() { + err = fd.Close() + if err != nil { + common.Log.Error("(db *Connector) Source(%s) fd.Close failed: %s", file, err.Error()) + 
} + }() + if err != nil { + common.Log.Warning("(db *Connector) Source(%s) os.Open failed: %s", file, err.Error()) + return nil, err + } + data, err := ioutil.ReadAll(fd) + if err != nil { + common.Log.Critical("ioutil.ReadAll Error: %s", err.Error()) + return nil, err + } + + sql := strings.TrimSpace(string(data)) + buf := strings.TrimSpace(sql) + for ; ; sqlCounter++ { + if buf == "" { + break + } + + // 查询请求切分 + sql, bufBytes := ast.SplitStatement([]byte(buf), []byte(common.Config.Delimiter)) + buf = string(bufBytes) + sql = strings.TrimSpace(sql) + common.Log.Debug("Source Query SQL: %s", sql) + + res, e := db.Query(sql) + if e != nil { + common.Log.Error("(db *Connector) Source Filename: %s, SQLCounter.: %d", file, sqlCounter) + return result, e + } + result = append(result, res) + } + return result, nil +} + +// SingleIntValue 获取某个int型变量的值 +func (db *Connector) SingleIntValue(option string) (int, error) { + // 从数据库中获取信息 + res, err := db.Query("select @@%s", option) + if err != nil { + common.Log.Warn("(db *Connector) SingleIntValue() Error: %v", err) + return -1, err + } + + return res.Rows[0].Int(0), err +} + +// ColumnCardinality 粒度计算 +func (db *Connector) ColumnCardinality(tb, col string) float64 { + // 获取该表上的已有的索引 + + // show table status 获取总行数(近似) + tbStatus, err := db.ShowTableStatus(tb) + if err != nil { + common.Log.Warn("(db *Connector) ColumnCardinality() ShowTableStatus Error: %v", err) + return 0 + } + + // 如果是视图或表中无数据,rowTotal 都为0 + // 视图不需要加索引,无数据相当于散粒度为1 + if len(tbStatus.Rows) == 0 { + common.Log.Debug("(db *Connector) ColumnCardinality() No table status: %s", tb) + return 1 + } + rowTotal := tbStatus.Rows[0].Rows + if rowTotal == 0 { + if common.Config.Sampling { + common.Log.Debug("ColumnCardinality, %s rowTotal == 0", tb) + } + return 1 + } + + // rowTotal > xxx 时保护数据库,不对该值计算散粒度,xxx可以在配置中设置 + if rowTotal > common.Config.MaxTotalRows { + return 0.5 + } + + // 计算该列散粒度 + res, err := db.Query("select count(distinct `%s`) from `%s`.`%s`", col, 
db.Database, tb) + if err != nil { + common.Log.Warn("(db *Connector) ColumnCardinality() Query Error: %v", err) + return 0 + } + + colNum := res.Rows[0].Float(0) + + // 散粒度区间:[0,1] + return colNum / float64(rowTotal) +} + +// IsView 判断表是否是视图 +func (db *Connector) IsView(tbName string) bool { + tbStatus, err := db.ShowTableStatus(tbName) + if err != nil { + common.Log.Error("(db *Connector) IsView Error: %v:", err) + return false + } + + if len(tbStatus.Rows) > 0 { + if tbStatus.Rows[0].Comment == "VIEW" { + return true + } + } + + return false + +} + +// RemoveSQLComments 去除SQL中的注释 +func RemoveSQLComments(sql []byte) []byte { + cmtReg := regexp.MustCompile(`("(""|[^"])*")|('(''|[^'])*')|(--[^\n\r]*)|(#.*)|(/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/)`) + + return cmtReg.ReplaceAllFunc(sql, func(s []byte) []byte { + if (s[0] == '"' && s[len(s)-1] == '"') || + (s[0] == '\'' && s[len(s)-1] == '\'') || + (string(s[:3]) == "/*!") { + return s + } + return []byte("") + }) +} + +// 为了防止在Online环境进行误操作,通过dangerousQuery来判断能否在Online执行 +func (db *Connector) dangerousQuery(query string) bool { + queries, err := sqlparser.SplitStatementToPieces(strings.TrimSpace(strings.ToLower(query))) + if err != nil { + return true + } + + for _, sql := range queries { + dangerous := true + whiteList := []string{ + "select", + "show", + "explain", + "describe", + } + + for _, prefix := range whiteList { + if strings.HasPrefix(sql, prefix) { + dangerous = false + break + } + } + + if dangerous { + return true + } + } + + return false +} diff --git a/database/mysql_test.go b/database/mysql_test.go new file mode 100644 index 00000000..2914c889 --- /dev/null +++ b/database/mysql_test.go @@ -0,0 +1,90 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package database + +import ( + "fmt" + "testing" + + "github.com/XiaoMi/soar/common" + "github.com/kr/pretty" +) + +// TODO: go test -race不通过待解决 +func TestQuery(t *testing.T) { + common.Config.QueryTimeOut = 1 + _, err := connTest.Query("select sleep(2)") + if err == nil { + t.Error("connTest.Query not timeout") + } +} + +func TestColumnCardinality(_ *testing.T) { + connTest.Database = "information_schema" + a := connTest.ColumnCardinality("TABLES", "TABLE_SCHEMA") + fmt.Println("TABLES.TABLE_SCHEMA:", a) +} + +func TestDangerousSQL(t *testing.T) { + testCase := map[string]bool{ + "select * from tb;delete from tb;": true, + "show database;": false, + "select * from t;": false, + "explain delete from t;": false, + } + + db := Connector{} + for sql, want := range testCase { + got := db.dangerousQuery(sql) + if got != want { + t.Errorf("SQL:%s got:%v want:%v", sql, got, want) + } + } +} + +func TestWarningsAndQueryCost(t *testing.T) { + common.Config.ShowWarnings = true + common.Config.ShowLastQueryCost = true + res, err := connTest.Query("explain select * from sakila.film") + if err != nil { + t.Error("Query Error: ", err) + } else { + for _, w := range res.Warning { + pretty.Println(w.Str(2)) + } + fmt.Println(res.QueryCost) + pretty.Println(err) + } +} + +func TestVersion(t *testing.T) { + version, err := connTest.Version() + if err != nil { + t.Error(err.Error()) + } + fmt.Println(version) +} + +func TestSource(t *testing.T) { + res, err := connTest.Source("testdata/" + t.Name() + ".sql") + if err != nil { + t.Error("Query Error: ", err) + 
} + if res[0].Rows[0].Int(0) != 1 || res[1].Rows[0].Int(0) != 1 { + t.Error("Source result not match, expect 1, 1") + } +} diff --git a/database/profiling.go b/database/profiling.go new file mode 100644 index 00000000..3de00dde --- /dev/null +++ b/database/profiling.go @@ -0,0 +1,135 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package database + +import ( + "errors" + "fmt" + "io" + "strings" + "time" + + "github.com/XiaoMi/soar/common" + + "vitess.io/vitess/go/vt/sqlparser" +) + +// Profiling show profile输出的结果 +type Profiling struct { + Rows []ProfilingRow +} + +// ProfilingRow show profile每一行信息 +type ProfilingRow struct { + Status string + Duration float64 + // TODO: 支持show profile all,不过目前看all的信息过多有点眼花缭乱 +} + +// Profiling 执行SQL,并对其Profiling +func (db *Connector) Profiling(sql string, params ...interface{}) (*QueryResult, error) { + // 过滤不需要profiling的SQL + switch sqlparser.Preview(sql) { + case sqlparser.StmtSelect, sqlparser.StmtUpdate, sqlparser.StmtDelete: + default: + return nil, errors.New("no need profiling") + } + + // 测试环境如果检查是关闭的,则SQL不会被执行 + if common.Config.TestDSN.Disable { + return nil, errors.New("TestDsn Disable") + } + + // 数据库安全性检查:如果Connector的IP端口与TEST环境不一致,则启用SQL白名单 + // 不在白名单中的SQL不允许执行 + // 执行环境与test环境不相同 + if db.Addr != common.Config.TestDSN.Addr && db.dangerousQuery(sql) { + return nil, fmt.Errorf("query execution deny: Execute SQL with DSN(%s/%s) '%s'", + db.Addr, db.Database, 
fmt.Sprintf(sql, params...)) + } + + common.Log.Debug("Execute SQL with DSN(%s/%s) : %s", db.Addr, db.Database, sql) + conn := db.NewConnection() + + // 设置SQL连接超时时间 + conn.SetTimeout(time.Duration(common.Config.ConnTimeOut) * time.Second) + defer conn.Close() + err := conn.Connect() + if err != nil { + return nil, err + } + + // 添加SQL执行超时限制 + ch := make(chan QueryResult, 1) + go func() { + // 开启Profiling + _, _, err = conn.Query("set @@profiling=1") + common.LogIfError(err, "") + + // 执行SQL,抛弃返回结果 + result, err := conn.Start(sql, params...) + if err != nil { + ch <- QueryResult{ + Error: err, + } + return + } + row := result.MakeRow() + for { + err = result.ScanRow(row) + if err == io.EOF { + break + } + } + + // 返回Profiling结果 + res := QueryResult{} + res.Rows, res.Result, res.Error = conn.Query("show profile") + _, _, err = conn.Query("set @@profiling=0") + common.LogIfError(err, "") + ch <- res + }() + + select { + case res := <-ch: + return &res, res.Error + case <-time.After(time.Duration(common.Config.QueryTimeOut) * time.Second): + return nil, errors.New("query execution timeout") + } +} + +func getProfiling(res *QueryResult) Profiling { + var rows []ProfilingRow + for _, row := range res.Rows { + rows = append(rows, ProfilingRow{ + Status: row.Str(0), + Duration: row.Float(1), + }) + } + return Profiling{Rows: rows} +} + +// FormatProfiling 格式化输出Profiling信息 +func FormatProfiling(res *QueryResult) string { + profiling := getProfiling(res) + str := []string{"| Status | Duration |"} + str = append(str, "| --- | --- |") + for _, row := range profiling.Rows { + str = append(str, fmt.Sprintf("| %s | %f |", row.Status, row.Duration)) + } + return strings.Join(str, "\n") +} diff --git a/database/profiling_test.go b/database/profiling_test.go new file mode 100644 index 00000000..4b226ba9 --- /dev/null +++ b/database/profiling_test.go @@ -0,0 +1,53 @@ +/* + * Copyright 2018 Xiaomi, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package database + +import ( + "testing" + + "github.com/XiaoMi/soar/common" + + "github.com/kr/pretty" +) + +func TestProfiling(t *testing.T) { + common.Config.QueryTimeOut = 1 + res, err := connTest.Profiling("select 1") + if err == nil { + pretty.Println(res) + } else { + t.Error(err) + } +} + +func TestFormatProfiling(t *testing.T) { + res, err := connTest.Profiling("select 1") + if err == nil { + pretty.Println(FormatProfiling(res)) + } else { + t.Error(err) + } +} + +func TestGetProfiling(t *testing.T) { + res, err := connTest.Profiling("select 1") + if err == nil { + pretty.Println(getProfiling(res)) + } else { + t.Error(err) + } +} diff --git a/database/sampling.go b/database/sampling.go new file mode 100644 index 00000000..6190c18e --- /dev/null +++ b/database/sampling.go @@ -0,0 +1,230 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package database + +import ( + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/XiaoMi/soar/common" + "github.com/ziutek/mymysql/mysql" +) + +/*-------------------- +* The following choice of minrows is based on the paper +* "Random sampling for histogram construction: how much is enough?" +* by Surajit Chaudhuri, Rajeev Motwani and Vivek Narasayya, in +* Proceedings of ACM SIGMOD International Conference on Management +* of Data, 1998, Pages 436-447. Their Corollary 1 to Theorem 5 +* says that for table size n, histogram size k, maximum relative +* error in bin size f, and error probability gamma, the minimum +* random sample size is +* r = 4 * k * ln(2*n/gamma) / f^2 +* Taking f = 0.5, gamma = 0.01, n = 10^6 rows, we obtain +* r = 305.82 * k +* Note that because of the log function, the dependence on n is +* quite weak; even at n = 10^12, a 300*k sample gives <= 0.66 +* bin size error with probability 0.99. So there's no real need to +* scale for n, which is a good thing because we don't necessarily +* know it at this point. 
+*-------------------- + */ + +// SamplingData 将数据从Remote拉取到 db 中 +func (db *Connector) SamplingData(remote Connector, tables ...string) error { + // 计算需要泵取的数据量 + wantRowsCount := 300 * common.Config.SamplingStatisticTarget + + // 设置数据采样单条SQL中value的数量 + // 该数值越大,在内存中缓存的data就越多,但相对的,插入时速度就越快 + maxValCount := 200 + + // 获取数据库连接对象 + conn := remote.NewConnection() + localConn := db.NewConnection() + + // 连接数据库 + err := conn.Connect() + defer conn.Close() + if err != nil { + return err + } + + err = localConn.Connect() + defer localConn.Close() + if err != nil { + return err + } + + for _, table := range tables { + // 表类型检查 + if remote.IsView(table) { + return nil + } + + tableStatus, err := remote.ShowTableStatus(table) + if err != nil { + return err + } + + if len(tableStatus.Rows) == 0 { + common.Log.Info("SamplingData, Table %s with no data, stop sampling", table) + return nil + } + + tableRows := tableStatus.Rows[0].Rows + if tableRows == 0 { + common.Log.Info("SamplingData, Table %s with no data, stop sampling", table) + return nil + } + + factor := float64(wantRowsCount) / float64(tableRows) + common.Log.Debug("SamplingData, tableRows: %d, wantRowsCount: %d, factor: %f", tableRows, wantRowsCount, factor) + + err = startSampling(conn, localConn, db.Database, table, factor, wantRowsCount, maxValCount) + if err != nil { + common.Log.Error("(db *Connector) SamplingData Error : %v", err) + } + } + return nil +} + +// 开始从环境中泵取数据 +// 因为涉及到的数据量问题,所以泵取与插入时同时进行的 +// TODO 加 ref link +func startSampling(conn, localConn mysql.Conn, database, table string, factor float64, wants, maxValCount int) error { + // 从线上数据库获取所需dump的表中所有列的数据类型,备用 + // 由于测试库中的库表为刚建立的,所以在information_schema中很可能没有这个表的信息 + var dataTypes []string + q := fmt.Sprintf("select DATA_TYPE from information_schema.COLUMNS where TABLE_SCHEMA='%s' and TABLE_NAME = '%s'", + database, table) + common.Log.Debug("Sampling data execute: %s", q) + rs, _, err := localConn.Query(q) + if err != nil { + 
common.Log.Debug("Sampling data got data type Err: %v", err) + } else { + for _, r := range rs { + dataTypes = append(dataTypes, r.Str(0)) + } + } + + // 生成where条件 + where := fmt.Sprintf("where RAND()<=%f", factor) + if factor >= 1 { + where = "" + } + + sql := fmt.Sprintf("select * from `%s` %s limit %d;", table, where, wants) + res, err := conn.Start(sql) + if err != nil { + return err + } + + // GetRow method allocates a new chunk of memory for every received row. + row := res.MakeRow() + rowCount := 0 + valCount := 0 + + // 获取所有的列名 + columns := make([]string, len(res.Fields())) + for i, filed := range res.Fields() { + columns[i] = filed.Name + } + colDef := strings.Join(columns, ",") + + // 开始填充数据 + var valList []string + for { + err := res.ScanRow(row) + if err == io.EOF { + // 扫描结束 + if len(valList) > 0 { + // 如果缓存中还存在未插入的数据,则把缓存中的数据刷新到DB中 + doSampling(localConn, database, table, colDef, strings.Join(valList, ",")) + } + break + } + + if err != nil { + return err + } + + values := make([]string, len(columns)) + for i := range row { + // TODO 不支持坐标类型的导出 + switch data := row[i].(type) { + case nil: + // str = "" + case []byte: + // 先尝试转成数字,如果报错则转换成string + v, err := row.Int64Err(i) + values[i] = strconv.FormatInt(v, 10) + if err != nil { + values[i] = string(data) + } + case time.Time: + values[i] = mysql.TimeString(data) + case time.Duration: + values[i] = mysql.DurationString(data) + default: + values[i] = fmt.Sprint(data) + } + + // 非text/varchar类的数据类型,如果dump出的数据为空,则说明该值为null值 + // 应转换其value为null,如果用空('')进行替代,会导致出现语法错误。 + if len(dataTypes) == len(res.Fields()) && values[i] == "" && + (!strings.Contains(dataTypes[i], "char") || + !strings.Contains(dataTypes[i], "text")) { + values[i] = "null" + } else { + values[i] = "'" + values[i] + "'" + } + } + + valuesStr := fmt.Sprintf(`(%s)`, strings.Join(values, `,`)) + valList = append(valList, valuesStr) + + rowCount++ + valCount++ + + if rowCount%maxValCount == 0 { + doSampling(localConn, database, table, colDef, 
strings.Join(valList, ",")) + valCount = 0 + valList = make([]string, 0) + + } + } + + common.Log.Debug("%d rows sampling out", rowCount) + return nil +} + +// 将泵取的数据转换成Insert语句并在数据库中执行 +func doSampling(conn mysql.Conn, dbName, table, colDef, values string) { + sql := fmt.Sprintf("Insert into `%s`.`%s`(%s) values%s;", dbName, table, + colDef, values) + + _, _, err := conn.Query(sql) + + if err != nil { + common.Log.Error("doSampling Error from %s.%s: %v", dbName, table, err) + } + +} diff --git a/database/sampling_test.go b/database/sampling_test.go new file mode 100644 index 00000000..082d5e9f --- /dev/null +++ b/database/sampling_test.go @@ -0,0 +1,50 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

package database

import (
	"testing"

	"github.com/XiaoMi/soar/common"
)

func init() {
	common.BaseDir = common.DevPath
}

// TestSamplingData samples the "film" table from the online DSN into the
// test connection and fails on any returned error.
func TestSamplingData(t *testing.T) {
	online := &Connector{
		Addr:     common.Config.OnlineDSN.Addr,
		User:     common.Config.OnlineDSN.User,
		Pass:     common.Config.OnlineDSN.Password,
		Database: common.Config.OnlineDSN.Schema,
	}

	// NOTE(review): `offline` is built and mutated below but never used —
	// the call under test goes through connTest instead. Confirm whether
	// offline.SamplingData was intended here.
	offline := &Connector{
		Addr:     common.Config.TestDSN.Addr,
		User:     common.Config.TestDSN.User,
		Pass:     common.Config.TestDSN.Password,
		Database: common.Config.TestDSN.Schema,
	}

	offline.Database = "test"

	err := connTest.SamplingData(*online, "film")
	if err != nil {
		t.Error(err)
	}
}
diff --git a/database/show.go b/database/show.go
new file mode 100644
index 00000000..40440189
--- /dev/null
+++ b/database/show.go
@@ -0,0 +1,584 @@
/*
 * Copyright 2018 Xiaomi, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +package database + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "time" + + "github.com/XiaoMi/soar/common" +) + +// SHOW TABLE STATUS Syntax +// https://dev.mysql.com/doc/refman/5.7/en/show-table-status.html + +// TableStatInfo 用以保存 show table status 之后获取的table信息 +type TableStatInfo struct { + Name string + Rows []tableStatusRow +} + +// tableStatusRow 用于 show table status value +type tableStatusRow struct { + Name string // 表名 + Engine string // 该表使用的存储引擎 + Version int // 该表的 .frm 文件版本号 + RowFormat string // 该表使用的行存储格式 + Rows int64 // 表行数,InnoDB 引擎中为预估值,甚至可能会有40%~50%的数值偏差 + AvgRowLength int // 平均行长度 + + // MyISAM: Data_length 为数据文件的大小,单位为 bytes + // InnoDB: Data_length 为聚簇索引分配的近似内存量,单位为 bytes, 计算方式为聚簇索引数量乘以 InnoDB 页面大小 + // 其他不同的存储引擎中该值的意义可能不尽相同 + DataLength int + + // MyISAM: Max_data_length 为数据文件长度的最大值。这是在给定使用的数据指针大小的情况下,可以存储在表中的数据的最大字节数 + // InnoDB: 未使用 + // 其他不同的存储引擎中该值的意义可能不尽相同 + MaxDataLength int + + // MyISAM: Index_length 为 index 文件的大小,单位为 bytes + // InnoDB: Index_length 为非聚簇索引分配的近似内存量,单位为 bytes,计算方式为非聚簇索引数量乘以 InnoDB 页面大小 + // 其他不同的存储引擎中该值的意义可能不尽相同 + IndexLength int + + DataFree int // 已分配但未使用的字节数 + AutoIncrement int // 下一个自增值 + CreateTime time.Time // 创建时间 + UpdateTime time.Time // 最近一次更新时间,该值不准确 + CheckTime time.Time // 上次检查时间 + Collation string // 字符集及排序规则信息 + Checksum string // 校验和 + CreateOptions string // 创建表的时候的时候一切其他属性 + Comment string // 注释 +} + +// newTableStat 构造 table Stat 对象 +func newTableStat(tableName string) *TableStatInfo { + return &TableStatInfo{ + Name: tableName, + Rows: make([]tableStatusRow, 0), + } +} + +// ShowTables 执行 show tables +func (db *Connector) ShowTables() ([]string, error) { + defer func() { + err := recover() + if err != nil { + common.Log.Error("recover ShowTableStatus()", err) + } + }() + + // 执行 show table status + res, err := db.Query("show tables") + if err != nil { + return []string{}, err + } + + // 获取值 + var tables []string + for _, row := range res.Rows { + tables = append(tables, row.Str(0)) 
+ } + + return tables, err +} + +// ShowTableStatus 执行 show table status +func (db *Connector) ShowTableStatus(tableName string) (*TableStatInfo, error) { + defer func() { + err := recover() + if err != nil { + common.Log.Error("recover ShowTableStatus()", err) + } + }() + + // 初始化struct + ts := newTableStat(tableName) + + // 执行 show table status + res, err := db.Query("show table status where name = '%s'", ts.Name) + if err != nil { + return ts, err + } + + rs := res.Result.Map("Rows") + name := res.Result.Map("Name") + df := res.Result.Map("Data_free") + sum := res.Result.Map("Checksum") + engine := res.Result.Map("Engine") + version := res.Result.Map("Version") + comment := res.Result.Map("Comment") + ai := res.Result.Map("Auto_increment") + collation := res.Result.Map("Collation") + rowFormat := res.Result.Map("Row_format") + checkTime := res.Result.Map("Check_time") + dataLength := res.Result.Map("Data_length") + idxLength := res.Result.Map("Index_length") + createTime := res.Result.Map("Create_time") + updateTime := res.Result.Map("Update_time") + options := res.Result.Map("Create_options") + avgRowLength := res.Result.Map("Avg_row_length") + maxDataLength := res.Result.Map("Max_data_length") + + // 获取值 + for _, row := range res.Rows { + value := tableStatusRow{ + Name: row.Str(name), + Engine: row.Str(engine), + Version: row.Int(version), + Rows: row.Int64(rs), + RowFormat: row.Str(rowFormat), + AvgRowLength: row.Int(avgRowLength), + DataLength: row.Int(dataLength), + MaxDataLength: row.Int(maxDataLength), + IndexLength: row.Int(idxLength), + DataFree: row.Int(df), + AutoIncrement: row.Int(ai), + CreateTime: row.Time(createTime, time.Local), + UpdateTime: row.Time(updateTime, time.Local), + CheckTime: row.Time(checkTime, time.Local), + Collation: row.Str(collation), + Checksum: row.Str(sum), + CreateOptions: row.Str(options), + Comment: row.Str(comment), + } + ts.Rows = append(ts.Rows, value) + } + + return ts, err +} + +// 
https://dev.mysql.com/doc/refman/5.7/en/show-index.html + +// TableIndexInfo 用以保存 show index 之后获取的 index 信息 +type TableIndexInfo struct { + TableName string + IdxRows []TableIndexRow +} + +// TableIndexRow 用以存放show index之后获取的每一条index信息 +type TableIndexRow struct { + Table string // 表名 + NonUnique int // 0:unique key,1:not unique + KeyName string // index的名称,如果是主键则为 "PRIMARY" + SeqInIndex int // 该列在索引中的位置。计数从 1 开始 + ColumnName string // 列名 + Collation string // A or Null + Cardinality int // 索引中唯一值的数量,"ANALYZE TABLE" 可更新该值 + SubPart int // 索引前缀字节数 + Packed int + Null string // 表示该列是否可以为空,如果可以为 'YES',反之'' + IndexType string // BTREE, FULLTEXT, HASH, RTREE + Comment string + IndexComment string +} + +// NewTableIndexInfo 构造 TableIndexInfo +func NewTableIndexInfo(tableName string) *TableIndexInfo { + return &TableIndexInfo{ + TableName: tableName, + IdxRows: make([]TableIndexRow, 0), + } +} + +// ShowIndex show Index +func (db *Connector) ShowIndex(tableName string) (*TableIndexInfo, error) { + tbIndex := NewTableIndexInfo(tableName) + + // 执行 show create table + res, err := db.Query("show index from `%s`.`%s`", db.Database, tableName) + if err != nil { + return nil, err + } + + table := res.Result.Map("Table") + unique := res.Result.Map("Non_unique") + keyName := res.Result.Map("Key_name") + seq := res.Result.Map("Seq_in_index") + cName := res.Result.Map("Column_name") + collation := res.Result.Map("Collation") + cardinality := res.Result.Map("Cardinality") + subPart := res.Result.Map("Sub_part") + packed := res.Result.Map("Packed") + null := res.Result.Map("Null") + idxType := res.Result.Map("Index_type") + comment := res.Result.Map("Comment") + idxComment := res.Result.Map("Index_comment") + + // 获取值 + for _, row := range res.Rows { + value := TableIndexRow{ + Table: row.Str(table), + NonUnique: row.Int(unique), + KeyName: row.Str(keyName), + SeqInIndex: row.Int(seq), + ColumnName: row.Str(cName), + Collation: row.Str(collation), + Cardinality: row.Int(cardinality), 
+ SubPart: row.Int(subPart), + Packed: row.Int(packed), + Null: row.Str(null), + IndexType: row.Str(idxType), + Comment: row.Str(comment), + IndexComment: row.Str(idxComment), + } + tbIndex.IdxRows = append(tbIndex.IdxRows, value) + } + return tbIndex, err +} + +// IndexSelectKey 用以对 TableIndexInfo 进行查询 +type IndexSelectKey string + +// 索引相关 +const ( + IndexKeyName = IndexSelectKey("KeyName") // 索引名称 + IndexColumnName = IndexSelectKey("ColumnName") // 索引列名称 + IndexIndexType = IndexSelectKey("IndexType") // 索引类型 + IndexNonUnique = IndexSelectKey("NonUnique") // 唯一索引 +) + +// FindIndex 获取TableIndexInfo中需要的索引 +func (tbIndex *TableIndexInfo) FindIndex(arg IndexSelectKey, value string) []TableIndexRow { + var result []TableIndexRow + if tbIndex == nil { + return result + } + + value = strings.ToLower(value) + + switch arg { + case IndexKeyName: + for _, index := range tbIndex.IdxRows { + if strings.ToLower(index.KeyName) == value { + result = append(result, index) + } + } + + case IndexColumnName: + for _, index := range tbIndex.IdxRows { + if strings.ToLower(index.ColumnName) == value { + result = append(result, index) + } + } + + case IndexIndexType: + for _, index := range tbIndex.IdxRows { + if strings.ToLower(index.IndexType) == value { + result = append(result, index) + } + } + + case IndexNonUnique: + for _, index := range tbIndex.IdxRows { + unique := strconv.Itoa(index.NonUnique) + if unique == value { + result = append(result, index) + } + } + + default: + common.Log.Error("no such args: TableIndexRow") + } + + return result +} + +// desc table +// https://dev.mysql.com/doc/refman/5.7/en/show-columns.html + +// TableDesc show columns from rental; +type TableDesc struct { + Name string + DescValues []TableDescValue +} + +// TableDescValue 含有每一列的属性 +type TableDescValue struct { + Field string // 列名 + Type string // 数据类型 + Null string // 是否有NULL(NO、YES) + Collation string // 字符集 + Privileges string // 权限s + Key string // 键类型 + Default string // 默认值 + Extra string 
// 其他 + Comment string // 备注 +} + +// NewTableDesc 初始化一个*TableDesc +func NewTableDesc(tableName string) *TableDesc { + return &TableDesc{ + Name: tableName, + DescValues: make([]TableDescValue, 0), + } +} + +// ShowColumns 获取DB中所有的columns +func (db *Connector) ShowColumns(tableName string) (*TableDesc, error) { + tbDesc := NewTableDesc(tableName) + + // 执行 show create table + res, err := db.Query("show full columns from `%s`.`%s`", db.Database, tableName) + if err != nil { + return nil, err + } + + field := res.Result.Map("Field") + tp := res.Result.Map("Type") + null := res.Result.Map("Null") + key := res.Result.Map("Key") + def := res.Result.Map("Default") + extra := res.Result.Map("Extra") + collation := res.Result.Map("Collation") + privileges := res.Result.Map("Privileges") + comm := res.Result.Map("Comment") + + // 获取值 + for _, row := range res.Rows { + value := TableDescValue{ + Field: row.Str(field), + Type: row.Str(tp), + Null: row.Str(null), + Key: row.Str(key), + Default: row.Str(def), + Extra: row.Str(extra), + Privileges: row.Str(privileges), + Collation: row.Str(collation), + Comment: row.Str(comm), + } + tbDesc.DescValues = append(tbDesc.DescValues, value) + } + return tbDesc, err +} + +// Columns 用于获取TableDesc中所有列的名称 +func (td TableDesc) Columns() []string { + var cols []string + for _, col := range td.DescValues { + cols = append(cols, col.Field) + } + return cols +} + +// showCreate show create +func (db *Connector) showCreate(createType, name string) (string, error) { + // 执行 show create table + res, err := db.Query("show create %s `%s`", createType, name) + if err != nil { + return "", err + } + + // 获取ddl + var ddl string + for _, row := range res.Rows { + ddl = row.Str(1) + } + + return ddl, err +} + +// ShowCreateDatabase show create database +func (db *Connector) ShowCreateDatabase(dbName string) (string, error) { + defer func() { + err := recover() + if err != nil { + common.Log.Error("recover ShowCreateDatabase()", err) + } + }() + return 
db.showCreate("database", dbName) +} + +// ShowCreateTable show create table +func (db *Connector) ShowCreateTable(tableName string) (string, error) { + defer func() { + err := recover() + if err != nil { + common.Log.Error("recover ShowCreateTable()", err) + } + }() + + ddl, err := db.showCreate("table", tableName) + + // 去除外键关联条件 + var noConstraint []string + relationReg, _ := regexp.Compile("CONSTRAINT") + for _, line := range strings.Split(ddl, "\n") { + + if relationReg.Match([]byte(line)) { + continue + } + + // 去除外键语句会使DDL中多一个','导致语法错误,要把多余的逗号去除 + if strings.Index(line, ")") == 0 { + lineWrongSyntax := noConstraint[len(noConstraint)-1] + // 如果')'前一句的末尾是',' 删除 ',' 保证语法正确性 + if strings.Index(lineWrongSyntax, ",") == len(lineWrongSyntax)-1 { + noConstraint[len(noConstraint)-1] = lineWrongSyntax[:len(lineWrongSyntax)-1] + } + } + + noConstraint = append(noConstraint, line) + } + + return strings.Join(noConstraint, "\n"), err +} + +// FindColumn find column +func (db *Connector) FindColumn(name, dbName string, tables ...string) ([]*common.Column, error) { + // 执行 show create table + var columns []*common.Column + sql := fmt.Sprintf("SELECT "+ + "c.TABLE_NAME,c.TABLE_SCHEMA,c.COLUMN_TYPE,c.CHARACTER_SET_NAME, c.COLLATION_NAME "+ + "FROM `INFORMATION_SCHEMA`.`COLUMNS` as c where c.COLUMN_NAME = '%s' ", name) + + if len(tables) > 0 { + var tmp []string + for _, table := range tables { + tmp = append(tmp, "'"+table+"'") + } + sql += fmt.Sprintf(" and c.table_name in (%s)", strings.Join(tmp, ",")) + } + + if dbName != "" { + sql += fmt.Sprintf(" and c.table_schema = '%s'", dbName) + } + + res, err := db.Query(sql) + if err != nil { + common.Log.Error("(db *Connector) FindColumn Error : ", err) + return columns, err + } + + tbName := res.Result.Map("TABLE_NAME") + schema := res.Result.Map("TABLE_SCHEMA") + colTyp := res.Result.Map("COLUMN_TYPE") + colCharset := res.Result.Map("CHARACTER_SET_NAME") + collation := res.Result.Map("COLLATION_NAME") + + // 获取ddl + for _, 
row := range res.Rows { + col := &common.Column{ + Name: name, + Table: row.Str(tbName), + DB: row.Str(schema), + DataType: row.Str(colTyp), + Character: row.Str(colCharset), + Collation: row.Str(collation), + } + + // 填充字符集和排序规则 + if col.Character == "" { + // 当从`INFORMATION_SCHEMA`.`COLUMNS`表中查询不到相关列的character和collation的信息时 + // 认为该列使用的character和collation与其所处的表一致 + // 由于`INFORMATION_SCHEMA`.`TABLES`表中未找到表的character,所以从按照MySQL中collation的规则从中截取character + + sql = fmt.Sprintf("SELECT `t`.`TABLE_COLLATION` FROM `INFORMATION_SCHEMA`.`TABLES` AS `t` "+ + "WHERE `t`.`TABLE_NAME`='%s' AND `t`.`TABLE_SCHEMA` = '%s'", col.Table, col.DB) + var newRes *QueryResult + newRes, err = db.Query(sql) + if err != nil { + common.Log.Error("(db *Connector) FindColumn Error : ", err) + return columns, err + } + + tbCollation := newRes.Rows[0].Str(0) + if tbCollation != "" { + col.Character = strings.Split(tbCollation, "_")[0] + col.Collation = tbCollation + } + } + + columns = append(columns, col) + } + + return columns, err +} + +// IsFKey 判断列是否是外键 +func (db *Connector) IsFKey(dbName, tbName, column string) bool { + sql := fmt.Sprintf("SELECT REFERENCED_COLUMN_NAME FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE C "+ + "WHERE REFERENCED_TABLE_SCHEMA <> 'NULL' AND"+ + " TABLE_NAME='%s' AND"+ + " TABLE_SCHEMA='%s' AND"+ + " COLUMN_NAME='%s'", tbName, dbName, column) + + res, err := db.Query(sql) + if err == nil && len(res.Rows) == 0 { + return false + } + + return true +} + +// Reference 用于存储关系 +type Reference map[string][]ReferenceValue + +// ReferenceValue 用于处理表之间的关系 +type ReferenceValue struct { + RefDBName string // 夫表所属数据库 + RefTable string // 父表 + DBName string // 子表所属数据库 + Table string // 子表 + ConstraintName string // 关系名称 +} + +// ShowReference 查找所有的外键信息 +func (db *Connector) ShowReference(dbName string, tbName ...string) ([]ReferenceValue, error) { + var referenceValues []ReferenceValue + sql := `SELECT 
C.REFERENCED_TABLE_SCHEMA,C.REFERENCED_TABLE_NAME,C.TABLE_SCHEMA,C.TABLE_NAME,C.CONSTRAINT_NAME +FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE C JOIN INFORMATION_SCHEMA. TABLES T ON T.TABLE_NAME = C.TABLE_NAME +WHERE C.REFERENCED_TABLE_NAME IS NOT NULL` + sql = sql + fmt.Sprintf(` AND C.TABLE_SCHEMA = "%s"`, dbName) + + if len(tbName) > 0 { + extra := fmt.Sprintf(` AND C.TABLE_NAME IN ("%s")`, strings.Join(tbName, `","`)) + sql = sql + extra + } + + // 执行SQL查找外键关联关系 + res, err := db.Query(sql) + if err != nil { + return referenceValues, err + } + + refDb := res.Result.Map("REFERENCED_TABLE_SCHEMA") + refTb := res.Result.Map("REFERENCED_TABLE_NAME") + schema := res.Result.Map("TABLE_SCHEMA") + tb := res.Result.Map("TABLE_NAME") + cName := res.Result.Map("CONSTRAINT_NAME") + + // 获取值 + for _, row := range res.Rows { + value := ReferenceValue{ + RefDBName: row.Str(refDb), + RefTable: row.Str(refTb), + DBName: row.Str(schema), + Table: row.Str(tb), + ConstraintName: row.Str(cName), + } + referenceValues = append(referenceValues, value) + } + + return referenceValues, err + +} diff --git a/database/show_test.go b/database/show_test.go new file mode 100644 index 00000000..66f98d47 --- /dev/null +++ b/database/show_test.go @@ -0,0 +1,94 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package database + +import ( + "fmt" + "testing" + + "github.com/kr/pretty" + "vitess.io/vitess/go/vt/sqlparser" +) + +func TestShowTableStatus(t *testing.T) { + connTest.Database = "information_schema" + ts, err := connTest.ShowTableStatus("TABLES") + if err != nil { + t.Error("ShowTableStatus Error: ", err) + } + pretty.Println(ts) +} + +func TestShowTables(t *testing.T) { + connTest.Database = "information_schema" + ts, err := connTest.ShowTables() + if err != nil { + t.Error("ShowTableStatus Error: ", err) + } + pretty.Println(ts) +} + +func TestShowCreateTable(t *testing.T) { + connTest.Database = "information_schema" + ts, err := connTest.ShowCreateTable("TABLES") + if err != nil { + t.Error("ShowCreateTable Error: ", err) + } + fmt.Println(ts) + stmt, err := sqlparser.Parse(ts) + pretty.Println(stmt, err) +} + +func TestShowIndex(t *testing.T) { + connTest.Database = "information_schema" + ti, err := connTest.ShowIndex("TABLES") + if err != nil { + t.Error("ShowIndex Error: ", err) + } + pretty.Println(ti.FindIndex(IndexKeyName, "idx_store_id_film_id")) +} + +func TestShowColumns(t *testing.T) { + connTest.Database = "information_schema" + ti, err := connTest.ShowColumns("TABLES") + if err != nil { + t.Error("ShowColumns Error: ", err) + } + pretty.Println(ti) +} + +func TestFindColumn(t *testing.T) { + ti, err := connTest.FindColumn("id", "") + if err != nil { + t.Error("FindColumn Error: ", err) + } + pretty.Println(ti) +} + +func TestShowReference(t *testing.T) { + rv, err := connTest.ShowReference("test2", "homeImg") + if err != nil { + t.Error("ShowReference Error: ", err) + } + pretty.Println(rv) +} + +func TestIsFKey(t *testing.T) { + if !connTest.IsFKey("sakila", "film", "language_id") { + t.Error("want True. 
got false") + } +} diff --git a/database/testdata/TestExplain.golden b/database/testdata/TestExplain.golden new file mode 100644 index 00000000..4238525f --- /dev/null +++ b/database/testdata/TestExplain.golden @@ -0,0 +1,159 @@ +&database.ExplainInfo{ + SQL: "select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;", + ExplainFormat: 0, + ExplainRows: { + &database.ExplainRow{ + ID: 1, + SelectType: "PRIMARY", + TableName: "country", + Partitions: "NULL", + AccessType: "index", + PossibleKeys: {"PRIMARY"}, + Key: "PRIMARY", + KeyLen: "2", + Ref: {""}, + Rows: 109, + Filtered: 100, + Scalability: "O(n)", + Extra: "Using index; Using temporary; Using filesort", + }, + &database.ExplainRow{ + ID: 1, + SelectType: "PRIMARY", + TableName: "city", + Partitions: "NULL", + AccessType: "ref", + PossibleKeys: {"PRIMARY", "idx_fk_country_id"}, + Key: "idx_fk_country_id", + KeyLen: "2", + Ref: {"sakila.country.country_id"}, + Rows: 5, + Filtered: 100, + Scalability: "O(log n)", + Extra: "NULL", + }, + &database.ExplainRow{ + ID: 1, + SelectType: "PRIMARY", + TableName: "c", + Partitions: "NULL", + AccessType: "ALL", + PossibleKeys: {""}, + Key: "NULL", + KeyLen: "", + Ref: {""}, + Rows: 600, + Filtered: 10, + Scalability: "O(n)", + Extra: "Using where; Using join buffer (Block Nested Loop)", + }, + &database.ExplainRow{ + ID: 1, + SelectType: "PRIMARY", + TableName: "a", + Partitions: "NULL", + AccessType: "ref", + PossibleKeys: {"PRIMARY", "idx_fk_city_id"}, + Key: "idx_fk_city_id", + KeyLen: "2", + Ref: {"sakila.city.city_id"}, + Rows: 1, + Filtered: 100, + Scalability: "O(log n)", + Extra: "NULL", + }, + &database.ExplainRow{ + ID: 1, + SelectType: "PRIMARY", + TableName: "cu", + Partitions: "NULL", + AccessType: "ref", + PossibleKeys: {"idx_fk_address_id"}, + Key: "idx_fk_address_id", + KeyLen: "2", + Ref: {"sakila.a.address_id"}, + 
Rows: 1, + Filtered: 100, + Scalability: "O(log n)", + Extra: "NULL", + }, + &database.ExplainRow{ + ID: 1, + SelectType: "PRIMARY", + TableName: "", + Partitions: "NULL", + AccessType: "ref", + PossibleKeys: {""}, + Key: "", + KeyLen: "152", + Ref: {"sakila.a.address"}, + Rows: 6, + Filtered: 100, + Scalability: "O(log n)", + Extra: "Using index", + }, + &database.ExplainRow{ + ID: 2, + SelectType: "DERIVED", + TableName: "a", + Partitions: "NULL", + AccessType: "ALL", + PossibleKeys: {"PRIMARY", "idx_fk_city_id"}, + Key: "NULL", + KeyLen: "", + Ref: {""}, + Rows: 603, + Filtered: 100, + Scalability: "O(n)", + Extra: "Using filesort", + }, + &database.ExplainRow{ + ID: 2, + SelectType: "DERIVED", + TableName: "cu", + Partitions: "NULL", + AccessType: "ref", + PossibleKeys: {"idx_fk_store_id", "idx_fk_address_id"}, + Key: "idx_fk_address_id", + KeyLen: "2", + Ref: {"sakila.a.address_id"}, + Rows: 1, + Filtered: 54.42, + Scalability: "O(log n)", + Extra: "Using where", + }, + &database.ExplainRow{ + ID: 2, + SelectType: "DERIVED", + TableName: "city", + Partitions: "NULL", + AccessType: "eq_ref", + PossibleKeys: {"PRIMARY", "idx_fk_country_id"}, + Key: "PRIMARY", + KeyLen: "2", + Ref: {"sakila.a.city_id"}, + Rows: 1, + Filtered: 100, + Scalability: "O(log n)", + Extra: "NULL", + }, + &database.ExplainRow{ + ID: 2, + SelectType: "DERIVED", + TableName: "country", + Partitions: "NULL", + AccessType: "eq_ref", + PossibleKeys: {"PRIMARY"}, + Key: "PRIMARY", + KeyLen: "2", + Ref: {"sakila.city.country_id"}, + Rows: 1, + Filtered: 100, + Scalability: "O(log n)", + Extra: "Using index", + }, + }, + ExplainJSON: (*database.ExplainJSON)(nil), + Warnings: nil, + QueryCost: 0, +} diff --git a/database/testdata/TestExplainInfoTranslator.golden b/database/testdata/TestExplainInfoTranslator.golden new file mode 100644 index 00000000..e69de29b diff --git a/database/testdata/TestFormatProfiling.golden b/database/testdata/TestFormatProfiling.golden new file mode 100644 index 
00000000..e69de29b diff --git a/database/testdata/TestMySQLExplainQueryCost.golden b/database/testdata/TestMySQLExplainQueryCost.golden new file mode 100644 index 00000000..e69de29b diff --git a/database/testdata/TestMySQLExplainWarnings.golden b/database/testdata/TestMySQLExplainWarnings.golden new file mode 100644 index 00000000..e69de29b diff --git a/database/testdata/TestPrintMarkdownExplainTable.golden b/database/testdata/TestPrintMarkdownExplainTable.golden new file mode 100644 index 00000000..e69de29b diff --git a/database/testdata/TestSource.sql b/database/testdata/TestSource.sql new file mode 100644 index 00000000..2bba4d12 --- /dev/null +++ b/database/testdata/TestSource.sql @@ -0,0 +1,2 @@ +select 1; +select 1; diff --git a/database/trace.go b/database/trace.go new file mode 100644 index 00000000..83e2c75b --- /dev/null +++ b/database/trace.go @@ -0,0 +1,159 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package database + +import ( + "errors" + "fmt" + "io" + "regexp" + "strings" + "time" + + "github.com/XiaoMi/soar/common" + + "vitess.io/vitess/go/vt/sqlparser" +) + +// Trace 用于存放 Select * From Information_Schema.Optimizer_Trace;输出的结果 +type Trace struct { + Rows []TraceRow +} + +// TraceRow 中含有trace的基本信息 +type TraceRow struct { + Query string + Trace string + MissingBytesBeyondMaxMemSize int + InsufficientPrivileges int +} + +// Trace 执行SQL,并对其Trace +func (db *Connector) Trace(sql string, params ...interface{}) (*QueryResult, error) { + common.Log.Debug("Trace SQL: %s", sql) + if common.Config.TestDSN.Version < 560 { + return nil, errors.New("version < 5.6, not support trace") + } + + // 过滤不需要Trace的SQL + switch sqlparser.Preview(sql) { + case sqlparser.StmtSelect, sqlparser.StmtUpdate, sqlparser.StmtDelete: + sql = "explain " + sql + case sqlparser.EXPLAIN: + default: + return nil, errors.New("no need trace") + } + + // 测试环境如果检查是关闭的,则SQL不会被执行 + if common.Config.TestDSN.Disable { + return nil, errors.New("TestDsn Disable") + } + + // 数据库安全性检查:如果Connector的IP端口与TEST环境不一致,则启用SQL白名单 + // 不在白名单中的SQL不允许执行 + // 执行环境与test环境不相同 + if db.Addr != common.Config.TestDSN.Addr && db.dangerousQuery(sql) { + return nil, fmt.Errorf("query Execution Deny: Execute SQL with DSN(%s/%s) '%s'", + db.Addr, db.Database, fmt.Sprintf(sql, params...)) + } + + common.Log.Debug("Execute SQL with DSN(%s/%s) : %s", db.Addr, db.Database, sql) + conn := db.NewConnection() + + // 设置SQL连接超时时间 + conn.SetTimeout(time.Duration(common.Config.ConnTimeOut) * time.Second) + defer conn.Close() + err := conn.Connect() + if err != nil { + return nil, err + } + + // 添加SQL执行超时限制 + ch := make(chan QueryResult, 1) + go func() { + // 开启Trace + common.Log.Debug("SET SESSION OPTIMIZER_TRACE='enabled=on'") + _, _, err = conn.Query("SET SESSION OPTIMIZER_TRACE='enabled=on'") + common.LogIfError(err, "") + + // 执行SQL,抛弃返回结果 + result, err := conn.Start(sql, params...) 
+ if err != nil { + ch <- QueryResult{ + Error: err, + } + return + } + row := result.MakeRow() + for { + err = result.ScanRow(row) + if err == io.EOF { + break + } + } + + // 返回Trace结果 + res := QueryResult{} + res.Rows, res.Result, res.Error = conn.Query("SELECT * FROM information_schema.OPTIMIZER_TRACE") + + // 关闭Trace + common.Log.Debug("SET SESSION OPTIMIZER_TRACE='enabled=off'") + _, _, err = conn.Query("SET SESSION OPTIMIZER_TRACE='enabled=off'") + if err != nil { + fmt.Println(err.Error()) + } + ch <- res + }() + + select { + case res := <-ch: + return &res, res.Error + case <-time.After(time.Duration(common.Config.QueryTimeOut) * time.Second): + return nil, errors.New("query execution timeout") + } +} + +// getTrace 获取trace信息 +func getTrace(res *QueryResult) Trace { + var rows []TraceRow + for _, row := range res.Rows { + rows = append(rows, TraceRow{ + Query: row.Str(0), + Trace: row.Str(1), + MissingBytesBeyondMaxMemSize: row.Int(2), + InsufficientPrivileges: row.Int(3), + }) + } + return Trace{Rows: rows} +} + +// FormatTrace 格式化输出Trace信息 +func FormatTrace(res *QueryResult) string { + explainReg := regexp.MustCompile(`(?i)^explain\s+`) + trace := getTrace(res) + str := []string{""} + for _, row := range trace.Rows { + str = append(str, "```sql") + sql := explainReg.ReplaceAllString(row.Query, "") + str = append(str, sql) + str = append(str, "```\n") + str = append(str, "```json") + str = append(str, row.Trace) + str = append(str, "```\n") + } + return strings.Join(str, "\n") +} diff --git a/database/trace_test.go b/database/trace_test.go new file mode 100644 index 00000000..8dea2d7f --- /dev/null +++ b/database/trace_test.go @@ -0,0 +1,58 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package database + +import ( + "flag" + "testing" + + "github.com/XiaoMi/soar/common" + + "github.com/kr/pretty" +) + +var update = flag.Bool("update", false, "update .golden files") + +func TestTrace(t *testing.T) { + common.Config.QueryTimeOut = 1 + res, err := connTest.Trace("select 1") + if err == nil { + common.GoldenDiff(func() { + pretty.Println(res) + }, t.Name(), update) + } else { + t.Error(err) + } +} + +func TestFormatTrace(t *testing.T) { + res, err := connTest.Trace("select 1") + if err == nil { + pretty.Println(FormatTrace(res)) + } else { + t.Error(err) + } +} + +func TestGetTrace(t *testing.T) { + res, err := connTest.Trace("select 1") + if err == nil { + pretty.Println(getTrace(res)) + } else { + t.Error(err) + } +} diff --git a/deps.sh b/deps.sh new file mode 100755 index 00000000..afeb3e08 --- /dev/null +++ b/deps.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +NEEDED_COMMANDS="mysql docker git go govendor retool" + +for cmd in ${NEEDED_COMMANDS} ; do + if ! 
command -v "${cmd}" &> /dev/null ; then + echo -e "\033[91m${cmd} missing\033[0m" + exit 1 + else + echo "${cmd} found" + fi +done diff --git a/doc/FAQ.md b/doc/FAQ.md new file mode 100644 index 00000000..a9648b1b --- /dev/null +++ b/doc/FAQ.md @@ -0,0 +1,71 @@ +## 常见问题 + +### 软件依赖 + +* [git](https://git-scm.co) 项目代码管理工具 +* [go](https://golang.org/) 源码编译依赖 +* [govendor](https://github.com/kardianos/govendor) 管理第三方包 +* [docker](https://www.docker.com) 主要用于构建测试环境 +* [mysql](https://www.mysql.com/) 测试时用来连接测试环境 +* [retool](https://github.com/twitchtv/retool): 管理测试开发工具,首次安装耗时会比较长,如:`gometalinter.v2`, `revive`, `golangci-lint` + +### 提示语法错误 + +* 请检查SQL语句中是否出现了不配对的引号,如 `, ", ' + +### 输出结果返回慢 + +* 如果配置了online-dsn或test-dsn SOAR会请求这些数据库以支持更多的功能,这时评审一条SQL就会耗时变长。 +* 如果又开启了`-sampling=true`的话会将线上的数据导入到测试环境,数据采样也会消耗一些时间。 + +## 如何搭建测试环境 + +```bash +# 创建测试数据库 +wget http://downloads.mysql.com/doc/sakila-db.tar.gz +tar zxf sakila-db.tar.gz && cd sakila-db +mysql -u root -p -f < sakila-schema.sql +mysql -u root -p -f < sakila-data.sql + +# 创建测试用户 +CREATE USER root@'hostname' IDENTIFIED BY "1t'sB1g3rt"; +GRANT ALL ON *.* TO root@'hostname'; +``` + +## 更新vitess依赖 + +使用`govendor fetch`或`git clone` [vitess](https://github.com/vitessio/vitess) 在某些地区更新vitess可能会比较慢,导致项目编译不过,所以将vitess整个代码库加到了代码仓库。 + +如属更新vitess仓库可以使用如下命令。 + +```bash +$ make vitess +``` + +## 生成报告并发邮件 + +```bash +#!/bin/bash + +soar -query "select * from film" > ./index.html + +( + echo To: youmail@example.com + echo From: robot@example.com + echo "Content-Type: text/html; " + echo Subject: SQL Analyze Report + echo + cat ./index.html +) | sendmail -t + +``` + +## 如何新增一条启发式建议 + +```bash +advisor/rules.go HeuristicRules 加一个条新的规则 +advisor/heuristic.go 实现一个规则函数 +advisor/heuristic_test.go 添加相应规则函数的测试用例 +make heuristic +make daily +``` diff --git a/doc/FAQ_en.md b/doc/FAQ_en.md new file mode 100644 index 00000000..dde04c96 --- /dev/null +++ b/doc/FAQ_en.md @@ -0,0 +1,74 @@ +## FAQ + +### Dependency Tools + +* 
[git](https://git-scm.co): clone code from git repository +* [go](https://golang.org/): build source +* [govendor](https://github.com/kardianos/govendor): manager third party dependency +* [docker](https://www.docker.com): manager test envirment +* [mysql](https://www.mysql.com/): connect test envirment +* [retool](https://github.com/twitchtv/retool): manager test tools such as `gometalinter.v2`, `revive`, `golangci-lint` + +### Syntax Error + +* Unexpected quote, like `, ", ' +* vitess syntax not supported yet + +### Program running slowly + +* SOAR will use online-dsn, test-dsn for data sampling and testing if they are on a different host to access these instance will cost much time. This may cause analyze slowly, especially when you are optimizing lots of queries. +* As mentioned above, if you set `-sampling=true`(by default), data sampling will take some time for more accurate suggestions. + +## build test env + +```bash +# create test database +wget http://downloads.mysql.com/doc/sakila-db.tar.gz +tar zxf sakila-db.tar.gz && cd sakila-db +mysql -u root -p -f < sakila-schema.sql +mysql -u root -p -f < sakila-data.sql + +# create test user +CREATE USER root@'hostname' IDENTIFIED BY "1t'sB1g3rt"; +GRANT ALL ON *.* TO root@'hostname'; +``` + +## update vitess in vendor + +`govendor fetch` or `git clone` [vitess](https://github.com/vitessio/vitess) in somewhere maybe very slow or be blocked, so we add vitess source code in vendor directory. + +If you what to update vitess package, you should bypass that block using yourself method. 
+ +```bash +$ make vitess +``` + +## HTML Format Report + +```bash +#!/bin/bash + +soar -query "select * from film" > ./index.html + +( + echo To: youmail@example.com + echo From: robot@example.com + echo "Content-Type: text/html; " + echo Subject: SQL Analyze Report + echo + cat ./index.html +) | sendmail -t + +``` + +## Add a new heuristict rule + +```bash +advisor/rules.go HeuristicRules add a new item +advisor/heuristic.go add a new rule function +advisor/heuristic_test.go add a new test function +make doc +go test github.com/XiaoMi/soar/advisor -v -update -run TestListHeuristicRules +go test github.com/XiaoMi/soar/advisor -v -update -run TestMergeConflictHeuristicRules +make daily +``` diff --git a/doc/cheatsheet.md b/doc/cheatsheet.md new file mode 100644 index 00000000..f1145879 --- /dev/null +++ b/doc/cheatsheet.md @@ -0,0 +1,174 @@ +[toc] + +# 常用命令 + +## 基本用法 + +```bash +echo "select title from sakila.film" | ./soar -log-output=soar.log +``` + +## 指定配置文件 + +```bash +vi soar.yaml +# yaml format config file +online-dsn: + addr: 127.0.0.1:3306 + schema: sakila + user: root + password: "1t'sB1g3rt" + disable: false + +test-dsn: + addr: 127.0.0.1:3306 + schema: sakila + user: root + password: "1t'sB1g3rt" + disable: false +``` + +```bash +echo "select title from sakila.film" | ./soar -test-dsn="root:1t'sB1g3rt@127.0.0.1:3306/sakila" -allow-online-as-test -log-output=soar.log +``` + +## 打印所有的启发式规则 + +```bash +$ soar -list-heuristic-rules +``` + +## 忽略某些规则 + +```bash +$ soar -ignore-rules "ALI.001,IDX.*" +``` + +## 打印支持的报告格式 + +```bash +$ soar -list-report-types +``` + +## 以指定格式输出报告 + +```bash +$ soar -report-type json +``` + +## 语法检查工具 + +```bash +$ echo "select * from tb" | soar -only-syntax-check +$ echo $? +0 + +$ echo "select * fromtb" | soar -only-syntax-check +At SQL 0 : syntax error at position 16 near 'fromtb' +$ echo $? 
+1 + +``` + +## 慢日志进行分析示例 + +```bash +$ pt-query-digest slow.log > slow.log.digest +# parse pt-query-digest's output which example script +$ python2.7 doc/example/digest_pt.py slow.log.digest > slow.md +``` + + +## SQL指纹 + +```bash +$ echo "select * from film where col='abc'" | soar -report-type=fingerprint +``` + +输出 + +```sql +select * from film where col=? +``` + +## 将UPDATE/DELETE/INSERT语法转为SELECT + +```bash +$ echo "update film set title = 'abc'" | soar -rewrite-rules dml2select,delimiter -report-type rewrite +``` + +输出 + +```sql +select * from film; +``` + + +## 合并多条ALTER语句 + +```bash +$ echo "alter table tb add column a int; alter table tb add column b int;" | soar -report-type rewrite -rewrite-rules mergealter +``` + +输出 + +```sql +ALTER TABLE `tb` add column a int, add column b int ; +``` + +## SQL美化 + +```bash +$ echo "select * from tbl where col = 'val'" | ./soar -report-type=pretty +``` + +输出 + +```sql +SELECT + * +FROM + tbl +WHERE + col = 'val'; +``` + +## EXPLAIN信息分析报告 + +```bash +$ soar -report-type explain-digest << EOF ++----+-------------+-------+------+---------------+------+---------+------+------+-------+ +| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra | ++----+-------------+-------+------+---------------+------+---------+------+------+-------+ +| 1 | SIMPLE | film | ALL | NULL | NULL | NULL | NULL | 1131 | | ++----+-------------+-------+------+---------------+------+---------+------+------+-------+ +EOF +``` + +```text +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | NULL | NULL | NULL | NULL | 0 | 0.00% | ☠️ **O(n)** | | + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. 
+``` + +## markdown转HTML + +通过指定-report-css, -report-javascript, -markdown-extensions, -markdown-html-flags这些参数,你还可以控制HTML的显示格式。 + +```bash +$ cat test.md | soar -report-type md2html > test.html +``` + diff --git a/doc/cheatsheet_en.md b/doc/cheatsheet_en.md new file mode 100644 index 00000000..dc9ba765 --- /dev/null +++ b/doc/cheatsheet_en.md @@ -0,0 +1,143 @@ +[toc] + +# Useful Commands + +## Basic suggest + +```bash +echo "select title from sakila.film" | ./soar -log-output=soar.log +``` + +## Analyze SQL with test environment + +```bash +vi soar.yaml +# yaml format config file +online-dsn: + addr: 127.0.0.1:3306 + schema: sakila + user: root + password: "1t'sB1g3rt" + disable: false + +test-dsn: + addr: 127.0.0.1:3306 + schema: sakila + user: root + password: "1t'sB1g3rt" + disable: false +``` + +```bash +echo "select title from sakila.film" | ./soar -test-dsn="root:1t'sB1g3rt@127.0.0.1:3306/sakila" -allow-online-as-test -log-output=soar.log +``` + +## List supported heuristic rules + +```bash +$ soar -list-heuristic-rules +``` + +## Ignore Rules + +```bash +$ soar -ignore-rules "ALI.001,IDX.*" +``` + +## List supported report-type + +```bash +$ soar -list-report-types +``` + +## Set report-type for output + +```bash +$ soar -report-type json +``` + +## Syntax Check + +```bash +$ echo "select * from tb" | soar -only-syntax-check +$ echo $? +0 + +$ echo "select * fromtb" | soar -only-syntax-check +At SQL 0 : syntax error at position 16 near 'fromtb' +$ echo $? +1 + +``` + +## Slow log analyzing + +```bash +$ pt-query-digest slow.log > slow.log.digest +# parse pt-query-digest's output which example script +$ python2.7 doc/example/digest_pt.py slow.log.digest > slow.md +``` + + +## SQL FingerPrint + +```bash +$ echo "select * from film where col='abc'" | soar -report-type=fingerprint +``` + +Output + +```sql +select * from film where col=? 
+``` + +## Convert UPDATE/DELETE/INSERT into SELECT + +```bash +$ echo "update film set title = 'abc'" | soar -rewrite-rules dml2select,delimiter -report-type rewrite +``` + +Output + +```sql +select * from film; +``` + + +## Merge ALTER SQLs + +```bash +$ echo "alter table tb add column a int; alter table tb add column b int;" | soar -report-type rewrite -rewrite-rules mergealter +``` + +Output + +```sql +ALTER TABLE `tb` add column a int, add column b int ; +``` + +## SQL Pretty + +```bash +$ echo "select * from tbl where col = 'val'" | ./soar -report-type=pretty +``` + +Output + +```sql +SELECT + * +FROM + tbl +WHERE + col = 'val'; +``` + +## Convert markdown to HTML + +md2html comes with other flags, such as `-report-css`, `-report-javascript`, `-markdown-extensions`, `-markdown-html-flags`, you can get more self control HTML report. + +```bash +$ cat test.md | soar -report-type md2html > test.html +``` + diff --git a/doc/comparison.md b/doc/comparison.md new file mode 100644 index 00000000..90f70878 --- /dev/null +++ b/doc/comparison.md @@ -0,0 +1,13 @@ +## 业内其他优秀产品对比 + +| | SOAR | sqlcheck | pt-query-advisor | SQL Advisor | Inception | sqlautoreview | +| --- | --- | --- | --- | --- | --- | --- | +| 启发式建议 | ✔️ | ✔️ | ✔️ | ❌ | ✔️ | ✔️ | +| 索引建议 | ✔️ | ❌ | ❌ | ✔️ | ❌ | ✔️ | +| 查询重写 | ✔️ | ❌ | ❌ | ❌ | ❌ | ❌ | +| 执行计划展示 | ✔️ | ❌ | ❌ | ❌ | ❌ | ❌ | +| Profiling | ✔️ | ❌ | ❌ | ❌ | ❌ | ❌ | +| Trace | ✔️ | ❌ | ❌ | ❌ | ❌ | ❌ | +| SQL在线执行 | ❌ | ❌ | ❌ | ❌ | ✔️ | ❌ | +| 数据备份 | ❌ | ❌ | ❌ | ❌ | ✔️ | ❌ | + diff --git a/doc/comparison_en.md b/doc/comparison_en.md new file mode 100644 index 00000000..1aced8c9 --- /dev/null +++ b/doc/comparison_en.md @@ -0,0 +1,13 @@ +## Compare with other wonderful product + +| | SOAR | sqlcheck | pt-query-advisor | SQL Advisor | Inception | sqlautoreview | +| --- | --- | --- | --- | --- | --- | --- | +| Heuristic Rules | ✔️ | ✔️ | ✔️ | ❌ | ✔️ | ✔️ | +| Index Suggest | ✔️ | ❌ | ❌ | ✔️ | ❌ | ✔️ | +| Rewrite Query | ✔️ | ❌ | ❌ | ❌ | ❌ | ❌ | +| 
Explain | ✔️ | ❌ | ❌ | ❌ | ❌ | ❌ | +| Profiling | ✔️ | ❌ | ❌ | ❌ | ❌ | ❌ | +| Trace | ✔️ | ❌ | ❌ | ❌ | ❌ | ❌ | +| Execute SQL Online | ❌ | ❌ | ❌ | ❌ | ✔️ | ❌ | +| Backup Data | ❌ | ❌ | ❌ | ❌ | ✔️ | ❌ | + diff --git a/doc/config.md b/doc/config.md new file mode 100644 index 00000000..65f8ef99 --- /dev/null +++ b/doc/config.md @@ -0,0 +1,102 @@ +## 配置文件说明 + +配置文件为[yaml](https://en.wikipedia.org/wiki/YAML)格式。一般情况下只需要配置online-dsn, test-dsn, log-output等少数几个参数。即使不创建配置文件SOAR仍然会给出基本的启发式建议。 + +```text +# 线上环境配置 +online-dsn: + addr: 127.0.0.1:3306 + schema: sakila + user: root + password: 1t'sB1g3rt + disable: false +# 测试环境配置 +test-dsn: + addr: 127.0.0.1:3307 + schema: test + user: root + password: 1t'sB1g3rt + disable: false +# 是否允许测试环境与线上环境配置相同 +allow-online-as-test: true +# 是否清理测试时产生的临时文件 +drop-test-temporary: true +# 语法检查小工具 +only-syntax-check: false +sampling-data-factor: 100 +sampling: true +# 日志级别,[0:Emergency, 1:Alert, 2:Critical, 3:Error, 4:Warning, 5:Notice, 6:Informational, 7:Debug] +log-level: 7 +log-output: ${BASE_DIR}/soar.log +# 优化建议输出格式 +report-type: markdown +ignore-rules: +- "" +blacklist: ${BASE_DIR}/soar.blacklist +# 启发式算法相关配置 +max-join-table-count: 5 +max-group-by-cols-count: 5 +max-distinct-count: 5 +max-index-cols-count: 5 +max-total-rows: 9999999 +spaghetti-query-length: 2048 +allow-drop-index: false +# EXPLAIN相关配置 +explain-sql-report-type: pretty +explain-type: extended +explain-format: traditional +explain-warn-select-type: +- "" +explain-warn-access-type: +- ALL +explain-max-keys: 3 +explain-min-keys: 0 +explain-max-rows: 10000 +explain-warn-extra: +- "" +explain-max-filtered: 100 +explain-warn-scalability: +- O(n) +query: "" +list-heuristic-rules: false +list-test-sqls: false +verbose: true +``` + +## 命令行参数 + +几乎所有配置文件中指定的参数都通通过命令行参数进行修改,且命令行参数优先级较配置文件优先级高。 + +```bash +$ soar -h +``` + +### 命令行参数配置DSN + +```bash +$ soar -online-dsn "user:password@hostname:port/database" + +$ soar -test-dsn "user:password@hostname:port/database" +``` + +#### 
DSN格式支持 +* "user:password@hostname:3307/database" +* "user:password@hostname:3307" +* "user:password@hostname:/database" +* "user:password@:3307/database" +* "user:password@" +* "hostname:3307/database" +* "@hostname:3307/database" +* "@hostname" +* "hostname" +* "@/database" +* "@hostname:3307" +* "@:3307/database" +* ":3307/database" +* "/database" + +### SQL评分 + +不同类型的建议指定的Severity不同,严重程度数字由低到高依次排序。满分100分,扣到0分为止。L0不扣分只给出建议,L1扣5分,L2扣10分,每级多扣5分以此类推。当由时给出L1, L2两要建议时扣分叠加,即扣15分。 + +如果您想给出不同的扣分建议或者对指引中的文字内容不满意可以为在git中提ISSUE,也可直接修改rules.go的相应配置然后重新编译自己的版本。 diff --git a/doc/editor_plugin.md b/doc/editor_plugin.md new file mode 100644 index 00000000..8324d340 --- /dev/null +++ b/doc/editor_plugin.md @@ -0,0 +1,34 @@ +## Vim插件安装 + +* 首先安装Syntastic,安装方法参见[官方文档](https://github.com/vim-syntastic/syntastic#installation) +* 将`soar`二进制文件拷贝到可执行文件的查找路径($PATH)下,添加可执行权限`chmod a+x soar` +* 将doc/example/[soar.vim](http://github.com/XiaoMi/soar/raw/master/doc/example/soar.vim)文件拷贝至${SyntasticInstalledPath}/syntax_checkers/sql目录下 +* 修改${SyntasticInstalledPath}/plugin/syntastic/registry.vim文件,增加sql文件的检查工具,`'sql':['soar', 'sqlint']` + +### 插件演示 + +![Vim插件示例](http://github.com/XiaoMi/soar/raw/master/doc/images/vim_plugin.png) + +### 常见问题 + +#### 安装插件后无任何变化 + +安装了Syntastic没有任何显示,官方推荐通过如下配置来开启自动提示,不然用户无法看到SOAR给出的建议。 + +```vim +set statusline+=%#warningmsg# +set statusline+=%{SyntasticStatuslineFlag()} +set statusline+=%* + +let g:syntastic_always_populate_loc_list = 1 +let g:syntastic_auto_loc_list = 1 +let g:syntastic_check_on_open = 1 +let g:syntastic_check_on_wq = 0 +``` + +如果soar二进制未在可执行文件查找路径下,或未添加可执行文件也会导致无法提供建议,可通过如下命令确认。 + +```bash +$ which soar +/usr/local/bin/soar +``` diff --git a/doc/enviorment.md b/doc/enviorment.md new file mode 100644 index 00000000..a53a4863 --- /dev/null +++ b/doc/enviorment.md @@ -0,0 +1,23 @@ +## 集成环境 + +![集成环境](http://github.com/XiaoMi/soar/raw/master/doc/images/env.png) + +| 线上环境 | 测试环境 | 场景 | +| --- | --- | --- | +| 有 | 有 | 日常优化,完整的建议,推荐 | +| 无 | 有 | 
新申请资源,环境初始化测试 | +| 无 | 无 | 盲测,试用,无EXPLAIN和索引建议 | +| 有 | 无 | 用线上环境当测试环境,不推荐 | + +## 线上环境 + +* 数据字典 +* 数据采样 +* EXPLAIN + +## 测试环境 + +* 库表映射 +* 语法检查 +* 模拟执行 +* 索引建议/去重 diff --git a/doc/example/digest_pt.py b/doc/example/digest_pt.py new file mode 100755 index 00000000..d6b0c88f --- /dev/null +++ b/doc/example/digest_pt.py @@ -0,0 +1,94 @@ +#!/usr/bin/python -u +#-*- coding: utf-8 -*- + +import sys, re, subprocess +import os.path +reload(sys) +sys.setdefaultencoding("utf-8") + +SOAR_ARGS=["-ignore-rules=OK"] +USE_DATABASE="" + +# 打印pt-query-digest的统计信息 +def printStatInfo(buf): + if buf.strip() == "": + return + if re.match("^# Query [0-9]", buf): + sys.stdout.write(buf.split(":", 1)[0] + "\n") + sys.stdout.write("\n```text\n") + sys.stdout.write(buf) + sys.stdout.write("```\n") + +# 打印每条SQL的SOAR结果 +def printSqlAdvisor(buf): + global USE_DATABASE + buf = re.sub("\\\G$", "", USE_DATABASE + buf) + if buf.strip() == "": + return + + cmd = ["soar"] + if len(SOAR_ARGS) > 0: + cmd = cmd + SOAR_ARGS + + p = subprocess.Popen(["soar"], stdout=subprocess.PIPE, stdin=subprocess.PIPE) + adv = p.communicate(input=buf)[0] + + # 清理环境 + USE_DATABASE = "" + + # 删除第一行"# Query: xxxxx" + try: + adv = adv.split('\n', 1)[1] + except: + pass + sys.stdout.write(adv + "\n") + +# 从统计信息中获取database信息 +def getUseDB(line): + global USE_DATABASE + USE_DATABASE = "USE " + re.sub(' +', " ", line).split(" ")[2] + ";" + +def parsePtQueryDisget(f): + statBuf = "" + sqlBuf = "" + for line in f: + if line.strip() == "": + continue + + if line.startswith("#"): + if line.startswith("# Databases ") and not line.strip().endswith("more"): + getUseDB(line) + if re.match("^# Query [0-9]", line): + # pt-query-digest的头部统计信息 + if line.startswith("# Query 1:"): + sys.stdout.write("# pt-query-digest统计信息" + "\n") + printStatInfo(statBuf) + statBuf = line + else: + statBuf += line + else: + if not line.strip().endswith("\G"): + sqlBuf += line + else: + sqlBuf += line + printStatInfo(statBuf) + statBuf = "" + 
printSqlAdvisor(sqlBuf) + sqlBuf = "" + +def main(): + global SOAR_ARGS + if len(sys.argv) == 1: + f = sys.stdin + parsePtQueryDisget(f) + else: + if os.path.isfile(sys.argv[-1]): + SOAR_ARGS = sys.argv[1:-1] + f = open(sys.argv[-1]) + else: + SOAR_ARGS = sys.argv[1:] + f = sys.stdin + parsePtQueryDisget(f) + +if __name__ == '__main__': + main() diff --git a/doc/example/main_test.md b/doc/example/main_test.md new file mode 100644 index 00000000..149da347 --- /dev/null +++ b/doc/example/main_test.md @@ -0,0 +1,4438 @@ +# Query: C3FAEDA6AD6D762B + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH = 86 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 10.00% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: E969B9297DA79BA6 + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH IS NULL +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 10.00% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## 应尽量避免在WHERE子句中对字段进行NULL值判断 + +* **Item:** ARG.006 + +* **Severity:** L1 + +* **Content:** 使用IS NULL或IS NOT NULL将可能导致引擎放弃使用索引而进行全表扫描,如:select id from t where num is null;可以在num上设置默认值0,确保表中num列没有null值,然后这样查询: select id from t where num=0; + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 8A106444D14B9880 + +★ ★ ★ ☆ ☆ 60分 + +```sql + +SELECT + * +FROM + film +HAVING + title = 'abc' +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 不建议使用HAVING子句 + +* **Item:** CLA.013 + +* **Severity:** L3 + +* **Content:** 将查询的HAVING子句改写为WHERE中的查询条件,可以在查询处理期间使用索引。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: A0C5E62C724A121A + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + sakila. 
film +WHERE + LENGTH >= 60 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 33.33% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 868317D1973FD1B0 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH BETWEEN 60 + AND 84 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 11.11% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 707FE669669FA075 + +★ ★ ★ ★ ☆ 95分 + +```sql + +SELECT + * +FROM + film +WHERE + title LIKE 'AIR%' +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | range | idx\_title | idx\_title | 767 | | 2 | ☠️ **100.00%** | ☠️ **O(n)** | Using index condition | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **range**: 只检索给定范围的行, 使用一个索引来选择行. key列显示使用了哪个索引. key_len包含所使用索引的最长关键元素. + +#### Extra信息解读 + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. + +* **Using index condition**: 在5.6版本后加入的新特性(Index Condition Pushdown)。Using index condition 会先条件过滤索引,过滤完索引后找到所有符合索引条件的数据行,随后用 WHERE 子句中的其他条件去过滤这些数据行。 + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: DF916439ABD07664 + +★ ★ ★ ★ ☆ 90分 + +```sql + +SELECT + * +FROM + film +WHERE + title IS NOT NULL +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | idx\_title | NULL | | | 1000 | 90.00% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. 
+ +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + + +## 应尽量避免在WHERE子句中对字段进行NULL值判断 + +* **Item:** ARG.006 + +* **Severity:** L1 + +* **Content:** 使用IS NULL或IS NOT NULL将可能导致引擎放弃使用索引而进行全表扫描,如:select id from t where num is null;可以在num上设置默认值0,确保表中num列没有null值,然后这样查询: select id from t where num=0; + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: B9336971FF3D3792 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH = 114 + AND title = 'ALABAMA DEVIL' +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ref | idx\_title | idx\_title | 767 | const | 1 | 10.00% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列title添加索引,散粒度为: 100.00%; 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_title\_length\` (\`title\`,\`length\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 68E48001ECD53152 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH > 100 + AND title = 'ALABAMA DEVIL' +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ref | idx\_title | idx\_title | 767 | const | 1 | 33.33% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列title添加索引,散粒度为: 100.00%; 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_title\_length\` (\`title\`,\`length\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 12FF1DAA3D425FA9 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH > 100 + AND language_id < 10 + AND title = 'xyz' +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ref | idx\_title,
idx\_fk\_language\_id | idx\_title | 767 | const | 1 | 33.33% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列title添加索引,散粒度为: 100.00%; 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_title\_length\` (\`title\`,\`length\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: E84CBAAC2E12BDEA + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH > 100 + AND language_id < 10 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | idx\_fk\_language\_id | NULL | | | 1000 | 33.33% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 6A0F035BD4E01018 + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + release_year, SUM( LENGTH) +FROM + film +WHERE + LENGTH = 123 + AND language_id = 1 +GROUP BY + release_year +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | idx\_fk\_language\_id | NULL | | | 1000 | 10.00% | ☠️ **O(n)** | Using where; Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; 为列language\_id添加索引,散粒度为: 0.10%; 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\_language\_id\_release\_year\` (\`length\`,\`language\_id\`,\`release\_year\`) ; + + + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +# Query: 23D176AEA2947002 + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + release_year, SUM( LENGTH) +FROM + film +WHERE + LENGTH >= 123 +GROUP BY + release_year +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 33.33% | ☠️ **O(n)** | Using where; Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\_release\_year\` (\`length\`,\`release\_year\`) ; + + + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +# Query: 73DDF6E6D9E40384 + +★ ★ ★ ☆ ☆ 60分 + +```sql + +SELECT + release_year, language_id, SUM( LENGTH) +FROM + film +GROUP BY + release_year, language_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列release\_year添加索引,散粒度为: 0.10%; 为列language\_id添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_release\_year\_language\_id\` (\`release\_year\`,\`language\_id\`) ; + + + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +# Query: B3C502B4AA344196 + +★ ★ ★ ☆ ☆ 70分 + +```sql + +SELECT + release_year, SUM( LENGTH) +FROM + film +WHERE + LENGTH = 123 +GROUP BY + release_year, (LENGTH+ language_id) +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 10.00% | ☠️ **O(n)** | Using where; Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +## GROUP BY的条件为表达式 + +* **Item:** CLA.010 + +* **Severity:** L2 + +* **Content:** 当GROUP BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。 + +# Query: 47044E1FE1A965A5 + +★ ★ ★ ☆ ☆ 60分 + +```sql + +SELECT + release_year, SUM( film_id) +FROM + film +GROUP BY + release_year +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_release\_year\` (\`release\_year\`) ; + + + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +# Query: 2BA1217F6C8CF0AB + +☆ ☆ ☆ ☆ ☆ 0分 + +```sql + +SELECT + * +FROM + address +GROUP BY + address, district +``` + +## MySQL返回信息 + +Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'optimizer_RSq3xBEF0TXgZsHj.address.address_id' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by + +## 为sakila库的address表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列address添加索引,散粒度为: 100.00%; 为列district添加索引,散粒度为: 100.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`address\` add index \`idx\_address\_district\` (\`address\`,\`district\`) ; + + + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## 非确定性的GROUP BY + +* **Item:** RES.001 + +* **Severity:** L4 + +* **Content:** SQL返回的列既不在聚合函数中也不是GROUP 
BY表达式的列中,因此这些值的结果将是非确定性的。如:select a, b, c from tbl where foo="bar" group by a,该SQL返回的结果就是不确定的。 + +# Query: 863A85207E4F410D + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + title +FROM + film +WHERE + ABS( language_id) = 3 +GROUP BY + title +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | index | idx\_title | idx\_title | 767 | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **index**: 全表扫描, 只是扫描表的时候按照索引次序进行而不是行. 主要优点就是避免了排序, 但是开销仍然非常大. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +## 避免在WHERE条件中使用函数或其他运算符 + +* **Item:** FUN.001 + +* **Severity:** L2 + +* **Content:** 虽然在SQL中使用函数可以简化很多复杂的查询,但使用了函数的查询无法利用表中已经建立的索引,该查询将会是全表扫描,性能较差。通常建议将列名写在比较运算符左侧,将查询过滤条件放在比较运算符右侧。 + +# Query: DF59FD602E4AA368 + +☆ ☆ ☆ ☆ ☆ 0分 + +```sql + +SELECT + language_id +FROM + film +WHERE + LENGTH = 123 +GROUP BY + release_year +ORDER BY + language_id +``` + +## MySQL返回信息 + +Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'optimizer_RSq3xBEF0TXgZsHj.film.language_id' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\_release\_year\` 
(\`length\`,\`release\_year\`) ; + + + +## 非确定性的GROUP BY + +* **Item:** RES.001 + +* **Severity:** L4 + +* **Content:** SQL返回的列既不在聚合函数中也不是GROUP BY表达式的列中,因此这些值的结果将是非确定性的。如:select a, b, c from tbl where foo="bar" group by a,该SQL返回的结果就是不确定的。 + +# Query: F6DBEAA606D800FC + +★ ★ ★ ★ ☆ 90分 + +```sql + +SELECT + release_year +FROM + film +WHERE + LENGTH = 123 +GROUP BY + release_year +ORDER BY + release_year +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 10.00% | ☠️ **O(n)** | Using where; Using temporary; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\_release\_year\` (\`length\`,\`release\_year\`) ; + + + +# Query: 6E9B96CA3F0E6BDA + +★ ★ ☆ ☆ ☆ 55分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH = 123 +ORDER BY + release_year ASC, language_id DESC +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 10.00% | ☠️ **O(n)** | Using where; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## ORDER BY语句对多个不同条件使用不同方向的排序无法使用索引 + +* **Item:** CLA.007 + +* **Severity:** L2 + +* **Content:** ORDER BY子句中的所有表达式必须按统一的ASC或DESC方向排序,以便利用索引。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## ORDER BY多个列但排序方向不同时可能无法使用索引 + +* **Item:** KEY.008 + +* **Severity:** L4 + +* **Content:** 在MySQL 8.0之前当ORDER BY多个列指定的排序方向不同时将无法使用已经建立的索引。 + +# Query: 2EAACFD7030EA528 + +★ ★ ★ ★ ☆ 90分 + +```sql + +SELECT + release_year +FROM + film +WHERE + LENGTH = 123 +GROUP BY + release_year +ORDER BY + release_year +LIMIT + 10 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 10.00% | ☠️ **O(n)** | Using where; Using temporary; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\_release\_year\` (\`length\`,\`release\_year\`) ; + + + +# Query: 5CE2F187DBF2A710 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH = 123 +ORDER BY + release_year +LIMIT + 10 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 10.00% | ☠️ **O(n)** | Using where; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\_release\_year\` (\`length\`,\`release\_year\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: E75234155B5E2E14 + +★ ★ ★ ☆ ☆ 65分 + +```sql + +SELECT + * +FROM + film +ORDER BY + release_year +LIMIT + 10 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_release\_year\` (\`release\_year\`) ; + + + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 965D5AC955824512 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH > 100 +ORDER BY + LENGTH +LIMIT + 10 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 33.33% | ☠️ **O(n)** | Using where; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 1E2CF4145EE706A5 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH < 100 +ORDER BY + LENGTH +LIMIT + 10 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 33.33% | ☠️ **O(n)** | Using where; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: A314542EEE8571EE + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + customer +WHERE + address_id in (224, 510) +ORDER BY + last_name +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *customer* | NULL | range | idx\_fk\_address\_id | idx\_fk\_address\_id | 2 | | 2 | ☠️ **100.00%** | ☠️ **O(n)** | Using index condition; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **range**: 只检索给定范围的行, 使用一个索引来选择行. key列显示使用了哪个索引. key_len包含所使用索引的最长关键元素. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. 
+ +* **Using index condition**: 在5.6版本后加入的新特性(Index Condition Pushdown)。Using index condition 会先条件过滤索引,过滤完索引后找到所有符合索引条件的数据行,随后用 WHERE 子句中的其他条件去过滤这些数据行。 + + +## 为sakila库的customer表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列address\_id添加索引,散粒度为: 100.00%; 为列last\_name添加索引,散粒度为: 100.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`customer\` add index \`idx\_address\_id\_last\_name\` (\`address\_id\`,\`last\_name\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 0BE2D79E2F1E7CB0 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + release_year = 2016 + AND LENGTH != 1 +ORDER BY + title +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 9.00% | ☠️ **O(n)** | Using where; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列release\_year添加索引,散粒度为: 0.10%; 为列length添加索引,散粒度为: 14.00%; 为列title添加索引,散粒度为: 100.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_release\_year\_length\_title\` (\`release\_year\`,\`length\`,\`title\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## '!=' 运算符是非标准的 + +* **Item:** STA.001 + +* **Severity:** L0 + +* **Content:** "<>"才是标准SQL中的不等于运算符。 + +# Query: 4E73AA068370E6A8 + +★ ★ ★ ★ ☆ 90分 + +```sql + +SELECT + title +FROM + film +WHERE + release_year = 1995 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 10.00% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_release\_year\` (\`release\_year\`) ; + + + +# Query: BA7111449E4F1122 + +★ ★ ★ ★ ☆ 90分 + +```sql + +SELECT + title, replacement_cost +FROM + film +WHERE + language_id = 5 + AND LENGTH = 70 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ref | idx\_fk\_language\_id | idx\_fk\_language\_id | 1 | const | 1 | 10.00% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; 为列language\_id添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\_language\_id\` (\`length\`,\`language\_id\`) ; + + + +# Query: B13E0ACEAF8F3119 + +★ ★ ★ ★ ☆ 90分 + +```sql + +SELECT + title +FROM + film +WHERE + language_id > 5 + AND LENGTH > 70 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | range | idx\_fk\_language\_id | idx\_fk\_language\_id | 1 | | 1 | 33.33% | ☠️ **O(n)** | Using index condition; Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). 
+ +#### Type信息解读 + +* **range**: 只检索给定范围的行, 使用一个索引来选择行. key列显示使用了哪个索引. key_len包含所使用索引的最长关键元素. + +#### Extra信息解读 + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. + +* **Using index condition**: 在5.6版本后加入的新特性(Index Condition Pushdown)。Using index condition 会先条件过滤索引,过滤完索引后找到所有符合索引条件的数据行,随后用 WHERE 子句中的其他条件去过滤这些数据行。 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +# Query: A3FAB6027484B88B + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH = 100 + AND title = 'xyz' +ORDER BY + release_year +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ref | idx\_title | idx\_title | 767 | const | 1 | 10.00% | ☠️ **O(n)** | Using index condition; Using where; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. + +* **Using index condition**: 在5.6版本后加入的新特性(Index Condition Pushdown)。Using index condition 会先条件过滤索引,过滤完索引后找到所有符合索引条件的数据行,随后用 WHERE 子句中的其他条件去过滤这些数据行。 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列title添加索引,散粒度为: 100.00%; 为列length添加索引,散粒度为: 14.00%; 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_title\_length\_release\_year\` (\`title\`,\`length\`,\`release\_year\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: CB42080E9F35AB07 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH > 100 + AND title = 'xyz' +ORDER BY + release_year +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ref | idx\_title | idx\_title | 767 | const | 1 | 33.33% | ☠️ **O(n)** | Using index condition; Using where; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. + +* **Using index condition**: 在5.6版本后加入的新特性(Index Condition Pushdown)。Using index condition 会先条件过滤索引,过滤完索引后找到所有符合索引条件的数据行,随后用 WHERE 子句中的其他条件去过滤这些数据行。 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列title添加索引,散粒度为: 100.00%; 为列length添加索引,散粒度为: 14.00%; 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_title\_length\_release\_year\` (\`title\`,\`length\`,\`release\_year\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: C4A212A42400411D + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + LENGTH > 100 +ORDER BY + release_year +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 33.33% | ☠️ **O(n)** | Using where; Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; 为列release\_year添加索引,散粒度为: 0.10%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\_release\_year\` (\`length\`,\`release\_year\`) ; + + + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 4ECCA9568BE69E68 + +★ ★ ★ ☆ ☆ 75分 + +```sql + +SELECT + * +FROM + city a + INNER JOIN country b ON a. country_id= b. 
country_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *b* | NULL | ALL | PRIMARY | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *a* | NULL | ref | idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.b.country\_id | 5 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 485D56FC88BBBDB9 + +★ ★ ★ ☆ ☆ 75分 + +```sql + +SELECT + * +FROM + city a + LEFT JOIN country b ON a. country_id= b. country_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *a* | NULL | ALL | | NULL | | | 600 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *b* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.a.country\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 
它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 0D0DABACEDFF5765 + +★ ★ ★ ☆ ☆ 75分 + +```sql + +SELECT + * +FROM + city a + RIGHT JOIN country b ON a. country_id= b. country_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *b* | NULL | ALL | | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *a* | NULL | ref | idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.b.country\_id | 5 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. 
+ + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 1E56C6CCEA2131CC + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + * +FROM + city a + LEFT JOIN country b ON a. country_id= b. country_id +WHERE + b. last_update IS NULL +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *a* | NULL | ALL | | NULL | | | 600 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *b* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.a.country\_id | 1 | 10.00% | ☠️ **O(n)** | Using where; Not exists | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + +#### Extra信息解读 + +* **Not exists**: MySQL能够对LEFT JOIN查询进行优化, 并且在查找到符合LEFT JOIN条件的行后, 则不再查找更多的行. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的country表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列last\_update添加索引,散粒度为: 0.92%; + +* **Case:** ALTER TABLE \`sakila\`.\`country\` add index \`idx\_last\_update\` (\`last\_update\`) ; + + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## 应尽量避免在WHERE子句中对字段进行NULL值判断 + +* **Item:** ARG.006 + +* **Severity:** L1 + +* **Content:** 使用IS NULL或IS NOT NULL将可能导致引擎放弃使用索引而进行全表扫描,如:select id from t where num is null;可以在num上设置默认值0,确保表中num列没有null值,然后这样查询: select id from t where num=0; + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: F5D30BCAC1E206A1 + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + * +FROM + city a + RIGHT JOIN country b ON a. country_id= b. country_id +WHERE + a. last_update IS NULL +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *b* | NULL | ALL | | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *a* | NULL | ref | idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.b.country\_id | 5 | 10.00% | ☠️ **O(n)** | Using where; Not exists | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* **Not exists**: MySQL能够对LEFT JOIN查询进行优化, 并且在查找到符合LEFT JOIN条件的行后, 则不再查找更多的行. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的city表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列last\_update添加索引,散粒度为: 0.17%; + +* **Case:** ALTER TABLE \`sakila\`.\`city\` add index \`idx\_last\_update\` (\`last\_update\`) ; + + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## 应尽量避免在WHERE子句中对字段进行NULL值判断 + +* **Item:** ARG.006 + +* **Severity:** L1 + +* **Content:** 使用IS NULL或IS NOT NULL将可能导致引擎放弃使用索引而进行全表扫描,如:select id from t where num is null;可以在num上设置默认值0,确保表中num列没有null值,然后这样查询: select id from t where num=0; + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +# Query: 17D5BCF21DC2364C + +★ ★ ★ ☆ ☆ 65分 + +```sql + +SELECT + * +FROM + city a + LEFT JOIN country b ON a. country_id= b. country_id +UNION +SELECT + * +FROM + city a + RIGHT JOIN country b ON a. country_id= b. country_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | PRIMARY | *a* | NULL | ALL | | NULL | | | 600 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | PRIMARY | *b* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.a.country\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 2 | UNION | *b* | NULL | ALL | | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 2 | UNION | *a* | NULL | ref | idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.b.country\_id | 5 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 0 | UNION RESULT | ** | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **PRIMARY**: 最外层的select. + +* **UNION**: UNION中的第二个或后面的SELECT查询, 不依赖于外部查询的结果集. + +* **UNION RESULT**: UNION查询的结果集. + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. 
+ +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## 如果您不在乎重复的话,建议使用UNION ALL替代UNION + +* **Item:** SUB.002 + +* **Severity:** L2 + +* **Content:** 与去除重复的UNION不同,UNION ALL允许重复元组。如果您不关心重复元组,那么使用UNION ALL将是一个更快的选项。 + +# Query: A4911095C201896F + +★ ★ ★ ☆ ☆ 65分 + +```sql + +SELECT + * +FROM + city a + RIGHT JOIN country b ON a. country_id= b. country_id +WHERE + a. last_update IS NULL +UNION +SELECT + * +FROM + city a + LEFT JOIN country b ON a. country_id= b. country_id +WHERE + b. 
last_update IS NULL +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | PRIMARY | *b* | NULL | ALL | | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | PRIMARY | *a* | NULL | ref | idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.b.country\_id | 5 | 10.00% | ☠️ **O(n)** | Using where; Not exists | +| 2 | UNION | *a* | NULL | ALL | | NULL | | | 600 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 2 | UNION | *b* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.a.country\_id | 1 | 10.00% | ☠️ **O(n)** | Using where; Not exists | +| 0 | UNION RESULT | ** | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **PRIMARY**: 最外层的select. + +* **UNION**: UNION中的第二个或后面的SELECT查询, 不依赖于外部查询的结果集. + +* **UNION RESULT**: UNION查询的结果集. + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + +#### Extra信息解读 + +* **Not exists**: MySQL能够对LEFT JOIN查询进行优化, 并且在查找到符合LEFT JOIN条件的行后, 则不再查找更多的行. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. 
+ + +## 为sakila库的city表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列last\_update添加索引,散粒度为: 0.17%; + +* **Case:** ALTER TABLE \`sakila\`.\`city\` add index \`idx\_last\_update\` (\`last\_update\`) ; + + + +## 为sakila库的country表添加索引 + +* **Item:** IDX.002 + +* **Severity:** L2 + +* **Content:** 为列last\_update添加索引,散粒度为: 0.92%; + +* **Case:** ALTER TABLE \`sakila\`.\`country\` add index \`idx\_last\_update\` (\`last\_update\`) ; + + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## 如果您不在乎重复的话,建议使用UNION ALL替代UNION + +* **Item:** SUB.002 + +* **Severity:** L2 + +* **Content:** 与去除重复的UNION不同,UNION ALL允许重复元组。如果您不关心重复元组,那么使用UNION ALL将是一个更快的选项。 + +# Query: 3FF20E28EC9CBEF9 + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + country_id, last_update +FROM + city NATURAL + JOIN country +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *country* | NULL | ALL | PRIMARY | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *city* | NULL | ref | idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.country.country\_id | 5 | 10.00% | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +# Query: 5C547F08EADBB131 + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + country_id, last_update +FROM + city NATURAL + LEFT JOIN country +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *city* | NULL | ALL | | NULL | | | 600 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *country* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.city.country\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +# Query: AF0C1EB58B23D2FA + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + country_id, last_update +FROM + city NATURAL + RIGHT JOIN country +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *country* | NULL | ALL | | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *city* | NULL | ref | idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.country.country\_id | 5 | ☠️ **100.00%** | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +# Query: 626571EAE84E2C8A + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + a. country_id, a. last_update +FROM + city a STRAIGHT_JOIN country b ON a. country_id= b. 
country_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *a* | NULL | ALL | idx\_fk\_country\_id | NULL | | | 600 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *b* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.a.country\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | Using index | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + +#### Extra信息解读 + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +# Query: F76BFFC87914E3D5 + +☆ ☆ ☆ ☆ ☆ 0分 + +```sql + +SELECT + d. deptno, d. dname, d. loc +FROM + scott. dept d +WHERE + d. deptno IN ( +SELECT + e. deptno +FROM + scott. 
emp e) +``` + +## MySQL返回信息 + +Unknown database 'scott' + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## MySQL对子查询的优化效果不佳 + +* **Item:** SUB.001 + +* **Severity:** L4 + +* **Content:** MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。这可能会在 MySQL 5.6版本中得到改善, 但对于5.1及更早版本, 建议将该类查询分别重写为JOIN或LEFT OUTER JOIN。 + +# Query: 18D2299710570E81 + +☆ ☆ ☆ ☆ ☆ 10分 + +```sql + +SELECT + visitor_id, url +FROM + ( +SELECT + id +FROM + LOG +WHERE + ip= "123.45.67.89" +ORDER BY + tsdesc +LIMIT + 50, 10) I + JOIN LOG ON (I. id= LOG. id) + JOIN url ON (url. id= LOG. url_id) +ORDER BY + TS desc +``` + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## ORDER BY语句对多个不同条件使用不同方向的排序无法使用索引 + +* **Item:** CLA.007 + +* **Severity:** L2 + +* **Content:** ORDER BY子句中的所有表达式必须按统一的ASC或DESC方向排序,以便利用索引。 + +## ORDER BY的条件为表达式 + +* **Item:** CLA.009 + +* **Severity:** L2 + +* **Content:** 当ORDER BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。 + +## 同一张表被连接两次 + +* **Item:** JOI.002 + +* **Severity:** L4 + +* **Content:** 相同的表在FROM子句中至少出现两次,可以简化为对该表的单次访问。 + +## 用字符类型存储IP地址 + +* **Item:** LIT.001 + +* **Severity:** L2 + +* **Content:** 字符串字面上看起来像IP地址,但不是INET\_ATON()的参数,表示数据被存储为字符而不是整数。将IP地址存储为整数更为有效。 + +## MySQL对子查询的优化效果不佳 + +* **Item:** SUB.001 + +* **Severity:** L4 + +* **Content:** MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。这可能会在 MySQL 5.6版本中得到改善, 但对于5.1及更早版本, 建议将该类查询分别重写为JOIN或LEFT OUTER JOIN。 + +# Query: 7F02E23D44A38A6D + +★ ★ ★ ★ ☆ 80分 + +```sql +DELETE city, country +FROM + city + INNER JOIN country using (country_id) +WHERE + city. 
city_id = 1 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | DELETE | *city* | NULL | const | PRIMARY,
idx\_fk\_country\_id | PRIMARY | 2 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | DELETE | *country* | NULL | const | PRIMARY | PRIMARY | 2 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### Type信息解读 + +* **const**: const用于使用常数值比较PRIMARY KEY时, 当查询的表仅有一行时, 使用system. 例:SELECT * FROM tbl WHERE col =1. + + +## 不建议使用联表更新 + +* **Item:** JOI.007 + +* **Severity:** L4 + +* **Content:** 当需要同时更新多张表时建议使用简单SQL,一条SQL只更新一张表,尽量不要将多张表的更新在同一条SQL中完成。 + +## 使用DELETE/DROP/TRUNCATE等操作时注意备份 + +* **Item:** SEC.003 + +* **Severity:** L0 + +* **Content:** 在执行高危操作之前对数据进行备份是十分有必要的。 + +# Query: F8314ABD1CBF2FF1 + +★ ★ ★ ☆ ☆ 70分 + +```sql +DELETE city +FROM + city + LEFT JOIN country ON city. country_id = country. country_id +WHERE + country. country IS NULL +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | DELETE | *city* | NULL | ALL | | NULL | | | 600 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *country* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.city.country\_id | 1 | 10.00% | ☠️ **O(n)** | Using where; Not exists | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + +#### Extra信息解读 + +* **Not exists**: MySQL能够对LEFT JOIN查询进行优化, 并且在查找到符合LEFT JOIN条件的行后, 则不再查找更多的行. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的country表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列country添加索引,散粒度为: 100.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`country\` add index \`idx\_country\` (\`country\`) ; + + + +## 不建议使用联表更新 + +* **Item:** JOI.007 + +* **Severity:** L4 + +* **Content:** 当需要同时更新多张表时建议使用简单SQL,一条SQL只更新一张表,尽量不要将多张表的更新在同一条SQL中完成。 + +## 使用DELETE/DROP/TRUNCATE等操作时注意备份 + +* **Item:** SEC.003 + +* **Severity:** L0 + +* **Content:** 在执行高危操作之前对数据进行备份是十分有必要的。 + +# Query: 1A53649C43122975 + +★ ★ ★ ★ ☆ 80分 + +```sql +DELETE a1, a2 +FROM + city AS a1 + INNER JOIN country AS a2 +WHERE + a1. country_id= a2. country_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | DELETE | *a2* | NULL | ALL | PRIMARY | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | DELETE | *a1* | NULL | ref | idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.a2.country\_id | 5 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + + +## 不建议使用联表更新 + +* **Item:** JOI.007 + +* **Severity:** L4 + +* **Content:** 当需要同时更新多张表时建议使用简单SQL,一条SQL只更新一张表,尽量不要将多张表的更新在同一条SQL中完成。 + +## 使用DELETE/DROP/TRUNCATE等操作时注意备份 + +* **Item:** SEC.003 + +* **Severity:** L0 + +* **Content:** 在执行高危操作之前对数据进行备份是十分有必要的。 + +# Query: B862978586C6338B + +★ ★ ★ ★ ☆ 80分 + +```sql + +DELETE FROM + a1, a2 USING city AS a1 + INNER JOIN country AS a2 +WHERE + a1. country_id= a2. 
country_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | DELETE | *a2* | NULL | ALL | PRIMARY | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | DELETE | *a1* | NULL | ref | idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.a2.country\_id | 5 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + + +## 不建议使用联表更新 + +* **Item:** JOI.007 + +* **Severity:** L4 + +* **Content:** 当需要同时更新多张表时建议使用简单SQL,一条SQL只更新一张表,尽量不要将多张表的更新在同一条SQL中完成。 + +## 使用DELETE/DROP/TRUNCATE等操作时注意备份 + +* **Item:** SEC.003 + +* **Severity:** L0 + +* **Content:** 在执行高危操作之前对数据进行备份是十分有必要的。 + +# Query: F16FD63381EF8299 + +★ ★ ★ ★ ☆ 90分 + +```sql + +DELETE FROM + film +WHERE + LENGTH > 100 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | DELETE | *film* | NULL | ALL | | NULL | | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列length添加索引,散粒度为: 14.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_length\` (\`length\`) ; + + + +## 使用DELETE/DROP/TRUNCATE等操作时注意备份 + +* **Item:** SEC.003 + +* **Severity:** L0 + +* **Content:** 在执行高危操作之前对数据进行备份是十分有必要的。 + +# Query: 08CFE41C7D20AAC8 + +★ ★ ★ ★ ☆ 80分 + +```sql + +UPDATE + city + INNER JOIN country USING( country_id) +SET + city. city = 'Abha', + city. last_update = '2006-02-15 04:45:25', + country. country = 'Afghanistan' +WHERE + city. city_id= 10 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | UPDATE | *city* | NULL | const | PRIMARY,
idx\_fk\_country\_id | PRIMARY | 2 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | UPDATE | *country* | NULL | const | PRIMARY | PRIMARY | 2 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### Type信息解读 + +* **const**: const用于使用常数值比较PRIMARY KEY时, 当查询的表仅有一行时, 使用system. 例:SELECT * FROM tbl WHERE col =1. + + +## 不建议使用联表更新 + +* **Item:** JOI.007 + +* **Severity:** L4 + +* **Content:** 当需要同时更新多张表时建议使用简单SQL,一条SQL只更新一张表,尽量不要将多张表的更新在同一条SQL中完成。 + +# Query: C15BDF2C73B5B7ED + +★ ★ ★ ★ ☆ 80分 + +```sql + +UPDATE + city + INNER JOIN country ON city. country_id = country. country_id + INNER JOIN address ON city. city_id = address. city_id +SET + city. city = 'Abha', + city. last_update = '2006-02-15 04:45:25', + country. country = 'Afghanistan' +WHERE + city. city_id= 10 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | UPDATE | *city* | NULL | const | PRIMARY,
idx\_fk\_country\_id | PRIMARY | 2 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | UPDATE | *country* | NULL | const | PRIMARY | PRIMARY | 2 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *address* | NULL | ref | idx\_fk\_city\_id | idx\_fk\_city\_id | 2 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | Using index | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **const**: const用于使用常数值比较PRIMARY KEY时, 当查询的表仅有一行时, 使用system. 例:SELECT * FROM tbl WHERE col =1. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +#### Extra信息解读 + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. + + +## 不建议使用联表更新 + +* **Item:** JOI.007 + +* **Severity:** L4 + +* **Content:** 当需要同时更新多张表时建议使用简单SQL,一条SQL只更新一张表,尽量不要将多张表的更新在同一条SQL中完成。 + +# Query: FCD1ABF36F8CDAD7 + +★ ★ ★ ★ ★ 100分 + +```sql + +UPDATE + city, country +SET + city. city = 'Abha', + city. last_update = '2006-02-15 04:45:25', + country. country = 'Afghanistan' +WHERE + city. country_id = country. country_id + AND city. city_id= 10 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | UPDATE | *city* | NULL | const | PRIMARY,
idx\_fk\_country\_id | PRIMARY | 2 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | UPDATE | *country* | NULL | const | PRIMARY | PRIMARY | 2 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### Type信息解读 + +* **const**: const用于使用常数值比较PRIMARY KEY时, 当查询的表仅有一行时, 使用system. 例:SELECT * FROM tbl WHERE col =1. + + +# Query: FE409EB794EE91CF + +★ ★ ★ ★ ★ 100分 + +```sql + +UPDATE + film +SET + LENGTH = 10 +WHERE + language_id = 20 +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | UPDATE | *film* | NULL | range | idx\_fk\_language\_id | idx\_fk\_language\_id | 1 | const | 1 | ☠️ **100.00%** | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### Type信息解读 + +* **range**: 只检索给定范围的行, 使用一个索引来选择行. key列显示使用了哪个索引. key_len包含所使用索引的最长关键元素. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + + +# Query: 3656B13CC4F888E2 + +★ ★ ★ ☆ ☆ 65分 + +```sql +INSERT INTO city (country_id) +SELECT + country_id +FROM + country +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | INSERT | *city* | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *country* | NULL | index | | PRIMARY | 2 | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | Using index | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **index**: 全表扫描, 只是扫描表的时候按照索引次序进行而不是行. 主要优点就是避免了排序, 但是开销仍然非常大. + +#### Extra信息解读 + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. 
+ + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## INSERT INTO xx SELECT加锁粒度较大请谨慎 + +* **Item:** LCK.001 + +* **Severity:** L3 + +* **Content:** INSERT INTO xx SELECT加锁粒度较大请谨慎 + +# Query: 2F7439623B712317 + +★ ★ ★ ★ ★ 100分 + +```sql +INSERT INTO city (country_id) +VALUES + (1), + (2), + (3) +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | INSERT | *city* | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + + +# Query: 11EC7AAACC97DC0F + +★ ★ ★ ★ ☆ 85分 + +```sql +INSERT INTO city (country_id) +SELECT + 10 +FROM + DUAL +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | INSERT | *city* | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. 
+ + +## INSERT INTO xx SELECT加锁粒度较大请谨慎 + +* **Item:** LCK.001 + +* **Severity:** L3 + +* **Content:** INSERT INTO xx SELECT加锁粒度较大请谨慎 + +# Query: E3DDA1A929236E72 + +★ ★ ★ ☆ ☆ 65分 + +```sql +REPLACE INTO city (country_id) +SELECT + country_id +FROM + country +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | REPLACE | *city* | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *country* | NULL | index | | PRIMARY | 2 | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | Using index | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **index**: 全表扫描, 只是扫描表的时候按照索引次序进行而不是行. 主要优点就是避免了排序, 但是开销仍然非常大. + +#### Extra信息解读 + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. + + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## INSERT INTO xx SELECT加锁粒度较大请谨慎 + +* **Item:** LCK.001 + +* **Severity:** L3 + +* **Content:** INSERT INTO xx SELECT加锁粒度较大请谨慎 + +# Query: 466F1AC2F5851149 + +★ ★ ★ ★ ★ 100分 + +```sql +REPLACE INTO city (country_id) +VALUES + (1), + (2), + (3) +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | REPLACE | *city* | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. 
+ + +# Query: A7973BDD268F926E + +★ ★ ★ ★ ☆ 85分 + +```sql +REPLACE INTO city (country_id) +SELECT + 10 +FROM + DUAL +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | REPLACE | *city* | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | NULL | + + + +### Explain信息解读 + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + + +## INSERT INTO xx SELECT加锁粒度较大请谨慎 + +* **Item:** LCK.001 + +* **Severity:** L3 + +* **Content:** INSERT INTO xx SELECT加锁粒度较大请谨慎 + +# Query: 105C870D5DFB6710 + +★ ★ ★ ☆ ☆ 65分 + +```sql + +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + ( +SELECT + film_id +FROM + film +) film +) film +) film +) film +) film +) film +) film +) film +) film +) film +) film +) film +) film +) film +) film +) film +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | index | | idx\_fk\_language\_id | 1 | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | Using index | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* **index**: 全表扫描, 只是扫描表的时候按照索引次序进行而不是行. 主要优点就是避免了排序, 但是开销仍然非常大. + +#### Extra信息解读 + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. 
+ + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 执行计划中嵌套连接深度过深 + +* **Item:** SUB.004 + +* **Severity:** L3 + +* **Content:** MySQL对子查询的优化效果不佳,MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。 + +# Query: 16C2B14E7DAA9906 + +★ ☆ ☆ ☆ ☆ 35分 + +```sql + +SELECT + * +FROM + film +WHERE + language_id = ( +SELECT + language_id +FROM + language +LIMIT + 1) +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | PRIMARY | *film* | NULL | ALL | idx\_fk\_language\_id | NULL | | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | Using where | +| 2 | SUBQUERY | *language* | NULL | index | | PRIMARY | 1 | | 6 | ☠️ **100.00%** | ☠️ **O(n)** | Using index | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **PRIMARY**: 最外层的select. + +* **SUBQUERY**: 子查询中的第一个SELECT查询, 不依赖于外部查询的结果集. + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **index**: 全表扫描, 只是扫描表的时候按照索引次序进行而不是行. 主要优点就是避免了排序, 但是开销仍然非常大. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. 
+ + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## 未使用ORDER BY的LIMIT查询 + +* **Item:** RES.002 + +* **Severity:** L4 + +* **Content:** 没有ORDER BY的LIMIT会导致非确定性的结果,这取决于查询执行计划。 + +## MySQL对子查询的优化效果不佳 + +* **Item:** SUB.001 + +* **Severity:** L4 + +* **Content:** MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。这可能会在 MySQL 5.6版本中得到改善, 但对于5.1及更早版本, 建议将该类查询分别重写为JOIN或LEFT OUTER JOIN。 + +# Query: 16CB4628D2597D40 + +★ ★ ★ ☆ ☆ 65分 + +```sql + +SELECT + * +FROM + city i + LEFT JOIN country o ON i. city_id= o. country_id +UNION +SELECT + * +FROM + city i + RIGHT JOIN country o ON i. city_id= o. country_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | PRIMARY | *i* | NULL | ALL | | NULL | | | 600 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | PRIMARY | *o* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.i.city\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 2 | UNION | *o* | NULL | ALL | | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 2 | UNION | *i* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.o.country\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 0 | UNION RESULT | ** | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **PRIMARY**: 最外层的select. + +* **UNION**: UNION中的第二个或后面的SELECT查询, 不依赖于外部查询的结果集. + +* **UNION RESULT**: UNION查询的结果集. + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 
例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + +#### Extra信息解读 + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## 如果您不在乎重复的话,建议使用UNION ALL替代UNION + +* **Item:** SUB.002 + +* **Severity:** L2 + +* **Content:** 与去除重复的UNION不同,UNION ALL允许重复元组。如果您不关心重复元组,那么使用UNION ALL将是一个更快的选项。 + +# Query: EA50643B01E139A8 + +☆ ☆ ☆ ☆ ☆ 0分 + +```sql + +SELECT + * +FROM + ( +SELECT + * +FROM + actor +WHERE + last_update= '2006-02-15 04:34:33' + AND last_name= 'CHASE' +) t +WHERE + last_update= '2006-02-15 04:34:33' + AND last_name= 'CHASE' +GROUP BY + first_name +``` + +## MySQL返回信息 + +Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 't.actor_id' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by + +## 为sakila库的actor表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列last\_name添加索引,散粒度为: 60.50%; 为列last\_update添加索引,散粒度为: 0.50%; 为列first\_name添加索引,散粒度为: 64.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`actor\` add index \`idx\_last\_name\_last\_update\_first\_name\` (\`last\_name\`,\`last\_update\`,\`first\_name\`) ; + + + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 
当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## 非确定性的GROUP BY + +* **Item:** RES.001 + +* **Severity:** L4 + +* **Content:** SQL返回的列既不在聚合函数中也不是GROUP BY表达式的列中,因此这些值的结果将是非确定性的。如:select a, b, c from tbl where foo="bar" group by a,该SQL返回的结果就是不确定的。 + +## MySQL对子查询的优化效果不佳 + +* **Item:** SUB.001 + +* **Severity:** L4 + +* **Content:** MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。这可能会在 MySQL 5.6版本中得到改善, 但对于5.1及更早版本, 建议将该类查询分别重写为JOIN或LEFT OUTER JOIN。 + +# Query: 7598A4EDE6CFA6BE + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + city i + LEFT JOIN country o ON i. city_id= o. country_id +WHERE + o. country_id is null +UNION +SELECT + * +FROM + city i + RIGHT JOIN country o ON i. city_id= o. country_id +WHERE + i. city_id is null +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | PRIMARY | *i* | NULL | ALL | | NULL | | | 600 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | PRIMARY | *o* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.i.city\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | Using where; Not exists | +| 2 | UNION | *o* | NULL | ALL | | NULL | | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 2 | UNION | *i* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.o.country\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | Using where; Not exists | +| 0 | UNION RESULT | ** | NULL | ALL | | NULL | | | 0 | 0.00% | ☠️ **O(n)** | Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **PRIMARY**: 最外层的select. + +* **UNION**: UNION中的第二个或后面的SELECT查询, 不依赖于外部查询的结果集. + +* **UNION RESULT**: UNION查询的结果集. + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. 
+ +#### Extra信息解读 + +* **Not exists**: MySQL能够对LEFT JOIN查询进行优化, 并且在查找到符合LEFT JOIN条件的行后, 则不再查找更多的行. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## 如果您不在乎重复的话,建议使用UNION ALL替代UNION + +* **Item:** SUB.002 + +* **Severity:** L2 + +* **Content:** 与去除重复的UNION不同,UNION ALL允许重复元组。如果您不关心重复元组,那么使用UNION ALL将是一个更快的选项。 + +# Query: 1E8B70E30062FD13 + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + first_name, last_name, email +FROM + customer STRAIGHT_JOIN address ON customer. address_id= address. address_id +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *customer* | NULL | ALL | idx\_fk\_address\_id | NULL | | | 599 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | SIMPLE | *address* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.customer.address\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | Using index | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + +#### Extra信息解读 + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. 
+ + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +# Query: E48A20D0413512DA + +★ ☆ ☆ ☆ ☆ 20分 + +```sql + +SELECT + ID, name +FROM + ( +SELECT + address +FROM + customer_list +WHERE + SID= 1 +ORDER BY + phone +LIMIT + 50, 10) a + JOIN customer_list l ON (a. address= l. address) + JOIN city c ON (c. city= l. city) +ORDER BY + phone desc +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | PRIMARY | *country* | NULL | index | PRIMARY | PRIMARY | 2 | | 109 | ☠️ **100.00%** | ☠️ **O(n)** | Using index; Using temporary; Using filesort | +| 1 | PRIMARY | *city* | NULL | ref | PRIMARY,
idx\_fk\_country\_id | idx\_fk\_country\_id | 2 | sakila.country.country\_id | 5 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | PRIMARY | *c* | NULL | ALL | | NULL | | | 600 | 10.00% | ☠️ **O(n)** | Using where; Using join buffer (Block Nested Loop) | +| 1 | PRIMARY | *a* | NULL | ref | PRIMARY,
idx\_fk\_city\_id | idx\_fk\_city\_id | 2 | sakila.city.city\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | PRIMARY | *cu* | NULL | ref | idx\_fk\_address\_id | idx\_fk\_address\_id | 2 | sakila.a.address\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 1 | PRIMARY | ** | NULL | ref | | | 152 | sakila.a.address | 6 | ☠️ **100.00%** | ☠️ **O(n)** | Using index | +| 2 | DERIVED | *a* | NULL | ALL | PRIMARY,
idx\_fk\_city\_id | NULL | | | 603 | ☠️ **100.00%** | ☠️ **O(n)** | Using filesort | +| 2 | DERIVED | *cu* | NULL | ref | idx\_fk\_store\_id,
idx\_fk\_address\_id | idx\_fk\_address\_id | 2 | sakila.a.address\_id | 1 | 54.42% | ☠️ **O(n)** | Using where | +| 2 | DERIVED | *city* | NULL | eq\_ref | PRIMARY,
idx\_fk\_country\_id | PRIMARY | 2 | sakila.a.city\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | NULL | +| 2 | DERIVED | *country* | NULL | eq\_ref | PRIMARY | PRIMARY | 2 | sakila.city.country\_id | 1 | ☠️ **100.00%** | ☠️ **O(n)** | Using index | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **PRIMARY**: 最外层的select. + +* **DERIVED**: 用于from子句里有子查询的情况. MySQL会递归执行这些子查询, 把结果放在临时表里. + +#### Type信息解读 + +* **index**: 全表扫描, 只是扫描表的时候按照索引次序进行而不是行. 主要优点就是避免了排序, 但是开销仍然非常大. + +* **ref**: 连接不能基于关键字选择单个行, 可能查找到多个符合条件的行. 叫做ref是因为索引要跟某个参考值相比较. 这个参考值或者是一个数, 或者是来自一个表里的多表查询的结果值. 例:'SELECT * FROM tbl WHERE idx_col=expr;'. + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +* **eq_ref**: 除const类型外最好的可能实现的连接类型. 它用在一个索引的所有部分被连接使用并且索引是UNIQUE或PRIMARY KEY, 对于每个索引键, 表中只有一条记录与之匹配. 例:'SELECT * FROM ref_table,tbl WHERE ref_table.key_column=tbl.column;'. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. MySQL中无法利用索引完成的排序操作称为'文件排序'. + +* **Using index**: 只需通过索引就可以从表中获取列的信息, 无需额外去读取真实的行数据. 如果查询使用的列值仅仅是一个简单索引的部分值, 则会使用这种策略来优化查询. + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + +* **Using join buffer**: 从已有连接中找被读入缓存的数据, 并且通过缓存来完成与当前表的连接. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的city表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列city添加索引,散粒度为: 99.83%; + +* **Case:** ALTER TABLE \`sakila\`.\`city\` add index \`idx\_city\` (\`city\`) ; + + + +## 建议使用AS关键字显示声明一个别名 + +* **Item:** ALI.001 + +* **Severity:** L0 + +* **Content:** 在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## ORDER BY语句对多个不同条件使用不同方向的排序无法使用索引 + +* **Item:** CLA.007 + +* **Severity:** L2 + +* **Content:** ORDER BY子句中的所有表达式必须按统一的ASC或DESC方向排序,以便利用索引。 + +## 同一张表被连接两次 + +* **Item:** JOI.002 + +* **Severity:** L4 + +* **Content:** 相同的表在FROM子句中至少出现两次,可以简化为对该表的单次访问。 + +## MySQL对子查询的优化效果不佳 + +* **Item:** SUB.001 + +* **Severity:** L4 + +* **Content:** MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。这可能会在 MySQL 5.6版本中得到改善, 但对于5.1及更早版本, 建议将该类查询分别重写为JOIN或LEFT OUTER JOIN。 + +# Query: B0BA5A7079EA16B3 + +★ ★ ★ ★ ☆ 85分 + +```sql + +SELECT + * +FROM + film +WHERE + DATE( last_update) = '2006-02-15' +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | Using where | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 不建议使用SELECT * 类型查询 + +* **Item:** COL.001 + +* **Severity:** L1 + +* **Content:** 当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 + +## 避免在WHERE条件中使用函数或其他运算符 + +* **Item:** FUN.001 + +* **Severity:** L2 + +* **Content:** 虽然在SQL中使用函数可以简化很多复杂的查询,但使用了函数的查询无法利用表中已经建立的索引,该查询将会是全表扫描,性能较差。通常建议将列名写在比较运算符左侧,将查询过滤条件放在比较运算符右侧。 + +# Query: 18A2AD1395A58EAE + +☆ ☆ ☆ ☆ ☆ 0分 + +```sql + +SELECT + last_update +FROM + film +GROUP BY + DATE( last_update) +``` + +## MySQL返回信息 + +Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'optimizer_RSq3xBEF0TXgZsHj.film.last_update' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +## GROUP BY的条件为表达式 + +* **Item:** CLA.010 + +* **Severity:** L2 + +* **Content:** 当GROUP BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。 + +# Query: 60F234BA33AAC132 + +★ ★ ★ ☆ ☆ 70分 + +```sql + +SELECT + last_update +FROM + film +ORDER BY + DATE( last_update) +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | ☠️ **100.00%** | ☠️ **O(n)** | Using filesort | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using filesort**: MySQL会对结果使用一个外部索引排序,而不是从表里按照索引次序读到相关内容. 可能在内存或者磁盘上进行排序. 
MySQL中无法利用索引完成的排序操作称为'文件排序'. + + +## SELECT未指定WHERE条件 + +* **Item:** CLA.001 + +* **Severity:** L4 + +* **Content:** SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 + +## ORDER BY的条件为表达式 + +* **Item:** CLA.009 + +* **Severity:** L2 + +* **Content:** 当ORDER BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。 + +# Query: 1ED2B7ECBA4215E1 + +★ ★ ★ ★ ☆ 80分 + +```sql + +SELECT + description +FROM + film +WHERE + description IN( 'NEWS', + 'asd' +) +GROUP BY + description +``` + +## Explain信息 + +| id | select\_type | table | partitions | type | possible_keys | key | key\_len | ref | rows | filtered | scalability | Extra | +|---|---|---|---|---|---|---|---|---|---|---|---|---| +| 1 | SIMPLE | *film* | NULL | ALL | | NULL | | | 1000 | 20.00% | ☠️ **O(n)** | Using where; Using temporary | + + + +### Explain信息解读 + +#### SelectType信息解读 + +* **SIMPLE**: 简单SELECT(不使用UNION或子查询等). + +#### Type信息解读 + +* ☠️ **ALL**: 最坏的情况, 从头到尾全表扫描. + +#### Extra信息解读 + +* ☠️ **Using temporary**: 表示MySQL在对查询结果排序时使用临时表. 常见于排序order by和分组查询group by. + +* **Using where**: WHERE条件用于筛选出与下一个表匹配的数据然后返回给客户端. 除非故意做的全表扫描, 否则连接类型是ALL或者是index, 且在Extra列的值中没有Using Where, 则该查询可能是有问题的. 
+ + +## 为sakila库的film表添加索引 + +* **Item:** IDX.001 + +* **Severity:** L2 + +* **Content:** 为列description添加索引,散粒度为: 100.00%; + +* **Case:** ALTER TABLE \`sakila\`.\`film\` add index \`idx\_description\` (\`description\`(255)) ; + + + +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item:** CLA.008 + +* **Severity:** L2 + +* **Content:** 默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 + +# Query: 255BAC03F56CDBC7 + +★ ★ ★ ★ ★ 100分 + +```sql + +ALTER TABLE + address +ADD + index idx_city_id( city_id) +``` + +## 提醒:请将索引属性顺序与查询对齐 + +* **Item:** KEY.004 + +* **Severity:** L0 + +* **Content:** 如果为列创建复合索引,请确保查询属性与索引属性的顺序相同,以便DBMS在处理查询时使用索引。如果查询和索引属性订单没有对齐,那么DBMS可能无法在查询处理期间使用索引。 + +# Query: C315BC4EE0F4E523 + +★ ★ ★ ★ ★ 100分 + +```sql + +ALTER TABLE + inventory +ADD + index `idx_store_film` ( + `store_id`, `film_id` ) +``` + +## 提醒:请将索引属性顺序与查询对齐 + +* **Item:** KEY.004 + +* **Severity:** L0 + +* **Content:** 如果为列创建复合索引,请确保查询属性与索引属性的顺序相同,以便DBMS在处理查询时使用索引。如果查询和索引属性订单没有对齐,那么DBMS可能无法在查询处理期间使用索引。 + +# Query: 9BB74D074BA0727C + +★ ★ ★ ★ ★ 100分 + +```sql + +ALTER TABLE + inventory +ADD + index `idx_store_film` ( + `store_id`, `film_id` ), + ADD + index `idx_store_film` ( + `store_id`, `film_id` ), + ADD + index `idx_store_film` ( + `store_id`, `film_id` ) +``` + +## 提醒:请将索引属性顺序与查询对齐 + +* **Item:** KEY.004 + +* **Severity:** L0 + +* **Content:** 如果为列创建复合索引,请确保查询属性与索引属性的顺序相同,以便DBMS在处理查询时使用索引。如果查询和索引属性订单没有对齐,那么DBMS可能无法在查询处理期间使用索引。 + diff --git a/doc/example/main_test.sh b/doc/example/main_test.sh new file mode 100755 index 00000000..96f72b5a --- /dev/null +++ b/doc/example/main_test.sh @@ -0,0 +1,17 @@ +#!/bin/bash + + +PROJECT_PATH=${GOPATH}/src/github.com/XiaoMi/soar/ + +if [ "$1x" == "-updatex" ]; then + cd "${PROJECT_PATH}" && ./soar -list-test-sqls | ./soar -config=./etc/soar.yaml > ./doc/example/main_test.md +else + cd "${PROJECT_PATH}" && ./soar -list-test-sqls | ./soar -config=./etc/soar.yaml > 
./doc/example/main_test.log + # optimizer_XXX 库名,散粒度,以及索引先后顺序每次可能会不一致 + DIFF_LINES=$(cat ./doc/example/main_test.log ./doc/example/main_test.md | grep -v "optimizer\|散粒度" | sort | uniq -u | wc -l) + if [ "${DIFF_LINES}" -gt 0 ]; then + git diff ./doc/example/main_test.log ./doc/example/main_test.md + fi +fi + + diff --git a/doc/example/metalinter.json b/doc/example/metalinter.json new file mode 100644 index 00000000..6564bf67 --- /dev/null +++ b/doc/example/metalinter.json @@ -0,0 +1,23 @@ +{ + "Vendor": true, + "DisableAll": true, + "Enable": [ + "gofmt", + "goimports", + "interfacer", + "misspell", + "unconvert", + "gosimple", + "golint", + "structcheck", + "deadcode", + "ineffassign", + "varcheck", + "gas", + "vet" + ], + "Exclude": [ + "MagicWordSZjYPIDgod1M8XqYEwhsdlzv2SyAtjy8" + ], + "Deadline": "5m" +} diff --git a/doc/example/metalinter.sh b/doc/example/metalinter.sh new file mode 100755 index 00000000..7e3a067a --- /dev/null +++ b/doc/example/metalinter.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +METABIN=$(which gometalinter.v1) +PROJECT_PATH=${GOPATH}/src/github.com/XiaoMi/soar/ + +if [ "x$METABIN" == "x" ]; then + go get -u gopkg.in/alecthomas/gometalinter.v1 + ${GOPATH}/bin/gometalinter.v1 --install +fi + +UPDATE=$1 + +if [ "${UPDATE}X" != "X" ]; then + ${GOPATH}/bin/gometalinter.v1 --config ${PROJECT_PATH}/doc/example/metalinter.json ./... | tr -d [0-9] | sort > ${PROJECT_PATH}/doc/example/metalinter.txt +else + cd ${PROJECT_PATH} && diff <(${GOPATH}/bin/gometalinter.v1 --config ${PROJECT_PATH}/doc/example/metalinter.json ./... 
| tr -d [0-9] | sort) <(cat ${PROJECT_PATH}/doc/example/metalinter.txt) +fi + diff --git a/doc/example/metalinter.txt b/doc/example/metalinter.txt new file mode 100644 index 00000000..e69de29b diff --git a/doc/example/revive.toml b/doc/example/revive.toml new file mode 100644 index 00000000..4cf298a8 --- /dev/null +++ b/doc/example/revive.toml @@ -0,0 +1,51 @@ +ignoreGeneratedHeader = false +severity = "error" +confidence = 0.8 +errorCode = 0 +warningCode = 0 + +[rule.blank-imports] +[rule.context-as-argument] +[rule.dot-imports] +[rule.error-return] +[rule.error-strings] +[rule.error-naming] +[rule.exported] +[rule.if-return] +[rule.var-naming] +[rule.package-comments] +[rule.range] +[rule.receiver-naming] +[rule.indent-error-flow] +[rule.superfluous-else] +[rule.modifies-parameter] + +# This can be checked by other tools like megacheck +[rule.unreachable-code] + + +# Currently this makes too much noise, but should add it in +# and perhaps ignore it in a few files +#[rule.confusing-naming] +# severity = "warning" +#[rule.confusing-results] +# severity = "warning" +#[rule.unused-parameter] +# severity = "warning" +#[rule.deep-exit] +# severity = "warning" +#[rule.flag-parameter] +# severity = "warning" + + + +# Adding these will slow down the linter +# They are already provided by megacheck +# [rule.unexported-return] +# [rule.time-naming] +# [rule.errorf] + +# Adding these will slow down the linter +# Not sure if they are already provided by megacheck +# [rule.var-declaration] +# [rule.context-keys-type] diff --git a/doc/example/sakila.sql.gz b/doc/example/sakila.sql.gz new file mode 100644 index 00000000..5cdf8c77 Binary files /dev/null and b/doc/example/sakila.sql.gz differ diff --git a/doc/example/slow.log.digest b/doc/example/slow.log.digest new file mode 100644 index 00000000..db4305e4 --- /dev/null +++ b/doc/example/slow.log.digest @@ -0,0 +1,83 @@ + +# 13.7s user time, 20ms system time, 27.95M rss, 181.32M vsz +# Current date: Thu May 17 15:24:49 2018 +# 
Hostname: 127.0.0.1 +# Files: slow.log +# Overall: 75.28k total, 21 unique, 1.36 QPS, 0.22x concurrency __________ +# Time range: 2018-05-17 00:01:47 to 15:24:47 +# Attribute total min max avg 95% stddev median +# ============ ======= ======= ======= ======= ======= ======= ======= +# Exec time 12368s 20ms 6s 164ms 501ms 208ms 95ms +# Lock time 2s 0 311us 30us 38us 5us 27us +# Rows sent 21.79M 0 28.49k 303.58 49.17 2.44k 0.99 +# Rows examine 103.77M 0 31.41k 1.41k 4.49k 2.96k 621.67 +# Query size 8.58M 17 7.78k 119.54 143.84 32.94 112.70 + +# Profile +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ================ ===== ====== ===== ============ +# 1 0x6F837C9DA962A07D 11374.6099 92.0% 67535 0.1684 0.27 SELECT test.table_? +# 2 0x0B991403AD4E8932 803.2640 6.5% 5993 0.1340 0.24 SELECT test.table_? +# MISC 0xMISC 190.1791 1.5% 1751 0.1086 0.0 <19 ITEMS> + +# Query 1: 1.22 QPS, 0.21x concurrency, ID 0x6F837C9DA962A07D at byte 6821409 +# This item is included in the report because it matches --limit. +# Scores: V/M = 0.27 +# Time range: 2018-05-17 00:01:47 to 15:24:47 +# Attribute pct total min max avg 95% stddev median +# ============ === ======= ======= ======= ======= ======= ======= ======= +# Count 89 67535 +# Exec time 91 11375s 20ms 6s 168ms 501ms 212ms 100ms +# Lock time 88 2s 20us 221us 29us 38us 5us 27us +# Rows sent 0 65.95k 1 1 1 1 0 1 +# Rows examine 72 75.13M 0 31.41k 1.14k 3.52k 1.89k 592.07 +# Query size 88 7.61M 114 119 118.23 118.34 2.50 112.70 +# String: +# Databases test... (50646/74%)... 2 more +# Hosts 127.0.0.1 (13617/20%)... 
4 more +# Users test_r +# Query_time distribution +# 1us +# 10us +# 100us +# 1ms +# 10ms ############################################################ +# 100ms ################################################################ +# 1s # +# 10s+ +# Tables +# SHOW TABLE STATUS FROM `test` LIKE 'table_78'\G +# SHOW CREATE TABLE `test`.`table_78`\G +# EXPLAIN /*!50100 PARTITIONS*/ +SELECT COUNT(*) AS `count` FROM test.table_78 WHERE `id` = 824076488 AND `last_modify` > 1526044213 AND `type` = 6\G + +# Query 2: 0.11 QPS, 0.01x concurrency, ID 0x0B991403AD4E8932 at byte 1691609 +# This item is included in the report because it matches --limit. +# Scores: V/M = 0.24 +# Time range: 2018-05-17 00:01:54 to 15:24:43 +# Attribute pct total min max avg 95% stddev median +# ============ === ======= ======= ======= ======= ======= ======= ======= +# Count 7 5993 +# Exec time 6 803s 20ms 1s 134ms 552ms 181ms 56ms +# Lock time 9 206ms 26us 179us 34us 44us 5us 31us +# Rows sent 1 290.64k 7 50 49.66 49.17 2.98 49.17 +# Rows examine 6 7.13M 7 5.79k 1.22k 4.27k 1.40k 563.87 +# Query size 9 850.97k 142 154 145.40 143.84 1.98 143.84 +# String: +# Databases test... (4280/71%)... 2 more +# Hosts 127.0.0.1 (1246/20%), 127.0.0.2 (1229/20%)... 
3 more +# Users test_r +# Query_time distribution +# 1us +# 10us +# 100us +# 1ms +# 10ms ################################################################ +# 100ms #################################### +# 1s # +# 10s+ +# Tables +# SHOW TABLE STATUS FROM `test` LIKE 'table_83'\G +# SHOW CREATE TABLE `test`.`table_83`\G +# EXPLAIN /*!50100 PARTITIONS*/ +SELECT * FROM test.table_83 WHERE `id` = 68211602 AND `last_modify` < 1526495341 AND `type` in ('6') order by `last_modify` desc LIMIT 0,50\G diff --git a/doc/example/soar.vim b/doc/example/soar.vim new file mode 100644 index 00000000..f4b2eaf6 --- /dev/null +++ b/doc/example/soar.vim @@ -0,0 +1,37 @@ +"============================================================================ +"File: soar.vim +"Description: Syntax checking plugin for syntastic +"Maintainer: Pengxiang Li +"License: MIT +"============================================================================ + +if exists('g:loaded_syntastic_sql_soar_checker') + finish +endif +let g:loaded_syntastic_sql_soar_checker= 1 + +let s:save_cpo = &cpo +set cpo&vim + +function! 
SyntaxCheckers_sql_soar_GetLocList() dict + let makeprg = self.makeprgBuild({ + \ 'args_after': '-report-type lint -query '}) + + let errorformat = '%f:%l:%m' + + return SyntasticMake({ + \ 'makeprg': makeprg, + \ 'errorformat': errorformat, + \ 'defaults': {'type': 'W'}, + \ 'subtype': 'Style', + \ 'returns': [0, 1] }) +endfunction + +call g:SyntasticRegistry.CreateAndRegisterChecker({ + \ 'filetype': 'sql', + \ 'name': 'soar'}) + +let &cpo = s:save_cpo +unlet s:save_cpo + +" vim: set sw=4 sts=4 et fdm=marker: diff --git a/doc/explain.md b/doc/explain.md new file mode 100644 index 00000000..5caa9081 --- /dev/null +++ b/doc/explain.md @@ -0,0 +1,42 @@ + +## EXPLAIN信息解读 + +* [EXPLAIN语法](https://dev.mysql.com/doc/refman/5.7/en/explain.html) +* [EXPLAIN输出信息](https://dev.mysql.com/doc/refman/5.7/en/explain-output.html) + +### SELECT转换 + +指定了线上环境时SOAR会到线上环境进行EXPLAIN,然后对线上执行EXPLAIN的结果进行分析。由于低版本的MySQL不支持对INSERT, UPDATE, DELETE, REPLACE进行分析,SOAR会自动将这些类型的查询请求转换为SELECT请求再执行EXPLAIN信息。 + +另外当线上环境设置了read\_only或super\_read\_only时即使是高版本的MySQL也无法对更新请求执行EXPLAIN。需要进行SELECT转换。 + +### 文本格式 + +SOAR也支持用户直接拷贝粘贴已有的EXPLAIN文本信息,格式可以是传统格式,\G输出的Vertical格式,也可以是JSON格式。 + +JSON格式的EXPLAIN包含的内容很丰富,但不便于人查看,信息解读的时候会将JSON和Vertical格式统一转换成传统格式。Golang处理JSON格式需要提前定义结构体,这里不得不向[gojson](https://github.com/ChimeraCoder/gojson)献出膝盖,要是没有这个工具也许我们暂时会放弃对JSON格式的支持。 + +### Filtered + +表示此查询条件所过滤的数据的百分比。低版本的MySQL EXPLAIN信息不包含Filtered字段,SOAR会按 `filtered = rows/total_rows` 计算补充。 + +5.7之前的版本Filtered计算可能出现大于100%的[BUG](https://bugs.mysql.com/bug.php?id=34124),为了不对用户产生困扰,soar会将大于100%的Filtered化整为100%。 + +### Scalability + +Scalability表示单表查询的运算复杂度,是参考[explain-analyzer](https://github.com/Preetam/explain-analyzer)项目添加的。Scalability是对access\_type的映射表,由于是单表查询,所以最大复杂度为O(n)。 + +| Access Type | Scalability | +| --- | --- | +| ALL | O(n) | +| index | O(n) | +| range | O(log n)+ | +| index\_subquery | O(log n)+ | +| unique\_subquery | O(log n)+ | +| index\_merge | O(log n)+ | +| ref\_or\_null | O(log n)+ | +| fulltext | O(log n)+ | +| ref 
| O(log n) | +| eq\_ref | O(log n) | +| const | O(1) | +| system | O(1) | diff --git a/doc/heuristic.md b/doc/heuristic.md new file mode 100644 index 00000000..1e93d48a --- /dev/null +++ b/doc/heuristic.md @@ -0,0 +1,1134 @@ +# 启发式规则建议 + +[toc] + +## 建议使用AS关键字显示声明一个别名 + +* **Item**:ALI.001 +* **Severity**:L0 +* **Content**:在列或表别名(如"tbl AS alias")中, 明确使用AS关键字比隐含别名(如"tbl alias")更易懂。 +* **Case**: + +```sql +select name from tbl t1 where id < 1000 +``` +## 不建议给列通配符'\*'设置别名 + +* **Item**:ALI.002 +* **Severity**:L8 +* **Content**:例: "SELECT tbl.\* col1, col2"上面这条SQL给列通配符设置了别名,这样的SQL可能存在逻辑错误。您可能意在查询col1, 但是代替它的是重命名的是tbl的最后一列。 +* **Case**: + +```sql +select tbl.* as c1,c2,c3 from tbl where id < 1000 +``` +## 别名不要与表或列的名字相同 + +* **Item**:ALI.003 +* **Severity**:L1 +* **Content**:表或列的别名与其真实名称相同, 这样的别名会使得查询更难去分辨。 +* **Case**: + +```sql +select name from tbl as tbl where id < 1000 +``` +## 修改表的默认字符集不会改表各个字段的字符集 + +* **Item**:ALT.001 +* **Severity**:L4 +* **Content**:很多初学者会将ALTER TABLE tbl\_name [DEFAULT] CHARACTER SET 'UTF8'误认为会修改所有字段的字符集,但实际上它只会影响后续新增的字段不会改表已有字段的字符集。如果想修改整张表所有字段的字符集建议使用ALTER TABLE tbl\_name CONVERT TO CHARACTER SET charset\_name; +* **Case**: + +```sql +ALTER TABLE tbl_name CONVERT TO CHARACTER SET charset_name; +``` +## 同一张表的多条ALTER请求建议合为一条 + +* **Item**:ALT.002 +* **Severity**:L2 +* **Content**:每次表结构变更对线上服务都会产生影响,即使是能够通过在线工具进行调整也请尽量通过合并ALTER请求的试减少操作次数。 +* **Case**: + +```sql +ALTER TABLE tbl ADD COLUMN col int, ADD INDEX idx_col (`col`); +``` +## 删除列为高危操作,操作前请注意检查业务逻辑是否还有依赖 + +* **Item**:ALT.003 +* **Severity**:L0 +* **Content**:如业务逻辑依赖未完全消除,列被删除后可能导致数据无法写入或无法查询到已删除列数据导致程序异常的情况。这种情况下即使通过备份数据回滚也会丢失用户请求写入的数据。 +* **Case**: + +```sql +ALTER TABLE tbl DROP COLUMN col; +``` +## 删除主键和外键为高危操作,操作前请与DBA确认影响 + +* **Item**:ALT.004 +* **Severity**:L0 +* **Content**:主键和外键为关系型数据库中两种重要约束,删除已有约束会打破已有业务逻辑,操作前请业务开发与DBA确认影响,三思而行。 +* **Case**: + +```sql +ALTER TABLE tbl DROP PRIMARY KEY; +``` +## 不建议使用前项通配符查找 + +* **Item**:ARG.001 +* **Severity**:L4 +* 
**Content**:例如“%foo”,查询参数有一个前项通配符的情况无法使用已有索引。 +* **Case**: + +```sql +select c1,c2,c3 from tbl where name like '%foo' +``` +## 没有通配符的LIKE查询 + +* **Item**:ARG.002 +* **Severity**:L1 +* **Content**:不包含通配符的LIKE查询可能存在逻辑错误,因为逻辑上它与等值查询相同。 +* **Case**: + +```sql +select c1,c2,c3 from tbl where name like 'foo' +``` +## 参数比较包含隐式转换,无法使用索引 + +* **Item**:ARG.003 +* **Severity**:L4 +* **Content**:隐式类型转换有无法命中索引的风险,在高并发、大数据量的情况下,命不中索引带来的后果非常严重。 +* **Case**: + +```sql +SELECT * FROM sakila.film WHERE length >= '60'; +``` +## IN (NULL)/NOT IN (NULL)永远非真 + +* **Item**:ARG.004 +* **Severity**:L4 +* **Content**:正确的作法是col IN ('val1', 'val2', 'val3') OR col IS NULL +* **Case**: + +```sql +SELECT * FROM sakila.film WHERE length >= '60'; +``` +## IN要慎用,元素过多会导致全表扫描 + +* **Item**:ARG.005 +* **Severity**:L1 +* **Content**: 如:select id from t where num in(1,2,3)对于连续的数值,能用BETWEEN就不要用IN了:select id from t where num between 1 and 3。而当IN值过多时MySQL也可能会进入全表扫描导致性能急剧下降。 +* **Case**: + +```sql +select id from t where num in(1,2,3) +``` +## 应尽量避免在WHERE子句中对字段进行NULL值判断 + +* **Item**:ARG.006 +* **Severity**:L1 +* **Content**:使用IS NULL或IS NOT NULL将可能导致引擎放弃使用索引而进行全表扫描,如:select id from t where num is null;可以在num上设置默认值0,确保表中num列没有null值,然后这样查询: select id from t where num=0; +* **Case**: + +```sql +select id from t where num is null +``` +## 避免使用模式匹配 + +* **Item**:ARG.007 +* **Severity**:L3 +* **Content**:性能问题是使用模式匹配操作符的最大缺点。使用LIKE或正则表达式进行模式匹配进行查询的另一个问题,是可能会返回意料之外的结果。最好的方案就是使用特殊的搜索引擎技术来替代SQL,比如Apache Lucene。另一个可选方案是将结果保存起来从而减少重复的搜索开销。如果一定要使用SQL,请考虑在MySQL中使用像FULLTEXT索引这样的第三方扩展。但更广泛地说,您不一定要使用SQL来解决所有问题。 +* **Case**: + +```sql +select c_id,c2,c3 from tbl where c2 like 'test%' +``` +## OR查询索引列时请尽量使用IN谓词 + +* **Item**:ARG.008 +* **Severity**:L1 +* **Content**:IN-list谓词可以用于索引检索,并且优化器可以对IN-list进行排序,以匹配索引的排序序列,从而获得更有效的检索。请注意,IN-list必须只包含常量,或在查询块执行期间保持常量的值,例如外引用。 +* **Case**: + +```sql +SELECT c1,c2,c3 FROM tbl WHERE c1 = 14 OR c1 = 17 +``` +## 引号中的字符串开头或结尾包含空格 + +* **Item**:ARG.009 +* **Severity**:L1 +* 
**Content**:如果VARCHAR列的前后存在空格将可能引起逻辑问题,如在MySQL 5.5中'a'和'a '可能会在查询中被认为是相同的值。 +* **Case**: + +```sql +SELECT 'abc ' +``` +## 不要使用hint,如sql\_no\_cache,force index,ignore key,straight join等 + +* **Item**:ARG.010 +* **Severity**:L1 +* **Content**:hint是用来强制SQL按照某个执行计划来执行,但随着数据量变化我们无法保证自己当初的预判是正确的。 +* **Case**: + +```sql +SELECT 'abc ' +``` +## 不要使用负向查询,如:NOT IN/NOT LIKE + +* **Item**:ARG.011 +* **Severity**:L3 +* **Content**:请尽量不要使用负向查询,这将导致全表扫描,对查询性能影响较大。 +* **Case**: + +```sql +select id from t where num not in(1,2,3); +``` +## 最外层SELECT未指定WHERE条件 + +* **Item**:CLA.001 +* **Severity**:L4 +* **Content**:SELECT语句没有WHERE子句,可能检查比预期更多的行(全表扫描)。对于SELECT COUNT(\*)类型的请求如果不要求精度,建议使用SHOW TABLE STATUS或EXPLAIN替代。 +* **Case**: + +```sql +select id from tbl +``` +## 不建议使用ORDER BY RAND() + +* **Item**:CLA.002 +* **Severity**:L3 +* **Content**:ORDER BY RAND()是从结果集中检索随机行的一种非常低效的方法,因为它会对整个结果进行排序并丢弃其大部分数据。 +* **Case**: + +```sql +select name from tbl where id < 1000 order by rand(number) +``` +## 不建议使用带OFFSET的LIMIT查询 + +* **Item**:CLA.003 +* **Severity**:L2 +* **Content**:使用LIMIT和OFFSET对结果集分页的复杂度是O(n^2),并且会随着数据增大而导致性能问题。采用“书签”扫描的方法实现分页效率更高。 +* **Case**: + +```sql +select c1,c2 from tbl where name=xx order by number limit 1 offset 20 +``` +## 不建议对常量进行GROUP BY + +* **Item**:CLA.004 +* **Severity**:L2 +* **Content**:GROUP BY 1 表示按第一列进行GROUP BY。如果在GROUP BY子句中使用数字,而不是表达式或列名称,当查询列顺序改变时,可能会导致问题。 +* **Case**: + +```sql +select col1,col2 from tbl group by 1 +``` +## ORDER BY常数列没有任何意义 + +* **Item**:CLA.005 +* **Severity**:L2 +* **Content**:SQL逻辑上可能存在错误; 最多只是一个无用的操作,不会更改查询结果。 +* **Case**: + +```sql +select id from test where id=1 order by id +``` +## 在不同的表中GROUP BY或ORDER BY + +* **Item**:CLA.006 +* **Severity**:L4 +* **Content**:这将强制使用临时表和filesort,可能产生巨大性能隐患,并且可能消耗大量内存和磁盘上的临时空间。 +* **Case**: + +```sql +select tb1.col, tb2.col from tb1, tb2 where id=1 group by tb1.col, tb2.col +``` +## ORDER BY语句对多个不同条件使用不同方向的排序无法使用索引 + +* **Item**:CLA.007 +* **Severity**:L2 +* **Content**:ORDER 
BY子句中的所有表达式必须按统一的ASC或DESC方向排序,以便利用索引。 +* **Case**: + +```sql +select c1,c2,c3 from t1 where c1='foo' order by c2 desc, c3 asc +``` +## 请为GROUP BY显示添加ORDER BY条件 + +* **Item**:CLA.008 +* **Severity**:L2 +* **Content**:默认MySQL会对'GROUP BY col1, col2, ...'请求按如下顺序排序'ORDER BY col1, col2, ...'。如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加'ORDER BY NULL'。 +* **Case**: + +```sql +select c1,c2,c3 from t1 where c1='foo' group by c2 +``` +## ORDER BY的条件为表达式 + +* **Item**:CLA.009 +* **Severity**:L2 +* **Content**:当ORDER BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。 +* **Case**: + +```sql +select description from film where title ='ACADEMY DINOSAUR' order by length-language_id; +``` +## GROUP BY的条件为表达式 + +* **Item**:CLA.010 +* **Severity**:L2 +* **Content**:当GROUP BY条件为表达式或函数时会使用到临时表,如果在未指定WHERE或WHERE条件返回的结果集较大时性能会很差。 +* **Case**: + +```sql +select description from film where title ='ACADEMY DINOSAUR' GROUP BY length-language_id; +``` +## 建议为表添加注释 + +* **Item**:CLA.011 +* **Severity**:L1 +* **Content**:为表添加注释能够使得表的意义更明确,从而为日后的维护带来极大的便利。 +* **Case**: + +```sql +CREATE TABLE `test1` (`ID` bigint(20) NOT NULL AUTO_INCREMENT,`c1` varchar(128) DEFAULT NULL,PRIMARY KEY (`ID`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 +``` +## 将复杂的裹脚布式查询分解成几个简单的查询 + +* **Item**:CLA.012 +* **Severity**:L2 +* **Content**:SQL是一门极具表现力的语言,您可以在单个SQL查询或者单条语句中完成很多事情。但这并不意味着必须强制只使用一行代码,或者认为使用一行代码就搞定每个任务是个好主意。通过一个查询来获得所有结果的常见后果是得到了一个笛卡儿积。当查询中的两张表之间没有条件限制它们的关系时,就会发生这种情况。没有对应的限制而直接使用两张表进行联结查询,就会得到第一张表中的每一行和第二张表中的每一行的一个组合。每一个这样的组合就会成为结果集中的一行,最终您就会得到一个行数很多的结果集。重要的是要考虑这些查询很难编写、难以修改和难以调试。数据库查询请求的日益增加应该是预料之中的事。经理们想要更复杂的报告以及在用户界面上添加更多的字段。如果您的设计很复杂,并且是一个单一查询,要扩展它们就会很费时费力。不论对您还是项目来说,时间花在这些事情上面不值得。将复杂的意大利面条式查询分解成几个简单的查询。当您拆分一个复杂的SQL查询时,得到的结果可能是很多类似的查询,可能仅仅在数据类型上有所不同。编写所有的这些查询是很乏味的,因此,最好能够有个程序自动生成这些代码。SQL代码生成是一个很好的应用。尽管SQL支持用一行代码解决复杂的问题,但也别做不切实际的事情。 +* **Case**: + +```sql +这是一条很长很长的SQL,案例略。 +``` +## 不建议使用HAVING子句 + +* **Item**:CLA.013 +* **Severity**:L3 +* **Content**:将查询的HAVING子句改写为WHERE中的查询条件,可以在查询处理期间使用索引。 +* 
**Case**: + +```sql +SELECT s.c_id,count(s.c_id) FROM s where c = test GROUP BY s.c_id HAVING s.c_id <> '1660' AND s.c_id <> '2' order by s.c_id +``` +## 删除全表时建议使用TRUNCATE替代DELETE + +* **Item**:CLA.014 +* **Severity**:L2 +* **Content**:删除全表时建议使用TRUNCATE替代DELETE +* **Case**: + +```sql +delete from tbl +``` +## UPDATE未指定WHERE条件 + +* **Item**:CLA.015 +* **Severity**:L4 +* **Content**:UPDATE不指定WHERE条件一般是致命的,请您三思后行 +* **Case**: + +```sql +update tbl set col=1 +``` +## 不要UPDATE主键 + +* **Item**:CLA.016 +* **Severity**:L2 +* **Content**:主键是数据表中记录的唯一标识符,不建议频繁更新主键列,这将影响元数据统计信息进而影响正常的查询。 +* **Case**: + +```sql +update tbl set col=1 +``` +## 不建议使用存储过程、视图、触发器、临时表等 + +* **Item**:CLA.017 +* **Severity**:L2 +* **Content**:这些功能的使用在一定程度上会使得程序难以调试和拓展,更没有移植性,且会极大的增加出现BUG的概率。 +* **Case**: + +```sql +CREATE VIEW v_today (today) AS SELECT CURRENT_DATE; +``` +## 不建议使用SELECT \* 类型查询 + +* **Item**:COL.001 +* **Severity**:L1 +* **Content**:当表结构变更时,使用\*通配符选择所有列将导致查询的含义和行为会发生更改,可能导致查询返回更多的数据。 +* **Case**: + +```sql +select * from tbl where id=1 +``` +## INSERT未指定列名 + +* **Item**:COL.002 +* **Severity**:L2 +* **Content**:当表结构发生变更,如果INSERT或REPLACE请求不明确指定列名,请求的结果将会与预想的不同; 建议使用“INSERT INTO tbl(col1,col2)VALUES ...”代替。 +* **Case**: + +```sql +insert into tbl values(1,'name') +``` +## 建议修改自增ID为无符号类型 + +* **Item**:COL.003 +* **Severity**:L2 +* **Content**:建议修改自增ID为无符号类型 +* **Case**: + +```sql +create table test(`id` int(11) NOT NULL AUTO_INCREMENT) +``` +## 请为列添加默认值 + +* **Item**:COL.004 +* **Severity**:L1 +* **Content**:请为列添加默认值,如果是ALTER操作,请不要忘记将原字段的默认值写上。字段无默认值,当表较大时无法在线变更表结构。 +* **Case**: + +```sql +CREATE TABLE tbl (col int) ENGINE=InnoDB; +``` +## 列未添加注释 + +* **Item**:COL.005 +* **Severity**:L1 +* **Content**:建议对表中每个列添加注释,来明确每个列在表中的含义及作用。 +* **Case**: + +```sql +CREATE TABLE tbl (col int) ENGINE=InnoDB; +``` +## 表中包含有太多的列 + +* **Item**:COL.006 +* **Severity**:L3 +* **Content**:表中包含有太多的列 +* **Case**: + +```sql +CREATE TABLE tbl ( cols ....); +``` +## 可使用VARCHAR代替CHAR,VARBINARY代替BINARY + +* 
**Item**:COL.008 +* **Severity**:L1 +* **Content**:为首先变长字段存储空间小,可以节省存储空间。其次对于查询来说,在一个相对较小的字段内搜索效率显然要高些。 +* **Case**: + +```sql +create table t1(id int,name char(20),last_time date) +``` +## 建议使用精确的数据类型 + +* **Item**:COL.009 +* **Severity**:L2 +* **Content**:实际上,任何使用FLOAT、REAL或DOUBLE PRECISION数据类型的设计都有可能是反模式。大多数应用程序使用的浮点数的取值范围并不需要达到IEEE 754标准所定义的最大/最小区间。在计算总量时,非精确浮点数所积累的影响是严重的。使用SQL中的NUMERIC或DECIMAL类型来代替FLOAT及其类似的数据类型进行固定精度的小数存储。这些数据类型精确地根据您定义这一列时指定的精度来存储数据。尽可能不要使用浮点数。 +* **Case**: + +```sql +CREATE TABLE tab2 (p_id BIGINT UNSIGNED NOT NULL,a_id BIGINT UNSIGNED NOT NULL,hours float not null,PRIMARY KEY (p_id, a_id)) +``` +## 不建议使用ENUM数据类型 + +* **Item**:COL.010 +* **Severity**:L2 +* **Content**:ENUM定义了列中值的类型,使用字符串表示ENUM里的值时,实际存储在列中的数据是这些值在定义时的序数。因此,这列的数据是字节对齐的,当您进行一次排序查询时,结果是按照实际存储的序数值排序的,而不是按字符串值的字母顺序排序的。这可能不是您所希望的。没有什么语法支持从ENUM或者check约束中添加或删除一个值;您只能使用一个新的集合重新定义这一列。如果您打算废弃一个选项,您可能会为历史数据而烦恼。作为一种策略,改变元数据——也就是说,改变表和列的定义——应该是不常见的,并且要注意测试和质量保证。有一个更好的解决方案来约束一列中的可选值:创建一张检查表,每一行包含一个允许在列中出现的候选值;然后在引用新表的旧表上声明一个外键约束。 +* **Case**: + +```sql +create table tab1(status ENUM('new','in progress','fixed')) +``` +## 当需要唯一约束时才使用NULL,仅当列不能有缺失值时才使用NOT NULL + +* **Item**:COL.011 +* **Severity**:L0 +* **Content**:NULL和0是不同的,10乘以NULL还是NULL。NULL和空字符串是不一样的。将一个字符串和标准SQL中的NULL联合起来的结果还是NULL。NULL和FALSE也是不同的。AND、OR和NOT这三个布尔操作如果涉及NULL,其结果也让很多人感到困惑。当您将一列声明为NOT NULL时,也就是说这列中的每一个值都必须存在且是有意义的。使用NULL来表示任意类型不存在的空值。 当您将一列声明为NOT NULL时,也就是说这列中的每一个值都必须存在且是有意义的。 +* **Case**: + +```sql +select c1,c2,c3 from tbl where c4 is null or c4 <> 1 +``` +## BLOB和TEXT类型的字段不可设置为NULL + +* **Item**:COL.012 +* **Severity**:L5 +* **Content**:BLOB和TEXT类型的字段不可设置为NULL +* **Case**: + +```sql +CREATE TABLE `tbl` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` longblob, PRIMARY KEY (`id`)); +``` +## TIMESTAMP类型未设置默认值 + +* **Item**:COL.013 +* **Severity**:L4 +* **Content**:TIMESTAMP类型未设置默认值 +* **Case**: + +```sql +CREATE TABLE tbl( `id` bigint not null, `create_time` timestamp); +``` +## 为列指定了字符集 + +* **Item**:COL.014 +* 
**Severity**:L5 +* **Content**:建议列与表使用同一个字符集,不要单独指定列的字符集。 +* **Case**: + +```sql +CREATE TABLE `tb2` ( `id` int(11) DEFAULT NULL, `col` char(10) CHARACTER SET utf8 DEFAULT NULL) +``` +## BLOB类型的字段不可指定默认值 + +* **Item**:COL.015 +* **Severity**:L4 +* **Content**:BLOB类型的字段不可指定默认值 +* **Case**: + +```sql +CREATE TABLE `tbl` ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, `c` blob NOT NULL DEFAULT '', PRIMARY KEY (`id`)); +``` +## 整型定义建议采用INT(10)或BIGINT(20) + +* **Item**:COL.016 +* **Severity**:L1 +* **Content**:INT(M) 在 integer 数据类型中,M 表示最大显示宽度。 在 INT(M) 中,M 的值跟 INT(M) 所占多少存储空间并无任何关系。 INT(3)、INT(4)、INT(8) 在磁盘上都是占用 4 bytes 的存储空间。 +* **Case**: + +```sql +CREATE TABLE tab (a INT(1)); +``` +## varchar定义长度过长 + +* **Item**:COL.017 +* **Severity**:L2 +* **Content**:varchar 是可变长字符串,不预先分配存储空间,长度不要超过1024,如果存储长度过长MySQL将定义字段类型为text,独立出来一张表,用主键来对应,避免影响其它字段索引效率。 +* **Case**: + +```sql +CREATE TABLE tab (a varchar(3500)); +``` +## 消除不必要的DISTINCT条件 + +* **Item**:DIS.001 +* **Severity**:L1 +* **Content**:太多DISTINCT条件是复杂的裹脚布式查询的症状。考虑将复杂查询分解成许多简单的查询,并减少DISTINCT条件的数量。如果主键列是列的结果集的一部分,则DISTINCT条件可能没有影响。 +* **Case**: + +```sql +SELECT DISTINCT c.c_id,count(DISTINCT c.c_name),count(DISTINCT c.c_e),count(DISTINCT c.c_n),count(DISTINCT c.c_me),c.c_d FROM (select distinct xing, name from B) as e WHERE e.country_id = c.country_id +``` +## COUNT(DISTINCT)多列时结果可能和你预想的不同 + +* **Item**:DIS.002 +* **Severity**:L3 +* **Content**:COUNT(DISTINCT col)计算该列除NULL之外的不重复行数,注意COUNT(DISTINCT col, col2)如果其中一列全为NULL那么即使另一列有不同的值,也返回0。 +* **Case**: + +```sql +SELECT COUNT(DISTINCT col, col2) FROM tbl; +``` +## DISTINCT \*对有主键的表没有意义 + +* **Item**:DIS.003 +* **Severity**:L3 +* **Content**:当表已经有主键时,对所有列进行DISTINCT的输出结果与不进行DISTINCT操作的结果相同,请不要画蛇添足。 +* **Case**: + +```sql +SELECT DISTINCT * FROM film; +``` +## 避免在WHERE条件中使用函数或其他运算符 + +* **Item**:FUN.001 +* **Severity**:L2 +* **Content**:虽然在SQL中使用函数可以简化很多复杂的查询,但使用了函数的查询无法利用表中已经建立的索引,该查询将会是全表扫描,性能较差。通常建议将列名写在比较运算符左侧,将查询过滤条件放在比较运算符右侧。 +* **Case**: + +```sql +select id from t 
where substring(name,1,3)='abc' +``` +## 指定了WHERE条件或非MyISAM引擎时使用COUNT(\*)操作性能不佳 + +* **Item**:FUN.002 +* **Severity**:L1 +* **Content**:COUNT(\*)的作用是统计表行数,COUNT(COL)的作用是统计指定列非NULL的行数。MyISAM表对于COUNT(\*)统计全表行数进行了特殊的优化,通常情况下非常快。但对于非MyISAM表或指定了某些WHERE条件,COUNT(\*)操作需要扫描大量的行才能获取精确的结果,性能也因此不佳。有时候某些业务场景并不需要完全精确的COUNT值,此时可以用近似值来代替。EXPLAIN出来的优化器估算的行数就是一个不错的近似值,执行EXPLAIN并不需要真正去执行查询,所以成本很低。 +* **Case**: + +```sql +SELECT c3, COUNT(*) AS accounts FROM tab where c2 < 10000 GROUP BY c3 ORDER BY num +``` +## 使用了合并为可空列的字符串连接 + +* **Item**:FUN.003 +* **Severity**:L3 +* **Content**:在一些查询请求中,您需要强制让某一列或者某个表达式返回非NULL的值,从而让查询逻辑变得更简单,担忧不想将这个值存下来。使用COALESCE()函数来构造连接的表达式,这样即使是空值列也不会使整表达式变为NULL。 +* **Case**: + +```sql +select c1 || coalesce(' ' || c2 || ' ', ' ') || c3 as c from tbl +``` +## 不建议使用SYSDATE()函数 + +* **Item**:FUN.004 +* **Severity**:L4 +* **Content**:SYSDATE()函数可能导致主从数据不一致,请使用NOW()函数替代SYSDATE()。 +* **Case**: + +```sql +SELECT SYSDATE(); +``` +## 不建议使用COUNT(col)或COUNT(常量) + +* **Item**:FUN.005 +* **Severity**:L1 +* **Content**:不要使用COUNT(col)或COUNT(常量)来替代COUNT(\*),COUNT(\*)是SQL92定义的标准统计行数的方法,跟数据无关,跟NULL和非NULL也无关。 +* **Case**: + +```sql +SELECT COUNT(1) FROM tbl; +``` +## 使用SUM(COL)时需注意NPE问题 + +* **Item**:FUN.006 +* **Severity**:L1 +* **Content**:当某一列的值全是NULL时,COUNT(COL)的返回结果为0,但SUM(COL)的返回结果为NULL,因此使用SUM()时需注意NPE问题。可以使用如下方式来避免SUM的NPE问题: SELECT IF(ISNULL(SUM(COL)), 0, SUM(COL)) FROM tbl +* **Case**: + +```sql +SELECT SUM(COL) FROM tbl; +``` +## 不建议对等值查询列使用GROUP BY + +* **Item**:GRP.001 +* **Severity**:L2 +* **Content**:GROUP BY中的列在前面的WHERE条件中使用了等值查询,对这样的列进行GROUP BY意义不大。 +* **Case**: + +```sql +select film_id, title from film where release_year='2006' group by release_year +``` +## JOIN语句混用逗号和ANSI模式 + +* **Item**:JOI.001 +* **Severity**:L2 +* **Content**:表连接的时候混用逗号和ANSI JOIN不便于人类理解,并且MySQL不同版本的表连接行为和优先级均有所不同,当MySQL版本变化后可能会引入错误。 +* **Case**: + +```sql +select c1,c2,c3 from t1,t2 join t3 on t1.c1=t2.c1,t1.c3=t3,c1 where id>1000 +``` +## 同一张表被连接两次 + +* **Item**:JOI.002 +* 
**Severity**:L4 +* **Content**:相同的表在FROM子句中至少出现两次,可以简化为对该表的单次访问。 +* **Case**: + +```sql +select tb1.col from (tb1, tb2) join tb2 on tb1.id=tb.id where tb1.id=1 +``` +## OUTER JOIN失效 + +* **Item**:JOI.003 +* **Severity**:L4 +* **Content**:由于WHERE条件错误使得OUTER JOIN的外部表无数据返回,这会将查询隐式转换为 INNER JOIN 。如:select c from L left join R using(c) where L.a=5 and R.b=10。这种SQL逻辑上可能存在错误或程序员对OUTER JOIN如何工作存在误解,因为LEFT/RIGHT JOIN是LEFT/RIGHT OUTER JOIN的缩写。 +* **Case**: + +```sql +select c1,c2,c3 from t1 left outer join t2 using(c1) where t1.c2=2 and t2.c3=4 +``` +## 不建议使用排它JOIN + +* **Item**:JOI.004 +* **Severity**:L4 +* **Content**:只在右侧表为NULL的带WHERE子句的LEFT OUTER JOIN语句,有可能是在WHERE子句中使用错误的列,如:“... FROM l LEFT OUTER JOIN r ON l.l = r.r WHERE r.z IS NULL”,这个查询正确的逻辑可能是 WHERE r.r IS NULL。 +* **Case**: + +```sql +select c1,c2,c3 from t1 left outer join t2 on t1.c1=t2.c1 where t2.c2 is null +``` +## 减少JOIN的数量 + +* **Item**:JOI.005 +* **Severity**:L2 +* **Content**:太多的JOIN是复杂的裹脚布式查询的症状。考虑将复杂查询分解成许多简单的查询,并减少JOIN的数量。 +* **Case**: + +```sql +select bp1.p_id, b1.d_d as l, b1.b_id from b1 join bp1 on (b1.b_id = bp1.b_id) left outer join (b1 as b2 join bp2 on (b2.b_id = bp2.b_id)) on (bp1.p_id = bp2.p_id ) join bp21 on (b1.b_id = bp1.b_id) join bp31 on (b1.b_id = bp1.b_id) join bp41 on (b1.b_id = bp1.b_id) where b2.b_id = 0 +``` +## 将嵌套查询重写为JOIN通常会导致更高效的执行和更有效的优化 + +* **Item**:JOI.006 +* **Severity**:L4 +* **Content**:一般来说,非嵌套子查询总是用于关联子查询,最多是来自FROM子句中的一个表,这些子查询用于ANY、ALL和EXISTS的谓词。如果可以根据查询语义决定子查询最多返回一个行,那么一个不相关的子查询或来自FROM子句中的多个表的子查询就被压平了。 +* **Case**: + +```sql +SELECT s,p,d FROM tbl WHERE p.p_id = (SELECT s.p_id FROM tbl WHERE s.c_id = 100996 AND s.q = 1 ) +``` +## 不建议使用联表更新 + +* **Item**:JOI.007 +* **Severity**:L4 +* **Content**:当需要同时更新多张表时建议使用简单SQL,一条SQL只更新一张表,尽量不要将多张表的更新在同一条SQL中完成。 +* **Case**: + +```sql +UPDATE users u LEFT JOIN hobby h ON u.id = h.uid SET u.name = 'pianoboy' WHERE h.hobby = 'piano'; +``` +## 不要使用跨DB的Join查询 + +* **Item**:JOI.008 +* **Severity**:L4 +* 
**Content**:一般来说,跨DB的Join查询意味着查询语句跨越了两个不同的子系统,这可能意味着系统耦合度过高或库表结构设计不合理。 +* **Case**: + +```sql +SELECT s,p,d FROM tbl WHERE p.p_id = (SELECT s.p_id FROM tbl WHERE s.c_id = 100996 AND s.q = 1 ) +``` +## 建议使用自增列作为主键,如使用联合自增主键时请将自增键作为第一列 + +* **Item**:KEY.001 +* **Severity**:L2 +* **Content**:建议使用自增列作为主键,如使用联合自增主键时请将自增键作为第一列 +* **Case**: + +```sql +create table test(`id` int(11) NOT NULL PRIMARY KEY (`id`)) +``` +## 无主键或唯一键,无法在线变更表结构 + +* **Item**:KEY.002 +* **Severity**:L4 +* **Content**:无主键或唯一键,无法在线变更表结构 +* **Case**: + +```sql +create table test(col varchar(5000)) +``` +## 避免外键等递归关系 + +* **Item**:KEY.003 +* **Severity**:L4 +* **Content**:存在递归关系的数据很常见,数据常会像树或者以层级方式组织。然而,创建一个外键约束来强制执行同一表中两列之间的关系,会导致笨拙的查询。树的每一层对应着另一个连接。您将需要发出递归查询,以获得节点的所有后代或所有祖先。解决方案是构造一个附加的闭包表。它记录了树中所有节点间的关系,而不仅仅是那些具有直接的父子关系。您也可以比较不同层次的数据设计:闭包表,路径枚举,嵌套集。然后根据应用程序的需要选择一个。 +* **Case**: + +```sql +CREATE TABLE tab2 (p_id BIGINT UNSIGNED NOT NULL,a_id BIGINT UNSIGNED NOT NULL,PRIMARY KEY (p_id, a_id),FOREIGN KEY (p_id) REFERENCES tab1(p_id),FOREIGN KEY (a_id) REFERENCES tab3(a_id)) +``` +## 提醒:请将索引属性顺序与查询对齐 + +* **Item**:KEY.004 +* **Severity**:L0 +* **Content**:如果为列创建复合索引,请确保查询属性与索引属性的顺序相同,以便DBMS在处理查询时使用索引。如果查询和索引属性订单没有对齐,那么DBMS可能无法在查询处理期间使用索引。 +* **Case**: + +```sql +create index idx1 on tbl (last_name,first_name) +``` +## 表建的索引过多 + +* **Item**:KEY.005 +* **Severity**:L2 +* **Content**:表建的索引过多 +* **Case**: + +```sql +CREATE TABLE tbl ( a int, b int, c int, KEY idx_a (`a`),KEY idx_b(`b`),KEY idx_c(`c`)); +``` +## 主键中的列过多 + +* **Item**:KEY.006 +* **Severity**:L4 +* **Content**:主键中的列过多 +* **Case**: + +```sql +CREATE TABLE tbl ( a int, b int, c int, PRIMARY KEY(`a`,`b`,`c`)); +``` +## 未指定主键或主键非int或bigint + +* **Item**:KEY.007 +* **Severity**:L4 +* **Content**:未指定主键或主键非int或bigint,建议将主键设置为int unsigned或bigint unsigned。 +* **Case**: + +```sql +CREATE TABLE tbl (a int); +``` +## ORDER BY多个列但排序方向不同时可能无法使用索引 + +* **Item**:KEY.008 +* **Severity**:L4 +* **Content**:在MySQL 8.0之前当ORDER BY多个列指定的排序方向不同时将无法使用已经建立的索引。 +* 
**Case**: + +```sql +SELECT * FROM tbl ORDER BY a DESC, b ASC; +``` +## 添加唯一索引前请注意检查数据唯一性 + +* **Item**:KEY.009 +* **Severity**:L0 +* **Content**:请提前检查添加唯一索引列的数据唯一性,如果数据不唯一在线表结构调整时将有可能自动将重复列删除,这有可能导致数据丢失。 +* **Case**: + +```sql +CREATE UNIQUE INDEX part_of_name ON customer (name(10)); +``` +## SQL\_CALC\_FOUND\_ROWS效率低下 + +* **Item**:KWR.001 +* **Severity**:L2 +* **Content**:因为SQL\_CALC\_FOUND\_ROWS不能很好地扩展,所以可能导致性能问题; 建议业务使用其他策略来替代SQL\_CALC\_FOUND\_ROWS提供的计数功能,比如:分页结果展示等。 +* **Case**: + +```sql +select SQL_CALC_FOUND_ROWS col from tbl where id>1000 +``` +## 不建议使用MySQL关键字做列名或表名 + +* **Item**:KWR.002 +* **Severity**:L2 +* **Content**:当使用关键字做为列名或表名时程序需要对列名和表名进行转义,如果疏忽被将导致请求无法执行。 +* **Case**: + +```sql +CREATE TABLE tbl ( `select` int ) +``` +## 不建议使用复数做列名或表名 + +* **Item**:KWR.003 +* **Severity**:L1 +* **Content**:表名应该仅仅表示表里面的实体内容,不应该表示实体数量,对应于 DO 类名也是单数形式,符合表达习惯。 +* **Case**: + +```sql +CREATE TABLE tbl ( `books` int ) +``` +## INSERT INTO xx SELECT加锁粒度较大请谨慎 + +* **Item**:LCK.001 +* **Severity**:L3 +* **Content**:INSERT INTO xx SELECT加锁粒度较大请谨慎 +* **Case**: + +```sql +INSERT INTO tbl SELECT * FROM tbl2; +``` +## 请慎用INSERT ON DUPLICATE KEY UPDATE + +* **Item**:LCK.002 +* **Severity**:L3 +* **Content**:当主键为自增键时使用INSERT ON DUPLICATE KEY UPDATE可能会导致主键出现大量不连续快速增长,导致主键快速溢出无法继续写入。极端情况下还有可能导致主从数据不一致。 +* **Case**: + +```sql +INSERT INTO t1(a,b,c) VALUES (1,2,3) ON DUPLICATE KEY UPDATE c=c+1; +``` +## 用字符类型存储IP地址 + +* **Item**:LIT.001 +* **Severity**:L2 +* **Content**:字符串字面上看起来像IP地址,但不是INET\_ATON()的参数,表示数据被存储为字符而不是整数。将IP地址存储为整数更为有效。 +* **Case**: + +```sql +insert into tbl (IP,name) values('10.20.306.122','test') +``` +## 日期/时间未使用引号括起 + +* **Item**:LIT.002 +* **Severity**:L4 +* **Content**:诸如“WHERE col <2010-02-12”之类的查询是有效的SQL,但可能是一个错误,因为它将被解释为“WHERE col <1996”; 日期/时间文字应该加引号。 +* **Case**: + +```sql +select col1,col2 from tbl where time < 2018-01-10 +``` +## 一列中存储一系列相关数据的集合 + +* **Item**:LIT.003 +* **Severity**:L3 +* 
**Content**:将ID存储为一个列表,作为VARCHAR/TEXT列,这样能导致性能和数据完整性问题。查询这样的列需要使用模式匹配的表达式。使用逗号分隔的列表来做多表联结查询定位一行数据是极不优雅和耗时的。这将使验证ID更加困难。考虑一下,列表最多支持存放多少数据呢?将ID存储在一张单独的表中,代替使用多值属性,从而每个单独的属性值都可以占据一行。这样交叉表实现了两张表之间的多对多关系。这将更好地简化查询,也更有效地验证ID。 +* **Case**: + +```sql +select c1,c2,c3,c4 from tab1 where col_id REGEXP '[[:<:]]12[[:>:]]' +``` +## 请使用分号或已设定的DELIMITER结尾 + +* **Item**:LIT.004 +* **Severity**:L1 +* **Content**:USE database, SHOW DATABASES等命令也需要使用使用分号或已设定的DELIMITER结尾。 +* **Case**: + +```sql +USE db +``` +## 非确定性的GROUP BY + +* **Item**:RES.001 +* **Severity**:L4 +* **Content**:SQL返回的列既不在聚合函数中也不是GROUP BY表达式的列中,因此这些值的结果将是非确定性的。如:select a, b, c from tbl where foo="bar" group by a,该SQL返回的结果就是不确定的。 +* **Case**: + +```sql +select c1,c2,c3 from t1 where c2='foo' group by c2 +``` +## 未使用ORDER BY的LIMIT查询 + +* **Item**:RES.002 +* **Severity**:L4 +* **Content**:没有ORDER BY的LIMIT会导致非确定性的结果,这取决于查询执行计划。 +* **Case**: + +```sql +select col1,col2 from tbl where name=xx limit 10 +``` +## UPDATE/DELETE操作使用了LIMIT条件 + +* **Item**:RES.003 +* **Severity**:L4 +* **Content**:UPDATE/DELETE操作使用LIMIT条件和不添加WHERE条件一样危险,它可将会导致主从数据不一致或从库同步中断。 +* **Case**: + +```sql +UPDATE film SET length = 120 WHERE title = 'abc' LIMIT 1; +``` +## UPDATE/DELETE操作指定了ORDER BY条件 + +* **Item**:RES.004 +* **Severity**:L4 +* **Content**:UPDATE/DELETE操作不要指定ORDER BY条件。 +* **Case**: + +```sql +UPDATE film SET length = 120 WHERE title = 'abc' ORDER BY title +``` +## UPDATE可能存在逻辑错误,导致数据损坏 + +* **Item**:RES.005 +* **Severity**:L4 +* **Content**: +* **Case**: + +```sql +update tbl set col = 1 and cl = 2 where col=3; +``` +## 永远不真的比较条件 + +* **Item**:RES.006 +* **Severity**:L4 +* **Content**:查询条件永远非真,这将导致查询无匹配到的结果。 +* **Case**: + +```sql +select * from tbl where 1 != 1; +``` +## 永远为真的比较条件 + +* **Item**:RES.007 +* **Severity**:L4 +* **Content**:查询条件永远为真,这将导致WHERE条件失效进行全表查询。 +* **Case**: + +```sql +select * from tbl where 1 = 1; +``` +## 不建议使用LOAD DATA/SELECT ... 
INTO OUTFILE + +* **Item**:RES.008 +* **Severity**:L2 +* **Content**:SELECT INTO OUTFILE需要授予FILE权限,这通过会引入安全问题。LOAD DATA虽然可以提高数据导入速度,但同时也可能导致从库同步延迟过大。 +* **Case**: + +```sql +LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table; +``` +## 请谨慎使用TRUNCATE操作 + +* **Item**:SEC.001 +* **Severity**:L0 +* **Content**:一般来说想清空一张表最快速的做法就是使用TRUNCATE TABLE tbl\_name;语句。但TRUNCATE操作也并非是毫无代价的,TRUNCATE TABLE无法返回被删除的准确行数,如果需要返回被删除的行数建议使用DELETE语法。TRUNCATE操作还会重置AUTO\_INCREMENT,如果不想重置该值建议使用DELETE FROM tbl\_name WHERE 1;替代。TRUNCATE操作会对数据字典添加源数据锁(MDL),当一次需要TRUNCATE很多表时会影响整个实例的所有请求,因此如果要TRUNCATE多个表建议用DROP+CREATE的方式以减少锁时长。 +* **Case**: + +```sql +TRUNCATE TABLE tbl_name +``` +## 不使用明文存储密码 + +* **Item**:SEC.002 +* **Severity**:L0 +* **Content**:使用明文存储密码或者使用明文在网络上传递密码都是不安全的。如果攻击者能够截获您用来插入密码的SQL语句,他们就能直接读到密码。另外,将用户输入的字符串以明文的形式插入到纯SQL语句中,也会让攻击者发现它。如果您能够读取密码,黑客也可以。解决方案是使用单向哈希函数对原始密码进行加密编码。哈希是指将输入字符串转化成另一个新的、不可识别的字符串的函数。对密码加密表达式加点随机串来防御“字典攻击”。不要将明文密码输入到SQL查询语句中。在应用程序代码中计算哈希串,只在SQL查询中使用哈希串。 +* **Case**: + +```sql +create table test(id int,name varchar(20) not null,password varchar(200)not null) +``` +## 使用DELETE/DROP/TRUNCATE等操作时注意备份 + +* **Item**:SEC.003 +* **Severity**:L0 +* **Content**:在执行高危操作之前对数据进行备份是十分有必要的。 +* **Case**: + +```sql +delete from table where col = 'condition' +``` +## '!=' 运算符是非标准的 + +* **Item**:STA.001 +* **Severity**:L0 +* **Content**:"<>"才是标准SQL中的不等于运算符。 +* **Case**: + +```sql +select col1,col2 from tbl where type!=0 +``` +## 库名或表名点后建议不要加空格 + +* **Item**:STA.002 +* **Severity**:L1 +* **Content**:当使用db.table或table.column格式访问表或字段时,请不要在点号后面添加空格,虽然这样语法正确。 +* **Case**: + +```sql +select col from sakila. 
film +``` +## 索引起名不规范 + +* **Item**:STA.003 +* **Severity**:L1 +* **Content**:建议普通二级索引以idx\_为前缀,唯一索引以uk\_为前缀。 +* **Case**: + +```sql +select col from now where type!=0 +``` +## 起名时请不要使用字母、数字和下划线之外的字符 + +* **Item**:STA.004 +* **Severity**:L1 +* **Content**:以字母或下划线开头,名字只允许使用字母、数字和下划线。请统一大小写,不要使用驼峰命名法。不要在名字中出现连续下划线'\_\_',这样很难辨认。 +* **Case**: + +```sql +CREATE TABLE ` abc` (a int); +``` +## MySQL对子查询的优化效果不佳 + +* **Item**:SUB.001 +* **Severity**:L4 +* **Content**:MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。这可能会在 MySQL 5.6版本中得到改善, 但对于5.1及更早版本, 建议将该类查询分别重写为JOIN或LEFT OUTER JOIN。 +* **Case**: + +```sql +select col1,col2,col3 from table1 where col2 in(select col from table2) +``` +## 如果您不在乎重复的话,建议使用UNION ALL替代UNION + +* **Item**:SUB.002 +* **Severity**:L2 +* **Content**:与去除重复的UNION不同,UNION ALL允许重复元组。如果您不关心重复元组,那么使用UNION ALL将是一个更快的选项。 +* **Case**: + +```sql +select teacher_id as id,people_name as name from t1,t2 where t1.teacher_id=t2.people_id union select student_id as id,people_name as name from t1,t2 where t1.student_id=t2.people_id +``` +## 考虑使用EXISTS而不是DISTINCT子查询 + +* **Item**:SUB.003 +* **Severity**:L3 +* **Content**:DISTINCT关键字在对元组排序后删除重复。相反,考虑使用一个带有EXISTS关键字的子查询,您可以避免返回整个表。 +* **Case**: + +```sql +SELECT DISTINCT c.c_id, c.c_name FROM c,e WHERE e.c_id = c.c_id +``` +## 执行计划中嵌套连接深度过深 + +* **Item**:SUB.004 +* **Severity**:L3 +* **Content**:MySQL对子查询的优化效果不佳,MySQL将外部查询中的每一行作为依赖子查询执行子查询。 这是导致严重性能问题的常见原因。 +* **Case**: + +```sql +SELECT * from tb where id in (select id from (select id from tb)) +``` +## 子查询不支持LIMIT + +* **Item**:SUB.005 +* **Severity**:L8 +* **Content**:当前MySQL版本不支持在子查询中进行'LIMIT & IN/ALL/ANY/SOME'。 +* **Case**: + +```sql +SELECT * FROM staff WHERE name IN (SELECT NAME FROM customer ORDER BY name LIMIT 1) +``` +## 不建议在子查询中使用函数 + +* **Item**:SUB.006 +* **Severity**:L2 +* **Content**:MySQL将外部查询中的每一行作为依赖子查询执行子查询,如果在子查询中使用函数,即使是semi-join也很难进行高效的查询。可以将子查询重写为OUTER JOIN语句并用连接条件对数据进行过滤。 +* **Case**: + +```sql +SELECT * FROM staff WHERE name IN (SELECT max(NAME) 
FROM customer) +``` +## 不建议使用分区表 + +* **Item**:TBL.001 +* **Severity**:L4 +* **Content**:不建议使用分区表 +* **Case**: + +```sql +CREATE TABLE trb3(id INT, name VARCHAR(50), purchased DATE) PARTITION BY RANGE(YEAR(purchased)) (PARTITION p0 VALUES LESS THAN (1990), PARTITION p1 VALUES LESS THAN (1995), PARTITION p2 VALUES LESS THAN (2000), PARTITION p3 VALUES LESS THAN (2005) ); +``` +## 请为表选择合适的存储引擎 + +* **Item**:TBL.002 +* **Severity**:L4 +* **Content**:建表或修改表的存储引擎时建议使用推荐的存储引擎,如:innodb +* **Case**: + +```sql +create table test(`id` int(11) NOT NULL AUTO_INCREMENT) +``` +## 以DUAL命名的表在数据库中有特殊含义 + +* **Item**:TBL.003 +* **Severity**:L8 +* **Content**:DUAL表为虚拟表,不需要创建即可使用,也不建议服务以DUAL命名表。 +* **Case**: + +```sql +create table dual(id int, primary key (id)); +``` +## 表的初始AUTO\_INCREMENT值不为0 + +* **Item**:TBL.004 +* **Severity**:L2 +* **Content**:AUTO\_INCREMENT不为0会导致数据空洞。 +* **Case**: + +```sql +CREATE TABLE tbl (a int) AUTO_INCREMENT = 10; +``` +## 请使用推荐的字符集 + +* **Item**:TBL.005 +* **Severity**:L4 +* **Content**:表字符集只允许设置为utf8,utf8mb4 +* **Case**: + +```sql +CREATE TABLE tbl (a int) DEFAULT CHARSET = latin1; +``` diff --git a/doc/images/env.png b/doc/images/env.png new file mode 100644 index 00000000..8b6fb471 Binary files /dev/null and b/doc/images/env.png differ diff --git a/doc/images/logo.ascii b/doc/images/logo.ascii new file mode 100644 index 00000000..5dfb75d0 --- /dev/null +++ b/doc/images/logo.ascii @@ -0,0 +1,5 @@ +,adPPYba, ,adPPYba, ,adPPYYba, 8b,dPPYba, +I8[ "" a8" "8a "" `Y8 88P' "Y8 + `"Y8ba, 8b d8 ,adPPPPP88 88 +aa ]8I "8a, ,a8" 88, ,88 88 +`"YbbdP"' `"YbbdP"' `"8bbdP"Y8 88 diff --git a/doc/images/logo.png b/doc/images/logo.png new file mode 100644 index 00000000..7d62ffc3 Binary files /dev/null and b/doc/images/logo.png differ diff --git a/doc/images/qq.jpg b/doc/images/qq.jpg new file mode 100644 index 00000000..2638081c Binary files /dev/null and b/doc/images/qq.jpg differ diff --git a/doc/images/qq.png b/doc/images/qq.png new file mode 100644 index 
00000000..d326c5e7 Binary files /dev/null and b/doc/images/qq.png differ diff --git a/doc/images/structure.png b/doc/images/structure.png new file mode 100644 index 00000000..4fc61c7b Binary files /dev/null and b/doc/images/structure.png differ diff --git a/doc/images/vim_plugin.png b/doc/images/vim_plugin.png new file mode 100644 index 00000000..cf5c5b96 Binary files /dev/null and b/doc/images/vim_plugin.png differ diff --git a/doc/indexing.md b/doc/indexing.md new file mode 100644 index 00000000..ec228bb2 --- /dev/null +++ b/doc/indexing.md @@ -0,0 +1,218 @@ + +# 索引优化建议 + +以下优化算法基于个人当前理解,能力有限,如有偏颇还请斧正。 + +## 简单查询索引优化 + +### 等值查询优化 + +* 单列等值查询,为该等值列加索引 +* 多列等值查询,每列求取散粒度,按从大到小排序取前N列添加到索引(N可配置) + +```sql +SELECT * FROM tbl WHERE a = 123; +SELECT * FROM tbl WHERE a = 123 AND b = 456; +SELECT * FROM tbl WHERE a IS NULL; +SELECT * FROM tbl WHERE a <=> 123; +SELECT * FROM tbl WHERE a IS TRUE; +SELECT * FROM tbl WHERE a IS FALSE; +SELECT * FROM tbl WHERE a IS NOT TRUE; +SELECT * FROM tbl WHERE a IS NOT FALSE; +SELECT * FROM tbl WHERE a IN ("xxx"); -- IN单值 +``` + +### 非等值查询优化 + +* 单列非等值查询,为该非等值列加索引 +* 多列非等值查询,每列求取散粒度,为散粒度最大的列加索引。 + +思考:对于多列非等值,为filtered最小列加索引可能比较好。因为输入可变,所以现在只按散粒度排序。对于高版本MySQL如果开启了Index Merge,考虑为非等值列加单列索引可能会比较好。 + +```sql +SELECT * FROM tbl WHERE a >= 123 -- <, <=, >=, >, !=, <> +SELECT * FROM tbl WHERE a BETWEEN 22 AND 44; -- NOT BETWEEN +SELECT * FROM tbl WHERE a LIKE 'blah%'; -- NOT LIKE +SELECT * FROM tbl WHERE a IS NOT NULL; +SELECT * FROM tbl WHERE a IN ("xxx"); -- IN多值 +``` + +### 等值 & 非等值组合查询优化 + +1. 先按`等值查询优化`为等值列添加索引 +2. 
再将`非等值查询优化`的列追加在等值列索引后 + +```sql +SELECT * FROM tbl WHERE c = 9 AND a > 12 AND b > 345; -- INDEX(c, a)或INDEX(c, b) +``` + +### OR操作符 + +如果使用了OR操作符,即使OR两边是简单的查询条件也会对优化器带来很大的困难。一般对OR的优化需要依赖UNION ALL或Index Merge等多索引访问技术来实现。SOAR目前不会对使用OR操作符连接的字段进行索引优化。 + +### GROUP BY子句 + +GROUP BY相关字段能否加入索引列表需要依赖WHERE子句中的条件。当查询指定了WHERE条件,在满足WHERE子句只有等值查询时,可以对GROUP BY字段添加索引。当查询未指定WHERE条件,可以直接对GROUP BY字段添加索引。 + +* 按照GROPU BY的先后顺序添加索引 +* GROUP BY字段出现常量,数学运算或函数运算时会给出警告 + +### ORDER BY子句 + +ORDER BY相关字段能否加入索引列表需要依赖WHERE子句和GROUP BY子句中的条件。当查询指定了WHERE条件,在满足WHERE子句只有等值查询且无GROUP BY子句时,可以对ORDER BY字段添加索引。当查询未指定WHERE条件,在满足无GROUP BY子句时,可以对ORDER BY字段添加索引。 + +* 多个字段之间如果指定顺序相同,按照ORDER BY的先后顺序添加索引 +* 多个字段之间如果指定顺序不同,所有ORDER BY字段都不添加索引 +* ORDER BY字段出现常量,数学运算或函数运算时会给出警告 + +## 复杂查询索引优化 + +### JOIN索引优化算法 + +* LEFT JOIN为右表加索引 +* RIGHT JOIN为左表加索引 +* INNER JOIN两张表都加索引 +* NATURAL的处理方法参考前三条 +* STRAIGHT_JOIN为后面的表加索引 + +### SUBQUERY和UNION的复杂查询 + +对于使用了IN,EXIST等词的SUBQUERY或UNION类型的SQL,先将其拆成多条独立的SELECT语句。然后基于上面简单查询索引优化算法,对单条SELECT查询进行优化。SUBQUERY的连接列暂不考虑添加索引。 + + +```sql +SELECT * FROM film WHERE language_id = (SELECT language_id FROM language LIMIT 1); + +1. SELECT * FROM film; +2. SELECT language_id FROM language LIMIT 1; +``` + +```sql +SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id +UNION +SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id; + +1. SELECT * FROM city a LEFT JOIN country b ON a.country_id=b.country_id; +2. SELECT * FROM city a RIGHT JOIN country b ON a.country_id=b.country_id; +``` + +## 无法使用索引的情况 + +如下类型的查询条件无法使用索引或SOAR无法给出正确的索引建议。 + +```sql +-- MySQL无法使用索引 +SELECT * FROM tbl WHERE a LIKE '%blah%'; +SELECT * FROM tbl WHERE a IN (SELECT...) 
+SELECT * FROM tbl WHERE DATE(dt) = 'xxx' +SELECT * FROM tbl WHERE LOWER(s) = 'xxx' +SELECT * FROM tbl WHERE CAST(s …) = 'xxx' +SELECT * FROM tbl where a NOT IN() +-- SOAR不支持的索引建议 +SELECT * FROM tbl WHERE a = 'xxx' COLLATE xxx -- vitess语法暂不支持 +SELECT * FROM tbl ORDER BY a ASC, b DESC -- 8.0+支持 +SELECT * FROM tbl WHERE `date` LIKE '2016-12%' -- 时间数据类型隐式类型转换 +``` + +## 索引长度限制 + +由于索引长度受数据库版本及不同配置参数影响,参考[InnoDB限制](https://dev.mysql.com/doc/refman/8.0/en/innodb-restrictions.html)。这里将索引长度限制定义为可配置值,用户可以根据实际情况进行设置。 + +* 通过-max-index-bytes配置每列索引最大长度,默认为767 Bytes +* 超过单列索引最大长度限制后程序会自动添加该列的前缀索引(max-index-bytes/CHARSET_Maxlen) +* 通过-max-index-bytes-percolumn配置多列索引加各最大长度,默认为3072 Bytes +* 超过多列索引最大长度限制后,由程序生成的ALTER语句会将每列前缀索引长度指定为N,用户自行调整 + +```sql +ALTER TABLE `sakila`.`film_text` add index `idx_description` (`description`(255)) ; + +``` + +## 更新语句转换为只读查询 + +SOAR支持将DELETE, UPDATE, INSERT, REPLACE四种类型语句转换为SELECT查询。对转换后的SELECT查询进行索引优化。以下为转换示例。 + +```sql +UPDATE film SET length = 10 WHERE language_id = 20; + +SELECT * FROM film WHERE language_id = 20; +``` + +```sql +DELETE FROM film WHERE length > 100; + +SELECT * FROM film WHERE length > 100; +``` + +```sql +INSERT INTO city (country_id) SELECT country_id FROM country; + +SELECT country_id FROM country; +``` + +```sql +REPLACE INTO city (country_id) SELECT country_id FROM country; + +SELECT country_id FROM country; +``` + +## 散粒度计算 + +### 计算公式 + +`Cardinality = ColumnDistinctCount/TableTotalRows * 100%` + +由于直接对线上表进行COUNT(DISTINCT)操作会影响数据库请求执行效率,因此默认各列的散粒度均为1。用户可以通过指定`-sampling`参数开启数据采样。SOAR会将线上数据随机采样至测试环境求取散粒度。 + +### 数据采样算法 + +以下说明摘抄自PostgreSQL数据直方图采样算法。默认k(-sampling-statistic-target)设置为100,即最多采样3万行记录。 + +```text + The following choice of minrows is based on the paper + "Random sampling for histogram construction: how much is enough?" + by Surajit Chaudhuri, Rajeev Motwani and Vivek Narasayya, in + Proceedings of ACM SIGMOD International Conference on Management + of Data, 1998, Pages 436-447. 
Their Corollary 1 to Theorem 5 + says that for table size n, histogram size k, maximum relative + error in bin size f, and error probability gamma, the minimum + random sample size is + r = 4 * k * ln(2*n/gamma) / f^2 + Taking f = 0.5, gamma = 0.01, n = 10^6 rows, we obtain + r = 305.82 * k + Note that because of the log function, the dependence on n is + quite weak; even at n = 10^12, a 300*k sample gives <= 0.66 + bin size error with probability 0.99. So there's no real need to + scale for n, which is a good thing because we don't necessarily + know it at this point. +``` + +### 随机采样 + +随机采样使用的SQL如下,其中变量`r`, `n`的含义见上面的说明。 + +```sql +SELECT * FROM `tbl` WHERE RAND() < r LIMIT n; +``` + +## 索引去重 + +### 检查步骤 +1. 为查询语句可能使用索引的字段添加索引 +2. 枚举用到的所有库表的已知索引 +3. 判断所有新加的索引是否与已知索引重复 +4. 判断所有新加的索引之间是否存在索引重复 + + +### 检查规则 + +* PRIMARY > UNIQUE > KEY +* 索引名称相同,即: idxA == idxA +* (a, b) > (a) +* (a, b), (b, a) 会给出警告,用户自行判断是否重复 + +## 不足 + +* 目前只支持针对InnoDB引擎添加索引建议,不支持FULLTEXT, SPATIAL等其他类型索引 +* 暂不支持索引覆盖(Covering) +* 暂不支持Index Merge情况下的索引建议 diff --git a/doc/install.md b/doc/install.md new file mode 100644 index 00000000..3c8e6b1c --- /dev/null +++ b/doc/install.md @@ -0,0 +1,52 @@ +## 下载二进制安装包 + +```bash +TODO: 开源后补充下载release版本链接 +wget https://github.com/XiaoMi/soar/archive/v0.7.0.zip +``` + +## 源码安装 + +### 依赖软件 + +一般依赖 + +* Go 1.10+ +* git + +高级依赖(仅面向开发人员) + +* [mysql](https://dev.mysql.com/doc/refman/8.0/en/mysql.html) 客户端版本需要与容器中MySQL版本相同,避免出现由于认证原因导致无法连接问题 +* [docker](https://docs.docker.com/engine/reference/commandline/cli/) MySQL Server测试容器管理 +* [govendor](https://github.com/kardianos/govendor) Go包管理 +* [retool](https://github.com/twitchtv/retool) 依赖外部代码质量静态检查工具二进制文件管理 + +### 生成二进制文件 + +```bash +TODO: 开源后可直接从github执行go get下载,未开源前需要git clone到指定路径 +go get github.com/XiaoMi/soar +cd ${GOPATH}/github.com/XiaoMi/soar && make +``` + +### 开发调试 + +如下指令如果您没有精力参与SOAR的开发可以跳过。 + +* make deps 依赖检查 +* make vitess 升级Vitess Parser依赖 +* make tidb 升级TiDB Parser依赖 +* make fmt 代码格式化,统一风格 +* make 
lint 代码质量检查 +* make docker 启动一个MySQL测试容器,可用于测试依赖元数据检查的功能或不同版本MySQL差异 +* make test 运行所有的测试用例 +* make cover 代码测试覆盖度检查 +* make doc 自动生成命令行参数中-list-XX相关文档 +* make daily 每日构建,时刻跟进Vitess, TiDB依赖变化 +* make release 生成Linux, Windoes, Mac发布版本 + +## 安装验证 + +```bash +echo 'select * from film' | ./soar +``` diff --git a/doc/install_en.md b/doc/install_en.md new file mode 100644 index 00000000..e6ea5b11 --- /dev/null +++ b/doc/install_en.md @@ -0,0 +1,19 @@ +## Get Released Binary + +```bash +TODO: +wget http://... +``` + +## Build From Source + +```bash +go get github.com/XiaoMi/soar +cd $GOPATH/github.com/XiaoMi/soar && make +``` + +## Simple Test Case + +```bash +echo 'select * from film' | ./soar +``` diff --git a/doc/js/pretty.js b/doc/js/pretty.js new file mode 100644 index 00000000..8abbe459 --- /dev/null +++ b/doc/js/pretty.js @@ -0,0 +1,1110 @@ +! function(e, E) { + "object" == typeof exports && "object" == typeof module ? module.exports = E() : "function" == typeof define && define.amd ? define([], E) : "object" == typeof exports ? exports.sqlFormatter = E() : e.sqlFormatter = E() +}(this, function() { + return function(e) { + function E(n) { + if (t[n]) return t[n].exports; + var r = t[n] = { + exports: {}, + id: n, + loaded: !1 + }; + return e[n].call(r.exports, r, r.exports, E), r.loaded = !0, r.exports + } + var t = {}; + return E.m = e, E.c = t, E.p = "", E(0) + }([function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? 
e : { + "default": e + } + } + E.__esModule = !0; + var r = t(18), + T = n(r), + R = t(19), + o = n(R), + N = t(20), + A = n(N), + I = t(21), + O = n(I); + E["default"] = { + format: function(e, E) { + switch (E = E || {}, E.language) { + case "db2": + return new T["default"](E).format(e); + case "n1ql": + return new o["default"](E).format(e); + case "pl/sql": + return new A["default"](E).format(e); + case "sql": + case void 0: + return new O["default"](E).format(e); + default: + throw Error("Unsupported SQL dialect: " + E.language) + } + } + }, e.exports = E["default"] + }, function(e, E) { + "use strict"; + E.__esModule = !0, E["default"] = function(e, E) { + if (!(e instanceof E)) throw new TypeError("Cannot call a class as a function") + } + }, function(e, E, t) { + var n = t(39), + r = "object" == typeof self && self && self.Object === Object && self, + T = n || r || Function("return this")(); + e.exports = T + }, function(e, E, t) { + function n(e, E) { + var t = T(e, E); + return r(t) ? t : void 0 + } + var r = t(33), + T = t(41); + e.exports = n + }, function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? e : { + "default": e + } + } + E.__esModule = !0; + var r = t(1), + T = n(r), + R = t(66), + o = n(R), + N = t(7), + A = n(N), + I = t(15), + O = n(I), + i = t(16), + S = n(i), + u = t(17), + L = n(u), + C = function() { + function e(E, t) { + (0, T["default"])(this, e), this.cfg = E || {}, this.indentation = new O["default"](this.cfg.indent), this.inlineBlock = new S["default"], this.params = new L["default"](this.cfg.params), this.tokenizer = t, this.previousReservedWord = {} + } + return e.prototype.format = function(e) { + var E = this.tokenizer.tokenize(e), + t = this.getFormattedQueryFromTokens(E); + return t.trim() + }, e.prototype.getFormattedQueryFromTokens = function(e) { + var E = this, + t = ""; + return e.forEach(function(n, r) { + n.type !== A["default"].WHITESPACE && (n.type === A["default"].LINE_COMMENT ? 
t = E.formatLineComment(n, t) : n.type === A["default"].BLOCK_COMMENT ? t = E.formatBlockComment(n, t) : n.type === A["default"].RESERVED_TOPLEVEL ? (t = E.formatToplevelReservedWord(n, t), E.previousReservedWord = n) : n.type === A["default"].RESERVED_NEWLINE ? (t = E.formatNewlineReservedWord(n, t), E.previousReservedWord = n) : n.type === A["default"].RESERVED ? (t = E.formatWithSpaces(n, t), E.previousReservedWord = n) : t = n.type === A["default"].OPEN_PAREN ? E.formatOpeningParentheses(e, r, t) : n.type === A["default"].CLOSE_PAREN ? E.formatClosingParentheses(n, t) : n.type === A["default"].PLACEHOLDER ? E.formatPlaceholder(n, t) : "," === n.value ? E.formatComma(n, t) : ":" === n.value ? E.formatWithSpaceAfter(n, t) : "." === n.value || ";" === n.value ? E.formatWithoutSpaces(n, t) : E.formatWithSpaces(n, t)) + }), t + }, e.prototype.formatLineComment = function(e, E) { + return this.addNewline(E + e.value) + }, e.prototype.formatBlockComment = function(e, E) { + return this.addNewline(this.addNewline(E) + this.indentComment(e.value)) + }, e.prototype.indentComment = function(e) { + return e.replace(/\n/g, "\n" + this.indentation.getIndent()) + }, e.prototype.formatToplevelReservedWord = function(e, E) { + return this.indentation.decreaseTopLevel(), E = this.addNewline(E), this.indentation.increaseToplevel(), E += this.equalizeWhitespace(e.value), this.addNewline(E) + }, e.prototype.formatNewlineReservedWord = function(e, E) { + return this.addNewline(E) + this.equalizeWhitespace(e.value) + " " + }, e.prototype.equalizeWhitespace = function(e) { + return e.replace(/\s+/g, " ") + }, e.prototype.formatOpeningParentheses = function(e, E, t) { + var n = e[E - 1]; + return n && n.type !== A["default"].WHITESPACE && n.type !== A["default"].OPEN_PAREN && (t = (0, o["default"])(t)), t += e[E].value, this.inlineBlock.beginIfPossible(e, E), this.inlineBlock.isActive() || (this.indentation.increaseBlockLevel(), t = this.addNewline(t)), t + }, 
e.prototype.formatClosingParentheses = function(e, E) { + return this.inlineBlock.isActive() ? (this.inlineBlock.end(), this.formatWithSpaceAfter(e, E)) : (this.indentation.decreaseBlockLevel(), this.formatWithSpaces(e, this.addNewline(E))) + }, e.prototype.formatPlaceholder = function(e, E) { + return E + this.params.get(e) + " " + }, e.prototype.formatComma = function(e, E) { + return E = (0, o["default"])(E) + e.value + " ", this.inlineBlock.isActive() ? E : /^LIMIT$/i.test(this.previousReservedWord.value) ? E : this.addNewline(E) + }, e.prototype.formatWithSpaceAfter = function(e, E) { + return (0, o["default"])(E) + e.value + " " + }, e.prototype.formatWithoutSpaces = function(e, E) { + return (0, o["default"])(E) + e.value + }, e.prototype.formatWithSpaces = function(e, E) { + return E + e.value + " " + }, e.prototype.addNewline = function(e) { + return (0, o["default"])(e) + "\n" + this.indentation.getIndent() + }, e + }(); + E["default"] = C, e.exports = E["default"] + }, function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? 
e : { + "default": e + } + } + E.__esModule = !0; + var r = t(1), + T = n(r), + R = t(58), + o = n(R), + N = t(53), + A = n(N), + I = t(7), + O = n(I), + i = function() { + function e(E) { + (0, T["default"])(this, e), this.WHITESPACE_REGEX = /^(\s+)/, this.NUMBER_REGEX = /^((-\s*)?[0-9]+(\.[0-9]+)?|0x[0-9a-fA-F]+|0b[01]+)\b/, this.OPERATOR_REGEX = /^(!=|<>|==|<=|>=|!<|!>|\|\||::|->>|->|~~\*|~~|!~~\*|!~~|~\*|!~\*|!~|.)/, this.BLOCK_COMMENT_REGEX = /^(\/\*[^]*?(?:\*\/|$))/, this.LINE_COMMENT_REGEX = this.createLineCommentRegex(E.lineCommentTypes), this.RESERVED_TOPLEVEL_REGEX = this.createReservedWordRegex(E.reservedToplevelWords), this.RESERVED_NEWLINE_REGEX = this.createReservedWordRegex(E.reservedNewlineWords), this.RESERVED_PLAIN_REGEX = this.createReservedWordRegex(E.reservedWords), this.WORD_REGEX = this.createWordRegex(E.specialWordChars), this.STRING_REGEX = this.createStringRegex(E.stringTypes), this.OPEN_PAREN_REGEX = this.createParenRegex(E.openParens), this.CLOSE_PAREN_REGEX = this.createParenRegex(E.closeParens), this.INDEXED_PLACEHOLDER_REGEX = this.createPlaceholderRegex(E.indexedPlaceholderTypes, "[0-9]*"), this.IDENT_NAMED_PLACEHOLDER_REGEX = this.createPlaceholderRegex(E.namedPlaceholderTypes, "[a-zA-Z0-9._$]+"), this.STRING_NAMED_PLACEHOLDER_REGEX = this.createPlaceholderRegex(E.namedPlaceholderTypes, this.createStringPattern(E.stringTypes)) + } + return e.prototype.createLineCommentRegex = function(e) { + return RegExp("^((?:" + e.map(function(e) { + return (0, A["default"])(e) + }).join("|") + ").*?(?:\n|$))") + }, e.prototype.createReservedWordRegex = function(e) { + var E = e.join("|").replace(/ /g, "\\s+"); + return RegExp("^(" + E + ")\\b", "i") + }, e.prototype.createWordRegex = function() { + var e = arguments.length > 0 && void 0 !== arguments[0] ? 
arguments[0] : []; + return RegExp("^([\\w" + e.join("") + "]+)") + }, e.prototype.createStringRegex = function(e) { + return RegExp("^(" + this.createStringPattern(e) + ")") + }, e.prototype.createStringPattern = function(e) { + var E = { + "``": "((`[^`]*($|`))+)", + "[]": "((\\[[^\\]]*($|\\]))(\\][^\\]]*($|\\]))*)", + '""': '(("[^"\\\\]*(?:\\\\.[^"\\\\]*)*("|$))+)', + "''": "(('[^'\\\\]*(?:\\\\.[^'\\\\]*)*('|$))+)", + "N''": "((N'[^N'\\\\]*(?:\\\\.[^N'\\\\]*)*('|$))+)" + }; + return e.map(function(e) { + return E[e] + }).join("|") + }, e.prototype.createParenRegex = function(e) { + var E = this; + return RegExp("^(" + e.map(function(e) { + return E.escapeParen(e) + }).join("|") + ")", "i") + }, e.prototype.escapeParen = function(e) { + return 1 === e.length ? (0, A["default"])(e) : "\\b" + e + "\\b" + }, e.prototype.createPlaceholderRegex = function(e, E) { + if ((0, o["default"])(e)) return !1; + var t = e.map(A["default"]).join("|"); + return RegExp("^((?:" + t + ")(?:" + E + "))") + }, e.prototype.tokenize = function(e) { + for (var E = [], t = void 0; e.length;) t = this.getNextToken(e, t), e = e.substring(t.value.length), E.push(t); + return E + }, e.prototype.getNextToken = function(e, E) { + return this.getWhitespaceToken(e) || this.getCommentToken(e) || this.getStringToken(e) || this.getOpenParenToken(e) || this.getCloseParenToken(e) || this.getPlaceholderToken(e) || this.getNumberToken(e) || this.getReservedWordToken(e, E) || this.getWordToken(e) || this.getOperatorToken(e) + }, e.prototype.getWhitespaceToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].WHITESPACE, + regex: this.WHITESPACE_REGEX + }) + }, e.prototype.getCommentToken = function(e) { + return this.getLineCommentToken(e) || this.getBlockCommentToken(e) + }, e.prototype.getLineCommentToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].LINE_COMMENT, + regex: this.LINE_COMMENT_REGEX + }) + }, 
e.prototype.getBlockCommentToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].BLOCK_COMMENT, + regex: this.BLOCK_COMMENT_REGEX + }) + }, e.prototype.getStringToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].STRING, + regex: this.STRING_REGEX + }) + }, e.prototype.getOpenParenToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].OPEN_PAREN, + regex: this.OPEN_PAREN_REGEX + }) + }, e.prototype.getCloseParenToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].CLOSE_PAREN, + regex: this.CLOSE_PAREN_REGEX + }) + }, e.prototype.getPlaceholderToken = function(e) { + return this.getIdentNamedPlaceholderToken(e) || this.getStringNamedPlaceholderToken(e) || this.getIndexedPlaceholderToken(e) + }, e.prototype.getIdentNamedPlaceholderToken = function(e) { + return this.getPlaceholderTokenWithKey({ + input: e, + regex: this.IDENT_NAMED_PLACEHOLDER_REGEX, + parseKey: function(e) { + return e.slice(1) + } + }) + }, e.prototype.getStringNamedPlaceholderToken = function(e) { + var E = this; + return this.getPlaceholderTokenWithKey({ + input: e, + regex: this.STRING_NAMED_PLACEHOLDER_REGEX, + parseKey: function(e) { + return E.getEscapedPlaceholderKey({ + key: e.slice(2, -1), + quoteChar: e.slice(-1) + }) + } + }) + }, e.prototype.getIndexedPlaceholderToken = function(e) { + return this.getPlaceholderTokenWithKey({ + input: e, + regex: this.INDEXED_PLACEHOLDER_REGEX, + parseKey: function(e) { + return e.slice(1) + } + }) + }, e.prototype.getPlaceholderTokenWithKey = function(e) { + var E = e.input, + t = e.regex, + n = e.parseKey, + r = this.getTokenOnFirstMatch({ + input: E, + regex: t, + type: O["default"].PLACEHOLDER + }); + return r && (r.key = n(r.value)), r + }, e.prototype.getEscapedPlaceholderKey = function(e) { + var E = e.key, + t = e.quoteChar; + return E.replace(RegExp((0, A["default"])("\\") + t, "g"), t) + }, 
e.prototype.getNumberToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].NUMBER, + regex: this.NUMBER_REGEX + }) + }, e.prototype.getOperatorToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].OPERATOR, + regex: this.OPERATOR_REGEX + }) + }, e.prototype.getReservedWordToken = function(e, E) { + if (!E || !E.value || "." !== E.value) return this.getToplevelReservedToken(e) || this.getNewlineReservedToken(e) || this.getPlainReservedToken(e) + }, e.prototype.getToplevelReservedToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].RESERVED_TOPLEVEL, + regex: this.RESERVED_TOPLEVEL_REGEX + }) + }, e.prototype.getNewlineReservedToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].RESERVED_NEWLINE, + regex: this.RESERVED_NEWLINE_REGEX + }) + }, e.prototype.getPlainReservedToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].RESERVED, + regex: this.RESERVED_PLAIN_REGEX + }) + }, e.prototype.getWordToken = function(e) { + return this.getTokenOnFirstMatch({ + input: e, + type: O["default"].WORD, + regex: this.WORD_REGEX + }) + }, e.prototype.getTokenOnFirstMatch = function(e) { + var E = e.input, + t = e.type, + n = e.regex, + r = E.match(n); + if (r) return { + type: t, + value: r[1] + } + }, e + }(); + E["default"] = i, e.exports = E["default"] + }, function(e, E) { + function t(e) { + var E = typeof e; + return null != e && ("object" == E || "function" == E) + } + e.exports = t + }, function(e, E) { + "use strict"; + E.__esModule = !0, E["default"] = { + WHITESPACE: "whitespace", + WORD: "word", + STRING: "string", + RESERVED: "reserved", + RESERVED_TOPLEVEL: "reserved-toplevel", + RESERVED_NEWLINE: "reserved-newline", + OPERATOR: "operator", + OPEN_PAREN: "open-paren", + CLOSE_PAREN: "close-paren", + LINE_COMMENT: "line-comment", + BLOCK_COMMENT: "block-comment", + NUMBER: 
"number", + PLACEHOLDER: "placeholder" + }, e.exports = E["default"] + }, function(e, E, t) { + function n(e) { + return null != e && T(e.length) && !r(e) + } + var r = t(12), + T = t(59); + e.exports = n + }, function(e, E, t) { + function n(e) { + return null == e ? "" : r(e) + } + var r = t(10); + e.exports = n + }, function(e, E, t) { + function n(e) { + if ("string" == typeof e) return e; + if (T(e)) return N ? N.call(e) : ""; + var E = e + ""; + return "0" == E && 1 / e == -R ? "-0" : E + } + var r = t(26), + T = t(14), + R = 1 / 0, + o = r ? r.prototype : void 0, + N = o ? o.toString : void 0; + e.exports = n + }, function(e, E) { + function t(e) { + if (null != e) { + try { + return r.call(e) + } catch (E) {} + try { + return e + "" + } catch (E) {} + } + return "" + } + var n = Function.prototype, + r = n.toString; + e.exports = t + }, function(e, E, t) { + function n(e) { + var E = r(e) ? N.call(e) : ""; + return E == T || E == R + } + var r = t(6), + T = "[object Function]", + R = "[object GeneratorFunction]", + o = Object.prototype, + N = o.toString; + e.exports = n + }, function(e, E) { + function t(e) { + return null != e && "object" == typeof e + } + e.exports = t + }, function(e, E, t) { + function n(e) { + return "symbol" == typeof e || r(e) && o.call(e) == T + } + var r = t(13), + T = "[object Symbol]", + R = Object.prototype, + o = R.toString; + e.exports = n + }, function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? 
e : { + "default": e + } + } + E.__esModule = !0; + var r = t(1), + T = n(r), + R = t(61), + o = n(R), + N = t(60), + A = n(N), + I = "top-level", + O = "block-level", + i = function() { + function e(E) { + (0, T["default"])(this, e), this.indent = E || " ", this.indentTypes = [] + } + return e.prototype.getIndent = function() { + return (0, o["default"])(this.indent, this.indentTypes.length) + }, e.prototype.increaseToplevel = function() { + this.indentTypes.push(I) + }, e.prototype.increaseBlockLevel = function() { + this.indentTypes.push(O) + }, e.prototype.decreaseTopLevel = function() { + (0, A["default"])(this.indentTypes) === I && this.indentTypes.pop() + }, e.prototype.decreaseBlockLevel = function() { + for (; this.indentTypes.length > 0;) { + var e = this.indentTypes.pop(); + if (e !== I) break + } + }, e + }(); + E["default"] = i, e.exports = E["default"] + }, function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? e : { + "default": e + } + } + E.__esModule = !0; + var r = t(1), + T = n(r), + R = t(7), + o = n(R), + N = 50, + A = function() { + function e() { + (0, T["default"])(this, e), this.level = 0 + } + return e.prototype.beginIfPossible = function(e, E) { + 0 === this.level && this.isInlineBlock(e, E) ? this.level = 1 : this.level > 0 ? 
this.level++ : this.level = 0 + }, e.prototype.end = function() { + this.level-- + }, e.prototype.isActive = function() { + return this.level > 0 + }, e.prototype.isInlineBlock = function(e, E) { + for (var t = 0, n = 0, r = E; e.length > r; r++) { + var T = e[r]; + if (t += T.value.length, t > N) return !1; + if (T.type === o["default"].OPEN_PAREN) n++; + else if (T.type === o["default"].CLOSE_PAREN && (n--, 0 === n)) return !0; + if (this.isForbiddenToken(T)) return !1 + } + return !1 + }, e.prototype.isForbiddenToken = function(e) { + var E = e.type, + t = e.value; + return E === o["default"].RESERVED_TOPLEVEL || E === o["default"].RESERVED_NEWLINE || E === o["default"].COMMENT || E === o["default"].BLOCK_COMMENT || ";" === t + }, e + }(); + E["default"] = A, e.exports = E["default"] + }, function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? e : { + "default": e + } + } + E.__esModule = !0; + var r = t(1), + T = n(r), + R = function() { + function e(E) { + (0, T["default"])(this, e), this.params = E, this.index = 0 + } + return e.prototype.get = function(e) { + var E = e.key, + t = e.value; + return this.params ? E ? this.params[E] : this.params[this.index++] : t + }, e + }(); + E["default"] = R, e.exports = E["default"] + }, function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? 
e : { + "default": e + } + } + E.__esModule = !0; + var r = t(1), + T = n(r), + R = t(4), + o = n(R), + N = t(5), + A = n(N), + I = ["ABS", "ACTIVATE", "ALIAS", "ALL", "ALLOCATE", "ALLOW", "ALTER", "ANY", "ARE", "ARRAY", "AS", "ASC", "ASENSITIVE", "ASSOCIATE", "ASUTIME", "ASYMMETRIC", "AT", "ATOMIC", "ATTRIBUTES", "AUDIT", "AUTHORIZATION", "AUX", "AUXILIARY", "AVG", "BEFORE", "BEGIN", "BETWEEN", "BIGINT", "BINARY", "BLOB", "BOOLEAN", "BOTH", "BUFFERPOOL", "BY", "CACHE", "CALL", "CALLED", "CAPTURE", "CARDINALITY", "CASCADED", "CASE", "CAST", "CCSID", "CEIL", "CEILING", "CHAR", "CHARACTER", "CHARACTER_LENGTH", "CHAR_LENGTH", "CHECK", "CLOB", "CLONE", "CLOSE", "CLUSTER", "COALESCE", "COLLATE", "COLLECT", "COLLECTION", "COLLID", "COLUMN", "COMMENT", "COMMIT", "CONCAT", "CONDITION", "CONNECT", "CONNECTION", "CONSTRAINT", "CONTAINS", "CONTINUE", "CONVERT", "CORR", "CORRESPONDING", "COUNT", "COUNT_BIG", "COVAR_POP", "COVAR_SAMP", "CREATE", "CROSS", "CUBE", "CUME_DIST", "CURRENT", "CURRENT_DATE", "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_LC_CTYPE", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_SERVER", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_TIMEZONE", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURRENT_USER", "CURSOR", "CYCLE", "DATA", "DATABASE", "DATAPARTITIONNAME", "DATAPARTITIONNUM", "DATE", "DAY", "DAYS", "DB2GENERAL", "DB2GENRL", "DB2SQL", "DBINFO", "DBPARTITIONNAME", "DBPARTITIONNUM", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DEFAULTS", "DEFINITION", "DELETE", "DENSERANK", "DENSE_RANK", "DEREF", "DESCRIBE", "DESCRIPTOR", "DETERMINISTIC", "DIAGNOSTICS", "DISABLE", "DISALLOW", "DISCONNECT", "DISTINCT", "DO", "DOCUMENT", "DOUBLE", "DROP", "DSSIZE", "DYNAMIC", "EACH", "EDITPROC", "ELEMENT", "ELSE", "ELSEIF", "ENABLE", "ENCODING", "ENCRYPTION", "END", "END-EXEC", "ENDING", "ERASE", "ESCAPE", "EVERY", "EXCEPTION", "EXCLUDING", "EXCLUSIVE", "EXEC", "EXECUTE", "EXISTS", "EXIT", "EXP", "EXPLAIN", "EXTENDED", "EXTERNAL", "EXTRACT", "FALSE", 
"FENCED", "FETCH", "FIELDPROC", "FILE", "FILTER", "FINAL", "FIRST", "FLOAT", "FLOOR", "FOR", "FOREIGN", "FREE", "FULL", "FUNCTION", "FUSION", "GENERAL", "GENERATED", "GET", "GLOBAL", "GOTO", "GRANT", "GRAPHIC", "GROUP", "GROUPING", "HANDLER", "HASH", "HASHED_VALUE", "HINT", "HOLD", "HOUR", "HOURS", "IDENTITY", "IF", "IMMEDIATE", "IN", "INCLUDING", "INCLUSIVE", "INCREMENT", "INDEX", "INDICATOR", "INDICATORS", "INF", "INFINITY", "INHERIT", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INT", "INTEGER", "INTEGRITY", "INTERSECTION", "INTERVAL", "INTO", "IS", "ISOBID", "ISOLATION", "ITERATE", "JAR", "JAVA", "KEEP", "KEY", "LABEL", "LANGUAGE", "LARGE", "LATERAL", "LC_CTYPE", "LEADING", "LEAVE", "LEFT", "LIKE", "LINKTYPE", "LN", "LOCAL", "LOCALDATE", "LOCALE", "LOCALTIME", "LOCALTIMESTAMP", "LOCATOR", "LOCATORS", "LOCK", "LOCKMAX", "LOCKSIZE", "LONG", "LOOP", "LOWER", "MAINTAINED", "MATCH", "MATERIALIZED", "MAX", "MAXVALUE", "MEMBER", "MERGE", "METHOD", "MICROSECOND", "MICROSECONDS", "MIN", "MINUTE", "MINUTES", "MINVALUE", "MOD", "MODE", "MODIFIES", "MODULE", "MONTH", "MONTHS", "MULTISET", "NAN", "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NEW_TABLE", "NEXTVAL", "NO", "NOCACHE", "NOCYCLE", "NODENAME", "NODENUMBER", "NOMAXVALUE", "NOMINVALUE", "NONE", "NOORDER", "NORMALIZE", "NORMALIZED", "NOT", "NULL", "NULLIF", "NULLS", "NUMERIC", "NUMPARTS", "OBID", "OCTET_LENGTH", "OF", "OFFSET", "OLD", "OLD_TABLE", "ON", "ONLY", "OPEN", "OPTIMIZATION", "OPTIMIZE", "OPTION", "ORDER", "OUT", "OUTER", "OVER", "OVERLAPS", "OVERLAY", "OVERRIDING", "PACKAGE", "PADDED", "PAGESIZE", "PARAMETER", "PART", "PARTITION", "PARTITIONED", "PARTITIONING", "PARTITIONS", "PASSWORD", "PATH", "PERCENTILE_CONT", "PERCENTILE_DISC", "PERCENT_RANK", "PIECESIZE", "PLAN", "POSITION", "POWER", "PRECISION", "PREPARE", "PREVVAL", "PRIMARY", "PRIQTY", "PRIVILEGES", "PROCEDURE", "PROGRAM", "PSID", "PUBLIC", "QUERY", "QUERYNO", "RANGE", "RANK", "READ", "READS", "REAL", "RECOVERY", "RECURSIVE", "REF", "REFERENCES", 
"REFERENCING", "REFRESH", "REGR_AVGX", "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", "RELEASE", "RENAME", "REPEAT", "RESET", "RESIGNAL", "RESTART", "RESTRICT", + O = ["ADD", "AFTER", "ALTER COLUMN", "ALTER TABLE", "DELETE FROM", "EXCEPT", "FETCH FIRST", "FROM", "GROUP BY", "GO", "HAVING", "INSERT INTO", "INTERSECT", "LIMIT", "ORDER BY", "SELECT", "SET CURRENT SCHEMA", "SET SCHEMA", "SET", "UNION ALL", "UPDATE", "VALUES", "WHERE"], + i = ["AND", "CROSS JOIN", "INNER JOIN", "JOIN", "LEFT JOIN", "LEFT OUTER JOIN", "OR", "OUTER JOIN", "RIGHT JOIN", "RIGHT OUTER JOIN"], + S = void 0, + u = function() { + function e(E) { + (0, T["default"])(this, e), this.cfg = E + } + return e.prototype.format = function(e) { + return S || (S = new A["default"]({ + reservedWords: I, + reservedToplevelWords: O, + reservedNewlineWords: i, + stringTypes: ['""', "''", "``", "[]"], + openParens: ["("], + closeParens: [")"], + indexedPlaceholderTypes: ["?"], + namedPlaceholderTypes: [":"], + lineCommentTypes: ["--"], + specialWordChars: ["#", "@"] + })), new o["default"](this.cfg, S).format(e) + }, e + }(); + E["default"] = u, e.exports = E["default"] + }, function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? 
e : { + "default": e + } + } + E.__esModule = !0; + var r = t(1), + T = n(r), + R = t(4), + o = n(R), + N = t(5), + A = n(N), + I = ["ALL", "ALTER", "ANALYZE", "AND", "ANY", "ARRAY", "AS", "ASC", "BEGIN", "BETWEEN", "BINARY", "BOOLEAN", "BREAK", "BUCKET", "BUILD", "BY", "CALL", "CASE", "CAST", "CLUSTER", "COLLATE", "COLLECTION", "COMMIT", "CONNECT", "CONTINUE", "CORRELATE", "COVER", "CREATE", "DATABASE", "DATASET", "DATASTORE", "DECLARE", "DECREMENT", "DELETE", "DERIVED", "DESC", "DESCRIBE", "DISTINCT", "DO", "DROP", "EACH", "ELEMENT", "ELSE", "END", "EVERY", "EXCEPT", "EXCLUDE", "EXECUTE", "EXISTS", "EXPLAIN", "FALSE", "FETCH", "FIRST", "FLATTEN", "FOR", "FORCE", "FROM", "FUNCTION", "GRANT", "GROUP", "GSI", "HAVING", "IF", "IGNORE", "ILIKE", "IN", "INCLUDE", "INCREMENT", "INDEX", "INFER", "INLINE", "INNER", "INSERT", "INTERSECT", "INTO", "IS", "JOIN", "KEY", "KEYS", "KEYSPACE", "KNOWN", "LAST", "LEFT", "LET", "LETTING", "LIKE", "LIMIT", "LSM", "MAP", "MAPPING", "MATCHED", "MATERIALIZED", "MERGE", "MINUS", "MISSING", "NAMESPACE", "NEST", "NOT", "NULL", "NUMBER", "OBJECT", "OFFSET", "ON", "OPTION", "OR", "ORDER", "OUTER", "OVER", "PARSE", "PARTITION", "PASSWORD", "PATH", "POOL", "PREPARE", "PRIMARY", "PRIVATE", "PRIVILEGE", "PROCEDURE", "PUBLIC", "RAW", "REALM", "REDUCE", "RENAME", "RETURN", "RETURNING", "REVOKE", "RIGHT", "ROLE", "ROLLBACK", "SATISFIES", "SCHEMA", "SELECT", "SELF", "SEMI", "SET", "SHOW", "SOME", "START", "STATISTICS", "STRING", "SYSTEM", "THEN", "TO", "TRANSACTION", "TRIGGER", "TRUE", "TRUNCATE", "UNDER", "UNION", "UNIQUE", "UNKNOWN", "UNNEST", "UNSET", "UPDATE", "UPSERT", "USE", "USER", "USING", "VALIDATE", "VALUE", "VALUED", "VALUES", "VIA", "VIEW", "WHEN", "WHERE", "WHILE", "WITH", "WITHIN", "WORK", "XOR"], + O = ["DELETE FROM", "EXCEPT ALL", "EXCEPT", "EXPLAIN DELETE FROM", "EXPLAIN UPDATE", "EXPLAIN UPSERT", "FROM", "GROUP BY", "HAVING", "INFER", "INSERT INTO", "INTERSECT ALL", "INTERSECT", "LET", "LIMIT", "MERGE", "NEST", "ORDER BY", 
"PREPARE", "SELECT", "SET CURRENT SCHEMA", "SET SCHEMA", "SET", "UNION ALL", "UNION", "UNNEST", "UPDATE", "UPSERT", "USE KEYS", "VALUES", "WHERE"], + i = ["AND", "INNER JOIN", "JOIN", "LEFT JOIN", "LEFT OUTER JOIN", "OR", "OUTER JOIN", "RIGHT JOIN", "RIGHT OUTER JOIN", "XOR"], + S = void 0, + u = function() { + function e(E) { + (0, T["default"])(this, e), this.cfg = E + } + return e.prototype.format = function(e) { + return S || (S = new A["default"]({ + reservedWords: I, + reservedToplevelWords: O, + reservedNewlineWords: i, + stringTypes: ['""', "''", "``"], + openParens: ["(", "[", "{"], + closeParens: [")", "]", "}"], + namedPlaceholderTypes: ["$"], + lineCommentTypes: ["#", "--"] + })), new o["default"](this.cfg, S).format(e) + }, e + }(); + E["default"] = u, e.exports = E["default"] + }, function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? e : { + "default": e + } + } + E.__esModule = !0; + var r = t(1), + T = n(r), + R = t(4), + o = n(R), + N = t(5), + A = n(N), + I = ["A", "ACCESSIBLE", "AGENT", "AGGREGATE", "ALL", "ALTER", "ANY", "ARRAY", "AS", "ASC", "AT", "ATTRIBUTE", "AUTHID", "AVG", "BETWEEN", "BFILE_BASE", "BINARY_INTEGER", "BINARY", "BLOB_BASE", "BLOCK", "BODY", "BOOLEAN", "BOTH", "BOUND", "BULK", "BY", "BYTE", "C", "CALL", "CALLING", "CASCADE", "CASE", "CHAR_BASE", "CHAR", "CHARACTER", "CHARSET", "CHARSETFORM", "CHARSETID", "CHECK", "CLOB_BASE", "CLONE", "CLOSE", "CLUSTER", "CLUSTERS", "COALESCE", "COLAUTH", "COLLECT", "COLUMNS", "COMMENT", "COMMIT", "COMMITTED", "COMPILED", "COMPRESS", "CONNECT", "CONSTANT", "CONSTRUCTOR", "CONTEXT", "CONTINUE", "CONVERT", "COUNT", "CRASH", "CREATE", "CREDENTIAL", "CURRENT", "CURRVAL", "CURSOR", "CUSTOMDATUM", "DANGLING", "DATA", "DATE_BASE", "DATE", "DAY", "DECIMAL", "DEFAULT", "DEFINE", "DELETE", "DESC", "DETERMINISTIC", "DIRECTORY", "DISTINCT", "DO", "DOUBLE", "DROP", "DURATION", "ELEMENT", "ELSIF", "EMPTY", "ESCAPE", "EXCEPTIONS", "EXCLUSIVE", "EXECUTE", "EXISTS", "EXIT", 
"EXTENDS", "EXTERNAL", "EXTRACT", "FALSE", "FETCH", "FINAL", "FIRST", "FIXED", "FLOAT", "FOR", "FORALL", "FORCE", "FROM", "FUNCTION", "GENERAL", "GOTO", "GRANT", "GROUP", "HASH", "HEAP", "HIDDEN", "HOUR", "IDENTIFIED", "IF", "IMMEDIATE", "IN", "INCLUDING", "INDEX", "INDEXES", "INDICATOR", "INDICES", "INFINITE", "INSTANTIABLE", "INT", "INTEGER", "INTERFACE", "INTERVAL", "INTO", "INVALIDATE", "IS", "ISOLATION", "JAVA", "LANGUAGE", "LARGE", "LEADING", "LENGTH", "LEVEL", "LIBRARY", "LIKE", "LIKE2", "LIKE4", "LIKEC", "LIMITED", "LOCAL", "LOCK", "LONG", "MAP", "MAX", "MAXLEN", "MEMBER", "MERGE", "MIN", "MINUS", "MINUTE", "MLSLABEL", "MOD", "MODE", "MONTH", "MULTISET", "NAME", "NAN", "NATIONAL", "NATIVE", "NATURAL", "NATURALN", "NCHAR", "NEW", "NEXTVAL", "NOCOMPRESS", "NOCOPY", "NOT", "NOWAIT", "NULL", "NULLIF", "NUMBER_BASE", "NUMBER", "OBJECT", "OCICOLL", "OCIDATE", "OCIDATETIME", "OCIDURATION", "OCIINTERVAL", "OCILOBLOCATOR", "OCINUMBER", "OCIRAW", "OCIREF", "OCIREFCURSOR", "OCIROWID", "OCISTRING", "OCITYPE", "OF", "OLD", "ON", "ONLY", "OPAQUE", "OPEN", "OPERATOR", "OPTION", "ORACLE", "ORADATA", "ORDER", "ORGANIZATION", "ORLANY", "ORLVARY", "OTHERS", "OUT", "OVERLAPS", "OVERRIDING", "PACKAGE", "PARALLEL_ENABLE", "PARAMETER", "PARAMETERS", "PARENT", "PARTITION", "PASCAL", "PCTFREE", "PIPE", "PIPELINED", "PLS_INTEGER", "PLUGGABLE", "POSITIVE", "POSITIVEN", "PRAGMA", "PRECISION", "PRIOR", "PRIVATE", "PROCEDURE", "PUBLIC", "RAISE", "RANGE", "RAW", "READ", "REAL", "RECORD", "REF", "REFERENCE", "RELEASE", "RELIES_ON", "REM", "REMAINDER", "RENAME", "RESOURCE", "RESULT_CACHE", "RESULT", "RETURN", "RETURNING", "REVERSE", "REVOKE", "ROLLBACK", "ROW", "ROWID", "ROWNUM", "ROWTYPE", "SAMPLE", "SAVE", "SAVEPOINT", "SB1", "SB2", "SB4", "SECOND", "SEGMENT", "SELF", "SEPARATE", "SEQUENCE", "SERIALIZABLE", "SHARE", "SHORT", "SIZE_T", "SIZE", "SMALLINT", "SOME", "SPACE", "SPARSE", "SQL", "SQLCODE", "SQLDATA", "SQLERRM", "SQLNAME", "SQLSTATE", "STANDARD", "START", "STATIC", "STDDEV", 
"STORED", "STRING", "STRUCT", "STYLE", "SUBMULTISET", "SUBPARTITION", "SUBSTITUTABLE", "SUBTYPE", "SUCCESSFUL", "SUM", "SYNONYM", "SYSDATE", "TABAUTH", "TABLE", "TDO", "THE", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_ABBR", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TIMEZONE_REGION", "TO", "TRAILING", "TRANSACTION", "TRANSACTIONAL", "TRIGGER", "TRUE", "TRUSTED", "TYPE", "UB1", "UB2", "UB4", "UID", "UNDER", "UNIQUE", "UNPLUG", "UNSIGNED", "UNTRUSTED", "USE", "USER", "USING", "VALIDATE", "VALIST", "VALUE", "VARCHAR", "VARCHAR2", "VARIABLE", "VARIANCE", "VARRAY", "VARYING", "VIEW", "VIEWS", "VOID", "WHENEVER", "WHILE", "WITH", "WORK", "WRAPPED", "WRITE", "YEAR", "ZONE"], + O = ["ADD", "ALTER COLUMN", "ALTER TABLE", "BEGIN", "CONNECT BY", "DECLARE", "DELETE FROM", "DELETE", "END", "EXCEPT", "EXCEPTION", "FETCH FIRST", "FROM", "GROUP BY", "HAVING", "INSERT INTO", "INSERT", "INTERSECT", "LIMIT", "LOOP", "MODIFY", "ORDER BY", "SELECT", "SET CURRENT SCHEMA", "SET SCHEMA", "SET", "START WITH", "UNION ALL", "UNION", "UPDATE", "VALUES", "WHERE"], + i = ["AND", "CROSS APPLY", "CROSS JOIN", "ELSE", "END", "INNER JOIN", "JOIN", "LEFT JOIN", "LEFT OUTER JOIN", "OR", "OUTER APPLY", "OUTER JOIN", "RIGHT JOIN", "RIGHT OUTER JOIN", "WHEN", "XOR"], + S = void 0, + u = function() { + function e(E) { + (0, T["default"])(this, e), this.cfg = E + } + return e.prototype.format = function(e) { + return S || (S = new A["default"]({ + reservedWords: I, + reservedToplevelWords: O, + reservedNewlineWords: i, + stringTypes: ['""', "N''", "''", "``"], + openParens: ["(", "CASE"], + closeParens: [")", "END"], + indexedPlaceholderTypes: ["?"], + namedPlaceholderTypes: [":"], + lineCommentTypes: ["--"], + specialWordChars: ["_", "$", "#", ".", "@"] + })), new o["default"](this.cfg, S).format(e) + }, e + }(); + E["default"] = u, e.exports = E["default"] + }, function(e, E, t) { + "use strict"; + + function n(e) { + return e && e.__esModule ? 
e : { + "default": e + } + } + E.__esModule = !0; + var r = t(1), + T = n(r), + R = t(4), + o = n(R), + N = t(5), + A = n(N), + I = ["ACCESSIBLE", "ACTION", "AGAINST", "AGGREGATE", "ALGORITHM", "ALL", "ALTER", "ANALYSE", "ANALYZE", "AS", "ASC", "AUTOCOMMIT", "AUTO_INCREMENT", "BACKUP", "BEGIN", "BETWEEN", "BINLOG", "BOTH", "CASCADE", "CASE", "CHANGE", "CHANGED", "CHARACTER SET", "CHARSET", "CHECK", "CHECKSUM", "COLLATE", "COLLATION", "COLUMN", "COLUMNS", "COMMENT", "COMMIT", "COMMITTED", "COMPRESSED", "CONCURRENT", "CONSTRAINT", "CONTAINS", "CONVERT", "CREATE", "CROSS", "CURRENT_TIMESTAMP", "DATABASE", "DATABASES", "DAY", "DAY_HOUR", "DAY_MINUTE", "DAY_SECOND", "DEFAULT", "DEFINER", "DELAYED", "DELETE", "DESC", "DESCRIBE", "DETERMINISTIC", "DISTINCT", "DISTINCTROW", "DIV", "DO", "DROP", "DUMPFILE", "DUPLICATE", "DYNAMIC", "ELSE", "ENCLOSED", "END", "ENGINE", "ENGINES", "ENGINE_TYPE", "ESCAPE", "ESCAPED", "EVENTS", "EXEC", "EXECUTE", "EXISTS", "EXPLAIN", "EXTENDED", "FAST", "FETCH", "FIELDS", "FILE", "FIRST", "FIXED", "FLUSH", "FOR", "FORCE", "FOREIGN", "FULL", "FULLTEXT", "FUNCTION", "GLOBAL", "GRANT", "GRANTS", "GROUP_CONCAT", "HEAP", "HIGH_PRIORITY", "HOSTS", "HOUR", "HOUR_MINUTE", "HOUR_SECOND", "IDENTIFIED", "IF", "IFNULL", "IGNORE", "IN", "INDEX", "INDEXES", "INFILE", "INSERT", "INSERT_ID", "INSERT_METHOD", "INTERVAL", "INTO", "INVOKER", "IS", "ISOLATION", "KEY", "KEYS", "KILL", "LAST_INSERT_ID", "LEADING", "LEVEL", "LIKE", "LINEAR", "LINES", "LOAD", "LOCAL", "LOCK", "LOCKS", "LOGS", "LOW_PRIORITY", "MARIA", "MASTER", "MASTER_CONNECT_RETRY", "MASTER_HOST", "MASTER_LOG_FILE", "MATCH", "MAX_CONNECTIONS_PER_HOUR", "MAX_QUERIES_PER_HOUR", "MAX_ROWS", "MAX_UPDATES_PER_HOUR", "MAX_USER_CONNECTIONS", "MEDIUM", "MERGE", "MINUTE", "MINUTE_SECOND", "MIN_ROWS", "MODE", "MODIFY", "MONTH", "MRG_MYISAM", "MYISAM", "NAMES", "NATURAL", "NOT", "NOW()", "NULL", "OFFSET", "ON DELETE", "ON UPDATE", "ON", "ONLY", "OPEN", "OPTIMIZE", "OPTION", "OPTIONALLY", "OUTFILE", "PACK_KEYS", 
"PAGE", "PARTIAL", "PARTITION", "PARTITIONS", "PASSWORD", "PRIMARY", "PRIVILEGES", "PROCEDURE", "PROCESS", "PROCESSLIST", "PURGE", "QUICK", "RAID0", "RAID_CHUNKS", "RAID_CHUNKSIZE", "RAID_TYPE", "RANGE", "READ", "READ_ONLY", "READ_WRITE", "REFERENCES", "REGEXP", "RELOAD", "RENAME", "REPAIR", "REPEATABLE", "REPLACE", "REPLICATION", "RESET", "RESTORE", "RESTRICT", "RETURN", "RETURNS", "REVOKE", "RLIKE", "ROLLBACK", "ROW", "ROWS", "ROW_FORMAT", "SECOND", "SECURITY", "SEPARATOR", "SERIALIZABLE", "SESSION", "SHARE", "SHOW", "SHUTDOWN", "SLAVE", "SONAME", "SOUNDS", "SQL", "SQL_AUTO_IS_NULL", "SQL_BIG_RESULT", "SQL_BIG_SELECTS", "SQL_BIG_TABLES", "SQL_BUFFER_RESULT", "SQL_CACHE", "SQL_CALC_FOUND_ROWS", "SQL_LOG_BIN", "SQL_LOG_OFF", "SQL_LOG_UPDATE", "SQL_LOW_PRIORITY_UPDATES", "SQL_MAX_JOIN_SIZE", "SQL_NO_CACHE", "SQL_QUOTE_SHOW_CREATE", "SQL_SAFE_UPDATES", "SQL_SELECT_LIMIT", "SQL_SLAVE_SKIP_COUNTER", "SQL_SMALL_RESULT", "SQL_WARNINGS", "START", "STARTING", "STATUS", "STOP", "STORAGE", "STRAIGHT_JOIN", "STRING", "STRIPED", "SUPER", "TABLE", "TABLES", "TEMPORARY", "TERMINATED", "THEN", "TO", "TRAILING", "TRANSACTIONAL", "TRUE", "TRUNCATE", "TYPE", "TYPES", "UNCOMMITTED", "UNIQUE", "UNLOCK", "UNSIGNED", "USAGE", "USE", "USING", "VARIABLES", "VIEW", "WHEN", "WITH", "WORK", "WRITE", "YEAR_MONTH"], + O = ["ADD", "AFTER", "ALTER COLUMN", "ALTER TABLE", "DELETE FROM", "EXCEPT", "FETCH FIRST", "FROM", "GROUP BY", "GO", "HAVING", "INSERT INTO", "INSERT", "INTERSECT", "LIMIT", "MODIFY", "ORDER BY", "SELECT", "SET CURRENT SCHEMA", "SET SCHEMA", "SET", "UNION ALL", "UNION", "UPDATE", "VALUES", "WHERE"], + i = ["AND", "CROSS APPLY", "CROSS JOIN", "ELSE", "INNER JOIN", "JOIN", "LEFT JOIN", "LEFT OUTER JOIN", "OR", "OUTER APPLY", "OUTER JOIN", "RIGHT JOIN", "RIGHT OUTER JOIN", "WHEN", "XOR"], + S = void 0, + u = function() { + function e(E) { + (0, T["default"])(this, e), this.cfg = E + } + return e.prototype.format = function(e) { + return S || (S = new A["default"]({ + reservedWords: 
I, + reservedToplevelWords: O, + reservedNewlineWords: i, + stringTypes: ['""', "N''", "''", "``", "[]"], + openParens: ["(", "CASE"], + closeParens: [")", "END"], + indexedPlaceholderTypes: ["?"], + namedPlaceholderTypes: ["@", ":"], + lineCommentTypes: ["#", "--"] + })), new o["default"](this.cfg, S).format(e) + }, e + }(); + E["default"] = u, e.exports = E["default"] + }, function(e, E, t) { + var n = t(3), + r = t(2), + T = n(r, "DataView"); + e.exports = T + }, function(e, E, t) { + var n = t(3), + r = t(2), + T = n(r, "Map"); + e.exports = T + }, function(e, E, t) { + var n = t(3), + r = t(2), + T = n(r, "Promise"); + e.exports = T + }, function(e, E, t) { + var n = t(3), + r = t(2), + T = n(r, "Set"); + e.exports = T + }, function(e, E, t) { + var n = t(2), + r = n.Symbol; + e.exports = r + }, function(e, E, t) { + var n = t(3), + r = t(2), + T = n(r, "WeakMap"); + e.exports = T + }, function(e, E) { + function t(e) { + return e.split("") + } + e.exports = t + }, function(e, E) { + function t(e, E, t, n) { + for (var r = e.length, T = t + (n ? 1 : -1); n ? T-- : ++T < r;) + if (E(e[T], T, e)) return T; + return -1 + } + e.exports = t + }, function(e, E) { + function t(e) { + return r.call(e) + } + var n = Object.prototype, + r = n.toString; + e.exports = t + }, function(e, E, t) { + function n(e, E, t) { + return E === E ? R(e, E, t) : r(e, T, t) + } + var r = t(29), + T = t(32), + R = t(49); + e.exports = n + }, function(e, E) { + function t(e) { + return e !== e + } + e.exports = t + }, function(e, E, t) { + function n(e) { + if (!R(e) || T(e)) return !1; + var E = r(e) ? 
u : A; + return E.test(o(e)) + } + var r = t(12), + T = t(45), + R = t(6), + o = t(11), + N = /[\\^$.*+?()[\]{}|]/g, + A = /^\[object .+?Constructor\]$/, + I = Function.prototype, + O = Object.prototype, + i = I.toString, + S = O.hasOwnProperty, + u = RegExp("^" + i.call(S).replace(N, "\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g, "$1.*?") + "$"); + e.exports = n + }, function(e, E) { + function t(e, E) { + var t = ""; + if (!e || 1 > E || E > n) return t; + do E % 2 && (t += e), E = r(E / 2), E && (e += e); while (E); + return t + } + var n = 9007199254740991, + r = Math.floor; + e.exports = t + }, function(e, E) { + function t(e, E, t) { + var n = -1, + r = e.length; + 0 > E && (E = -E > r ? 0 : r + E), t = t > r ? r : t, 0 > t && (t += r), r = E > t ? 0 : t - E >>> 0, E >>>= 0; + for (var T = Array(r); ++n < r;) T[n] = e[n + E]; + return T + } + e.exports = t + }, function(e, E, t) { + function n(e, E, t) { + var n = e.length; + return t = void 0 === t ? n : t, E || n > t ? 
r(e, E, t) : e + } + var r = t(35); + e.exports = n + }, function(e, E, t) { + function n(e, E) { + for (var t = e.length; t-- && r(E, e[t], 0) > -1;); + return t + } + var r = t(31); + e.exports = n + }, function(e, E, t) { + var n = t(2), + r = n["__core-js_shared__"]; + e.exports = r + }, function(e, E) { + (function(E) { + var t = "object" == typeof E && E && E.Object === Object && E; + e.exports = t + }).call(E, function() { + return this + }()) + }, function(e, E, t) { + var n = t(22), + r = t(23), + T = t(24), + R = t(25), + o = t(27), + N = t(30), + A = t(11), + I = "[object Map]", + O = "[object Object]", + i = "[object Promise]", + S = "[object Set]", + u = "[object WeakMap]", + L = "[object DataView]", + C = Object.prototype, + s = C.toString, + a = A(n), + f = A(r), + c = A(T), + p = A(R), + l = A(o), + D = N; + (n && D(new n(new ArrayBuffer(1))) != L || r && D(new r) != I || T && D(T.resolve()) != i || R && D(new R) != S || o && D(new o) != u) && (D = function(e) { + var E = s.call(e), + t = E == O ? e.constructor : void 0, + n = t ? A(t) : void 0; + if (n) switch (n) { + case a: + return L; + case f: + return I; + case c: + return i; + case p: + return S; + case l: + return u + } + return E + }), e.exports = D + }, function(e, E) { + function t(e, E) { + return null == e ? void 0 : e[E] + } + e.exports = t + }, function(e, E) { + function t(e) { + return N.test(e) + } + var n = "\\ud800-\\udfff", + r = "\\u0300-\\u036f\\ufe20-\\ufe23", + T = "\\u20d0-\\u20f0", + R = "\\ufe0e\\ufe0f", + o = "\\u200d", + N = RegExp("[" + o + n + r + T + R + "]"); + e.exports = t + }, function(e, E) { + function t(e, E) { + return E = null == E ? n : E, !!E && ("number" == typeof e || r.test(e)) && e > -1 && e % 1 == 0 && E > e + } + var n = 9007199254740991, + r = /^(?:0|[1-9]\d*)$/; + e.exports = t + }, function(e, E, t) { + function n(e, E, t) { + if (!o(t)) return !1; + var n = typeof E; + return !!("number" == n ? 
T(t) && R(E, t.length) : "string" == n && E in t) && r(t[E], e) + } + var r = t(52), + T = t(8), + R = t(43), + o = t(6); + e.exports = n + }, function(e, E, t) { + function n(e) { + return !!T && T in e + } + var r = t(38), + T = function() { + var e = /[^.]+$/.exec(r && r.keys && r.keys.IE_PROTO || ""); + return e ? "Symbol(src)_1." + e : "" + }(); + e.exports = n + }, function(e, E) { + function t(e) { + var E = e && e.constructor, + t = "function" == typeof E && E.prototype || n; + return e === t + } + var n = Object.prototype; + e.exports = t + }, function(e, E, t) { + var n = t(48), + r = n(Object.keys, Object); + e.exports = r + }, function(e, E) { + function t(e, E) { + return function(t) { + return e(E(t)) + } + } + e.exports = t + }, function(e, E) { + function t(e, E, t) { + for (var n = t - 1, r = e.length; ++n < r;) + if (e[n] === E) return n; + return -1 + } + e.exports = t + }, function(e, E, t) { + function n(e) { + return T(e) ? R(e) : r(e) + } + var r = t(28), + T = t(42), + R = t(51); + e.exports = n + }, function(e, E) { + function t(e) { + return e.match(c) || [] + } + var n = "\\ud800-\\udfff", + r = "\\u0300-\\u036f\\ufe20-\\ufe23", + T = "\\u20d0-\\u20f0", + R = "\\ufe0e\\ufe0f", + o = "[" + n + "]", + N = "[" + r + T + "]", + A = "\\ud83c[\\udffb-\\udfff]", + I = "(?:" + N + "|" + A + ")", + O = "[^" + n + "]", + i = "(?:\\ud83c[\\udde6-\\uddff]){2}", + S = "[\\ud800-\\udbff][\\udc00-\\udfff]", + u = "\\u200d", + L = I + "?", + C = "[" + R + "]?", + s = "(?:" + u + "(?:" + [O, i, S].join("|") + ")" + C + L + ")*", + a = C + L + s, + f = "(?:" + [O + N + "?", N, i, S, o].join("|") + ")", + c = RegExp(A + "(?=" + A + ")|" + f + a, "g"); + e.exports = t + }, function(e, E) { + function t(e, E) { + return e === E || e !== e && E !== E + } + e.exports = t + }, function(e, E, t) { + function n(e) { + return e = r(e), e && R.test(e) ? 
e.replace(T, "\\$&") : e + } + var r = t(9), + T = /[\\^$.*+?()[\]{}|]/g, + R = RegExp(T.source); + e.exports = n + }, function(e, E, t) { + function n(e) { + return r(e) && o.call(e, "callee") && (!A.call(e, "callee") || N.call(e) == T) + } + var r = t(56), + T = "[object Arguments]", + R = Object.prototype, + o = R.hasOwnProperty, + N = R.toString, + A = R.propertyIsEnumerable; + e.exports = n + }, function(e, E) { + var t = Array.isArray; + e.exports = t + }, function(e, E, t) { + function n(e) { + return T(e) && r(e) + } + var r = t(8), + T = t(13); + e.exports = n + }, function(e, E, t) { + (function(e) { + var n = t(2), + r = t(62), + T = "object" == typeof E && E && !E.nodeType && E, + R = T && "object" == typeof e && e && !e.nodeType && e, + o = R && R.exports === T, + N = o ? n.Buffer : void 0, + A = N ? N.isBuffer : void 0, + I = A || r; + e.exports = I + }).call(E, t(67)(e)) + }, function(e, E, t) { + function n(e) { + if (o(e) && (R(e) || "string" == typeof e || "function" == typeof e.splice || N(e) || T(e))) return !e.length; + var E = r(e); + if (E == O || E == i) return !e.size; + if (A(e)) return !I(e).length; + for (var t in e) + if (u.call(e, t)) return !1; + return !0 + } + var r = t(40), + T = t(54), + R = t(55), + o = t(8), + N = t(57), + A = t(46), + I = t(47), + O = "[object Map]", + i = "[object Set]", + S = Object.prototype, + u = S.hasOwnProperty; + e.exports = n + }, function(e, E) { + function t(e) { + return "number" == typeof e && e > -1 && e % 1 == 0 && n >= e + } + var n = 9007199254740991; + e.exports = t + }, function(e, E) { + function t(e) { + var E = e ? e.length : 0; + return E ? e[E - 1] : void 0 + } + e.exports = t + }, function(e, E, t) { + function n(e, E, t) { + return E = (t ? T(e, E, t) : void 0 === E) ? 
1 : R(E), r(o(e), E) + } + var r = t(34), + T = t(44), + R = t(64), + o = t(9); + e.exports = n + }, function(e, E) { + function t() { + return !1 + } + e.exports = t + }, function(e, E, t) { + function n(e) { + if (!e) return 0 === e ? e : 0; + if (e = r(e), e === T || e === -T) { + var E = 0 > e ? -1 : 1; + return E * R + } + return e === e ? e : 0 + } + var r = t(65), + T = 1 / 0, + R = 1.7976931348623157e308; + e.exports = n + }, function(e, E, t) { + function n(e) { + var E = r(e), + t = E % 1; + return E === E ? t ? E - t : E : 0 + } + var r = t(63); + e.exports = n + }, function(e, E, t) { + function n(e) { + if ("number" == typeof e) return e; + if (T(e)) return R; + if (r(e)) { + var E = "function" == typeof e.valueOf ? e.valueOf() : e; + e = r(E) ? E + "" : E + } + if ("string" != typeof e) return 0 === e ? e : +e; + e = e.replace(o, ""); + var t = A.test(e); + return t || I.test(e) ? O(e.slice(2), t ? 2 : 8) : N.test(e) ? R : +e + } + var r = t(6), + T = t(14), + R = NaN, + o = /^\s+|\s+$/g, + N = /^[-+]0x[0-9a-f]+$/i, + A = /^0b[01]+$/i, + I = /^0o[0-7]+$/i, + O = parseInt; + e.exports = n + }, function(e, E, t) { + function n(e, E, t) { + if (e = N(e), e && (t || void 0 === E)) return e.replace(A, ""); + if (!e || !(E = r(E))) return e; + var n = o(e), + I = R(n, o(E)) + 1; + return T(n, 0, I).join("") + } + var r = t(10), + T = t(36), + R = t(37), + o = t(50), + N = t(9), + A = /\s+$/; + e.exports = n + }, function(e, E) { + e.exports = function(e) { + return e.webpackPolyfill || (e.deprecate = function() {}, e.paths = [], e.children = [], e.webpackPolyfill = 1), e + } + }]) +}); + +function escape2Html(str) { + var arrEntities = { + 'lt': '<', + 'gt': '>', + 'nbsp': '', + 'amp': '&', + 'quot': '"' + }; + return str.replace(/&(lt|gt|nbsp|amp|quot);/ig, function(all, t) { + return arrEntities[t]; + }); +} + +function load() { + let codeList = document.getElementsByTagName('code'); + + for (let i = 0; i < codeList.length; i++) { + codeList[i].innerHTML 
= window.sqlFormatter.format(escape2Html(codeList[i].innerHTML)) + } +}; diff --git a/doc/report_type.md b/doc/report_type.md new file mode 100644 index 00000000..86c81bb6 --- /dev/null +++ b/doc/report_type.md @@ -0,0 +1,133 @@ +# 支持的报告类型 + +[toc] + +## lint +* **Description**:参考sqlint格式,以插件形式集成到代码编辑器,显示输出更加友好 + +* **Example**: + +```bash +soar -report-type lint -query test.sql +``` +## markdown +* **Description**:该格式为默认输出格式,以markdown格式展现,可以用网页浏览器插件直接打开,也可以用markdown编辑器打开 + +* **Example**: + +```bash +echo "select * from film" | soar +``` +## rewrite +* **Description**:SQL重写功能,配合-rewrite-rules参数一起使用,可以通过-list-rewrite-rules查看所有支持的SQL重写规则 + +* **Example**: + +```bash +echo "select * from film" | soar -rewrite-rules star2columns,delimiter -report-type rewrite +``` +## ast +* **Description**:输出SQL的抽象语法树,主要用于测试 + +* **Example**: + +```bash +echo "select * from film" | soar -report-type ast +``` +## tiast +* **Description**:输出SQL的TiDB抽象语法树,主要用于测试 + +* **Example**: + +```bash +echo "select * from film" | soar -report-type tiast +``` +## fingerprint +* **Description**:输出SQL的指纹 + +* **Example**: + +```bash +echo "select * from film where language_id=1" | soar -report-type fingerprint +``` +## md2html +* **Description**:markdown格式转html格式小工具 + +* **Example**: + +```bash +soar -list-heuristic-rules | soar -report-type md2html > heuristic_rules.html +``` +## explain-digest +* **Description**:输入为EXPLAIN的表格,JSON或Vertical格式,对其进行分析,给出分析结果 + +* **Example**: + +```bash +soar -report-type explain-digest << EOF ++----+-------------+-------+------+---------------+------+---------+------+------+-------+ +| id | select_type | table | type | possible_keys | key | key_len | ref | rows | Extra | ++----+-------------+-------+------+---------------+------+---------+------+------+-------+ +| 1 | SIMPLE | film | ALL | NULL | NULL | NULL | NULL | 1131 | | ++----+-------------+-------+------+---------------+------+---------+------+------+-------+ +EOF +``` +## duplicate-key-checker +* 
**Description**:对OnlineDsn中指定的DB进行索引重复检查 + +* **Example**: + +```bash +soar -report-type duplicate-key-checker -online-dsn user:passwd@127.0.0.1:3306/db +``` +## html +* **Description**:以HTML格式输出报表 + +* **Example**: + +```bash +echo "select * from film" | soar -report-type html +``` +## json +* **Description**:输出JSON格式报表,方便应用程序处理 + +* **Example**: + +```bash +echo "select * from film" | soar -report-type json +``` +## tokenize +* **Description**:对SQL进行切词,主要用于测试 + +* **Example**: + +```bash +echo "select * from film" | soar -report-type tokenize +``` +## compress +* **Description**:SQL压缩小工具,使用内置SQL压缩逻辑,测试中的功能 + +* **Example**: + +```bash +echo "select +* +from + film" | soar -report-type compress +``` +## pretty +* **Description**:使用kr/pretty打印报告,主要用于测试 + +* **Example**: + +```bash +echo "select * from film" | soar -report-type pretty +``` +## remove-comment +* **Description**:去除SQL语句中的注释,支持单行多行注释的去除 + +* **Example**: + +```bash +echo "select/*comment*/ * from film" | soar -report-type remove-comment +``` diff --git a/doc/rewrite.md b/doc/rewrite.md new file mode 100644 index 00000000..68a821d5 --- /dev/null +++ b/doc/rewrite.md @@ -0,0 +1,272 @@ +# 重写规则 + +[toc] + +## dml2select +* **Description**:将数据库更新请求转换为只读查询请求,便于执行EXPLAIN + +* **Original**: + +```sql +DELETE FROM film WHERE length > 100 +``` + +* **Suggest**: + +```sql +select * from film where length > 100 +``` +## star2columns +* **Description**:为SELECT *补全表的列信息 + +* **Original**: + +```sql +SELECT * FROM film +``` + +* **Suggest**: + +```sql +select film.film_id, film.title from film +``` +## insertcolumns +* **Description**:为INSERT补全表的列信息 + +* **Original**: + +```sql +insert into film values(1,2,3,4,5) +``` + +* **Suggest**: + +```sql +insert into film(film_id, title, description, release_year, language_id) values (1, 2, 3, 4, 5) +``` +## having +* **Description**:将查询的HAVING子句改写为WHERE中的查询条件 + +* **Original**: + +```sql +SELECT state, COUNT(*) FROM Drivers GROUP BY state HAVING state IN ('GA', 'TX') ORDER 
BY state +``` + +* **Suggest**: + +```sql +select state, COUNT(*) from Drivers where state in ('GA', 'TX') group by state order by state asc +``` +## orderbynull +* **Description**:如果GROUP BY语句不指定ORDER BY条件会导致无谓的排序产生,如果不需要排序建议添加ORDER BY NULL + +* **Original**: + +```sql +SELECT sum(col1) FROM tbl GROUP BY col +``` + +* **Suggest**: + +```sql +select sum(col1) from tbl group by col order by null +``` +## unionall +* **Description**:可以接受重复的时间,使用UNION ALL替代UNION以提高查询效率 + +* **Original**: + +```sql +select country_id from city union select country_id from country +``` + +* **Suggest**: + +```sql +select country_id from city union all select country_id from country +``` +## or2in +* **Description**:将同一列不同条件的OR查询转写为IN查询 + +* **Original**: + +```sql +select country_id from city where col1 = 1 or (col2 = 1 or col2 = 2 ) or col1 = 3; +``` + +* **Suggest**: + +```sql +select country_id from city where (col2 in (1, 2)) or col1 in (1, 3); +``` +## dmlorderby +* **Description**:删除DML更新操作中无意义的ORDER BY + +* **Original**: + +```sql +DELETE FROM tbl WHERE col1=1 ORDER BY col +``` + +* **Suggest**: + +```sql +delete from tbl where col1 = 1 +``` +## distinctstar +* **Description**:DISTINCT *对有主键的表没有意义,可以将DISTINCT删掉 + +* **Original**: + +```sql +SELECT DISTINCT * FROM film; +``` + +* **Suggest**: + +```sql +SELECT * FROM film +``` +## standard +* **Description**:SQL标准化,如:关键字转换为小写 + +* **Original**: + +```sql +SELECT sum(col1) FROM tbl GROUP BY 1; +``` + +* **Suggest**: + +```sql +select sum(col1) from tbl group by 1 +``` +## mergealter +* **Description**:合并同一张表的多条ALTER语句 + +* **Original**: + +```sql +ALTER TABLE t2 DROP COLUMN c;ALTER TABLE t2 DROP COLUMN d; +``` + +* **Suggest**: + +```sql +ALTER TABLE t2 DROP COLUMN c, DROP COLUMN d; +``` +## alwaystrue +* **Description**:删除无用的恒真判断条件 + +* **Original**: + +```sql +SELECT count(col) FROM tbl where 'a'= 'a' or ('b' = 'b' and a = 'b'); +``` + +* **Suggest**: + +```sql +select count(col) from tbl where (a = 'b'); +``` +## countstar +* 
**Description**:不建议使用COUNT(col)或COUNT(常量),建议改写为COUNT(*) + +* **Original**: + +```sql +SELECT count(col) FROM tbl GROUP BY 1; +``` + +* **Suggest**: + +```sql +SELECT count(*) FROM tbl GROUP BY 1; +``` +## innodb +* **Description**:建表时建议使用InnoDB引擎,非InnoDB引擎表自动转InnoDB + +* **Original**: + +```sql +CREATE TABLE t1(id bigint(20) NOT NULL AUTO_INCREMENT); +``` + +* **Suggest**: + +```sql +create table t1 ( + id bigint(20) not null auto_increment +) ENGINE=InnoDB; +``` +## autoincrement +* **Description**:将autoincrement初始化为1 + +* **Original**: + +```sql +CREATE TABLE t1(id bigint(20) NOT NULL AUTO_INCREMENT) ENGINE=InnoDB AUTO_INCREMENT=123802; +``` + +* **Suggest**: + +```sql +create table t1(id bigint(20) not null auto_increment) ENGINE=InnoDB auto_increment=1; +``` +## intwidth +* **Description**:整型数据类型修改默认显示宽度 + +* **Original**: + +```sql +create table t1 (id int(20) not null auto_increment) ENGINE=InnoDB; +``` + +* **Suggest**: + +```sql +create table t1 (id int(10) not null auto_increment) ENGINE=InnoDB; +``` +## truncate +* **Description**:不带WHERE条件的DELETE操作建议修改为TRUNCATE + +* **Original**: + +```sql +DELETE FROM tbl +``` + +* **Suggest**: + +```sql +truncate table tbl +``` +## rmparenthesis +* **Description**:去除没有意义的括号 + +* **Original**: + +```sql +select col from table where (col = 1); +``` + +* **Suggest**: + +```sql +select col from table where col = 1; +``` +## delimiter +* **Description**:补全DELIMITER + +* **Original**: + +```sql +use sakila +``` + +* **Suggest**: + +```sql +use sakila; +``` diff --git a/doc/roadmap.md b/doc/roadmap.md new file mode 100644 index 00000000..0390824b --- /dev/null +++ b/doc/roadmap.md @@ -0,0 +1,9 @@ +## 路线图 + +* 语法支持方面,目前主要依赖vitess,TiDB对SQL语法的支持。 +* 目前仅针对MySQL语法族进行开发和测试,其他使用SQL的数据库产品暂不支持。 +* Profiling和Trace功能有待深入挖掘,供经验丰富的DBA分析使用。 +* 目前尚不支持直接线上自动执行评审通过的SQL,后续会努力支持。 +* 由于暂不支持线上自动执行,因此数据备份功能也未提供。 +* Vim, Sublime, Emacs等编辑器插件支持。 +* Currently, only support Chinese suggestion, if you can help us add multi-language support, it will be 
greatly appreciated. diff --git a/doc/structure.md b/doc/structure.md new file mode 100644 index 00000000..02be0bf9 --- /dev/null +++ b/doc/structure.md @@ -0,0 +1,51 @@ + +# 体系架构 + +![架构图](http://github.com/XiaoMi/soar/raw/master/doc/images/structure.png) + +SOAR主要由语法解析器,集成环境,优化建议,重写逻辑,工具集五大模块组成。下面将对每个模块的作用及设计实现进行简述,更详细的算法及逻辑会在各个独立章节中详细讲解。 + +## 语法解析和语法检查 + +一条SQL从文件,标准输入或命令行参数等形式传递给SOAR后首先进入语法解析器,这里一开始我们选用了vitess的语法解析库作为SOAR的语法解析库,但随着需求的不断增加我们发现有些复杂需求使用vitess的语法解析实现起来逻辑比较复杂。于是参考业内其他数据库产品,我们引入了TiDB的语法解析器作为补充。我们发现这两个解析库还存在一定的盲区,于是又引入了MySQL执行返回结果作为多版本SQL方言的补充。大家也可以看到在语法解析器这里,SOAR的实现方案是松散的、可插拔的。SOAR并不直接维护庞大的语法解析库,它把各种优秀的语法解析库集成在一起,各取所长。 + +## 集成环境 + +集成环境区分`线上环境`和`测试环境`两种,分别用于解决不同场景下用户的SQL优化需求。一种常见的情况是已有表结构需要优化查询SQL的场景,可以从线上环境导出表结构和足够的采样数据到测试环境,在测试环境上就可以放心的执行各种高危操作而不用担心数据被损坏。另一种常见的情况是建一套全新的数据库,需要验证提供的数据字典中是否存在优化的可能。对于这种情况,很有可能你不需要知道线上环境在哪儿,完全只是想先试试看,如果报错了马上改对就是了。当然还有更多种组合的场景需求,将在[集成环境](http://github.com/XiaoMi/soar/raw/master/doc/enviorment.md)一章分类说明。 + +## 优化建议 + +目前SOAR可以提供的优化建议有基于启发式规则(通常也称之为经验)的优化建议,基于索引优化算法给出的索引优化建议,以及基于EXPLAIN信息给出的解读。 + +### 启发式规则建议 + +下面这段代码是启发式规则的元数据结构,它由规则代号,危险等级,规则摘要,规则解释,SQL示例,建议位置,规则函数等7部分组成。每一条SQL经过语法解析后会经过数百个启发式规则的逐一检查,命中了的规则将会保存在一个叫heuristicSuggest的变量中传递下去,与其他优化建议合并输出。这里最核心的部分,也是代码最多的部分在heuristic.go,里面包含了所有的启发式规则实现的函数。所有的启发式规则列表保存在rules.go文件中。 + +```Golang +// Rule 评审规则元数据结构 +type Rule struct { + Item string `json:"Item"` // 规则代号 + Severity string `json:"Severity"` // 危险等级:L[0-8], 数字越大表示级别越高 + Summary string `json:"Summary"` // 规则摘要 + Content string `json:"Content"` // 规则解释 + Case string `json:"Case"` // SQL示例 + Position int `json:"Position"` // 建议所处SQL字符位置,默认0表示全局建议 + Func func(*Query4Audit) Rule `json:"-"` // 函数名 +} +``` + +### 索引优化 + 
+关于索引优化,数据库经过几十年的发展,DBA沉淀了很多宝贵的经验,怎样把这些感性的经验转化为覆盖全面、逻辑可推导的算法是这个模块最大的挑战。很幸运的是SOAR并不是第一个尝试做这类算法整理的产品,有很多前人的著作、论文、博客等的知识储备。毫不夸张的说,为了写成这个模块我们读了不下5百万字的著作和论文,还不包括网络上各种大神的博客,这些老师们的知识结晶收集整理在[鸣谢](http://github.com/XiaoMi/soar/raw/master/doc/thanks.md)章节。使用到的算法在[索引优化](http://github.com/XiaoMi/soar/raw/master/doc/indexing.md)章节有详细的描述,虽然在某些算法理解上可能还存在一定争议,很希望与同行们共同讨论,共同进步,不断完善SOAR的算法。 + +### EXPLAIN解读 + +做过SQL优化的人对EXPLAIN应该都不陌生,但对于新手来说要记住每一个列代表什么含义,每个关键字背后的奥秘是什么需要足够的脑容量来记忆才行。统计了一下SOAR只在EXPLAIN信息的注解一项差不多写了200行代码,按平均行长度120计算,算下来一个DBA要精通EXPLAIN优化就要记住不下2万字的文档。SOAR能帮每位DBA节约这部分脑容量。不过关于EXPLAIN解读还远不止这些,想了解更多可以参考[EXPLAIN信息解读](http://github.com/XiaoMi/soar/raw/master/doc/explain.md)章节。 + +## 重写逻辑 + +上面提到的优化建议是我们早期实现的主要功能,早期的功能还只是停留在建议上,对于一些初级用户看到建议也不一定会改写。为了进一步简化SQL优化的成本,SOAR又进一步挖掘了自动SQL重写的功能。现在提供几十种常见场景下的SQL等价转写,不过相比SQL优化建议还有很大的改进空间。这部分的功能和逻辑将在[重写逻辑](http://github.com/XiaoMi/soar/raw/master/doc/rewrite.md)一章中详细说明。 + +## 工具集 + +除了SQL优化和改写以外,为了方便用户使用以及美化输出展现形式,SOAR还提供了一些辅助的小工具,比如markdown转HTML工具,SQL格式化输出工具等等。你可以在[常用命令](http://github.com/XiaoMi/soar/raw/master/doc/cheatsheet.md)中找到这些小工具的使用方法。 diff --git a/doc/thanks.md b/doc/thanks.md new file mode 100644 index 00000000..a02440db --- /dev/null +++ b/doc/thanks.md @@ -0,0 +1,39 @@ +## 鸣谢 + +以下为SOAR的灵感及代码来源,我们站在伟人的肩膀上,让DBA的工作和生活更美好。 + +* [vitess](https://github.com/vitessio/vitess) +* [SQLAdvisor](https://github.com/Meituan-Dianping/SQLAdvisor) +* [pt-query-advisor](https://www.percona.com/doc/percona-toolkit/2.1/pt-query-advisor.html) +* [sqlcheck](https://github.com/jarulraj/sqlcheck) +* [pg_idx_advisor](https://github.com/cohenjo/pg_idx_advisor) +* [mysql-xplain-xplain](https://github.com/rap2hpoutre/mysql-xplain-xplain) +* [explain-analyzer](https://github.com/Preetam/explain-analyzer) +* [Explain](https://github.com/goghcrow/explain/blob/master/Explain.php) +* [sql-beautify](https://github.com/jkramer/sql-beautify) +* [go-mysql](https://github.com/percona/go-mysql) +* [pretty](https://github.com/kr/pretty) +* 
[golang_escape](https://github.com/liule/golang_escape) +* [mymysql](https://github.com/ziutek/mymysql) +* [beego/logs](https://github.com/astaxie/beego/logs) +* [uniuri](https://github.com/dchest/uniuri) +* [gjson](https://github.com/tidwall/gjson) + +## 参考博文 + +* [MySQL Reference Manual Chapter 8 Optimization](https://dev.mysql.com/doc/refman/8.0/en/optimization.html) +* [Indexing 101: Optimizing MySQL queries on a single table](https://www.percona.com/blog/2015/04/27/indexing-101-optimizing-mysql-queries-on-a-single-table/) +* [MySQL: Building the best INDEX for a given SELECT](http://mysql.rjweb.org/doc.php/index_cookbook_mysql) +* [MySQL INDEX Cookbook](http://mysql.rjweb.org/slides/cook.pdf) +* [Random Sampling for Histogram Construction: How much is enough?](http://www.mathcs.emory.edu/~cheung/papers/StreamDB/Histogram/1998-Chaudhuri-Histo.pdf) +* [10 Cool SQL Optimisations That do not Depend on the Cost Model](https://blog.jooq.org/2017/09/28/10-cool-sql-optimisations-that-do-not-depend-on-the-cost-model/) + +## 参考书目 + +* 《高性能MySQL》/《High Performance MySQL》 +* 《数据库索引设计与优化》/《Relational Database Index Design and the Optimizers》 +* 《数据库系统概论》/《Database System Concepts》 +* 《SQL反模式》/《SQL Antipatterns》 +* 《数据库查询优化器的艺术》/《The Art of Database Query Optimizer》 +* 《SQL优化最佳实践》/《SQL Optimization Best Practice》 +* 《SQL编程风格》/《Sql Programming Style》 diff --git a/doc/thanks_en.md b/doc/thanks_en.md new file mode 100644 index 00000000..06ae1255 --- /dev/null +++ b/doc/thanks_en.md @@ -0,0 +1,39 @@ +## Thanks + +以下为SOAR的灵感及代码来源,我们站在伟人的肩膀上,让DBA的工作和生活更美好。 + +* [vitess](https://github.com/vitessio/vitess) +* [SQLAdvisor](https://github.com/Meituan-Dianping/SQLAdvisor) +* [pt-query-advisor](https://www.percona.com/doc/percona-toolkit/2.1/pt-query-advisor.html) +* [sqlcheck](https://github.com/jarulraj/sqlcheck) +* [pg_idx_advisor](https://github.com/cohenjo/pg_idx_advisor) +* [mysql-xplain-xplain](https://github.com/rap2hpoutre/mysql-xplain-xplain) +* 
[explain-analyzer](https://github.com/Preetam/explain-analyzer) +* [Explain](https://github.com/goghcrow/explain/blob/master/Explain.php) +* [sql-beautify](https://github.com/jkramer/sql-beautify) +* [go-mysql](https://github.com/percona/go-mysql) +* [pretty](https://github.com/kr/pretty) +* [golang_escape](https://github.com/liule/golang_escape) +* [mymysql](https://github.com/ziutek/mymysql) +* [beego/logs](https://github.com/astaxie/beego/logs) +* [uniuri](https://github.com/dchest/uniuri) +* [gjson](https://github.com/tidwall/gjson) + +## Reference Articles + +* [MySQL Reference Manual Chapter 8 Optimization](https://dev.mysql.com/doc/refman/8.0/en/optimization.html) +* [Indexing 101: Optimizing MySQL queries on a single table](https://www.percona.com/blog/2015/04/27/indexing-101-optimizing-mysql-queries-on-a-single-table/) +* [MySQL: Building the best INDEX for a given SELECT](http://mysql.rjweb.org/doc.php/index_cookbook_mysql) +* [MySQL INDEX Cookbook](http://mysql.rjweb.org/slides/cook.pdf) +* [Random Sampling for Histogram Construction: How much is enough?](http://www.mathcs.emory.edu/~cheung/papers/StreamDB/Histogram/1998-Chaudhuri-Histo.pdf) +* [10 Cool SQL Optimisations That do not Depend on the Cost Model](https://blog.jooq.org/2017/09/28/10-cool-sql-optimisations-that-do-not-depend-on-the-cost-model/) + +## Books + +* 《高性能MySQL》/《High Performance MySQL》 +* 《数据库索引设计与优化》/《Relational Database Index Design and the Optimizers》 +* 《数据库系统概论》/《Database System Concepts》 +* 《SQL反模式》/《SQL Antipatterns》 +* 《数据库查询优化器的艺术》/《The Art of Database Query Optimizer》 +* 《SQL优化最佳实践》/《SQL Optimization Best Practice》 +* 《SQL编程风格》/《Sql Programming Style》 diff --git a/doc/themes/foghorn.css b/doc/themes/foghorn.css new file mode 100644 index 00000000..f8945572 --- /dev/null +++ b/doc/themes/foghorn.css @@ -0,0 +1,141 @@ + +html, body { + padding:1em; + margin:auto; + max-width:42em; + background:#fefefe; + } +body { + font: 1.3em "Vollkorn", Palatino, Times; + color: #333; + 
line-height: 1.4; + text-align: justify; + } +header, nav, article, footer { + width: 700px; + margin:0 auto; + } +article { + margin-top: 4em; + margin-bottom: 4em; + min-height: 400px; + } +footer { + margin-bottom:50px; + } +video { + margin: 2em 0; + border:1px solid #ddd; + } + +nav { + font-size: .9em; + font-style: italic; + border-bottom: 1px solid #ddd; + padding: 1em 0; + } +nav p { + margin: 0; + } + +/* Typography +-------------------------------------------------------- */ + +h1 { + margin-top: 0; + font-weight: normal; + } +h2 { + font-weight: normal; + } +h3 { + font-weight: normal; + font-style: italic; + margin-top:3em; + } +p { + margin-top:0; + -webkit-hypens:auto; + -moz-hypens:auto; + hyphens:auto; + } +ul { + list-style: square; + padding-left:1.2em; + } +ol { + padding-left:1.2em; + } +blockquote { + margin-left: 1em; + padding-left: 1em; + border-left: 1px solid #ddd; + } +code { + font-family: "Consolas", "Menlo", "Monaco", monospace, serif; + font-size: .9em; + background: white; + } +a { + color: #2484c1; + text-decoration: none; + } +a:hover { + text-decoration: underline; + } +a img { + border:none; + } +h1 a, h1 a:hover { + color: #333; + text-decoration: none; + } +hr { + color : #ddd; + height : 1px; + margin: 2em 0; + border-top : solid 1px #ddd; + border-bottom : none; + border-left: 0; + border-right: 0; + } +p#heart{ + font-size: 2em; + line-height: 1; + text-align: center; + color: #ccc; + } +.red { + color:#B50000; + } + +/* Home Page +--------------------------- */ + +body#index li { + margin-bottom: 1em; + } + + +/* iPad +-------------------------------------------------------- */ +@media only screen and (max-device-width: 1024px) { +body { + font-size: 120%; + line-height: 1.4; + } +} /* @iPad */ + +/* iPhone +-------------------------------------------------------- */ +@media only screen and (max-device-width: 480px) { +body { + text-align: left; + } +article, footer { + width: auto; + } +article { + padding: 0 10px; + } +} 
/* @iPhone */ diff --git a/doc/themes/ghostwriter.css b/doc/themes/ghostwriter.css new file mode 100644 index 00000000..0e41dffe --- /dev/null +++ b/doc/themes/ghostwriter.css @@ -0,0 +1,413 @@ +/* ============================================================ */ +/* Base */ +/* ============================================================ */ +html, body { + height: 100%; +} + +body { + background: #fefefe; + color: #424242; + font-family: "Open Sans", arial, sans-serif; + font-size: 18px; +} + +h1, h2, h3, h4, h5, h6 { + margin-bottom: 33px; + text-transform: none; +} + +h1 { + font-size: 26px; +} + +h2 { + font-size: 24px; +} + +h3 { + font-size: 20px; + margin-bottom: 20px; +} + +h4 { + font-size: 18px; + margin-bottom: 18px; +} + +h5 { + font-size: 16px; + margin-bottom: 15px; +} + +h6 { + font-size: 14px; + margin-bottom: 12px; +} + +p { + line-height: 1.8; + margin: 0 0 30px; +} + +a { + color: #f03838; + text-decoration: none; +} + +ul, ol { + list-style-position: inside; + line-height: 1.8; + margin: 0 0 40px; + padding: 0; +} +ul li, ol li { + margin: 0 0 10px; +} + +blockquote { + border-left: 1px dotted #303030; + margin: 40px 0; + padding: 5px 30px; +} +blockquote p { + color: #AEADAD; + display: block; + font-style: italic; + margin: 0; + width: 100%; +} + +img { + display: block; + margin: 40px 0; + width: auto; + max-width: 100%; +} + +pre { + background: #F1F0EA; + border: 1px solid #DDDBCC; + border-radius: 3px; + margin: 0 0 40px; + padding: 15px 20px; +} + +::selection { + background: #FFF5B8; + color: #000; + display: block; +} + +::-moz-selection { + background: #FFF5B8; + color: #000; + display: block; +} + +/* ============================================================ */ +/* General Appearance */ +/* ============================================================ */ +.container { + margin: 0 auto; + position: relative; + width: 100%; + max-width: 889px; +} + +#wrapper { + height: auto; + min-height: 100%; + /* This must be the same as the height 
of the footer */ + margin-bottom: -265px; +} +#wrapper:after { + content: ""; + display: block; + /* This must be the same as the height of the footer */ + height: 265px; +} + +.button { + background: #303030; + border: none; + border-radius: 3px; + color: #FEFEFE; + font-size: 14px; + font-weight: 700; + padding: 10px 12px; + text-transform: uppercase; +} +.button:hover { + background: #f03838; +} + +.button-square { + background: #f03838; + float: left; + margin: 0 0 0 10px; + padding: 8px; +} +.button-square:hover { + background: #303030; +} + +/* ============================================================ */ +/* Site Header */ +/* ============================================================ */ +.site-header { + padding: 100px 0 35px; + overflow: auto; + text-align: center; + text-transform: uppercase; +} + +.site-title-wrapper { + display: table; + margin: 0 auto; +} + +.site-title { + float: left; + font-size: 14px; + font-weight: 600; + margin: 0; + text-transform: uppercase; +} +.site-title a { + float: left; + background: #f03838; + color: #FEFEFE; + padding: 5px 10px 6px; +} +.site-title a:hover { + background: #303030; +} + +/* ============================================================ */ +/* Post */ +/* ============================================================ */ +.post { + margin: 0 40px; +} + +.post-header { + border-bottom: 6px solid #303030; + margin: 0 0 50px; + padding: 0 0 80px; + text-align: center; + text-transform: uppercase; +} + +.post-title { + font-size: 52px; + font-weight: 700; + margin: 15px 0; + text-transform: uppercase; +} + +.post-date { + color: #AEADAD; + font-size: 14px; + font-weight: 600; + line-height: 1; + margin: 25px 0 0; +} +.post-date:after { + border-bottom: 1px dotted #303030; + content: ""; + display: block; + margin: 40px auto 0; + width: 100px; +} + +.post-content { + margin: 0 0 92px; +} +.post-content a:hover { + border-bottom: 1px dotted #f03838; + padding: 0 0 2px; +} + +.post-tags { + color: #AEADAD; + 
font-size: 14px; +} +.post-tags span { + font-weight: 600; +} + +.post-navigation { + display: table; + margin: 70px auto 100px; +} + +.newer-posts, +.older-posts { + float: left; + background: #f03838; + color: #FEFEFE; + font-size: 14px; + font-weight: 600; + margin: 0 5px; + padding: 5px 10px 6px; + text-transform: uppercase; +} +.newer-posts:hover, +.older-posts:hover { + background: #303030; +} + +.page-number { + display: none; +} + +/* ============================================================ */ +/* Post Index */ +/* ============================================================ */ +.post-list { + border-top: 6px solid #303030; + list-style: none; + margin: 80px 40px 0; + padding: 35px 0 0; +} + +.post-stub { + border-bottom: 1px dotted #303030; + margin: 0; +} +.post-stub:first-child { + padding-top: 0; +} +.post-stub a { + -webkit-transition: all 0.2s ease-in-out; + -moz-transition: all 0.2s ease-in-out; + transition: all 0.2s ease-in-out; + display: block; + color: #424242; + padding: 20px 5px; +} +.post-stub a:hover { + background: #FCF5F5; + color: #f03838; + padding: 20px 12px; +} + +.post-stub-title { + display: inline-block; + margin: 0; + text-transform: none; +} + +.post-stub-date { + display: inline-block; +} +.post-stub-date:before { + content: "/ "; +} + +.next-posts-link a, +.previous-posts-link a { + display: block; + padding: 8px 11px; +} + +/* ============================================================ */ +/* Icons */ +/* ============================================================ */ +.icon { + background-size: 14px 38px; + display: block; + height: 38px; + width: 14px; +} + +.icon-menu { + background-position: 0 0; + height: 14px; + width: 14px; +} + +.icon-up { + background-position: 0 -15px; + height: 8px; + width: 14px; +} + +.icon-rss { + background-position: 0 -24px; + height: 14px; + width: 14px; +} + +/* ============================================================ */ +/* Footer */ +/* 
============================================================ */ +.footer { + background: #303030; + color: #D3D3D3; + height: 265px; + overflow: auto; +} +.footer .site-title-wrapper { + margin: 80px auto 35px; +} +.footer .site-title a:hover, +.footer .button-square:hover { + background: #121212; +} + +.button-jump-top { + padding-top: 11px; + padding-bottom: 11px; +} + +.footer-copyright { + color: #656565; + font-size: 14px; + margin: 0; + text-align: center; + text-transform: uppercase; +} +.footer-copyright a { + color: #656565; + font-weight: 700; +} +.footer-copyright a:hover { + color: #FEFEFE; +} + +/* ============================================================ */ +/* NProgress */ +/* ============================================================ */ +#nprogress .bar { + background: #f03838; +} + +#nprogress .peg { + box-shadow: 0 0 10px #f03838, 0 0 5px #f03838; +} + +#nprogress .spinner-icon { + border-top-color: #f03838; + border-left-color: #f03838; +} + +/* ============================================================ */ +/* Media Queries */ +/* ============================================================ */ +@media only screen and (max-width: 600px) { + .post-stub-title { + display: block; + } + + .post-stub-date:before { + content: ""; + display: block; + } +} +@media only screen and (max-width: 400px) { + .post-title { + font-size: 32px; + } +} diff --git a/doc/themes/github-dark.css b/doc/themes/github-dark.css new file mode 100644 index 00000000..f41386ea --- /dev/null +++ b/doc/themes/github-dark.css @@ -0,0 +1,765 @@ +@font-face { + font-family: octicons-link; + src: 
url(data:font/woff;charset=utf-8;base64,d09GRgABAAAAAAZwABAAAAAACFQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABEU0lHAAAGaAAAAAgAAAAIAAAAAUdTVUIAAAZcAAAACgAAAAoAAQAAT1MvMgAAAyQAAABJAAAAYFYEU3RjbWFwAAADcAAAAEUAAACAAJThvmN2dCAAAATkAAAABAAAAAQAAAAAZnBnbQAAA7gAAACyAAABCUM+8IhnYXNwAAAGTAAAABAAAAAQABoAI2dseWYAAAFsAAABPAAAAZwcEq9taGVhZAAAAsgAAAA0AAAANgh4a91oaGVhAAADCAAAABoAAAAkCA8DRGhtdHgAAAL8AAAADAAAAAwGAACfbG9jYQAAAsAAAAAIAAAACABiATBtYXhwAAACqAAAABgAAAAgAA8ASm5hbWUAAAToAAABQgAAAlXu73sOcG9zdAAABiwAAAAeAAAAME3QpOBwcmVwAAAEbAAAAHYAAAB/aFGpk3jaTY6xa8JAGMW/O62BDi0tJLYQincXEypYIiGJjSgHniQ6umTsUEyLm5BV6NDBP8Tpts6F0v+k/0an2i+itHDw3v2+9+DBKTzsJNnWJNTgHEy4BgG3EMI9DCEDOGEXzDADU5hBKMIgNPZqoD3SilVaXZCER3/I7AtxEJLtzzuZfI+VVkprxTlXShWKb3TBecG11rwoNlmmn1P2WYcJczl32etSpKnziC7lQyWe1smVPy/Lt7Kc+0vWY/gAgIIEqAN9we0pwKXreiMasxvabDQMM4riO+qxM2ogwDGOZTXxwxDiycQIcoYFBLj5K3EIaSctAq2kTYiw+ymhce7vwM9jSqO8JyVd5RH9gyTt2+J/yUmYlIR0s04n6+7Vm1ozezUeLEaUjhaDSuXHwVRgvLJn1tQ7xiuVv/ocTRF42mNgZGBgYGbwZOBiAAFGJBIMAAizAFoAAABiAGIAznjaY2BkYGAA4in8zwXi+W2+MjCzMIDApSwvXzC97Z4Ig8N/BxYGZgcgl52BCSQKAA3jCV8CAABfAAAAAAQAAEB42mNgZGBg4f3vACQZQABIMjKgAmYAKEgBXgAAeNpjYGY6wTiBgZWBg2kmUxoDA4MPhGZMYzBi1AHygVLYQUCaawqDA4PChxhmh/8ODDEsvAwHgMKMIDnGL0x7gJQCAwMAJd4MFwAAAHjaY2BgYGaA4DAGRgYQkAHyGMF8NgYrIM3JIAGVYYDT+AEjAwuDFpBmA9KMDEwMCh9i/v8H8sH0/4dQc1iAmAkALaUKLgAAAHjaTY9LDsIgEIbtgqHUPpDi3gPoBVyRTmTddOmqTXThEXqrob2gQ1FjwpDvfwCBdmdXC5AVKFu3e5MfNFJ29KTQT48Ob9/lqYwOGZxeUelN2U2R6+cArgtCJpauW7UQBqnFkUsjAY/kOU1cP+DAgvxwn1chZDwUbd6CFimGXwzwF6tPbFIcjEl+vvmM/byA48e6tWrKArm4ZJlCbdsrxksL1AwWn/yBSJKpYbq8AXaaTb8AAHja28jAwOC00ZrBeQNDQOWO//sdBBgYGRiYWYAEELEwMTE4uzo5Zzo5b2BxdnFOcALxNjA6b2ByTswC8jYwg0VlNuoCTWAMqNzMzsoK1rEhNqByEyerg5PMJlYuVueETKcd/89uBpnpvIEVomeHLoMsAAe1Id4AAAAAAAB42oWQT07CQBTGv0JBhagk7HQzKxca2sJCE1hDt4QF+9JOS0nbaaYDCQfwCJ7Au3AHj+LO13FMmm6cl7785vven0kBjHCBhfpYuNa5Ph1c0e2Xu3jEvWG7UdPDLZ4N92nOm+EBXuAbHmIMSRMs+4aUEd4Nd3CHD8NdvOLTsA2GL8M9PODbcL+hD7C1xoaHeLJSEao0FEW14ckxC+TU8TxvsY6X0eLPmRhry2WVioLpkrbp84LLQPGI7c6sOiUzpWIWS5GzlSgUzzLBSikOPFTOXqly7rqx
0Z1Q5BAIoZBSFihQYQOOBEdkCOgXTOHA07HAGjGWiIjaPZNW13/+lm6S9FT7rLHFJ6fQbkATOG1j2OFMucKJJsxIVfQORl+9Jyda6Sl1dUYhSCm1dyClfoeDve4qMYdLEbfqHf3O/AdDumsjAAB42mNgYoAAZQYjBmyAGYQZmdhL8zLdDEydARfoAqIAAAABAAMABwAKABMAB///AA8AAQAAAAAAAAAAAAAAAAABAAAAAA==) format('woff'); +} + +.markdown-body { + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100%; + line-height: 1.5; + color: #24292e; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-size: 16px; + line-height: 1.5; + word-wrap: break-word; +} + +.markdown-body .pl-c { + color: #6a737d; +} + +.markdown-body .pl-c1, +.markdown-body .pl-s .pl-v { + color: #005cc5; +} + +.markdown-body .pl-e, +.markdown-body .pl-en { + color: #6f42c1; +} + +.markdown-body .pl-smi, +.markdown-body .pl-s .pl-s1 { + color: #24292e; +} + +.markdown-body .pl-ent { + color: #22863a; +} + +.markdown-body .pl-k { + color: #d73a49; +} + +.markdown-body .pl-s, +.markdown-body .pl-pds, +.markdown-body .pl-s .pl-pse .pl-s1, +.markdown-body .pl-sr, +.markdown-body .pl-sr .pl-cce, +.markdown-body .pl-sr .pl-sre, +.markdown-body .pl-sr .pl-sra { + color: #032f62; +} + +.markdown-body .pl-v, +.markdown-body .pl-smw { + color: #e36209; +} + +.markdown-body .pl-bu { + color: #b31d28; +} + +.markdown-body .pl-ii { + color: #fafbfc; + background-color: #b31d28; +} + +.markdown-body .pl-c2 { + color: #fafbfc; + background-color: #d73a49; +} + +.markdown-body .pl-c2::before { + content: "^M"; +} + +.markdown-body .pl-sr .pl-cce { + font-weight: bold; + color: #22863a; +} + +.markdown-body .pl-ml { + color: #735c0f; +} + +.markdown-body .pl-mh, +.markdown-body .pl-mh .pl-en, +.markdown-body .pl-ms { + font-weight: bold; + color: #005cc5; +} + +.markdown-body .pl-mi { + font-style: italic; + color: #24292e; +} + +.markdown-body .pl-mb { + font-weight: bold; + color: #24292e; +} + +.markdown-body .pl-md { + color: #b31d28; + background-color: #ffeef0; +} + 
+.markdown-body .pl-mi1 { + color: #22863a; + background-color: #f0fff4; +} + +.markdown-body .pl-mc { + color: #e36209; + background-color: #ffebda; +} + +.markdown-body .pl-mi2 { + color: #f6f8fa; + background-color: #005cc5; +} + +.markdown-body .pl-mdr { + font-weight: bold; + color: #6f42c1; +} + +.markdown-body .pl-ba { + color: #586069; +} + +.markdown-body .pl-sg { + color: #959da5; +} + +.markdown-body .pl-corl { + text-decoration: underline; + color: #032f62; +} + +.markdown-body .octicon { + display: inline-block; + vertical-align: text-top; + fill: currentColor; +} + +.markdown-body a { + background-color: transparent; + -webkit-text-decoration-skip: objects; +} + +.markdown-body a:active, +.markdown-body a:hover { + outline-width: 0; +} + +.markdown-body strong { + font-weight: inherit; +} + +.markdown-body strong { + font-weight: bolder; +} + +.markdown-body h1 { + font-size: 2em; + margin: 0.67em 0; +} + +.markdown-body img { + border-style: none; +} + +.markdown-body svg:not(:root) { + overflow: hidden; +} + +.markdown-body code, +.markdown-body kbd, +.markdown-body pre { + font-family: monospace, monospace; + font-size: 1em; +} + +.markdown-body hr { + box-sizing: content-box; + height: 0; + overflow: visible; +} + +.markdown-body input { + font: inherit; + margin: 0; +} + +.markdown-body input { + overflow: visible; +} + +.markdown-body [type="checkbox"] { + box-sizing: border-box; + padding: 0; +} + +.markdown-body * { + box-sizing: border-box; +} + +.markdown-body input { + font-family: inherit; + font-size: inherit; + line-height: inherit; +} + +.markdown-body a { + color: #0366d6; + text-decoration: none; +} + +.markdown-body a:hover { + text-decoration: underline; +} + +.markdown-body strong { + font-weight: 600; +} + +.markdown-body hr { + height: 0; + margin: 15px 0; + overflow: hidden; + background: transparent; + border: 0; + border-bottom: 1px solid #dfe2e5; +} + +.markdown-body hr::before { + display: table; + content: ""; +} + 
+.markdown-body hr::after { + display: table; + clear: both; + content: ""; +} + +.markdown-body table { + border-spacing: 0; + border-collapse: collapse; +} + +.markdown-body td, +.markdown-body th { + padding: 0; +} + +.markdown-body h1, +.markdown-body h2, +.markdown-body h3, +.markdown-body h4, +.markdown-body h5, +.markdown-body h6 { + margin-top: 0; + margin-bottom: 0; +} + +.markdown-body h1 { + font-size: 32px; + font-weight: 600; +} + +.markdown-body h2 { + font-size: 24px; + font-weight: 600; +} + +.markdown-body h3 { + font-size: 20px; + font-weight: 600; +} + +.markdown-body h4 { + font-size: 16px; + font-weight: 600; +} + +.markdown-body h5 { + font-size: 14px; + font-weight: 600; +} + +.markdown-body h6 { + font-size: 12px; + font-weight: 600; +} + +.markdown-body p { + margin-top: 0; + margin-bottom: 10px; +} + +.markdown-body blockquote { + margin: 0; +} + +.markdown-body ul, +.markdown-body ol { + padding-left: 0; + margin-top: 0; + margin-bottom: 0; +} + +.markdown-body ol ol, +.markdown-body ul ol { + list-style-type: lower-roman; +} + +.markdown-body ul ul ol, +.markdown-body ul ol ol, +.markdown-body ol ul ol, +.markdown-body ol ol ol { + list-style-type: lower-alpha; +} + +.markdown-body dd { + margin-left: 0; +} + +.markdown-body code { + font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; + font-size: 12px; +} + +.markdown-body pre { + margin-top: 0; + margin-bottom: 0; + font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; + font-size: 12px; +} + +.markdown-body .octicon { + vertical-align: text-bottom; +} + +.markdown-body .pl-0 { + padding-left: 0 !important; +} + +.markdown-body .pl-1 { + padding-left: 4px !important; +} + +.markdown-body .pl-2 { + padding-left: 8px !important; +} + +.markdown-body .pl-3 { + padding-left: 16px !important; +} + +.markdown-body .pl-4 { + padding-left: 24px !important; +} + +.markdown-body .pl-5 { + padding-left: 32px !important; +} + 
+.markdown-body .pl-6 { + padding-left: 40px !important; +} + +.markdown-body::before { + display: table; + content: ""; +} + +.markdown-body::after { + display: table; + clear: both; + content: ""; +} + +.markdown-body>*:first-child { + margin-top: 0 !important; +} + +.markdown-body>*:last-child { + margin-bottom: 0 !important; +} + +.markdown-body a:not([href]) { + color: inherit; + text-decoration: none; +} + +.markdown-body .anchor { + float: left; + padding-right: 4px; + margin-left: -20px; + line-height: 1; +} + +.markdown-body .anchor:focus { + outline: none; +} + +.markdown-body p, +.markdown-body blockquote, +.markdown-body ul, +.markdown-body ol, +.markdown-body dl, +.markdown-body table, +.markdown-body pre { + margin-top: 0; + margin-bottom: 16px; +} + +.markdown-body hr { + height: 0.25em; + padding: 0; + margin: 24px 0; + background-color: #e1e4e8; + border: 0; +} + +.markdown-body blockquote { + padding: 0 1em; + color: #6a737d; + border-left: 0.25em solid #dfe2e5; +} + +.markdown-body blockquote>:first-child { + margin-top: 0; +} + +.markdown-body blockquote>:last-child { + margin-bottom: 0; +} + +.markdown-body kbd { + display: inline-block; + padding: 3px 5px; + font-size: 11px; + line-height: 10px; + color: #444d56; + vertical-align: middle; + background-color: #fafbfc; + border: solid 1px #c6cbd1; + border-bottom-color: #959da5; + border-radius: 3px; + box-shadow: inset 0 -1px 0 #959da5; +} + +.markdown-body h1, +.markdown-body h2, +.markdown-body h3, +.markdown-body h4, +.markdown-body h5, +.markdown-body h6 { + margin-top: 24px; + margin-bottom: 16px; + font-weight: 600; + line-height: 1.25; +} + +.markdown-body h1 .octicon-link, +.markdown-body h2 .octicon-link, +.markdown-body h3 .octicon-link, +.markdown-body h4 .octicon-link, +.markdown-body h5 .octicon-link, +.markdown-body h6 .octicon-link { + color: #1b1f23; + vertical-align: middle; + visibility: hidden; +} + +.markdown-body h1:hover .anchor, +.markdown-body h2:hover .anchor, 
+.markdown-body h3:hover .anchor, +.markdown-body h4:hover .anchor, +.markdown-body h5:hover .anchor, +.markdown-body h6:hover .anchor { + text-decoration: none; +} + +.markdown-body h1:hover .anchor .octicon-link, +.markdown-body h2:hover .anchor .octicon-link, +.markdown-body h3:hover .anchor .octicon-link, +.markdown-body h4:hover .anchor .octicon-link, +.markdown-body h5:hover .anchor .octicon-link, +.markdown-body h6:hover .anchor .octicon-link { + visibility: visible; +} + +.markdown-body h1 { + padding-bottom: 0.3em; + font-size: 2em; + border-bottom: 1px solid #eaecef; +} + +.markdown-body h2 { + padding-bottom: 0.3em; + font-size: 1.5em; + border-bottom: 1px solid #eaecef; +} + +.markdown-body h3 { + font-size: 1.25em; +} + +.markdown-body h4 { + font-size: 1em; +} + +.markdown-body h5 { + font-size: 0.875em; +} + +.markdown-body h6 { + font-size: 0.85em; + color: #6a737d; +} + +.markdown-body ul, +.markdown-body ol { + padding-left: 2em; +} + +.markdown-body ul ul, +.markdown-body ul ol, +.markdown-body ol ol, +.markdown-body ol ul { + margin-top: 0; + margin-bottom: 0; +} + +.markdown-body li>p { + margin-top: 16px; +} + +.markdown-body li+li { + margin-top: 0.25em; +} + +.markdown-body dl { + padding: 0; +} + +.markdown-body dl dt { + padding: 0; + margin-top: 16px; + font-size: 1em; + font-style: italic; + font-weight: 600; +} + +.markdown-body dl dd { + padding: 0 16px; + margin-bottom: 16px; +} + +.markdown-body table { + display: block; + width: 100%; + overflow: auto; +} + +.markdown-body table th { + font-weight: 600; +} + +.markdown-body table th, +.markdown-body table td { + padding: 6px 13px; + border: 1px solid #dfe2e5; +} + +.markdown-body table tr { + background-color: #fff; + border-top: 1px solid #c6cbd1; +} + +.markdown-body table tr:nth-child(2n) { + background-color: #f6f8fa; +} + +.markdown-body img { + max-width: 100%; + box-sizing: content-box; + background-color: #fff; +} + +.markdown-body img[align=right] { + padding-left: 20px; +} 
+ +.markdown-body img[align=left] { + padding-right: 20px; +} + +.markdown-body code { + padding: 0; + padding-top: 0.2em; + padding-bottom: 0.2em; + margin: 0; + font-size: 85%; + background-color: rgba(27,31,35,0.05); + border-radius: 3px; +} + +.markdown-body code::before, +.markdown-body code::after { + letter-spacing: -0.2em; + content: "\00a0"; +} + +.markdown-body pre { + word-wrap: normal; +} + +.markdown-body pre>code { + padding: 0; + margin: 0; + font-size: 100%; + word-break: normal; + white-space: pre; + background: transparent; + border: 0; +} + +.markdown-body .highlight { + margin-bottom: 16px; +} + +.markdown-body .highlight pre { + margin-bottom: 0; + word-break: normal; +} + +.markdown-body .highlight pre, +.markdown-body pre { + padding: 16px; + overflow: auto; + font-size: 85%; + line-height: 1.45; + background-color: #f6f8fa; + border-radius: 3px; +} + +.markdown-body pre code { + display: inline; + max-width: auto; + padding: 0; + margin: 0; + overflow: visible; + line-height: inherit; + word-wrap: normal; + background-color: transparent; + border: 0; +} + +.markdown-body pre code::before, +.markdown-body pre code::after { + content: normal; +} + +.markdown-body .full-commit .btn-outline:not(:disabled):hover { + color: #005cc5; + border-color: #005cc5; +} + +.markdown-body kbd { + display: inline-block; + padding: 3px 5px; + font: 11px "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; + line-height: 10px; + color: #444d56; + vertical-align: middle; + background-color: #fafbfc; + border: solid 1px #d1d5da; + border-bottom-color: #c6cbd1; + border-radius: 3px; + box-shadow: inset 0 -1px 0 #c6cbd1; +} + +.markdown-body :checked+.radio-label { + position: relative; + z-index: 1; + border-color: #0366d6; +} + +.markdown-body .task-list-item { + list-style-type: none; +} + +.markdown-body .task-list-item+.task-list-item { + margin-top: 3px; +} + +.markdown-body .task-list-item input { + margin: 0 0.2em 0.25em -1.6em; + 
vertical-align: middle; +} + +.markdown-body hr { + border-bottom-color: #eee; +} + +/*Markdown Viewer*/ +.markdown-body summary:hover { cursor: pointer; } +.markdown-body ul li p { margin: 0; } + +/*GitHub Dark*/ + +body { + background: #181818; +} + +.markdown-body { + color: #c0c0c0 !important; + background: #181818 !important; + border-color: #484848 !important; +} +.markdown-body table { color: #c0c0c0 !important; } +.markdown-body table th { border-color: #343434 !important; } +.markdown-body table td { border-color: #343434 !important; } +.markdown-body table tr { + background: #141414 !important; + border-color: #343434 !important; +} +.markdown-body table tr:nth-child(2n) { background: #181818 !important; } +.markdown-body hr { background: #383838 !important; } +.markdown-body h1, +.markdown-body h2 { border-color: #343434 !important; } +.markdown-body h1, .markdown-body h2, +.markdown-body h3, .markdown-body h4, +.markdown-body .octicon-link { color: #e0e0e0 !important; } +.markdown-body blockquote strong { color: #808080 !important; } +.markdown-body blockquote { border-color: #343434 !important; } +.markdown-body blockquote, +.markdown-body blockquote code { color: #666 !important; } +.markdown-body code, .markdown-body tt, .markdown-body pre, +.markdown-body .highlight pre, body.blog pre { + border: 1px solid rgba(255,255,255,.1) !important; +} +.markdown-body code, .markdown-body tt { background: #202020 !important; } +.markdown-body pre { + background: #141414 !important; color: #ccc !important; +} +.markdown-body pre code { background: none !important; border: 0 !important; } +.markdown-body code[class*="language-"] { + color: #c0c0c0 !important; + text-shadow: none !important; +} +.markdown-body code[class*="language-"] .operator, +.markdown-body code[class*="language-"] .string { + background: none !important; +} +.markdown-body a[href*="/labels/"], +.markdown-body a:not([href*="/labels/"]), +.markdown-body blockquote a code { color: #4183C4 
!important; } + +.markdown-body summary:hover { cursor: pointer; } diff --git a/doc/themes/github.css b/doc/themes/github.css new file mode 100644 index 00000000..697938e9 --- /dev/null +++ b/doc/themes/github.css @@ -0,0 +1,713 @@ +@font-face { + font-family: octicons-link; + src: url(data:font/woff;charset=utf-8;base64,d09GRgABAAAAAAZwABAAAAAACFQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABEU0lHAAAGaAAAAAgAAAAIAAAAAUdTVUIAAAZcAAAACgAAAAoAAQAAT1MvMgAAAyQAAABJAAAAYFYEU3RjbWFwAAADcAAAAEUAAACAAJThvmN2dCAAAATkAAAABAAAAAQAAAAAZnBnbQAAA7gAAACyAAABCUM+8IhnYXNwAAAGTAAAABAAAAAQABoAI2dseWYAAAFsAAABPAAAAZwcEq9taGVhZAAAAsgAAAA0AAAANgh4a91oaGVhAAADCAAAABoAAAAkCA8DRGhtdHgAAAL8AAAADAAAAAwGAACfbG9jYQAAAsAAAAAIAAAACABiATBtYXhwAAACqAAAABgAAAAgAA8ASm5hbWUAAAToAAABQgAAAlXu73sOcG9zdAAABiwAAAAeAAAAME3QpOBwcmVwAAAEbAAAAHYAAAB/aFGpk3jaTY6xa8JAGMW/O62BDi0tJLYQincXEypYIiGJjSgHniQ6umTsUEyLm5BV6NDBP8Tpts6F0v+k/0an2i+itHDw3v2+9+DBKTzsJNnWJNTgHEy4BgG3EMI9DCEDOGEXzDADU5hBKMIgNPZqoD3SilVaXZCER3/I7AtxEJLtzzuZfI+VVkprxTlXShWKb3TBecG11rwoNlmmn1P2WYcJczl32etSpKnziC7lQyWe1smVPy/Lt7Kc+0vWY/gAgIIEqAN9we0pwKXreiMasxvabDQMM4riO+qxM2ogwDGOZTXxwxDiycQIcoYFBLj5K3EIaSctAq2kTYiw+ymhce7vwM9jSqO8JyVd5RH9gyTt2+J/yUmYlIR0s04n6+7Vm1ozezUeLEaUjhaDSuXHwVRgvLJn1tQ7xiuVv/ocTRF42mNgZGBgYGbwZOBiAAFGJBIMAAizAFoAAABiAGIAznjaY2BkYGAA4in8zwXi+W2+MjCzMIDApSwvXzC97Z4Ig8N/BxYGZgcgl52BCSQKAA3jCV8CAABfAAAAAAQAAEB42mNgZGBg4f3vACQZQABIMjKgAmYAKEgBXgAAeNpjYGY6wTiBgZWBg2kmUxoDA4MPhGZMYzBi1AHygVLYQUCaawqDA4PChxhmh/8ODDEsvAwHgMKMIDnGL0x7gJQCAwMAJd4MFwAAAHjaY2BgYGaA4DAGRgYQkAHyGMF8NgYrIM3JIAGVYYDT+AEjAwuDFpBmA9KMDEwMCh9i/v8H8sH0/4dQc1iAmAkALaUKLgAAAHjaTY9LDsIgEIbtgqHUPpDi3gPoBVyRTmTddOmqTXThEXqrob2gQ1FjwpDvfwCBdmdXC5AVKFu3e5MfNFJ29KTQT48Ob9/lqYwOGZxeUelN2U2R6+cArgtCJpauW7UQBqnFkUsjAY/kOU1cP+DAgvxwn1chZDwUbd6CFimGXwzwF6tPbFIcjEl+vvmM/byA48e6tWrKArm4ZJlCbdsrxksL1AwWn/yBSJKpYbq8AXaaTb8AAHja28jAwOC00ZrBeQNDQOWO//sdBBgYGRiYWYAEELEwMTE4uzo5Zzo5b2BxdnFOcALxNjA6b2ByTswC8jYwg0VlNuoCTWAMqNzMzsoK1rEhNqByEyerg5PMJlYuVueETKcd/89uBpnpvIEVomeHLoMsAAe1Id4AAAAAAAB42
oWQT07CQBTGv0JBhagk7HQzKxca2sJCE1hDt4QF+9JOS0nbaaYDCQfwCJ7Au3AHj+LO13FMmm6cl7785vven0kBjHCBhfpYuNa5Ph1c0e2Xu3jEvWG7UdPDLZ4N92nOm+EBXuAbHmIMSRMs+4aUEd4Nd3CHD8NdvOLTsA2GL8M9PODbcL+hD7C1xoaHeLJSEao0FEW14ckxC+TU8TxvsY6X0eLPmRhry2WVioLpkrbp84LLQPGI7c6sOiUzpWIWS5GzlSgUzzLBSikOPFTOXqly7rqx0Z1Q5BAIoZBSFihQYQOOBEdkCOgXTOHA07HAGjGWiIjaPZNW13/+lm6S9FT7rLHFJ6fQbkATOG1j2OFMucKJJsxIVfQORl+9Jyda6Sl1dUYhSCm1dyClfoeDve4qMYdLEbfqHf3O/AdDumsjAAB42mNgYoAAZQYjBmyAGYQZmdhL8zLdDEydARfoAqIAAAABAAMABwAKABMAB///AA8AAQAAAAAAAAAAAAAAAAABAAAAAA==) format('woff'); +} + +.markdown-body { + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100%; + line-height: 1.5; + color: #24292e; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-size: 16px; + line-height: 1.5; + word-wrap: break-word; +} + +.markdown-body .pl-c { + color: #6a737d; +} + +.markdown-body .pl-c1, +.markdown-body .pl-s .pl-v { + color: #005cc5; +} + +.markdown-body .pl-e, +.markdown-body .pl-en { + color: #6f42c1; +} + +.markdown-body .pl-smi, +.markdown-body .pl-s .pl-s1 { + color: #24292e; +} + +.markdown-body .pl-ent { + color: #22863a; +} + +.markdown-body .pl-k { + color: #d73a49; +} + +.markdown-body .pl-s, +.markdown-body .pl-pds, +.markdown-body .pl-s .pl-pse .pl-s1, +.markdown-body .pl-sr, +.markdown-body .pl-sr .pl-cce, +.markdown-body .pl-sr .pl-sre, +.markdown-body .pl-sr .pl-sra { + color: #032f62; +} + +.markdown-body .pl-v, +.markdown-body .pl-smw { + color: #e36209; +} + +.markdown-body .pl-bu { + color: #b31d28; +} + +.markdown-body .pl-ii { + color: #fafbfc; + background-color: #b31d28; +} + +.markdown-body .pl-c2 { + color: #fafbfc; + background-color: #d73a49; +} + +.markdown-body .pl-c2::before { + content: "^M"; +} + +.markdown-body .pl-sr .pl-cce { + font-weight: bold; + color: #22863a; +} + +.markdown-body .pl-ml { + color: #735c0f; +} + +.markdown-body .pl-mh, +.markdown-body .pl-mh .pl-en, 
+.markdown-body .pl-ms { + font-weight: bold; + color: #005cc5; +} + +.markdown-body .pl-mi { + font-style: italic; + color: #24292e; +} + +.markdown-body .pl-mb { + font-weight: bold; + color: #24292e; +} + +.markdown-body .pl-md { + color: #b31d28; + background-color: #ffeef0; +} + +.markdown-body .pl-mi1 { + color: #22863a; + background-color: #f0fff4; +} + +.markdown-body .pl-mc { + color: #e36209; + background-color: #ffebda; +} + +.markdown-body .pl-mi2 { + color: #f6f8fa; + background-color: #005cc5; +} + +.markdown-body .pl-mdr { + font-weight: bold; + color: #6f42c1; +} + +.markdown-body .pl-ba { + color: #586069; +} + +.markdown-body .pl-sg { + color: #959da5; +} + +.markdown-body .pl-corl { + text-decoration: underline; + color: #032f62; +} + +.markdown-body .octicon { + display: inline-block; + vertical-align: text-top; + fill: currentColor; +} + +.markdown-body a { + background-color: transparent; + -webkit-text-decoration-skip: objects; +} + +.markdown-body a:active, +.markdown-body a:hover { + outline-width: 0; +} + +.markdown-body strong { + font-weight: inherit; +} + +.markdown-body strong { + font-weight: bolder; +} + +.markdown-body h1 { + font-size: 2em; + margin: 0.67em 0; +} + +.markdown-body img { + border-style: none; +} + +.markdown-body svg:not(:root) { + overflow: hidden; +} + +.markdown-body code, +.markdown-body kbd, +.markdown-body pre { + font-family: monospace, monospace; + font-size: 1em; +} + +.markdown-body hr { + box-sizing: content-box; + height: 0; + overflow: visible; +} + +.markdown-body input { + font: inherit; + margin: 0; +} + +.markdown-body input { + overflow: visible; +} + +.markdown-body [type="checkbox"] { + box-sizing: border-box; + padding: 0; +} + +.markdown-body * { + box-sizing: border-box; +} + +.markdown-body input { + font-family: inherit; + font-size: inherit; + line-height: inherit; +} + +.markdown-body a { + color: #0366d6; + text-decoration: none; +} + +.markdown-body a:hover { + text-decoration: 
underline; +} + +.markdown-body strong { + font-weight: 600; +} + +.markdown-body hr { + height: 0; + margin: 15px 0; + overflow: hidden; + background: transparent; + border: 0; + border-bottom: 1px solid #dfe2e5; +} + +.markdown-body hr::before { + display: table; + content: ""; +} + +.markdown-body hr::after { + display: table; + clear: both; + content: ""; +} + +.markdown-body table { + border-spacing: 0; + border-collapse: collapse; +} + +.markdown-body td, +.markdown-body th { + padding: 0; +} + +.markdown-body h1, +.markdown-body h2, +.markdown-body h3, +.markdown-body h4, +.markdown-body h5, +.markdown-body h6 { + margin-top: 0; + margin-bottom: 0; +} + +.markdown-body h1 { + font-size: 32px; + font-weight: 600; +} + +.markdown-body h2 { + font-size: 24px; + font-weight: 600; +} + +.markdown-body h3 { + font-size: 20px; + font-weight: 600; +} + +.markdown-body h4 { + font-size: 16px; + font-weight: 600; +} + +.markdown-body h5 { + font-size: 14px; + font-weight: 600; +} + +.markdown-body h6 { + font-size: 12px; + font-weight: 600; +} + +.markdown-body p { + margin-top: 0; + margin-bottom: 10px; +} + +.markdown-body blockquote { + margin: 0; +} + +.markdown-body ul, +.markdown-body ol { + padding-left: 0; + margin-top: 0; + margin-bottom: 0; +} + +.markdown-body ol ol, +.markdown-body ul ol { + list-style-type: lower-roman; +} + +.markdown-body ul ul ol, +.markdown-body ul ol ol, +.markdown-body ol ul ol, +.markdown-body ol ol ol { + list-style-type: lower-alpha; +} + +.markdown-body dd { + margin-left: 0; +} + +.markdown-body code { + font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; + font-size: 12px; +} + +.markdown-body pre { + margin-top: 0; + margin-bottom: 0; + font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; + font-size: 12px; +} + +.markdown-body .octicon { + vertical-align: text-bottom; +} + +.markdown-body .pl-0 { + padding-left: 0 !important; +} + +.markdown-body .pl-1 { + 
padding-left: 4px !important; +} + +.markdown-body .pl-2 { + padding-left: 8px !important; +} + +.markdown-body .pl-3 { + padding-left: 16px !important; +} + +.markdown-body .pl-4 { + padding-left: 24px !important; +} + +.markdown-body .pl-5 { + padding-left: 32px !important; +} + +.markdown-body .pl-6 { + padding-left: 40px !important; +} + +.markdown-body::before { + display: table; + content: ""; +} + +.markdown-body::after { + display: table; + clear: both; + content: ""; +} + +.markdown-body>*:first-child { + margin-top: 0 !important; +} + +.markdown-body>*:last-child { + margin-bottom: 0 !important; +} + +.markdown-body a:not([href]) { + color: inherit; + text-decoration: none; +} + +.markdown-body .anchor { + float: left; + padding-right: 4px; + margin-left: -20px; + line-height: 1; +} + +.markdown-body .anchor:focus { + outline: none; +} + +.markdown-body p, +.markdown-body blockquote, +.markdown-body ul, +.markdown-body ol, +.markdown-body dl, +.markdown-body table, +.markdown-body pre { + margin-top: 0; + margin-bottom: 16px; +} + +.markdown-body hr { + height: 0.25em; + padding: 0; + margin: 24px 0; + background-color: #e1e4e8; + border: 0; +} + +.markdown-body blockquote { + padding: 0 1em; + color: #6a737d; + border-left: 0.25em solid #dfe2e5; +} + +.markdown-body blockquote>:first-child { + margin-top: 0; +} + +.markdown-body blockquote>:last-child { + margin-bottom: 0; +} + +.markdown-body kbd { + display: inline-block; + padding: 3px 5px; + font-size: 11px; + line-height: 10px; + color: #444d56; + vertical-align: middle; + background-color: #fafbfc; + border: solid 1px #c6cbd1; + border-bottom-color: #959da5; + border-radius: 3px; + box-shadow: inset 0 -1px 0 #959da5; +} + +.markdown-body h1, +.markdown-body h2, +.markdown-body h3, +.markdown-body h4, +.markdown-body h5, +.markdown-body h6 { + margin-top: 24px; + margin-bottom: 16px; + font-weight: 600; + line-height: 1.25; +} + +.markdown-body h1 .octicon-link, +.markdown-body h2 .octicon-link, 
+.markdown-body h3 .octicon-link, +.markdown-body h4 .octicon-link, +.markdown-body h5 .octicon-link, +.markdown-body h6 .octicon-link { + color: #1b1f23; + vertical-align: middle; + visibility: hidden; +} + +.markdown-body h1:hover .anchor, +.markdown-body h2:hover .anchor, +.markdown-body h3:hover .anchor, +.markdown-body h4:hover .anchor, +.markdown-body h5:hover .anchor, +.markdown-body h6:hover .anchor { + text-decoration: none; +} + +.markdown-body h1:hover .anchor .octicon-link, +.markdown-body h2:hover .anchor .octicon-link, +.markdown-body h3:hover .anchor .octicon-link, +.markdown-body h4:hover .anchor .octicon-link, +.markdown-body h5:hover .anchor .octicon-link, +.markdown-body h6:hover .anchor .octicon-link { + visibility: visible; +} + +.markdown-body h1 { + padding-bottom: 0.3em; + font-size: 2em; + border-bottom: 1px solid #eaecef; +} + +.markdown-body h2 { + padding-bottom: 0.3em; + font-size: 1.5em; + border-bottom: 1px solid #eaecef; +} + +.markdown-body h3 { + font-size: 1.25em; +} + +.markdown-body h4 { + font-size: 1em; +} + +.markdown-body h5 { + font-size: 0.875em; +} + +.markdown-body h6 { + font-size: 0.85em; + color: #6a737d; +} + +.markdown-body ul, +.markdown-body ol { + padding-left: 2em; +} + +.markdown-body ul ul, +.markdown-body ul ol, +.markdown-body ol ol, +.markdown-body ol ul { + margin-top: 0; + margin-bottom: 0; +} + +.markdown-body li>p { + margin-top: 16px; +} + +.markdown-body li+li { + margin-top: 0.25em; +} + +.markdown-body dl { + padding: 0; +} + +.markdown-body dl dt { + padding: 0; + margin-top: 16px; + font-size: 1em; + font-style: italic; + font-weight: 600; +} + +.markdown-body dl dd { + padding: 0 16px; + margin-bottom: 16px; +} + +.markdown-body table { + display: block; + width: 100%; + overflow: auto; +} + +.markdown-body table th { + font-weight: 600; +} + +.markdown-body table th, +.markdown-body table td { + padding: 6px 13px; + border: 1px solid #dfe2e5; +} + +.markdown-body table tr { + background-color: 
#fff; + border-top: 1px solid #c6cbd1; +} + +.markdown-body table tr:nth-child(2n) { + background-color: #f6f8fa; +} + +.markdown-body img { + max-width: 100%; + box-sizing: content-box; + background-color: #fff; +} + +.markdown-body img[align=right] { + padding-left: 20px; +} + +.markdown-body img[align=left] { + padding-right: 20px; +} + +.markdown-body code { + padding: 0; + padding-top: 0.2em; + padding-bottom: 0.2em; + margin: 0; + font-size: 85%; + background-color: rgba(27,31,35,0.05); + border-radius: 3px; +} + +.markdown-body code::before, +.markdown-body code::after { + letter-spacing: -0.2em; + content: "\00a0"; +} + +.markdown-body pre { + word-wrap: normal; +} + +.markdown-body pre>code { + padding: 0; + margin: 0; + font-size: 100%; + word-break: normal; + white-space: pre; + background: transparent; + border: 0; +} + +.markdown-body .highlight { + margin-bottom: 16px; +} + +.markdown-body .highlight pre { + margin-bottom: 0; + word-break: normal; +} + +.markdown-body .highlight pre, +.markdown-body pre { + padding: 16px; + overflow: auto; + font-size: 85%; + line-height: 1.45; + background-color: #f6f8fa; + border-radius: 3px; +} + +.markdown-body pre code { + display: inline; + max-width: auto; + padding: 0; + margin: 0; + overflow: visible; + line-height: inherit; + word-wrap: normal; + background-color: transparent; + border: 0; +} + +.markdown-body pre code::before, +.markdown-body pre code::after { + content: normal; +} + +.markdown-body .full-commit .btn-outline:not(:disabled):hover { + color: #005cc5; + border-color: #005cc5; +} + +.markdown-body kbd { + display: inline-block; + padding: 3px 5px; + font: 11px "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; + line-height: 10px; + color: #444d56; + vertical-align: middle; + background-color: #fafbfc; + border: solid 1px #d1d5da; + border-bottom-color: #c6cbd1; + border-radius: 3px; + box-shadow: inset 0 -1px 0 #c6cbd1; +} + +.markdown-body :checked+.radio-label { + 
position: relative; + z-index: 1; + border-color: #0366d6; +} + +.markdown-body .task-list-item { + list-style-type: none; +} + +.markdown-body .task-list-item+.task-list-item { + margin-top: 3px; +} + +.markdown-body .task-list-item input { + margin: 0 0.2em 0.25em -1.6em; + vertical-align: middle; +} + +.markdown-body hr { + border-bottom-color: #eee; +} + +/*Markdown Viewer*/ +.markdown-body summary:hover { cursor: pointer; } +.markdown-body ul li p { margin: 0; } diff --git a/doc/themes/godspeed.css b/doc/themes/godspeed.css new file mode 100644 index 00000000..233815a8 --- /dev/null +++ b/doc/themes/godspeed.css @@ -0,0 +1,626 @@ +/* Title: Godspeed */ +/* Author: Jocelyn Richard http://jocelynrichard.com/ */ +/* Description: A quirky, low-contrast theme. Works best with Brush Up: http://www.myfonts.com/fonts/pintassilgo/brush-up/ */ + +/* ================================================ */ +/* 1. Reset */ +/* 2. Skeleton */ +/* 3. Media Queries */ +/* 4. Print Styles */ +/* 5. Godspeed Overrides */ +/* ================================================ */ + + + +/* ================================================ */ +/* 1. 
Reset */ +/* ================================================ */ + +html, body, div, span, applet, object, iframe, h1, h2, h3, h4, h5, h6, p, blockquote, pre, a, abbr, acronym, address, big, cite, code, del, dfn, em, img, ins, kbd, q, s, samp, small, strike, strong, sub, sup, tt, var, +b, u, i, center, dl, dt, dd, ol, ul, li, fieldset, form, label, legend, table, caption, tbody, tfoot, thead, tr, th, td, article, aside, canvas, details, embed, figure, figcaption, footer, header, hgroup, menu, nav, output, ruby, section, summary, time, mark, audio, video {margin: 0; padding: 0; border: 0;} /* Edited from http://www.cssreset.com/scripts/eric-meyer-reset-css/ */ + +article, aside, details, figcaption, figure, footer, header, hgroup, nav, section, summary {display: block;} /* Semantic tags definition for IE 6/7/8/9 and Firefox 3 */ + +html {-webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%;} /* Prevents iOS text size adjust after orientation change, without disabling user zoom */ + + + +/* ================================================ */ +/* 2. 
Skeleton */ +/* ================================================ */ + +/* ------------------------------------------------- */ +/* General */ +/* ------------------------------------------------- */ + +html { + font-size: 14px; +} + +body { + font-family: 'Open Sans', sans-serif; + margin: 1.71rem 1.71rem 3rem 1.71rem ; /* Get margins even if the Markdown rendering app doesn't include any */ + background-color: white; + color: #222; +} + +#wrapper { /* #wrapper: ID added by Marked */ + max-width: 42rem; + margin: 0 auto; + margin-left: auto !important; /* Countering toc.css added by Marked */ + padding: 1.71rem 0 !important; /* Countering toc.css added by Marked */ +} + +/* ------------------------------------------------- */ +/* Typography */ +/* ------------------------------------------------- */ + +h1, +h2, +h3, +h4, +h5, +h6 { + margin-bottom: 1.6rem; +} + +h1, +h2 { + margin-top: 3.2rem; +} + +h1 { + font-size: 2.82rem; /* 42.3px @15px */ + line-height: 3.2rem; /* 48px @15px */ +} + +h2 { + font-size: 1.99rem; /* 29.9px @15px */ + line-height: 2.4rem; /* 36px @15px */ +} + +h3 { + font-size: 1.41rem; /* 21.2px @15px */ + line-height: 2rem; /* 30px @15px */ +} + +h4 { + font-size: 1rem; /* 15px @15px */ + line-height: 1.6rem; /* 24px @15px */ +} + +h5, h6 { + font-size: 0.8rem; + line-height: 1.2rem; + text-transform: uppercase; +} + +h6 { + margin-left: 1.6rem; +} + +p, +ol, +ul, +blockquote { + font-size: 1rem; + line-height: 1.6rem; + margin-bottom: 1.6rem; +} + +ul ul, +ul ol, +ol ul, +ol ol { + margin-left: 1.6rem; + margin-top: 1.6rem; +} + +#generated-toc ul ul, /* #generated-toc: added by Marked for its table of contents */ +#generated-toc ul ol, +#generated-toc ol ul, +#generated-toc ol ol { + margin-top: 0; + margin-bottom: 0; + padding-top: 0; + padding-bottom: 0; +} + +blockquote { + margin: 0 0 1.6rem 2.4rem; + padding-left: 0.8rem; /* Voire */ + border-left: 4px solid rgba(0,0,0,0.08); + font-style: normal; +} + +blockquote ul { + margin-left: 
0.8rem; /* Pour ne pas que les hanging bullets mordent sur le blockquote */ +} + +ol li blockquote, /* So that blockquote work in lists */ +ul li blockquote { + margin-left: 0; +} + +a:link { + text-decoration: none; + color: #165bd4; + border-bottom: 1px solid #ccc; +} + +a:visited { + color: #7697cf; + border-bottom: 1px solid #ccc; +} + +a:hover { + border-color: #165bd4; +} + +a:active { + background-color: #e6e6e6; +} + +/* ------------------------------------------------- */ +/* Tables */ +/* ------------------------------------------------- */ + +table { + font-size: 0.85rem; + margin: 0 0 1.6rem 0; + border-collapse: collapse; + border: 1px solid #ccc; +} + +th, +td { + padding: 0.5rem 0.75rem; + max-width: 20rem; /* Avoid dropping lines for nothing without having ridiculously wide tables */ +} + +th { + border-bottom: 2px solid #222; +} + +tr { + border-bottom: 1px solid #ccc; +} + +tbody tr:nth-child(odd) { + background-color: #f9f9f9; +} + +table code { + font-size: 85%; +} + +/* ------------------------------------------------- */ +/* Misc */ +/* ------------------------------------------------- */ + +img { + max-width: 100% +} + +caption, +figcaption { + font-size: 0.85rem; + line-height: 1.6rem; + margin: 0 1.6rem; + text-align: left; +} + +figcaption { + margin-bottom: 1.6rem; +} + +h1, /* White-space mentions in order to force wrapping */ +h2, +a:link { + white-space: pre; /* CSS 2.0 */ + white-space: pre-wrap; /* CSS 2.1 */ + white-space: pre-line; /* CSS 3.0 */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: -moz-pre-wrap; /* Mozilla */ + white-space: -hp-pre-wrap; /* HP Printers */ + word-wrap: break-word; /* IE 5+ */ +} + +code { + font-family: "Menlo", "Courier New", "Courier", monospace; + font-size: 85%; + color: #666; + background-color: rgba(0,0,0,0.08); + padding: 2px 4px; + border-radius: 2px; +} + +pre { + background-color: rgba(0,0,0,0.08); + border-radius: 8px; + padding: 0.4rem; + 
margin-bottom: 1.6rem; +} + +pre code { /* Counter the code mentions */ + background-color: transparent; + padding: 0; +} + +sup, +sub, +a.footnote { /* Keep line-height from being affected by sub, cf https://gist.github.com/unruthless/413930 */ + font-size: 75%; + height: 0; + line-height: 1; + position: relative; +} + +sup, +a.footnote { + vertical-align:super; +} + +sub { + vertical-align: sub; +} + +dt { + font-weight: 600; +} + +dd { + font-size: 1rem; + line-height: 1.6rem; + margin-bottom: 1.6rem; +} + +hr { + clear: none; + height: 0.2rem; + border: none; + margin: 0 auto 1.4rem auto; /* 2.4rem auto 2.2rem auto; */ + width: 100%; + color: #ccc; + background-color: #ccc; +} + +::selection { + background-color: #f8dc77; +} + +::-moz-selection { + background-color: #f8dc77; +} + +a:focus { + outline: 2px solid; + outline-color: #165bd4; +} + +/* ------------------------------------------------- */ +/* Animations */ +/* ------------------------------------------------- */ + +a:hover { + -moz-transition: all 0.2s ease-in-out; + -webkit-transition: all 0.2s ease-in-out; +} + +h1, +h2, +h3, +h4, +h5, +h6, +p, +blockquote { + -moz-transition: all 0.2s ease; + -webkit-transition: all 0.2s ease; +} + + + +/* ================================================ */ +/* 3. Media Queries */ +/* ================================================ */ + +/* Base styles are for smartphones; elements are then tweaked as the viewport grows. 
*/ + +/* ------------------------------------------------- */ +/* iPad and desktop */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 641px) { + + html { + font-size: 15px; + } + + body { + margin: 2.4rem 2.4rem 3.2rem 2.4rem; + } + + h1 { + font-size: 3.57rem; /* 53.2px @15px */ + line-height: 4rem; /* 60px @15px */ + } + + h2 { + font-size: 2.24rem; /* 33.6px @15px */ + line-height: 2.8rem; /* 42px @15px */ + } + +} + +/* ------------------------------------------------- */ +/* Widescreens */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 1441px) { + + html { + font-size: 22px; + } + +} + + + +/* ================================================ */ +/* 4. Print Styles */ +/* ================================================ */ + +/* Inconsistent and buggy across browsers */ + +@media print { + + * { + background: transparent !important; + color: #000 !important; /* Black text prints faster and browsers are inconsistent in color reproduction anyway: h5bp.com/s */ + } + + @page { + margin: 1cm; /* Added to any #wrapper margin*/ + } + + html { + font-size: 15px; + } + + body { + margin: 1rem !important; /* Security margins for browser without @page support */ + } + + #wrapper { + max-width: none; + } + + h1, + h2, + h3, + h4, + h5, + h6, + p { + orphans: 3; + widows: 3; + page-break-after: avoid; + } + + ul, + ol { + list-style-position: inside !important; + padding-right: 0 !important; + margin-left: 0 !important; + } + + ul ul, + ul ol, + ol ul, + ol ol, + ul p:not(:first-child), + ol p:not(:first-child) { + margin-left: 2rem !important; + } + + a:link, + a:visited { + text-decoration: underline !important; + font-weight: normal !important; + } + + a[href]:after { + content: " (" attr(href) ")"; + } + + a[href^="javascript:"]:after, + a[href^="#"]:after { + content: ""; /* Do not show javascript and internal links */ + } + + a[href^="#"] { + text-decoration: none 
!important; + } + + th { + background-color: rgba(0,0,0,0.2) !important; + border-bottom: none !important; + } + + tr { + page-break-inside: avoid; + } + + tbody tr:nth-child(even) { + background-color: rgba(0,0,0,0.1) !important; + } + + pre { + border: 1px solid rgba(0,0,0,0.2); + page-break-inside: avoid; + } + + img { + max-width: 100% !important; + page-break-inside: avoid; + } + + /* #generated-toc: added by Marked for its table of contents */ + + #wrapper #generated-toc ul, /* Table of contents printing in Marked */ + #wrapper #generated-toc ol { + list-style-type: decimal; + } + + #wrapper #generated-toc ul li, + #wrapper #generated-toc ol li { + margin: 1rem 0; + } + +} + + + +/* ================================================ */ +/* 5. Godspeed Overrides */ +/* ================================================ */ + +/* ------------------------------------------------- */ +/* General */ +/* ------------------------------------------------- */ + +body { + font-family: 'Source Sans Pro', Avenir, sans-serif; + background-color: #3c3d46; + color: #7d7d7a; + margin-bottom: 2.4rem; /* Visual tweak */ +} + +/* ------------------------------------------------- */ +/* Typography */ +/* ------------------------------------------------- */ + +h1 { + font-family: 'Brush Up Too', 'Source Sans Pro', Avenir, sans-serif; + color: #e6ceaa; +} + +h2, +h3 { + color: #b98552; + text-transform: uppercase; +} + +h4, +h5, +h6 { + text-transform: uppercase; +} + +blockquote { + border-color: rgba(0,0,0,0.1); /* Pour correspondre à l'opacité des bordures ajoutées au #wrapper */ +} + +a:link { + color: #6190d2; + text-decoration: none; + border-bottom: 2px solid rgba(0,0,0,0.2); +} + +a:hover { + color: #6190d2; + text-decoration: none; + border-bottom: 1px solid #6190d2; +} + +/* ------------------------------------------------- */ +/* Tables */ +/* ------------------------------------------------- */ + +table { + border: 1px solid #7d7d7a; + border-radius: 8px; +} + +th { + 
color: #b98552; + border-bottom: 1px solid #7d7d7a; +} + +tr { + border-bottom: 1px solid #7d7d7a; +} + +tbody tr:nth-child(odd) { + background-color: rgba(60,75,94,0.5); +} + +/* ------------------------------------------------- */ +/* Misc */ +/* ------------------------------------------------- */ + +code { + font-size: 75%; /* Matching better Source Sans */ + color: #3c3d46; + background-color: #7d7d7a; +} + +pre { + background-color: rgba(60,75,94,0.5); +} + +pre code { + color: #7d7d7a; +} + +hr { + color: rgba(0,0,0,0.2); + background-color: rgba(0,0,0,0.2); +} + +::selection { + background-color: rgba(0,0,0,0.2); +} + +::-moz-selection { + background-color: rgba(0,0,0,0.2); +} + +a:focus { + outline: 2px dotted; + outline-color: #7d7d7a; +} + +/* ------------------------------------------------- */ +/* iPad and desktop */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 641px) { + + html { + font-size: 16px; + } + + #wrapper { + padding: 0 2.4rem; + border: 1px solid rgba(0,0,0,0.2) !important; /* !important otherwise doesn't show up in Marked */ + box-shadow: 0 0 0 6px rgba(0,0,0,0.1); + padding: 2.4rem !important; + border-radius: 4px; + } + +} + +/* ------------------------------------------------- */ +/* Widescreens */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 1441px) { + + html { + font-size: 22px; + } + +} diff --git a/doc/themes/markdown-alt.css b/doc/themes/markdown-alt.css new file mode 100644 index 00000000..480eea3f --- /dev/null +++ b/doc/themes/markdown-alt.css @@ -0,0 +1,75 @@ +body { + line-height: 1.4em; + color: black; + padding:1em; + margin:auto; + max-width:42em; +} + +li { + color: black; +} + +h1, +h2, +h3, +h4, +h5, +h6 { + border: 0 none !important; +} + +h1 { + margin-top: 0.5em; + margin-bottom: 0.5em; + border-bottom: 2px solid #000080 !important; +} + +h2 { + margin-top: 1em; + margin-bottom: 0.5em; + border-bottom: 2px solid 
#000080 !important; +} + +pre { + background-color: #f8f8f8; + border: 1px solid #2f6fab; + border-radius: 3px; + overflow: auto; + padding: 5px; +} + +pre code { + background-color: inherit; + border: none; + padding: 0; +} + +code { + background-color: #ffffe0; + border: 1px solid orange; + border-radius: 3px; + padding: 0 0.2em; +} + +a { + text-decoration: underline; +} + +ul, ol { + padding-left: 30px; +} + +li { + margin: 0.2em 0 0 0em; padding: 0px; +} + +em { + color: #b05000; +} + +table.text th, table.text td { + vertical-align: top; + border-top: 1px solid #ccc; + padding:5px; +} diff --git a/doc/themes/markdown.css b/doc/themes/markdown.css new file mode 100644 index 00000000..7dd96522 --- /dev/null +++ b/doc/themes/markdown.css @@ -0,0 +1,102 @@ +html { font-size: 100%; overflow-y: scroll; -webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%; } + +body{ +color:#444; +font-family:Georgia, Palatino, 'Palatino Linotype', Times, 'Times New Roman', serif; +font-size:12px; +line-height:1.5em; +padding:1em; +margin:auto; +max-width:42em; +background:#fefefe; +} + +a{ color: #0645ad; text-decoration:none;} +a:visited{ color: #0b0080; } +a:hover{ color: #06e; } +a:active{ color:#faa700; } +a:focus{ outline: thin dotted; } +a:hover, a:active{ outline: 0; } + +::-moz-selection{background:rgba(255,255,0,0.3);color:#000} +::selection{background:rgba(255,255,0,0.3);color:#000} + +a::-moz-selection{background:rgba(255,255,0,0.3);color:#0645ad} +a::selection{background:rgba(255,255,0,0.3);color:#0645ad} + +p{ +margin:1em 0; +} + +img{ +max-width:100%; +} + +h1,h2,h3,h4,h5,h6{ +font-weight:normal; +color:#111; +line-height:1em; +} +h4,h5,h6{ font-weight: bold; } +h1{ font-size:2.5em; } +h2{ font-size:2em; } +h3{ font-size:1.5em; } +h4{ font-size:1.2em; } +h5{ font-size:1em; } +h6{ font-size:0.9em; } + +blockquote{ +color:#666666; +margin:0; +padding-left: 3em; +border-left: 0.5em #EEE solid; +} +hr { display: block; height: 2px; border: 0; border-top: 1px solid 
#aaa;border-bottom: 1px solid #eee; margin: 1em 0; padding: 0; } +pre, code, kbd, samp { color: #000; font-family: monospace, monospace; _font-family: 'courier new', monospace; font-size: 0.98em; } +pre { white-space: pre; white-space: pre-wrap; word-wrap: break-word; } + +b, strong { font-weight: bold; } + +dfn { font-style: italic; } + +ins { background: #ff9; color: #000; text-decoration: none; } + +mark { background: #ff0; color: #000; font-style: italic; font-weight: bold; } + +sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } +sup { top: -0.5em; } +sub { bottom: -0.25em; } + +ul, ol { margin: 1em 0; padding: 0 0 0 2em; } +li p:last-child { margin:0 } +dd { margin: 0 0 0 2em; } + +img { border: 0; -ms-interpolation-mode: bicubic; vertical-align: middle; } + +table { border-collapse: collapse; border-spacing: 0; } +td { vertical-align: top; } + +@media only screen and (min-width: 480px) { +body{font-size:14px;} +} + +@media only screen and (min-width: 768px) { +body{font-size:16px;} +} + +@media print { + * { background: transparent !important; color: black !important; filter:none !important; -ms-filter: none !important; } + body{font-size:12pt; max-width:100%;} + a, a:visited { text-decoration: underline; } + hr { height: 1px; border:0; border-bottom:1px solid black; } + a[href]:after { content: " (" attr(href) ")"; } + abbr[title]:after { content: " (" attr(title) ")"; } + .ir a:after, a[href^="javascript:"]:after, a[href^="#"]:after { content: ""; } + pre, blockquote { border: 1px solid #999; padding-right: 1em; page-break-inside: avoid; } + tr, img { page-break-inside: avoid; } + img { max-width: 100% !important; } + @page :left { margin: 15mm 20mm 15mm 10mm; } + @page :right { margin: 15mm 10mm 15mm 20mm; } + p, h2, h3 { orphans: 3; widows: 3; } + h2, h3 { page-break-after: avoid; } +} diff --git a/doc/themes/markdown5.css b/doc/themes/markdown5.css new file mode 100644 index 00000000..9c5d03ce --- /dev/null +++ 
b/doc/themes/markdown5.css @@ -0,0 +1,139 @@ +body{ + margin: 0 auto; + background-color:white; + +/* --------- FONT FAMILY -------- + following are some optional font families. Usually a family + is safer to choose than a specific font, + which may not be on the users computer */ + font-family:Georgia, Palatino, serif; + +/* -------------- COLOR OPTIONS ------------ + following are additional color options for base font + you could uncomment another one to easily change the base color + or add one to a specific element style below */ + color: #333333; /* dark gray not black */ + + line-height: 1; + max-width: 800px; + padding: 30px; + font-size: 18px; +} + + +p { + line-height: 150%; + max-width: 960px; + font-weight: 400; + color: #333333 +} + + +h1, h2, h3, h4 { + font-weight: 400; +} + +h2, h3, h4, h5, p { + margin-bottom: 25px; + padding: 0; +} + +h1 { + margin-bottom: 10px; + font-size:300%; + padding: 0px; + font-variant:small-caps; +} + +h2 { + font-size:150% +} + +h3 { + font-size:120% +} +h4 { + font-size:100% + font-variant:small-caps; + +} +h5 { + font-size:80% + font-weight: 100; +} + +h6 { + font-size:80% + font-weight: 100; + color:red; + font-variant:small-caps; +} +a { + color: grey; + margin: 0; + padding: 0; + vertical-align: baseline; +} +a:hover { + text-decoration: blink; + color: green; +} +a:visited { + color: black; +} +ul, ol { + padding: 0; + margin: 0px 0px 0px 50px; +} +ul { + list-style-type: square; + list-style-position: inside; + +} + +li { + line-height:150% +} +li ul, li ul { + margin-left: 24px; +} + +pre { + padding: 0px 24px; + max-width: 800px; + white-space: pre-wrap; +} +code { + font-family: Consolas, Monaco, Andale Mono, monospace; + line-height: 1.5; + font-size: 13px; +} +aside { + display: block; + float: right; + width: 390px; +} +blockquote { + border-left:.5em solid #eee; + padding: 0 1em; + margin-left:0; + max-width: 476px; +} +blockquote cite { + line-height:20px; + color:#bfbfbf; +} +blockquote cite:before { + 
content: '\2014 \00A0'; +} + +blockquote p { + color: #666; + max-width: 460px; +} +hr { + text-align: left; + margin: 0 auto 0 0; + color: #999; +} + diff --git a/doc/themes/markdown6.css b/doc/themes/markdown6.css new file mode 100644 index 00000000..8c6d0631 --- /dev/null +++ b/doc/themes/markdown6.css @@ -0,0 +1,222 @@ +/* Extracted and interpreted from adcstyle.css and frameset_styles.css */ + +/* body */ +body { + margin: 20px auto; + width: 800px; + background-color: #fff; + color: #000; + font: 13px "Myriad Pro", "Lucida Grande", Lucida, Verdana, sans-serif; +} + +/* links */ +a:link { + color: #00f; + text-decoration: none; +} + +a:visited { + color: #00a; + text-decoration: none; +} + +a:hover { + color: #f60; + text-decoration: underline; +} + +a:active { + color: #f60; + text-decoration: underline; +} + + +/* html tags */ + +/* Work around IE/Win code size bug - courtesy Jesper, waffle.wootest.net */ + +* html code { + font-size: 101%; +} + +* html pre { + font-size: 101%; +} + +/* code */ + +pre, code { + font-size: 11px; font-family: monaco, courier, consolas, monospace; +} + +pre { + margin-top: 5px; + margin-bottom: 10px; + border: 1px solid #c7cfd5; + background: #f1f5f9; + margin: 20px 0; + padding: 8px; + text-align: left; +} + +hr { + color: #919699; + size: 1; + width: 100%; + noshade: "noshade" +} + +/* headers */ + + +h1, h2, h3, h4, h5, h6 { + font-family: "Myriad Pro", "Lucida Grande", Lucida, Verdana, sans-serif; + font-weight: bold; +} + +h1 { + margin-top: 1em; + margin-bottom: 25px; + color: #000; + font-weight: bold; + font-size: 30px; +} +h2 { + margin-top: 2.5em; + font-size: 24px; + color: #000; + padding-bottom: 2px; + border-bottom: 1px solid #919699; +} +h3 { + margin-top: 2em; + margin-bottom: .5em; + font-size: 17px; + color: #000; +} +h4 { + margin-top: 2em; + margin-bottom: .5em; + font-size: 15px; + color: #000; +} +h5 { + margin-top: 20px; + margin-bottom: .5em; + padding: 0; + font-size: 13px; + color: #000; +} + +h6 { + 
margin-top: 20px; + margin-bottom: .5em; + padding: 0; + font-size: 11px; + color: #000; +} + +p { + margin-top: 0px; + margin-bottom: 10px; +} + +/* lists */ + +ul { + list-style: square outside; + margin: 0 0 0 30px; + padding: 0 0 12px 6px; +} + +li { + margin-top: 7px; +} + +ol { + list-style-type: decimal; + list-style-position: outside; + margin: 0 0 0 30px; + padding: 0 0 12px 6px; +} + +ol ol { + list-style-type: lower-alpha; + list-style-position: outside; + margin: 7px 0 0 30px; + padding: 0 0 0 10px; + } + +ul ul { + margin-left: 40px; + padding: 0 0 0 6px; +} + +li>p { display: inline } +li>p+p { display: block } +li>a+p { display: block } + + +/* table */ + +table { + width: 100%; + border-top: 1px solid #919699; + border-left: 1px solid #919699; + border-spacing: 0; +} + +table th { + padding: 4px 8px 4px 8px; + background: #E2E2E2; + font-size: 12px; + border-bottom: 1px solid #919699; + border-right: 1px solid #919699; +} +table th p { + font-weight: bold; + margin-bottom: 0px; +} + +table td { + padding: 8px; + font-size: 12px; + vertical-align: top; + border-bottom: 1px solid #919699; + border-right: 1px solid #919699; +} +table td p { + margin-bottom: 0px; +} +table td p + p { + margin-top: 5px; +} +table td p + p + p { + margin-top: 5px; +} + +/* forms */ + +form { + margin: 0; +} + +button { + margin: 3px 0 10px 0; +} +input { + vertical-align: middle; + padding: 0; + margin: 0 0 5px 0; +} + +select { + vertical-align: middle; + padding: 0; + margin: 0 0 3px 0; +} + +textarea { + margin: 0 0 10px 0; + width: 100%; +} \ No newline at end of file diff --git a/doc/themes/markdown7.css b/doc/themes/markdown7.css new file mode 100644 index 00000000..85355624 --- /dev/null +++ b/doc/themes/markdown7.css @@ -0,0 +1,295 @@ +body { + font-family: Helvetica, arial, sans-serif; + font-size: 14px; + line-height: 1.6; + padding-top: 10px; + padding-bottom: 10px; + background-color: white; + padding: 30px; } + +body > *:first-child { + margin-top: 0 
!important; } +body > *:last-child { + margin-bottom: 0 !important; } + +a { + color: #4183C4; } +a.absent { + color: #cc0000; } +a.anchor { + display: block; + padding-left: 30px; + margin-left: -30px; + cursor: pointer; + position: absolute; + top: 0; + left: 0; + bottom: 0; } + +h1, h2, h3, h4, h5, h6 { + margin: 20px 0 10px; + padding: 0; + font-weight: bold; + -webkit-font-smoothing: antialiased; + cursor: text; + position: relative; } + +h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { + text-decoration: none; } + +h1 tt, h1 code { + font-size: inherit; } + +h2 tt, h2 code { + font-size: inherit; } + +h3 tt, h3 code { + font-size: inherit; } + +h4 tt, h4 code { + font-size: inherit; } + +h5 tt, h5 code { + font-size: inherit; } + +h6 tt, h6 code { + font-size: inherit; } + +h1 { + font-size: 28px; + color: black; } + +h2 { + font-size: 24px; + border-bottom: 1px solid #cccccc; + color: black; } + +h3 { + font-size: 18px; } + +h4 { + font-size: 16px; } + +h5 { + font-size: 14px; } + +h6 { + color: #777777; + font-size: 14px; } + +p, blockquote, ul, ol, dl, li, table, pre { + margin: 15px 0; } + +hr { + border: 0 none; + color: #cccccc; + height: 4px; + padding: 0; +} + +body > h2:first-child { + margin-top: 0; + padding-top: 0; } +body > h1:first-child { + margin-top: 0; + padding-top: 0; } +body > h1:first-child + h2 { + margin-top: 0; + padding-top: 0; } +body > h3:first-child, body > h4:first-child, body > h5:first-child, body > h6:first-child { + margin-top: 0; + padding-top: 0; } + +a:first-child h1, a:first-child h2, a:first-child h3, a:first-child h4, a:first-child h5, a:first-child h6 { + margin-top: 0; + padding-top: 0; } + +h1 p, h2 p, h3 p, h4 p, h5 p, h6 p { + margin-top: 0; } + +li p.first { + display: inline-block; } +li { + margin: 0; } +ul, ol { + padding-left: 30px; } + +ul :first-child, ol :first-child { + margin-top: 0; } + +dl { + padding: 0; } +dl dt { + font-size: 14px; + 
font-weight: bold; + font-style: italic; + padding: 0; + margin: 15px 0 5px; } +dl dt:first-child { + padding: 0; } +dl dt > :first-child { + margin-top: 0; } +dl dt > :last-child { + margin-bottom: 0; } +dl dd { + margin: 0 0 15px; + padding: 0 15px; } +dl dd > :first-child { + margin-top: 0; } +dl dd > :last-child { + margin-bottom: 0; } + +blockquote { + border-left: 4px solid #dddddd; + padding: 0 15px; + color: #777777; } +blockquote > :first-child { + margin-top: 0; } +blockquote > :last-child { + margin-bottom: 0; } + +table { + padding: 0;border-collapse: collapse; } +table tr { + border-top: 1px solid #cccccc; + background-color: white; + margin: 0; + padding: 0; } +table tr:nth-child(2n) { + background-color: #f8f8f8; } +table tr th { + font-weight: bold; + border: 1px solid #cccccc; + margin: 0; + padding: 6px 13px; } +table tr td { + border: 1px solid #cccccc; + margin: 0; + padding: 6px 13px; } +table tr th :first-child, table tr td :first-child { + margin-top: 0; } +table tr th :last-child, table tr td :last-child { + margin-bottom: 0; } + +img { + max-width: 100%; } + +span.frame { + display: block; + overflow: hidden; } +span.frame > span { + border: 1px solid #dddddd; + display: block; + float: left; + overflow: hidden; + margin: 13px 0 0; + padding: 7px; + width: auto; } +span.frame span img { + display: block; + float: left; } +span.frame span span { + clear: both; + color: #333333; + display: block; + padding: 5px 0 0; } +span.align-center { + display: block; + overflow: hidden; + clear: both; } +span.align-center > span { + display: block; + overflow: hidden; + margin: 13px auto 0; + text-align: center; } +span.align-center span img { + margin: 0 auto; + text-align: center; } +span.align-right { + display: block; + overflow: hidden; + clear: both; } +span.align-right > span { + display: block; + overflow: hidden; + margin: 13px 0 0; + text-align: right; } +span.align-right span img { + margin: 0; + text-align: right; } +span.float-left { + 
display: block; + margin-right: 13px; + overflow: hidden; + float: left; } +span.float-left span { + margin: 13px 0 0; } +span.float-right { + display: block; + margin-left: 13px; + overflow: hidden; + float: right; } +span.float-right > span { + display: block; + overflow: hidden; + margin: 13px auto 0; + text-align: right; } + +code, tt { + margin: 0 2px; + padding: 0 5px; + white-space: nowrap; + border: 1px solid #eaeaea; + background-color: #f8f8f8; + border-radius: 3px; } + +pre code { + margin: 0; + padding: 0; + white-space: pre; + border: none; + background: transparent; } + +.highlight pre { + background-color: #f8f8f8; + border: 1px solid #cccccc; + font-size: 13px; + line-height: 19px; + overflow: auto; + padding: 6px 10px; + border-radius: 3px; } + +pre { + background-color: #f8f8f8; + border: 1px solid #cccccc; + font-size: 13px; + line-height: 19px; + overflow: auto; + padding: 6px 10px; + border-radius: 3px; } +pre code, pre tt { + background-color: transparent; + border: none; } + +sup { + font-size: 0.83em; + vertical-align: super; + line-height: 0; +} +* { + -webkit-print-color-adjust: exact; +} +@media screen and (min-width: 914px) { + body { + width: 854px; + margin:0 auto; + } +} +@media print { + table, pre { + page-break-inside: avoid; + } + pre { + word-wrap: break-word; + } +} diff --git a/doc/themes/markdown8.css b/doc/themes/markdown8.css new file mode 100644 index 00000000..90c1820c --- /dev/null +++ b/doc/themes/markdown8.css @@ -0,0 +1,136 @@ +h1, h2, h3, h4, h5, h6, p, blockquote { + margin: 0; + padding: 0; +} +body { + font-family: "Helvetica Neue", Helvetica, "Hiragino Sans GB", Arial, sans-serif; + font-size: 13px; + line-height: 18px; + color: #737373; + background-color: white; + margin: 10px 13px 10px 13px; +} +table { + margin: 10px 0 15px 0; + border-collapse: collapse; +} +td,th { + border: 1px solid #ddd; + padding: 3px 10px; +} +th { + padding: 5px 10px; +} + +a { + color: #0069d6; +} +a:hover { + color: #0050a3; + 
text-decoration: none; +} +a img { + border: none; +} +p { + margin-bottom: 9px; +} + +h1, h2, h3, h4, h5, h6 { + color: #404040; + line-height: 36px; +} +h1 { + margin-bottom: 18px; + font-size: 30px; +} +h2 { + font-size: 24px; +} +h3 { + font-size: 18px; +} +h4 { + font-size: 16px; +} +h5 { + font-size: 14px; +} +h6 { + font-size: 13px; +} +hr { + margin: 0 0 19px; + border: 0; + border-bottom: 1px solid #ccc; +} +blockquote { + padding: 13px 13px 21px 15px; + margin-bottom: 18px; + font-family:georgia,serif; + font-style: italic; +} +blockquote:before { + content:"\201C"; + font-size:40px; + margin-left:-10px; + font-family:georgia,serif; + color:#eee; +} +blockquote p { + font-size: 14px; + font-weight: 300; + line-height: 18px; + margin-bottom: 0; + font-style: italic; +} +code, pre { + font-family: Monaco, Andale Mono, Courier New, monospace; +} +code { + background-color: #fee9cc; + color: rgba(0, 0, 0, 0.75); + padding: 1px 3px; + font-size: 12px; + -webkit-border-radius: 3px; + -moz-border-radius: 3px; + border-radius: 3px; +} +pre { + display: block; + padding: 14px; + margin: 0 0 18px; + line-height: 16px; + font-size: 11px; + border: 1px solid #d9d9d9; + white-space: pre-wrap; + word-wrap: break-word; +} +pre code { + background-color: #fff; + color:#737373; + font-size: 11px; + padding: 0; +} +sup { + font-size: 0.83em; + vertical-align: super; + line-height: 0; +} +* { + -webkit-print-color-adjust: exact; +} +@media screen and (min-width: 914px) { + body { + width: 854px; + margin:10px auto; + } +} +@media print { + body,code,pre code,h1,h2,h3,h4,h5,h6 { + color: black; + } + table, pre { + page-break-inside: avoid; + } +} diff --git a/doc/themes/markdown9.css b/doc/themes/markdown9.css new file mode 100644 index 00000000..42d55eb7 --- /dev/null +++ b/doc/themes/markdown9.css @@ -0,0 +1,138 @@ +h1, h2, h3, h4, h5, h6, p, blockquote { + margin: 0; + padding: 0; +} +body { + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + font-size: 
13px; + line-height: 18px; + color: #fff; + background-color: #110F14; + margin: 10px 13px 10px 13px; +} +table { + margin: 10px 0 15px 0; + border-collapse: collapse; +} +td,th { + border: 1px solid #ddd; + padding: 3px 10px; +} +th { + padding: 5px 10px; +} +a { + color: #59acf3; +} +a:hover { + color: #a7d8ff; + text-decoration: none; +} +a img { + border: none; +} +p { + margin-bottom: 9px; +} +h1, h2, h3, h4, h5, h6 { + color: #fff; + line-height: 36px; +} +h1 { + margin-bottom: 18px; + font-size: 30px; +} +h2 { + font-size: 24px; +} +h3 { + font-size: 18px; +} +h4 { + font-size: 16px; +} +h5 { + font-size: 14px; +} +h6 { + font-size: 13px; +} +hr { + margin: 0 0 19px; + border: 0; + border-bottom: 1px solid #ccc; +} +blockquote { + padding: 13px 13px 21px 15px; + margin-bottom: 18px; + font-family:georgia,serif; + font-style: italic; +} +blockquote:before { + content:"\201C"; + font-size:40px; + margin-left:-10px; + font-family:georgia,serif; + color:#eee; +} +blockquote p { + font-size: 14px; + font-weight: 300; + line-height: 18px; + margin-bottom: 0; + font-style: italic; +} + +code, pre { + font-family: Menlo, Monaco, Andale Mono, Courier New, monospace; +} + +code { + padding: 1px 3px; + font-size: 12px; + -webkit-border-radius: 3px; + -moz-border-radius: 3px; + border-radius: 3px; + background: #334; +} + +pre { + display: block; + padding: 14px; + margin: 0 0 18px; + line-height: 16px; + font-size: 11px; + border: 1px solid #334; + white-space: pre; + white-space: pre-wrap; + word-wrap: break-word; + background-color: #282a36; + border-radius: 6px; +} +pre code { + font-size: 11px; + padding: 0; + background: transparent; +} +sup { + font-size: 0.83em; + vertical-align: super; + line-height: 0; +} +* { + -webkit-print-color-adjust: exact; +} +@media screen and (min-width: 914px) { + body { + width: 854px; + margin:10px auto; + } +} +@media print { + body,code,pre code,h1,h2,h3,h4,h5,h6 { + color: black; + } + table, pre { + page-break-inside: avoid; + 
} +} diff --git a/doc/themes/markedapp-byword.css b/doc/themes/markedapp-byword.css new file mode 100644 index 00000000..643aa9ab --- /dev/null +++ b/doc/themes/markedapp-byword.css @@ -0,0 +1,314 @@ +/* + * This document has been created with Marked.app . + * Copyright 2011 Brett Terpstra + * --------------------------------------------------------------------------- + * Please leave this notice in place, along with any additional credits below. + * + * Byword.css theme is based on Byword.app + * Authors: @brunodecarvalho, @jpedroso, @rcabaco + * Copyright 2011 Metaclassy, Lda. + */ + +html { + font-size: 62.5%; /* base font-size: 10px */ +} + +body { + background-color: #f2f2f2; + color: #3c3c3c; + + /* Change font size below */ + font-size: 1.7em; + line-height: 1.4em; + + /* Change font below */ + + /* Sans-serif fonts */ + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + -webkit-font-smoothing: antialiased; + + /* Serif fonts */ + /* + font-family: "Cochin", "Baskerville", "Georgia", serif; + -webkit-font-smoothing: subpixel-antialiased; + */ + + /* Monospaced fonts */ + /* + font-family: "Courier New", Menlo, Monaco, mono; + -webkit-font-smoothing: antialiased; + */ + + margin: auto; + max-width: 42em; +} +a { + color: #308bd8; + text-decoration:none; +} +a:hover { + text-decoration: underline; +} +/* headings */ +h1, h2 { + line-height:1.2em; + margin-top:32px; + margin-bottom:12px; +} +h1:first-child { + margin-top:0; +} +h3, h4, h5, h6 { + margin-top:12px; + margin-bottom:0; +} +h5, h6 { + font-size:0.9em; + line-height:1.0em; +} +/* end of headings */ +p { + margin:0 0 24px 0; +} +p:last-child { + margin:0; +} +#wrapper hr { + width: 100%; + margin: 3em auto; + border: 0; + color: #eee; + background-color: #ccc; + height: 1px; + -webkit-box-shadow:0px 1px 0px rgba(255, 255, 255, 0.75); +} +/* lists */ +ol { + list-style: outside decimal; +} +ul { + list-style: outside disc; +} +ol, ul { + padding-left:0; + margin-bottom:24px; +} +ol li { + 
margin-left:28px; +} +ul li { + margin-bottom:8px; + margin-left:16px; +} +ol:last-child, ul:last-child { + margin:0; +} +li > ol, li > ul { + padding-left:12px; +} +dl { + margin-bottom:24px; +} +dl dt { + font-weight:bold; + margin-bottom:8px; +} +dl dd { + margin-left:0; + margin-bottom:12px; +} +dl dd:last-child, dl:last-child { + margin-bottom:0; +} +/* end of lists */ +pre { + white-space: pre-wrap; + width: 96%; + margin-bottom: 24px; + overflow: hidden; + padding: 3px 10px; + -webkit-border-radius: 3px; + background-color: #eee; + border: 1px solid #ddd; +} +code { + white-space: nowrap; + font-size: 1.1em; + padding: 2px; + -webkit-border-radius: 3px; + background-color: #eee; + border: 1px solid #ddd; +} +pre code { + white-space: pre-wrap; + border: none; + padding: 0; + background-color: transparent; + -webkit-border-radius: 0; +} +blockquote { + margin-left: 0; + margin-right: 0; + width: 96%; + padding: 0 10px; + border-left: 3px solid #ddd; + color: #777; +} +table { + margin-left: auto; + margin-right: auto; + margin-bottom: 24px; + border-bottom: 1px solid #ddd; + border-right: 1px solid #ddd; + border-spacing: 0; +} +table th { + padding: 3px 10px; + background-color: #eee; + border-top: 1px solid #ddd; + border-left: 1px solid #ddd; +} +table tr { +} +table td { + padding: 3px 10px; + border-top: 1px solid #ddd; + border-left: 1px solid #ddd; +} +caption { + font-size: 1.2em; + font-weight: bold; + margin-bottom: 5px; +} +figure { + display: block; + text-align: center; +} +#wrapper img { + border: none; + display: block; + margin: 1em auto; + max-width: 100%; +} +figcaption { + font-size: 0.8em; + font-style: italic; +} +mark { + background: #fefec0; + padding:1px 3px; +} + + +/* classes */ + +.markdowncitation { +} +.footnote { + font-size: 0.8em; + vertical-align: super; +} +.footnotes ol { + font-weight: bold; +} +.footnotes ol li p { + font-weight: normal; +} + +/* custom formatting classes */ + +.shadow { + -webkit-box-shadow: 0 2px 4px 
#999; +} + +.source { + text-align: center; + font-size: 0.8em; + color: #777; + margin: -40px; +} + +@media screen { + .inverted, .inverted #wrapper { + background-color: #1a1a1a !important; + color: #bebebe !important; + + /* SANS-SERIF */ + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif !important; + -webkit-font-smoothing: antialiased !important; + + /* SERIF */ + /* + font-family: "Cochin", "Baskerville", "Georgia", serif !important; + -webkit-font-smoothing: subpixel-antialiased !important; + */ + /* MONO */ + /* + font-family: "Courier", mono !important; + -webkit-font-smoothing: antialiased !important; + */ + } + .inverted a { + color: #308bd8 !important; + } + .inverted hr { + color: #666 !important; + border: 0; + background-color: #666 !important; + -webkit-box-shadow: none !important; + } + .inverted pre { + background-color: #222 !important; + border-color: #3c3c3c !important; + } + .inverted code { + background-color: #222 !important; + border-color: #3c3c3c !important; + } + .inverted blockquote { + border-color: #333 !important; + color: #999 !important; + } + .inverted table { + border-color: #3c3c3c !important; + } + .inverted table th { + background-color: #222 !important; + border-color: #3c3c3c !important; + } + .inverted table td { + border-color: #3c3c3c !important; + } + .inverted mark { + background: #bc990b !important; + color:#000 !important; + } + .inverted .shadow { -webkit-box-shadow: 0 2px 4px #000 !important; } + #wrapper { + background: transparent; + margin: 40px; + } +} + +/* Printing support */ +@media print { + body { + overflow: auto; + } + img, pre, blockquote, table, figure { + page-break-inside: avoid; + } + pre, code { + border: none !important; + } + #wrapper { + background: #fff; + position: relative; + text-indent: 0px; + padding: 10px; + font-size:85%; + } + .footnotes { + page-break-before: always; + } +} diff --git a/doc/themes/new-modern.css b/doc/themes/new-modern.css new file mode 100644 index 
00000000..071d0d47 --- /dev/null +++ b/doc/themes/new-modern.css @@ -0,0 +1,482 @@ +/* Title: New Modern */ +/* Author: Jocelyn Richard http://jocelynrichard.com/ */ +/* Description: Baseline style, meant to be used on its own or to serve as development basis. */ + +/* ================================================ */ +/* 1. Reset */ +/* 2. Skeleton */ +/* 3. Media Queries */ +/* 4. Print Styles */ +/* ================================================ */ + + + +/* ================================================ */ +/* 1. Reset */ +/* ================================================ */ + +html, body, div, span, applet, object, iframe, h1, h2, h3, h4, h5, h6, p, blockquote, pre, a, abbr, acronym, address, big, cite, code, del, dfn, em, img, ins, kbd, q, s, samp, small, strike, strong, sub, sup, tt, var, +b, u, i, center, dl, dt, dd, ol, ul, li, fieldset, form, label, legend, table, caption, tbody, tfoot, thead, tr, th, td, article, aside, canvas, details, embed, figure, figcaption, footer, header, hgroup, menu, nav, output, ruby, section, summary, time, mark, audio, video {margin: 0; padding: 0; border: 0;} /* Edited from http://www.cssreset.com/scripts/eric-meyer-reset-css/ */ + +article, aside, details, figcaption, figure, footer, header, hgroup, nav, section, summary {display: block;} /* Semantic tags definition for IE 6/7/8/9 and Firefox 3 */ + +html {-webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%;} /* Prevents iOS text size adjust after orientation change, without disabling user zoom */ + + + +/* ================================================ */ +/* 2. 
Skeleton */ +/* ================================================ */ + +/* ------------------------------------------------- */ +/* General */ +/* ------------------------------------------------- */ + +html { + font-size: 14px; +} + +body { + font-family: 'Open Sans', sans-serif; + /*margin: 3.42rem 1.71rem !important;*/ /* Get margins even if the Markdown rendering app doesn't include any */ + background-color: white; + color: #222; +} + +#wrapper { /* #wrapper: ID added by Marked */ + max-width: 42rem; + margin: 0 auto; + margin-left: auto !important; /* Countering toc.css added by Marked */ + padding: 1.71rem 0 !important; /* Countering toc.css added by Marked */ +} + +/* ------------------------------------------------- */ +/* Typography */ +/* ------------------------------------------------- */ + +h1, +h2, +h3, +h4, +h5, +h6 { + margin-bottom: 1.6rem; +} + +h1, +h2 { + margin-top: 3.2rem; +} + +h1 { + font-size: 2.82rem; /* 42.3px @15px */ + line-height: 3.2rem; /* 48px @15px */ +} + +h2 { + font-size: 1.99rem; /* 29.9px @15px */ + line-height: 2.4rem; /* 36px @15px */ +} + +h3 { + font-size: 1.41rem; /* 21.2px @15px */ + line-height: 2rem; /* 30px @15px */ +} + +h4 { + font-size: 1rem; /* 15px @15px */ + line-height: 1.6rem; /* 24px @15px */ +} + +h5, h6 { + font-size: 0.8rem; + line-height: 1.2rem; + text-transform: uppercase; +} + +h6 { + margin-left: 1.6rem; +} + +p, +ol, +ul, +blockquote { + font-size: 1rem; + line-height: 1.6rem; + margin-bottom: 1.6rem; +} + +ul ul, +ul ol, +ol ul, +ol ol { + margin-left: 1.6rem; + margin-top: 1.6rem; +} + +#generated-toc ul ul, /* #generated-toc: added by Marked for its table of contents */ +#generated-toc ul ol, +#generated-toc ol ul, +#generated-toc ol ol { + margin-top: 0; + margin-bottom: 0; + padding-top: 0; + padding-bottom: 0; +} + +blockquote { + margin: 0 0 1.6rem 2.4rem; + padding-left: 0.8rem; /* Voire */ + border-left: 4px solid rgba(0,0,0,0.08); + font-style: normal; +} + +blockquote ul { + margin-left: 
0.8rem; /* Pour ne pas que les hanging bullets mordent sur le blockquote */ +} + +ol li blockquote, /* So that blockquote work in lists */ +ul li blockquote { + margin-left: 0; +} + +a:link { + text-decoration: none; + color: #165bd4; + border-bottom: 1px solid #ccc; +} + +a:visited { + color: #7697cf; + border-bottom: 1px solid #ccc; +} + +a:hover { + border-color: #165bd4; +} + +a:active { + background-color: #e6e6e6; +} + +/* ------------------------------------------------- */ +/* Tables */ +/* ------------------------------------------------- */ + +table { + font-size: 0.85rem; + margin: 0 0 1.6rem 0; + border-collapse: collapse; + border: 1px solid #ccc; +} + +th, +td { + padding: 0.5rem 0.75rem; + max-width: 20rem; /* Avoid dropping lines for nothing without having ridiculously wide tables */ +} + +th { + border-bottom: 2px solid #222; +} + +tr { + border-bottom: 1px solid #ccc; +} + +tbody tr:nth-child(odd) { + background-color: #f9f9f9; +} + +table code { + font-size: 85%; +} + +/* ------------------------------------------------- */ +/* Misc */ +/* ------------------------------------------------- */ + +img { + max-width: 100% +} + +caption, +figcaption { + font-size: 0.85rem; + line-height: 1.6rem; + margin: 0 1.6rem; + text-align: left; +} + +figcaption { + margin-bottom: 1.6rem; +} + +h1, /* White-space mentions in order to force wrapping */ +h2, +a:link, +pre { + white-space: pre; /* CSS 2.0 */ + white-space: pre-wrap; /* CSS 2.1 */ + white-space: pre-line; /* CSS 3.0 */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: -moz-pre-wrap; /* Mozilla */ + white-space: -hp-pre-wrap; /* HP Printers */ + word-wrap: break-word; /* IE 5+ */ +} + +code { + font-family: "Menlo", "Courier New", "Courier", monospace; + font-size: 85%; + color: #666; + background-color: rgba(0,0,0,0.08); + padding: 2px 4px; + border-radius: 2px; +} + +pre { + background-color: rgba(0,0,0,0.08); + border-radius: 8px; + padding: 0.4rem; 
+ margin-bottom: 1.6rem; +} + +pre code { /* Counter the code mentions */ + background-color: transparent; + padding: 0; +} + +sup, +sub, +a.footnote { /* Keep line-height from being affected by sub, cf https://gist.github.com/unruthless/413930 */ + font-size: 75%; + height: 0; + line-height: 1; + position: relative; +} + +sup, +a.footnote { + vertical-align:super; +} + +sub { + vertical-align: sub; +} + +dt { + font-weight: 600; +} + +dd { + font-size: 1rem; + line-height: 1.6rem; + margin-bottom: 1.6rem; +} + +hr { + clear: none; + height: 0.2rem; + border: none; + margin: 0 auto 1.4rem auto; /* 2.4rem auto 2.2rem auto; */ + width: 100%; + color: #ccc; + background-color: #ccc; +} + +::selection { + background-color: #f8dc77; +} + +::-moz-selection { + background-color: #f8dc77; +} + +a:focus { + outline: 2px solid; + outline-color: #165bd4; +} + +/* ------------------------------------------------- */ +/* Animations */ +/* ------------------------------------------------- */ + +a:hover { + -moz-transition: all 0.2s ease-in-out; + -webkit-transition: all 0.2s ease-in-out; +} + +h1, +h2, +h3, +h4, +h5, +h6, +p, +blockquote { + -moz-transition: all 0.2s ease; + -webkit-transition: all 0.2s ease; +} + + + +/* ================================================ */ +/* 3. Media Queries */ +/* ================================================ */ + +/* Base styles are for smartphones; elements are then tweaked as the viewport grows. 
*/ + +/* ------------------------------------------------- */ +/* iPad and desktop */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 641px) { + + html { + font-size: 15px; + } + + body { + /*margin: 4.8em 2.4rem 3.2rem 2.4rem !important;*/ + } + + h1 { + font-size: 3.57rem; /* 53.2px @15px */ + line-height: 4rem; /* 60px @15px */ + } + + h2 { + font-size: 2.24rem; /* 33.6px @15px */ + line-height: 2.8rem; /* 42px @15px */ + } + +} + +/* ------------------------------------------------- */ +/* Widescreens */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 1441px) { + + html { + font-size: 22px; + } + +} + + + +/* ================================================ */ +/* 4. Print Styles */ +/* ================================================ */ + +/* Inconsistent and buggy across browsers */ + +@media print { + + * { + background: transparent !important; + color: #000 !important; /* Black text prints faster and browsers are inconsistent in color reproduction anyway: h5bp.com/s */ + } + + @page { + margin: 1cm; /* Added to any #wrapper margin*/ + } + + html { + font-size: 15px; + } + + body { + margin: 1rem !important; /* Security margins for browser without @page support */ + } + + #wrapper { + max-width: none; + } + + h1, + h2, + h3, + h4, + h5, + h6, + p { + orphans: 3; + widows: 3; + page-break-after: avoid; + } + + ul, + ol { + list-style-position: inside !important; + padding-right: 0 !important; + margin-left: 0 !important; + } + + ul ul, + ul ol, + ol ul, + ol ol, + ul p:not(:first-child), + ol p:not(:first-child) { + margin-left: 2rem !important; + } + + a:link, + a:visited { + text-decoration: underline !important; + font-weight: normal !important; + } + + a[href]:after { + content: " (" attr(href) ")"; + } + + a[href^="javascript:"]:after, + a[href^="#"]:after { + content: ""; /* Do not show javascript and internal links */ + } + + a[href^="#"] { + text-decoration: 
none !important; + } + + th { + background-color: rgba(0,0,0,0.2) !important; + border-bottom: none !important; + } + + tr { + page-break-inside: avoid; + } + + tbody tr:nth-child(even) { + background-color: rgba(0,0,0,0.1) !important; + } + + pre { + border: 1px solid rgba(0,0,0,0.2); + page-break-inside: avoid; + } + + img { + max-width: 100% !important; + page-break-inside: avoid; + } + + /* #generated-toc: added by Marked for its table of contents */ + + #wrapper #generated-toc ul, /* Table of contents printing in Marked */ + #wrapper #generated-toc ol { + list-style-type: decimal; + } + + #wrapper #generated-toc ul li, + #wrapper #generated-toc ol li { + margin: 1rem 0; + } + +} diff --git a/doc/themes/radar.css b/doc/themes/radar.css new file mode 100644 index 00000000..be0e86c9 --- /dev/null +++ b/doc/themes/radar.css @@ -0,0 +1,355 @@ + +body { + margin: 0px; + + font-family: 'PT Sans', Helvetica, 'Helvetica Neuve', Arial, Tahoma, sans-serif; + font-size: 17px; + + color: #333; +} + +h1, h2, h3, h4, h5, h6 { + color:#222; + margin:0 0 20px; +} + +p, ul, ol, table, pre, dl { + margin:0 0 20px; +} + +h1, h2, h3 { + line-height:1.1; +} + +h1 { + font-size:28px; +} + +h2 { + color:#393939; +} + +h3, h4, h5, h6 { + color:#494949; +} + +a { + color:#39c; + font-weight:400; + text-decoration:none; +} + +a small { + font-size:11px; + color:#777; + margin-top:-0.6em; + display:block; +} + +.wrapper { + width:860px; + margin:0 auto; +} + +blockquote { + border-left:1px solid #e5e5e5; + margin:0; + padding:0 0 0 20px; + font-style:italic; +} + +code, pre { + color:#333; + font-size:12px; +} + +pre { + padding:8px 15px; + background: #f8f8f8; + border-radius:5px; + border:1px solid #e5e5e5; + overflow-x: auto; +} + +table { + width:100%; + border-collapse:collapse; +} + +th, td { + text-align:left; + padding:5px 10px; + border-bottom:1px solid #e5e5e5; +} + +dt { + color:#444; + font-weight:700; +} + +th { + color:#444; +} + +img { + max-width:100%; +} + +header { + 
width:270px; + float:left; + position:fixed; +} + +header ul { + list-style:none; + height:40px; + + padding:0; + + background: #eee; + background: -moz-linear-gradient(top, #f8f8f8 0%, #dddddd 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#f8f8f8), color-stop(100%,#dddddd)); + background: -webkit-linear-gradient(top, #f8f8f8 0%,#dddddd 100%); + background: -o-linear-gradient(top, #f8f8f8 0%,#dddddd 100%); + background: -ms-linear-gradient(top, #f8f8f8 0%,#dddddd 100%); + background: linear-gradient(top, #f8f8f8 0%,#dddddd 100%); + + border-radius:5px; + border:1px solid #d2d2d2; + box-shadow:inset #fff 0 1px 0, inset rgba(0,0,0,0.03) 0 -1px 0; + width:270px; +} + +header li { + width:89px; + float:left; + border-right:1px solid #d2d2d2; + height:40px; +} + +header ul a { + line-height:1; + font-size:11px; + color:#999; + display:block; + text-align:center; + padding-top:6px; + height:40px; +} + +strong { + color:#222; + font-weight:700; +} + +header ul li + li { + width:88px; + border-left:1px solid #fff; +} + +header ul li + li + li { + border-right:none; + width:89px; +} + +header ul a strong { + font-size:14px; + display:block; + color:#222; +} + +section { + width:500px; + float:right; + padding-bottom:50px; +} + +small { + font-size:11px; +} + +hr { + border:0; + background:#e5e5e5; + height:1px; + margin:0 0 20px; +} + +footer { + width:270px; + float:left; + position:fixed; + bottom:50px; +} + +@media print, screen and (max-width: 960px) { + + div.wrapper { + width:auto; + margin:0; + } + + header, section, footer { + float:none; + position:static; + width:auto; + } + + header { + padding-right:320px; + } + + section { + border:1px solid #e5e5e5; + border-width:1px 0; + padding:20px 0; + margin:0 0 20px; + } + + header a small { + display:inline; + } + + header ul { + position:absolute; + right:50px; + top:52px; + } +} + +@media print, screen and (max-width: 720px) { + body { + word-wrap:break-word; + } + + header { + 
padding:0; + } + + header ul, header p.view { + position:static; + } + + pre, code { + word-wrap:normal; + } +} + +@media print, screen and (max-width: 480px) { + body { + padding:15px; + } + + header ul { + display:none; + } +} + +@media print { + body { + padding:0.4in; + font-size:12pt; + color:#444; + } +} + + +#wrapper { + padding: 1em; +} + +.ca-menu { + list-style: none; + padding: 0; + margin: 20px auto; +} + +#toc { + top: 0; + right: 0; + bottom: 0; + left: auto; + width: 20%; + background-color: #fff; + padding: 20px; + position: fixed; + z-index: 1; + display: none; + height: 100%; +} + + +#toc::before { + content: ""; + position: absolute; + + top: 15%; + bottom: 15%; + left: -1px; + width: 1px; + background: -webkit-gradient(linear, 50% 0%, 50% 100%, color-stop(0%, rgba(227,224,216,0)), color-stop(20%, #e3e0d8), color-stop(80%, #e3e0d8), color-stop(100%, rgba(227,224,216,0))); + background: -webkit-linear-gradient(top, rgba(227,224,216,0) 0%,#e3e0d8 20%,#e3e0d8 80%,rgba(227,224,216,0) 100%); + background: -moz-linear-gradient(top, rgba(227,224,216,0) 0%,#e3e0d8 20%,#e3e0d8 80%,rgba(227,224,216,0) 100%); + background: -o-linear-gradient(top, rgba(227,224,216,0) 0%,#e3e0d8 20%,#e3e0d8 80%,rgba(227,224,216,0) 100%); + background: linear-gradient(top, rgba(227,224,216,0) 0%,#e3e0d8 20%,#e3e0d8 80%,rgba(227,224,216,0) 100%); +} + +#toc-inner { + display: table-cell; + vertical-align: middle; +} + +.nav-list { + height: 50%; + margin: auto 0; +} + +div.clear { + clear: both; +} + +h1 { font-size: 2.5em; line-height: 1; } +h2 { font-size: 2em; line-height: 1; } +h3 { font-size: 1.5em; line-height: 1; } +h4 { font-size: 1.2em; line-height: 1.25; } +h5 { font-size: 1em; line-height: 1; font-weight: bold; } +h6 { font-size: 1em; line-height: 1; font-weight: bold; } + +h1, h2, h3, h4, h5, h6 { font-weight: normal; margin-top: 1em; margin-bottom: 0.5em; } +h1, h2 { margin-bottom: 0.5em; } + +.post p { + max-width: 580px; +} + +ul.list, ol.list { + padding-left: 
3.333em; + max-width: 580px; +} + +.post h2 { + border-bottom: 1px solid #EDEDED; +} + +h1:nth-child(1), +h2:nth-child(1), +h3:nth-child(1), +h4:nth-child(1), +h5:nth-child(1), +h6:nth-child(1) { + margin-top: 0; +} + +@media (min-width: 43.75em) { + #wrapper { + width: 650px; + padding: 20px 50px; + } +} + +@media (min-width: 62em) { + #toc { + display: table; + } +} diff --git a/doc/themes/screen.css b/doc/themes/screen.css new file mode 100644 index 00000000..89debb6c --- /dev/null +++ b/doc/themes/screen.css @@ -0,0 +1,77 @@ +html { font-size: 62.5%; } +html, body { height: 100%; } + +body { + font-family: Helvetica, Arial, sans-serif; + font-size: 150%; + line-height: 1.3; + color: #f6e6cc; + width: 700px; + margin: auto; + background: #27221a; + position: relative; + padding: 0 30px; +} + +p,ul,ol,dl,table,pre { margin-bottom: 1em; } +ul { margin-left: 20px; } +a { text-decoration: none; cursor: pointer; color: #ba832c; font-weight: bold; } +a:focus { outline: 1px dotted; } +a:visited { } +a:hover, a:focus { color: #d3a459; text-decoration: none; } +a *, button * { cursor: pointer; } +hr { display: none; } +small { font-size: 90%; } +input, select, button, textarea, option { font-family: Arial, "Lucida Grande", "Lucida Sans Unicode", Arial, Verdana, sans-serif; font-size: 100%; } +button, label, select, option, input[type=submit] { cursor: pointer; } +.group:after { content: "."; display: block; height: 0; clear: both; visibility: hidden; } .group {display: inline-block;} +/* Hides from IE-mac \*/ * html .group {height: 1%;} .group {display: block;} /* End hide from IE-mac */ +sup { font-size: 80%; line-height: 1; vertical-align: super; } +button::-moz-focus-inner { border: 0; padding: 1px; } +span.amp { font-family: Baskerville, "Goudy Old Style", "Palatino", "Book Antiqua", serif; font-weight: normal; font-style: italic; font-size: 1.2em; line-height: 0.8; } + +h1,h2,h3,h4,h5,h6 { + line-height: 1.1; + font-family: Baskerville, "Goudy Old Style", 
"Palatino", "Book Antiqua", serif; +} + +h2 { font-size: 22pt; } +h3 { font-size: 20pt; } +h4 { font-size: 18pt; } +h5 { font-size: 16pt; } +h6 { font-size: 14pt; } + +::selection { background: #745626; } +::-moz-selection { background: #745626; } + +h1 { + font-size: 420%; + margin: 0 0 0.1em; + font-family: Baskerville, "Goudy Old Style", "Palatino", "Book Antiqua", serif; +} + +h1 a, +h1 a:hover { + color: #d7af72; + font-weight: normal; + text-decoration: none; +} + +pre { + background: rgba(0,0,0,0.3); + color: #fff; + padding: 8px 10px; + border-radius: 0.4em; + -moz-border-radius: 0.4em; + -webkit-border-radius: 0.4em; + overflow-x: hidden; +} + +pre code { + font-size: 10pt; +} + +.thumb { + float:left; + margin: 10px; +} diff --git a/doc/themes/solarized-dark.css b/doc/themes/solarized-dark.css new file mode 100644 index 00000000..d6ca5c71 --- /dev/null +++ b/doc/themes/solarized-dark.css @@ -0,0 +1,294 @@ +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +nav, +section, +summary { + display: block; +} +audio, +canvas, +video { + display: inline-block; +} +audio:not([controls]) { + display: none; + height: 0; +} +[hidden] { + display: none; +} +html { + font-family: sans-serif; + -webkit-text-size-adjust: 100%; + -ms-text-size-adjust: 100%; +} +body { + margin: 0; +} +a:focus { + outline: thin dotted; +} +a:active, +a:hover { + outline: 0; +} +h1 { + font-size: 2em; +} +abbr[title] { + border-bottom: 1px dotted; +} +b, +strong { + font-weight: bold; +} +dfn { + font-style: italic; +} +mark { + background: #ff0; + color: #000; +} +code, +kbd, +pre, +samp { + font-family: monospace, serif; + font-size: 1em; +} +pre { + white-space: pre-wrap; + word-wrap: break-word; +} +q { + quotes: "\201C" "\201D" "\2018" "\2019"; +} +small { + font-size: 80%; +} +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} +sup { + top: -0.5em; +} +sub { + bottom: -0.25em; +} +img { + border: 0; +} 
+svg:not(:root) { + overflow: hidden; +} +figure { + margin: 0; +} +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: 0.35em 0.625em 0.75em; +} +legend { + border: 0; + padding: 0; +} +button, +input, +select, +textarea { + font-family: inherit; + font-size: 100%; + margin: 0; +} +button, +input { + line-height: normal; +} +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer; +} +button[disabled], +input[disabled] { + cursor: default; +} +input[type="checkbox"], +input[type="radio"] { + box-sizing: border-box; + padding: 0; +} +input[type="search"] { + -webkit-appearance: textfield; + -moz-box-sizing: content-box; + -webkit-box-sizing: content-box; + box-sizing: content-box; +} +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0; +} +textarea { + overflow: auto; + vertical-align: top; +} +table { + border-collapse: collapse; + border-spacing: 0; +} + +html { + font-family: 'PT Sans', sans-serif; +} +pre, +code { + font-family: 'Inconsolata', sans-serif; +} +h1, +h2, +h3, +h4, +h5, +h6 { + font-family: 'PT Sans Narrow', sans-serif; + font-weight: 700; +} +html { + background-color: #002b36; + color: #839496; + margin: 1em; +} +code { + /*background-color: #073642;*/ + padding: 2px; +} +a { + color: #b58900; +} +a:visited { + color: #cb4b16; +} +a:hover { + color: #cb4b16; +} +h1 { + color: #d33682; +} +h2, +h3, +h4, +h5, +h6 { + color: #859900; +} +pre { + /*background-color: #002b36;*/ + color: #839496; + border: 1pt solid #586e75; + padding: 1em; + box-shadow: 5pt 5pt 8pt #073642; +} +pre code { + /*background-color: #002b36;*/ +} +h1 { + font-size: 2.8em; +} +h2 { + font-size: 2.4em; +} +h3 { + font-size: 1.8em; +} +h4 { + font-size: 1.4em; +} +h5 { + font-size: 1.3em; +} +h6 { + font-size: 1.15em; +} +.tag { 
+ /*background-color: #073642;*/ + color: #d33682; + padding: 0 0.2em; +} +.todo, +.next, +.done { + color: #002b36; + background-color: #dc322f; + padding: 0 0.2em; +} +.tag { + -webkit-border-radius: 0.35em; + -moz-border-radius: 0.35em; + border-radius: 0.35em; +} +.TODO { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + background-color: #2aa198; +} +.NEXT { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + background-color: #268bd2; +} +.ACTIVE { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + background-color: #268bd2; +} +.DONE { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + background-color: #859900; +} +.WAITING { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + foreground-color: #cb4b16; +} +.HOLD { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + foreground-color: #d33682; +} +.NOTE { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + foreground-color: #d33682; +} +.CANCELLED { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + foreground-color: #859900; +} diff --git a/doc/themes/solarized-light.css b/doc/themes/solarized-light.css new file mode 100644 index 00000000..87f26725 --- /dev/null +++ b/doc/themes/solarized-light.css @@ -0,0 +1,294 @@ +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +nav, +section, +summary { + display: block; +} +audio, +canvas, +video { + display: inline-block; +} +audio:not([controls]) { + display: none; + height: 0; +} +[hidden] { + display: none; +} +html { + font-family: sans-serif; + -webkit-text-size-adjust: 100%; + -ms-text-size-adjust: 100%; +} +body { + margin: 0; +} +a:focus { + outline: thin dotted; +} +a:active, +a:hover { + outline: 0; +} +h1 { + font-size: 2em; +} +abbr[title] { + 
border-bottom: 1px dotted; +} +b, +strong { + font-weight: bold; +} +dfn { + font-style: italic; +} +mark { + background: #ff0; + color: #000; +} +code, +kbd, +pre, +samp { + font-family: monospace, serif; + font-size: 1em; +} +pre { + white-space: pre-wrap; + word-wrap: break-word; +} +q { + quotes: "\201C" "\201D" "\2018" "\2019"; +} +small { + font-size: 80%; +} +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} +sup { + top: -0.5em; +} +sub { + bottom: -0.25em; +} +img { + border: 0; +} +svg:not(:root) { + overflow: hidden; +} +figure { + margin: 0; +} +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: 0.35em 0.625em 0.75em; +} +legend { + border: 0; + padding: 0; +} +button, +input, +select, +textarea { + font-family: inherit; + font-size: 100%; + margin: 0; +} +button, +input { + line-height: normal; +} +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer; +} +button[disabled], +input[disabled] { + cursor: default; +} +input[type="checkbox"], +input[type="radio"] { + box-sizing: border-box; + padding: 0; +} +input[type="search"] { + -webkit-appearance: textfield; + -moz-box-sizing: content-box; + -webkit-box-sizing: content-box; + box-sizing: content-box; +} +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0; +} +textarea { + overflow: auto; + vertical-align: top; +} +table { + border-collapse: collapse; + border-spacing: 0; +} + +html { + font-family: 'PT Sans', sans-serif; +} +pre, +code { + font-family: 'Inconsolata', sans-serif; +} +h1, +h2, +h3, +h4, +h5, +h6 { + font-family: 'PT Sans Narrow', sans-serif; + font-weight: 700; +} +html { + background-color: #fdf6e3; + color: #657b83; + margin: 1em; +} +code { + background-color: #eee8d5; + padding: 
2px; +} +a { + color: #b58900; +} +a:visited { + color: #cb4b16; +} +a:hover { + color: #cb4b16; +} +h1 { + color: #d33682; +} +h2, +h3, +h4, +h5, +h6 { + color: #859900; +} +pre { + background-color: #fdf6e3; + color: #657b83; + border: 1pt solid #93a1a1; + padding: 1em; + box-shadow: 5pt 5pt 8pt #eee8d5; +} +pre code { + background-color: #fdf6e3; +} +h1 { + font-size: 2.8em; +} +h2 { + font-size: 2.4em; +} +h3 { + font-size: 1.8em; +} +h4 { + font-size: 1.4em; +} +h5 { + font-size: 1.3em; +} +h6 { + font-size: 1.15em; +} +.tag { + background-color: #eee8d5; + color: #d33682; + padding: 0 0.2em; +} +.todo, +.next, +.done { + color: #fdf6e3; + background-color: #dc322f; + padding: 0 0.2em; +} +.tag { + -webkit-border-radius: 0.35em; + -moz-border-radius: 0.35em; + border-radius: 0.35em; +} +.TODO { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + background-color: #2aa198; +} +.NEXT { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + background-color: #268bd2; +} +.ACTIVE { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + background-color: #268bd2; +} +.DONE { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + background-color: #859900; +} +.WAITING { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + foreground-color: #cb4b16; +} +.HOLD { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + foreground-color: #d33682; +} +.NOTE { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + foreground-color: #d33682; +} +.CANCELLED { + -webkit-border-radius: 0.2em; + -moz-border-radius: 0.2em; + border-radius: 0.2em; + foreground-color: #859900; +} diff --git a/doc/themes/torpedo.css b/doc/themes/torpedo.css new file mode 100644 index 00000000..dcd92188 --- /dev/null +++ b/doc/themes/torpedo.css @@ -0,0 +1,666 @@ +/* Title: 
Torpedo */ +/* Author: Jocelyn Richard http://jocelynrichard.com/ */ +/* Description: A muted color palette for long-form writing or reading, suited for technical documentation. Works best with Cinta: http://www.myfonts.com/fonts/tipo-pepel/cinta/ */ + +/* ================================================ */ +/* 1. Reset */ +/* 2. Skeleton */ +/* 3. Media Queries */ +/* 4. Print Styles */ +/* 5. Torpedo Overrides */ +/* ================================================ */ + + + +/* ================================================ */ +/* 1. Reset */ +/* ================================================ */ + +html, body, div, span, applet, object, iframe, h1, h2, h3, h4, h5, h6, p, blockquote, pre, a, abbr, acronym, address, big, cite, code, del, dfn, em, img, ins, kbd, q, s, samp, small, strike, strong, sub, sup, tt, var, +b, u, i, center, dl, dt, dd, ol, ul, li, fieldset, form, label, legend, table, caption, tbody, tfoot, thead, tr, th, td, article, aside, canvas, details, embed, figure, figcaption, footer, header, hgroup, menu, nav, output, ruby, section, summary, time, mark, audio, video {margin: 0; padding: 0; border: 0;} /* Edited from http://www.cssreset.com/scripts/eric-meyer-reset-css/ */ + +article, aside, details, figcaption, figure, footer, header, hgroup, nav, section, summary {display: block;} /* Semantic tags definition for IE 6/7/8/9 and Firefox 3 */ + +html {-webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%;} /* Prevents iOS text size adjust after orientation change, without disabling user zoom */ + + + +/* ================================================ */ +/* 2. 
Skeleton */ +/* ================================================ */ + +/* ------------------------------------------------- */ +/* General */ +/* ------------------------------------------------- */ + +html { + font-size: 14px; +} + +body { + font-family: 'Open Sans', sans-serif; + margin: 1.71rem 1.71rem 3rem 1.71rem ; /* Get margins even if the Markdown rendering app doesn't include any */ + background-color: white; + color: #222; +} + +#wrapper { /* #wrapper: ID added by Marked */ + max-width: 42rem; + margin: 0 auto; + margin-left: auto !important; /* Countering toc.css added by Marked */ + padding: 1.71rem 0 !important; /* Countering toc.css added by Marked */ +} + +/* ------------------------------------------------- */ +/* Typography */ +/* ------------------------------------------------- */ + +h1, +h2, +h3, +h4, +h5, +h6 { + margin-bottom: 1.6rem; +} + +h1, +h2 { + margin-top: 3.2rem; +} + +h1 { + font-size: 2.82rem; /* 42.3px @15px */ + line-height: 3.2rem; /* 48px @15px */ +} + +h2 { + font-size: 1.99rem; /* 29.9px @15px */ + line-height: 2.4rem; /* 36px @15px */ +} + +h3 { + font-size: 1.41rem; /* 21.2px @15px */ + line-height: 2rem; /* 30px @15px */ +} + +h4 { + font-size: 1rem; /* 15px @15px */ + line-height: 1.6rem; /* 24px @15px */ +} + +h5, h6 { + font-size: 0.8rem; + line-height: 1.2rem; + text-transform: uppercase; +} + +h6 { + margin-left: 1.6rem; +} + +p, +ol, +ul, +blockquote { + font-size: 1rem; + line-height: 1.6rem; + margin-bottom: 1.6rem; +} + +ul ul, +ul ol, +ol ul, +ol ol { + margin-left: 1.6rem; + margin-top: 1.6rem; +} + +#generated-toc ul ul, /* #generated-toc: added by Marked for its table of contents */ +#generated-toc ul ol, +#generated-toc ol ul, +#generated-toc ol ol { + margin-top: 0; + margin-bottom: 0; + padding-top: 0; + padding-bottom: 0; +} + +blockquote { + margin: 0 0 1.6rem 2.4rem; + padding-left: 0.8rem; /* Voire */ + border-left: 4px solid rgba(0,0,0,0.08); + font-style: normal; +} + +blockquote ul { + margin-left: 
0.8rem; /* Pour ne pas que les hanging bullets mordent sur le blockquote */ +} + +ol li blockquote, /* So that blockquote work in lists */ +ul li blockquote { + margin-left: 0; +} + +a:link { + text-decoration: none; + color: #165bd4; + border-bottom: 1px solid #ccc; +} + +a:visited { + color: #7697cf; + border-bottom: 1px solid #ccc; +} + +a:hover { + border-color: #165bd4; +} + +a:active { + background-color: #e6e6e6; +} + +/* ------------------------------------------------- */ +/* Tables */ +/* ------------------------------------------------- */ + +table { + font-size: 0.85rem; + margin: 0 0 1.6rem 0; + border-collapse: collapse; + border: 1px solid #ccc; +} + +th, +td { + padding: 0.5rem 0.75rem; + max-width: 20rem; /* Avoid dropping lines for nothing without having ridiculously wide tables */ +} + +th { + border-bottom: 2px solid #222; +} + +tr { + border-bottom: 1px solid #ccc; +} + +tbody tr:nth-child(odd) { + background-color: #f9f9f9; +} + +table code { + font-size: 85%; +} + +/* ------------------------------------------------- */ +/* Misc */ +/* ------------------------------------------------- */ + +img { + max-width: 100% +} + +caption, +figcaption { + font-size: 0.85rem; + line-height: 1.6rem; + margin: 0 1.6rem; + text-align: left; +} + +figcaption { + margin-bottom: 1.6rem; +} + +h1, /* White-space mentions in order to force wrapping */ +h2, +a:link, +pre { + white-space: pre; /* CSS 2.0 */ + white-space: pre-wrap; /* CSS 2.1 */ + white-space: pre-line; /* CSS 3.0 */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: -moz-pre-wrap; /* Mozilla */ + white-space: -hp-pre-wrap; /* HP Printers */ + word-wrap: break-word; /* IE 5+ */ +} + +code { + font-family: "Menlo", "Courier New", "Courier", monospace; + font-size: 85%; + color: #666; + background-color: rgba(0,0,0,0.08); + padding: 2px 4px; + border-radius: 2px; +} + +pre { + background-color: rgba(0,0,0,0.08); + border-radius: 8px; + padding: 0.4rem; 
+ margin-bottom: 1.6rem; +} + +pre code { /* Counter the code mentions */ + background-color: transparent; + padding: 0; +} + +sup, +sub, +a.footnote { /* Keep line-height from being affected by sub, cf https://gist.github.com/unruthless/413930 */ + font-size: 75%; + height: 0; + line-height: 1; + position: relative; +} + +sup, +a.footnote { + vertical-align:super; +} + +sub { + vertical-align: sub; +} + +dt { + font-weight: 600; +} + +dd { + font-size: 1rem; + line-height: 1.6rem; + margin-bottom: 1.6rem; +} + +hr { + clear: none; + height: 0.2rem; + border: none; + margin: 0 auto 1.4rem auto; /* 2.4rem auto 2.2rem auto; */ + width: 100%; + color: #ccc; + background-color: #ccc; +} + +::selection { + background-color: #f8dc77; +} + +::-moz-selection { + background-color: #f8dc77; +} + +a:focus { + outline: 2px solid; + outline-color: #165bd4; +} + +/* ------------------------------------------------- */ +/* Animations */ +/* ------------------------------------------------- */ + +a:hover { + -moz-transition: all 0.2s ease-in-out; + -webkit-transition: all 0.2s ease-in-out; +} + +h1, +h2, +h3, +h4, +h5, +h6, +p, +blockquote { + -moz-transition: all 0.2s ease; + -webkit-transition: all 0.2s ease; +} + + + +/* ================================================ */ +/* 3. Media Queries */ +/* ================================================ */ + +/* Base styles are for smartphones; elements are then tweaked as the viewport grows. 
*/ + +/* ------------------------------------------------- */ +/* iPad and desktop */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 641px) { + + html { + font-size: 15px; + } + + body { + margin: 2.4rem 2.4rem 3.2rem 2.4rem; + } + + h1 { + font-size: 3.57rem; /* 53.2px @15px */ + line-height: 4rem; /* 60px @15px */ + } + + h2 { + font-size: 2.24rem; /* 33.6px @15px */ + line-height: 2.8rem; /* 42px @15px */ + } + +} + +/* ------------------------------------------------- */ +/* Widescreens */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 1441px) { + + html { + font-size: 22px; + } + +} + + + +/* ================================================ */ +/* 4. Print Styles */ +/* ================================================ */ + +/* Inconsistent and buggy across browsers */ + +@media print { + + * { + background: transparent !important; + color: #000 !important; /* Black text prints faster and browsers are inconsistent in color reproduction anyway: h5bp.com/s */ + } + + @page { + margin: 1cm; /* Added to any #wrapper margin*/ + } + + html { + font-size: 15px; + } + + body { + margin: 1rem !important; /* Security margins for browser without @page support */ + } + + #wrapper { + max-width: none; + } + + h1, + h2, + h3, + h4, + h5, + h6, + p { + orphans: 3; + widows: 3; + page-break-after: avoid; + } + + ul, + ol { + list-style-position: inside !important; + padding-right: 0 !important; + margin-left: 0 !important; + } + + ul ul, + ul ol, + ol ul, + ol ol, + ul p:not(:first-child), + ol p:not(:first-child) { + margin-left: 2rem !important; + } + + a:link, + a:visited { + text-decoration: underline !important; + font-weight: normal !important; + } + + a[href]:after { + content: " (" attr(href) ")"; + } + + a[href^="javascript:"]:after, + a[href^="#"]:after { + content: ""; /* Do not show javascript and internal links */ + } + + a[href^="#"] { + text-decoration: none 
!important; + } + + th { + background-color: rgba(0,0,0,0.2) !important; + border-bottom: none !important; + } + + tr { + page-break-inside: avoid; + } + + tbody tr:nth-child(even) { + background-color: rgba(0,0,0,0.1) !important; + } + + pre { + border: 1px solid rgba(0,0,0,0.2); + page-break-inside: avoid; + } + + img { + max-width: 100% !important; + page-break-inside: avoid; + } + + /* #generated-toc: added by Marked for its table of contents */ + + #wrapper #generated-toc ul, /* Table of contents printing in Marked */ + #wrapper #generated-toc ol { + list-style-type: decimal; + } + + #wrapper #generated-toc ul li, + #wrapper #generated-toc ol li { + margin: 1rem 0; + } + +} + + + +/* ================================================ */ +/* 5. Torpedo Overrides */ +/* ================================================ */ + +/* ------------------------------------------------- */ +/* General */ +/* ------------------------------------------------- */ + +html { + font-size: 16px; +} + +body { + font-family: 'Cinta', 'Source Sans Pro', Avenir, sans-serif; + background-color: #F9F9F9; + color: #636463; +} + +#wrapper { + max-width: 40rem; +} + +/* ------------------------------------------------- */ +/* Typography */ +/* ------------------------------------------------- */ + +h1, +h2 { + color: #febf60; +} + +h3 { + color: #b46864; +} + +h4, +h5, +h6 { + font-size: 1rem; + color: #b46864; + text-transform: none; +} + +h5 { + margin-left: 1.6rem; +} + +h6 { + margin-left: 3.2rem; +} + +p, +ol, +ul, +dd, +figcaption { + font-weight: 300; +} + +blockquote { + border-color: #efefef; +} + +a:link, +a:visited, +a:hover, +a:visited { + color: #636463; +} + +a:link { + font-weight: 600; + text-decoration: none; + border-bottom: none; + background-image: 
url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMEAAAAJCAYAAACL+UhFAAAEJGlDQ1BJQ0MgUHJvZmlsZQAAOBGFVd9v21QUPolvUqQWPyBYR4eKxa9VU1u5GxqtxgZJk6XtShal6dgqJOQ6N4mpGwfb6baqT3uBNwb8AUDZAw9IPCENBmJ72fbAtElThyqqSUh76MQPISbtBVXhu3ZiJ1PEXPX6yznfOec7517bRD1fabWaGVWIlquunc8klZOnFpSeTYrSs9RLA9Sr6U4tkcvNEi7BFffO6+EdigjL7ZHu/k72I796i9zRiSJPwG4VHX0Z+AxRzNRrtksUvwf7+Gm3BtzzHPDTNgQCqwKXfZwSeNHHJz1OIT8JjtAq6xWtCLwGPLzYZi+3YV8DGMiT4VVuG7oiZpGzrZJhcs/hL49xtzH/Dy6bdfTsXYNY+5yluWO4D4neK/ZUvok/17X0HPBLsF+vuUlhfwX4j/rSfAJ4H1H0qZJ9dN7nR19frRTeBt4Fe9FwpwtN+2p1MXscGLHR9SXrmMgjONd1ZxKzpBeA71b4tNhj6JGoyFNp4GHgwUp9qplfmnFW5oTdy7NamcwCI49kv6fN5IAHgD+0rbyoBc3SOjczohbyS1drbq6pQdqumllRC/0ymTtej8gpbbuVwpQfyw66dqEZyxZKxtHpJn+tZnpnEdrYBbueF9qQn93S7HQGGHnYP7w6L+YGHNtd1FJitqPAR+hERCNOFi1i1alKO6RQnjKUxL1GNjwlMsiEhcPLYTEiT9ISbN15OY/jx4SMshe9LaJRpTvHr3C/ybFYP1PZAfwfYrPsMBtnE6SwN9ib7AhLwTrBDgUKcm06FSrTfSj187xPdVQWOk5Q8vxAfSiIUc7Z7xr6zY/+hpqwSyv0I0/QMTRb7RMgBxNodTfSPqdraz/sDjzKBrv4zu2+a2t0/HHzjd2Lbcc2sG7GtsL42K+xLfxtUgI7YHqKlqHK8HbCCXgjHT1cAdMlDetv4FnQ2lLasaOl6vmB0CMmwT/IPszSueHQqv6i/qluqF+oF9TfO2qEGTumJH0qfSv9KH0nfS/9TIp0Wboi/SRdlb6RLgU5u++9nyXYe69fYRPdil1o1WufNSdTTsp75BfllPy8/LI8G7AUuV8ek6fkvfDsCfbNDP0dvRh0CrNqTbV7LfEEGDQPJQadBtfGVMWEq3QWWdufk6ZSNsjG2PQjp3ZcnOWWing6noonSInvi0/Ex+IzAreevPhe+CawpgP1/pMTMDo64G0sTCXIM+KdOnFWRfQKdJvQzV1+Bt8OokmrdtY2yhVX2a+qrykJfMq4Ml3VR4cVzTQVz+UoNne4vcKLoyS+gyKO6EHe+75Fdt0Mbe5bRIf/wjvrVmhbqBN97RD1vxrahvBOfOYzoosH9bq94uejSOQGkVM6sN/7HelL4t10t9F4gPdVzydEOx83Gv+uNxo7XyL/FtFl8z9ZAHF4bBsrEwAAAE9JREFUWAnt0wENACEQBLHn/SEGK1jCGAk2pufgmp2x9pmfIxAW+MO/e53AExCBIeQFRJCfAAAR2EBeQAT5CQAQgQ3kBUSQnwAAEdhAXuACbF4CJXG10MIAAAAASUVORK5CYII=); + background-repeat: repeat-x; + background-position: 0 0.7rem; +} + +a:visited { /* Doesn't work? 
*/ + background-position: 0 1rem; +} + +a:hover { + font-weight: 600; + text-decoration: none; + background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMEAAAAJCAYAAACL+UhFAAAEJGlDQ1BJQ0MgUHJvZmlsZQAAOBGFVd9v21QUPolvUqQWPyBYR4eKxa9VU1u5GxqtxgZJk6XtShal6dgqJOQ6N4mpGwfb6baqT3uBNwb8AUDZAw9IPCENBmJ72fbAtElThyqqSUh76MQPISbtBVXhu3ZiJ1PEXPX6yznfOec7517bRD1fabWaGVWIlquunc8klZOnFpSeTYrSs9RLA9Sr6U4tkcvNEi7BFffO6+EdigjL7ZHu/k72I796i9zRiSJPwG4VHX0Z+AxRzNRrtksUvwf7+Gm3BtzzHPDTNgQCqwKXfZwSeNHHJz1OIT8JjtAq6xWtCLwGPLzYZi+3YV8DGMiT4VVuG7oiZpGzrZJhcs/hL49xtzH/Dy6bdfTsXYNY+5yluWO4D4neK/ZUvok/17X0HPBLsF+vuUlhfwX4j/rSfAJ4H1H0qZJ9dN7nR19frRTeBt4Fe9FwpwtN+2p1MXscGLHR9SXrmMgjONd1ZxKzpBeA71b4tNhj6JGoyFNp4GHgwUp9qplfmnFW5oTdy7NamcwCI49kv6fN5IAHgD+0rbyoBc3SOjczohbyS1drbq6pQdqumllRC/0ymTtej8gpbbuVwpQfyw66dqEZyxZKxtHpJn+tZnpnEdrYBbueF9qQn93S7HQGGHnYP7w6L+YGHNtd1FJitqPAR+hERCNOFi1i1alKO6RQnjKUxL1GNjwlMsiEhcPLYTEiT9ISbN15OY/jx4SMshe9LaJRpTvHr3C/ybFYP1PZAfwfYrPsMBtnE6SwN9ib7AhLwTrBDgUKcm06FSrTfSj187xPdVQWOk5Q8vxAfSiIUc7Z7xr6zY/+hpqwSyv0I0/QMTRb7RMgBxNodTfSPqdraz/sDjzKBrv4zu2+a2t0/HHzjd2Lbcc2sG7GtsL42K+xLfxtUgI7YHqKlqHK8HbCCXgjHT1cAdMlDetv4FnQ2lLasaOl6vmB0CMmwT/IPszSueHQqv6i/qluqF+oF9TfO2qEGTumJH0qfSv9KH0nfS/9TIp0Wboi/SRdlb6RLgU5u++9nyXYe69fYRPdil1o1WufNSdTTsp75BfllPy8/LI8G7AUuV8ek6fkvfDsCfbNDP0dvRh0CrNqTbV7LfEEGDQPJQadBtfGVMWEq3QWWdufk6ZSNsjG2PQjp3ZcnOWWing6noonSInvi0/Ex+IzAreevPhe+CawpgP1/pMTMDo64G0sTCXIM+KdOnFWRfQKdJvQzV1+Bt8OokmrdtY2yhVX2a+qrykJfMq4Ml3VR4cVzTQVz+UoNne4vcKLoyS+gyKO6EHe+75Fdt0Mbe5bRIf/wjvrVmhbqBN97RD1vxrahvBOfOYzoosH9bq94uejSOQGkVM6sN/7HelL4t10t9F4gPdVzydEOx83Gv+uNxo7XyL/FtFl8z9ZAHF4bBsrEwAAAE9JREFUWAnt0wENACEQBLHn/SEGK1jCGAk2pufgmp2x9pmfIxAW+MO/e53AExCBIeQFRJCfAAAR2EBeQAT5CQAQgQ3kBUSQnwAAEdhAXuACbF4CJXG10MIAAAAASUVORK5CYII=); + background-repeat: repeat; + background-position-y: 0 0; +} + +/* ------------------------------------------------- */ +/* Tables */ +/* ------------------------------------------------- */ + +table { + font-weight: 300; +} + +table, +th, 
+tr { + border: none; +} + +th { + border-bottom: 1px solid #da9e1a; + background-color: #ffd18c; +} + +tbody tr:nth-child(even) { + background-color: #efefef; +} + +/* ------------------------------------------------- */ +/* Misc */ +/* ------------------------------------------------- */ + +code { + color: #5597e7; + padding: 0; /* PLus besoin car plus de risque de collision avec l'arrière-plan coloré */ + background-color: transparent; +} + +pre { + background-color: transparent; +} + +hr { + clear: none; + height: 1rem; + width: 14rem; + margin: 2.5rem auto; + border: none; + color: none; + background-color: transparent; + background-image: url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiIHN0YW5kYWxvbmU9Im5vIj8+Cjxzdmcgd2lkdGg9IjEyNnB4IiBoZWlnaHQ9IjE2cHgiIHZpZXdCb3g9IjAgMCAxMjYgMTYiIHZlcnNpb249IjEuMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgeG1sbnM6c2tldGNoPSJodHRwOi8vd3d3LmJvaGVtaWFuY29kaW5nLmNvbS9za2V0Y2gvbnMiPgogICAgPHRpdGxlPmhyPC90aXRsZT4KICAgIDxkZXNjcmlwdGlvbj5DcmVhdGVkIHdpdGggU2tldGNoIChodHRwOi8vd3d3LmJvaGVtaWFuY29kaW5nLmNvbS9za2V0Y2gpPC9kZXNjcmlwdGlvbj4KICAgIDxkZWZzPjwvZGVmcz4KICAgIDxnIGlkPSJQYWdlLTEiIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiIHNrZXRjaDp0eXBlPSJNU1BhZ2UiPgogICAgICAgIDxwYXRoIGQ9Ik0xMywzIEwzLDEzIiBpZD0iTGluZSIgc3Ryb2tlPSIjRDRERUVCIiBzdHJva2Utd2lkdGg9IjQiIHN0cm9rZS1saW5lY2FwPSJzcXVhcmUiIHNrZXRjaDp0eXBlPSJNU1NoYXBlR3JvdXAiPjwvcGF0aD4KICAgICAgICA8cGF0aCBkPSJNMTMsMyBMMjMuMDQ5ODc1MywxMy4wNDk4NzUzIiBpZD0iTGluZSIgc3Ryb2tlPSIjRDRERUVCIiBzdHJva2Utd2lkdGg9IjQiIHN0cm9rZS1saW5lY2FwPSJzcXVhcmUiIHNrZXRjaDp0eXBlPSJNU1NoYXBlR3JvdXAiPjwvcGF0aD4KICAgICAgICA8cGF0aCBkPSJNMzMsMyBMMjMsMTMiIGlkPSJMaW5lIiBzdHJva2U9IiNENERFRUIiIHN0cm9rZS13aWR0aD0iNCIgc3Ryb2tlLWxpbmVjYXA9InNxdWFyZSIgc2tldGNoOnR5cGU9Ik1TU2hhcGVHcm91cCI+PC9wYXRoPgogICAgICAgIDxwYXRoIGQ9Ik0zMywzIEw0My4wNDk4NzUzLDEzLjA0OTg3NTMiIGlkPSJMaW5lIiBzdHJva2U9IiNENERFRUIiIHN0cm9rZ
S13aWR0aD0iNCIgc3Ryb2tlLWxpbmVjYXA9InNxdWFyZSIgc2tldGNoOnR5cGU9Ik1TU2hhcGVHcm91cCI+PC9wYXRoPgogICAgICAgIDxwYXRoIGQ9Ik01MywzIEw0MywxMyIgaWQ9IkxpbmUiIHN0cm9rZT0iI0Q0REVFQiIgc3Ryb2tlLXdpZHRoPSI0IiBzdHJva2UtbGluZWNhcD0ic3F1YXJlIiBza2V0Y2g6dHlwZT0iTVNTaGFwZUdyb3VwIj48L3BhdGg+CiAgICAgICAgPHBhdGggZD0iTTUzLDMgTDYzLjA0OTg3NTMsMTMuMDQ5ODc1MyIgaWQ9IkxpbmUiIHN0cm9rZT0iI0Q0REVFQiIgc3Ryb2tlLXdpZHRoPSI0IiBzdHJva2UtbGluZWNhcD0ic3F1YXJlIiBza2V0Y2g6dHlwZT0iTVNTaGFwZUdyb3VwIj48L3BhdGg+CiAgICAgICAgPHBhdGggZD0iTTczLDMgTDYzLDEzIiBpZD0iTGluZSIgc3Ryb2tlPSIjRDRERUVCIiBzdHJva2Utd2lkdGg9IjQiIHN0cm9rZS1saW5lY2FwPSJzcXVhcmUiIHNrZXRjaDp0eXBlPSJNU1NoYXBlR3JvdXAiPjwvcGF0aD4KICAgICAgICA8cGF0aCBkPSJNNzMsMyBMODMuMDQ5ODc1MywxMy4wNDk4NzUzIiBpZD0iTGluZSIgc3Ryb2tlPSIjRDRERUVCIiBzdHJva2Utd2lkdGg9IjQiIHN0cm9rZS1saW5lY2FwPSJzcXVhcmUiIHNrZXRjaDp0eXBlPSJNU1NoYXBlR3JvdXAiPjwvcGF0aD4KICAgICAgICA8cGF0aCBkPSJNOTMsMyBMODMsMTMiIGlkPSJMaW5lIiBzdHJva2U9IiNENERFRUIiIHN0cm9rZS13aWR0aD0iNCIgc3Ryb2tlLWxpbmVjYXA9InNxdWFyZSIgc2tldGNoOnR5cGU9Ik1TU2hhcGVHcm91cCI+PC9wYXRoPgogICAgICAgIDxwYXRoIGQ9Ik05MywzIEwxMDMuMDQ5ODc1LDEzLjA0OTg3NTMiIGlkPSJMaW5lIiBzdHJva2U9IiNENERFRUIiIHN0cm9rZS13aWR0aD0iNCIgc3Ryb2tlLWxpbmVjYXA9InNxdWFyZSIgc2tldGNoOnR5cGU9Ik1TU2hhcGVHcm91cCI+PC9wYXRoPgogICAgICAgIDxwYXRoIGQ9Ik0xMTMsMyBMMTAzLDEzIiBpZD0iTGluZSIgc3Ryb2tlPSIjRDRERUVCIiBzdHJva2Utd2lkdGg9IjQiIHN0cm9rZS1saW5lY2FwPSJzcXVhcmUiIHNrZXRjaDp0eXBlPSJNU1NoYXBlR3JvdXAiPjwvcGF0aD4KICAgICAgICA8cGF0aCBkPSJNMTEzLDMgTDEyMy4wNDk4NzUsMTMuMDQ5ODc1MyIgaWQ9IkxpbmUiIHN0cm9rZT0iI0Q0REVFQiIgc3Ryb2tlLXdpZHRoPSI0IiBzdHJva2UtbGluZWNhcD0ic3F1YXJlIiBza2V0Y2g6dHlwZT0iTVNTaGFwZUdyb3VwIj48L3BhdGg+CiAgICA8L2c+Cjwvc3ZnPg==); + background-repeat: no-repeat; + background-position-x: 50%; +} + +::selection { + background-color: #D4DEEB; + color: #b46864; + } + +::-moz-selection { + background-color: #D4DEEB; + color: #b46864; +} + +/* #generated-toc: added by Marked for its table of contents */ + +#generated-toc ul li a { + font-weight: normal; +} + +/* 
------------------------------------------------- */ +/* iPad and desktop */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 641px) { + + html { + font-size: 17px; + } + +} + +/* ------------------------------------------------- */ +/* Widescreens */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 1441px) { + + html { + font-size: 22px; + } + +} diff --git a/doc/themes/vostok.css b/doc/themes/vostok.css new file mode 100644 index 00000000..6f3e7b30 --- /dev/null +++ b/doc/themes/vostok.css @@ -0,0 +1,712 @@ +/* Title: Vostok */ +/* Author: Jocelyn Richard http://jocelynrichard.com/ */ +/* Description: Generous x-height and contrasted colors make for highly legible documents. Works best with the free PT fonts: http://www.paratype.com/public/ */ + +/* ================================================ */ +/* 1. Reset */ +/* 2. Skeleton */ +/* 3. Media Queries */ +/* 4. Print Styles */ +/* 5. Vostok Overrides */ +/* ================================================ */ + + + +/* ================================================ */ +/* 1. 
Reset */ +/* ================================================ */ + +html, body, div, span, applet, object, iframe, h1, h2, h3, h4, h5, h6, p, blockquote, pre, a, abbr, acronym, address, big, cite, code, del, dfn, em, img, ins, kbd, q, s, samp, small, strike, strong, sub, sup, tt, var, +b, u, i, center, dl, dt, dd, ol, ul, li, fieldset, form, label, legend, table, caption, tbody, tfoot, thead, tr, th, td, article, aside, canvas, details, embed, figure, figcaption, footer, header, hgroup, menu, nav, output, ruby, section, summary, time, mark, audio, video {margin: 0; padding: 0; border: 0;} /* Edited from http://www.cssreset.com/scripts/eric-meyer-reset-css/ */ + +article, aside, details, figcaption, figure, footer, header, hgroup, nav, section, summary {display: block;} /* Semantic tags definition for IE 6/7/8/9 and Firefox 3 */ + +html {-webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%;} /* Prevents iOS text size adjust after orientation change, without disabling user zoom */ + + + +/* ================================================ */ +/* 2. 
Skeleton */ +/* ================================================ */ + +/* ------------------------------------------------- */ +/* General */ +/* ------------------------------------------------- */ + +html { + font-size: 14px; +} + +body { + font-family: 'Open Sans', sans-serif; + margin: 1.71rem 1.71rem 3rem 1.71rem ; /* Get margins even if the Markdown rendering app doesn't include any */ + background-color: white; + color: #222; +} + +#wrapper { /* #wrapper: ID added by Marked */ + max-width: 42rem; + margin: 0 auto; + margin-left: auto !important; /* Countering toc.css added by Marked */ + padding: 1.71rem 0 !important; /* Countering toc.css added by Marked */ +} + +/* ------------------------------------------------- */ +/* Typography */ +/* ------------------------------------------------- */ + +h1, +h2, +h3, +h4, +h5, +h6 { + margin-bottom: 1.6rem; +} + +h1, +h2 { + margin-top: 3.2rem; +} + +h1 { + font-size: 2.82rem; /* 42.3px @15px */ + line-height: 3.2rem; /* 48px @15px */ +} + +h2 { + font-size: 1.99rem; /* 29.9px @15px */ + line-height: 2.4rem; /* 36px @15px */ +} + +h3 { + font-size: 1.41rem; /* 21.2px @15px */ + line-height: 2rem; /* 30px @15px */ +} + +h4 { + font-size: 1rem; /* 15px @15px */ + line-height: 1.6rem; /* 24px @15px */ +} + +h5, h6 { + font-size: 0.8rem; + line-height: 1.2rem; + text-transform: uppercase; +} + +h6 { + margin-left: 1.6rem; +} + +p, +ol, +ul, +blockquote { + font-size: 1rem; + line-height: 1.6rem; + margin-bottom: 1.6rem; +} + +ul ul, +ul ol, +ol ul, +ol ol { + margin-left: 1.6rem; + margin-top: 1.6rem; +} + +#generated-toc ul ul, /* #generated-toc: added by Marked for its table of contents */ +#generated-toc ul ol, +#generated-toc ol ul, +#generated-toc ol ol { + margin-top: 0; + margin-bottom: 0; + padding-top: 0; + padding-bottom: 0; +} + +blockquote { + margin: 0 0 1.6rem 2.4rem; + padding-left: 0.8rem; /* Voire */ + border-left: 4px solid rgba(0,0,0,0.08); + font-style: normal; +} + +blockquote ul { + margin-left: 
0.8rem; /* Pour ne pas que les hanging bullets mordent sur le blockquote */ +} + +ol li blockquote, /* So that blockquote work in lists */ +ul li blockquote { + margin-left: 0; +} + +a:link { + text-decoration: none; + color: #165bd4; + border-bottom: 1px solid #ccc; +} + +a:visited { + color: #7697cf; + border-bottom: 1px solid #ccc; +} + +a:hover { + border-color: #165bd4; +} + +a:active { + background-color: #e6e6e6; +} + +/* ------------------------------------------------- */ +/* Tables */ +/* ------------------------------------------------- */ + +table { + font-size: 0.85rem; + margin: 0 0 1.6rem 0; + border-collapse: collapse; + border: 1px solid #ccc; +} + +th, +td { + padding: 0.5rem 0.75rem; + max-width: 20rem; /* Avoid dropping lines for nothing without having ridiculously wide tables */ +} + +th { + border-bottom: 2px solid #222; +} + +tr { + border-bottom: 1px solid #ccc; +} + +tbody tr:nth-child(odd) { + background-color: #f9f9f9; +} + +table code { + font-size: 85%; +} + +/* ------------------------------------------------- */ +/* Misc */ +/* ------------------------------------------------- */ + +img { + max-width: 100% +} + +caption, +figcaption { + font-size: 0.85rem; + line-height: 1.6rem; + margin: 0 1.6rem; + text-align: left; +} + +figcaption { + margin-bottom: 1.6rem; +} + +h1, /* White-space mentions in order to force wrapping */ +h2, +a:link, +pre { + white-space: pre; /* CSS 2.0 */ + white-space: pre-wrap; /* CSS 2.1 */ + white-space: pre-line; /* CSS 3.0 */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: -moz-pre-wrap; /* Mozilla */ + white-space: -hp-pre-wrap; /* HP Printers */ + word-wrap: break-word; /* IE 5+ */ +} + +code { + font-family: "Menlo", "Courier New", "Courier", monospace; + font-size: 85%; + color: #666; + background-color: rgba(0,0,0,0.08); + padding: 2px 4px; + border-radius: 2px; +} + +pre { + background-color: rgba(0,0,0,0.08); + border-radius: 8px; + padding: 0.4rem; 
+ margin-bottom: 1.6rem; +} + +pre code { /* Counter the code mentions */ + background-color: transparent; + padding: 0; +} + +sup, +sub, +a.footnote { /* Keep line-height from being affected by sub, cf https://gist.github.com/unruthless/413930 */ + font-size: 75%; + height: 0; + line-height: 1; + position: relative; +} + +sup, +a.footnote { + vertical-align:super; +} + +sub { + vertical-align: sub; +} + +dt { + font-weight: 600; +} + +dd { + font-size: 1rem; + line-height: 1.6rem; + margin-bottom: 1.6rem; +} + +hr { + clear: none; + height: 0.2rem; + border: none; + margin: 0 auto 1.4rem auto; /* 2.4rem auto 2.2rem auto; */ + width: 100%; + color: #ccc; + background-color: #ccc; +} + +::selection { + background-color: #f8dc77; +} + +::-moz-selection { + background-color: #f8dc77; +} + +a:focus { + outline: 2px solid; + outline-color: #165bd4; +} + +/* ------------------------------------------------- */ +/* Animations */ +/* ------------------------------------------------- */ + +a:hover { + -moz-transition: all 0.2s ease-in-out; + -webkit-transition: all 0.2s ease-in-out; +} + +h1, +h2, +h3, +h4, +h5, +h6, +p, +blockquote { + -moz-transition: all 0.2s ease; + -webkit-transition: all 0.2s ease; +} + + + +/* ================================================ */ +/* 3. Media Queries */ +/* ================================================ */ + +/* Base styles are for smartphones; elements are then tweaked as the viewport grows. 
*/ + +/* ------------------------------------------------- */ +/* iPad and desktop */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 641px) { + + html { + font-size: 15px; + } + + body { + margin: 2.4rem 2.4rem 3.2rem 2.4rem; + } + + h1 { + font-size: 3.57rem; /* 53.2px @15px */ + line-height: 4rem; /* 60px @15px */ + } + + h2 { + font-size: 2.24rem; /* 33.6px @15px */ + line-height: 2.8rem; /* 42px @15px */ + } + +} + +/* ------------------------------------------------- */ +/* Widescreens */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 1441px) { + + html { + font-size: 22px; + } + +} + + + +/* ================================================ */ +/* 4. Print Styles */ +/* ================================================ */ + +/* Inconsistent and buggy across browsers */ + +@media print { + + * { + background: transparent !important; + color: #000 !important; /* Black text prints faster and browsers are inconsistent in color reproduction anyway: h5bp.com/s */ + } + + @page { + margin: 1cm; /* Added to any #wrapper margin*/ + } + + html { + font-size: 15px; + } + + body { + margin: 1rem !important; /* Security margins for browser without @page support */ + } + + #wrapper { + max-width: none; + } + + h1, + h2, + h3, + h4, + h5, + h6, + p { + orphans: 3; + widows: 3; + page-break-after: avoid; + } + + ul, + ol { + list-style-position: inside !important; + padding-right: 0 !important; + margin-left: 0 !important; + } + + ul ul, + ul ol, + ol ul, + ol ol, + ul p:not(:first-child), + ol p:not(:first-child) { + margin-left: 2rem !important; + } + + a:link, + a:visited { + text-decoration: underline !important; + font-weight: normal !important; + } + + a[href]:after { + content: " (" attr(href) ")"; + } + + a[href^="javascript:"]:after, + a[href^="#"]:after { + content: ""; /* Do not show javascript and internal links */ + } + + a[href^="#"] { + text-decoration: none 
!important; + } + + th { + background-color: rgba(0,0,0,0.2) !important; + border-bottom: none !important; + } + + tr { + page-break-inside: avoid; + } + + tbody tr:nth-child(even) { + background-color: rgba(0,0,0,0.1) !important; + } + + pre { + border: 1px solid rgba(0,0,0,0.2); + page-break-inside: avoid; + } + + img { + max-width: 100% !important; + page-break-inside: avoid; + } + + /* #generated-toc: added by Marked for its table of contents */ + + #wrapper #generated-toc ul, /* Table of contents printing in Marked */ + #wrapper #generated-toc ol { + list-style-type: decimal; + } + + #wrapper #generated-toc ul li, + #wrapper #generated-toc ol li { + margin: 1rem 0; + } + +} + + + +/* ================================================ */ +/* 5. Vostok Overrides */ +/* ================================================ */ + +/* ------------------------------------------------- */ +/* General */ +/* ------------------------------------------------- */ + +html { + font-size: 15px; +} + +body { + font-family: "pt serif", Georgia, serif; + color: rgba(0,0,0,0.7); + text-shadow: 0 1px 0 white; + background-color: #ececec; +} + +#wrapper { + max-width: 40rem; +} + +/* ------------------------------------------------- */ +/* Typography */ +/* ------------------------------------------------- */ + +h1, +h2, +h3, +h4, +h5, +h6 { + font-family: "pt sans", "avenir next", sans-serif; + font-weight: 700; + color: rgba(0,0,0,1); +} + +h2, +h3 { + font-family: "pt sans narrow", "avenir next condensed", sans-serif; +} + +h5 { + color: rgba(0,0,0,0.7); +} + +h6 { + color: rgba(0,0,0,0.7); +} + +ul { + list-style-type: none; +} + +ul > li:before { + content: "\2022"; + float: left; + margin-left: -1.2rem; + padding-right: 0.6rem; /* Empirically chosen to align horizontally with the position of standard bullet points */ +} + +ul li, +ol li { + margin: 0; +} + +ul ul, +ul ol, +ol ul, +ol ol, +ul p:not(:first-child), +ol p:not(:first-child) { + margin-left: 1.6rem; +} + +blockquote { + 
border-left: 2px solid rgba(0,0,0,0.5); +} + +a:link { + text-decoration: none; + border-bottom: none; + font-weight: 700; + color: rgba(25,107,240,1); +} + +a:visited { + color: #2c508a; +} + +a:hover { + color: #2DAB5F; +} + +a:focus { + outline: 0.125rem solid; +} + +/* ------------------------------------------------- */ +/* Tables */ +/* ------------------------------------------------- */ + +thead { + border: 1px solid #4C4C4C; /* In hex (= rgba(0,0,0,0.7)) otherwise the alpha gets the border of the tr below */ +} + +th { + background-color: #4C4C4C; /* = rgba(0,0,0,0.7) */ + border-bottom: 1px solid #4C4C4C; /* = rgba(0,0,0,0.7) ; mentionned otherwise the stroke is of the tr below */ + color: white; + text-shadow: 0px -1px 0px black; +} + +tbody tr:nth-child(even) { + background-color: rgba(255,255,255,0.5); +} + +tbody tr:nth-child(odd) { + background-color: transparent; +} + +/* ------------------------------------------------- */ +/* Misc */ +/* ------------------------------------------------- */ + + +table, +caption, +figcaption { + font-family: "pt sans", "avenir next", sans-serif; +} + +code { + border: 1px solid rgba(255,255,255,0.7); + background-color: rgba(255,255,255,0.5); + border-radius: 2px; + color: #2DAB5F; + text-shadow: none; +} + +pre { + border: 1px solid rgba(255,255,255,0.7); + background-color: rgba(255,255,255,0.5); + border-radius: 2px; +} + +pre code { + color: #81A181; + border: none; +} + +hr { + clear: none; + height: 2px; /* height: 0.125rem; */ + border: none; + margin: 1.5rem auto; + width: 14rem; + color: rgba(0,0,0,0.5); + background-color: rgba(0,0,0,0.5); +} + +::selection { + background-color: #fbfb48; +} + +::-moz-selection { + background-color: #fbfb48; +} + +/* #generated-toc: added by Marked for its table of contents */ + +#generated-toc { + text-shadow: none; +} + +#generated-toc ul { /* Compensate for the earlier custom bullet point */ + margin: 0; + list-style-type: none; +} + +#generated-toc ul li { /* Compensate 
for the earlier custom bullet point */ + margin: 0; +} + +#generated-toc ul > li:before { /* Compensate for the earlier custom bullet point */ + content: none; + margin-left: 0; + padding-right: 0; +} + +#generated-toc ul li a { + font-weight: normal; + display: inline; +} + +#generated-toc ul li ul li ul li a { + text-transform: lowercase; + +} + +/* ------------------------------------------------- */ +/* iPad and desktop */ +/* ------------------------------------------------- */ + +@media only screen and (min-width: 641px) { + + html { + font-size: 17px; + } + + h1 { + font-size: 2.81rem; + } + + h2 { + font-size: 1.78rem; + line-height: 2.21rem; + } + + ul, + ol { + margin-left: 0; + } + +} diff --git a/env/doc.go b/env/doc.go new file mode 100644 index 00000000..e0b20d0f --- /dev/null +++ b/env/doc.go @@ -0,0 +1,18 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package env contain virtual database build, rehash, cleanup. +package env diff --git a/env/env.go b/env/env.go new file mode 100644 index 00000000..f2a2474b --- /dev/null +++ b/env/env.go @@ -0,0 +1,485 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package env + +import ( + "fmt" + "strings" + + "github.com/XiaoMi/soar/ast" + "github.com/XiaoMi/soar/common" + "github.com/XiaoMi/soar/database" + + "github.com/dchest/uniuri" + "vitess.io/vitess/go/vt/sqlparser" +) + +// VirtualEnv SQL优化评审 测试环境 +// DB使用的信息从配置文件中获取 +type VirtualEnv struct { + *database.Connector + + // 保存DB测试环境映射关系,防止vEnv环境冲突。 + DBRef map[string]string + hash2Db map[string]string + // 保存Table创建关系,防止重复创建表 + TableMap map[string]map[string]string + // 错误 + Error error +} + +// NewVirtualEnv 初始化一个新的测试环境 +func NewVirtualEnv(vEnv *database.Connector) *VirtualEnv { + return &VirtualEnv{ + Connector: vEnv, + DBRef: make(map[string]string), + hash2Db: make(map[string]string), + TableMap: make(map[string]map[string]string), + } +} + +// BuildEnv 测试环境初始化&连接线上环境检查 +// @output *VirtualEnv 测试环境 +// @output *database.Connector 线上环境连接句柄 +func BuildEnv() (*VirtualEnv, *database.Connector) { + // 生成测试环境 + vEnv := NewVirtualEnv(&database.Connector{ + Addr: common.Config.TestDSN.Addr, + User: common.Config.TestDSN.User, + Pass: common.Config.TestDSN.Password, + Database: common.Config.TestDSN.Schema, + Charset: common.Config.TestDSN.Charset, + }) + + // 检查测试环境可用性,并记录数据库版本 + vEnvVersion, err := vEnv.Version() + common.Config.TestDSN.Version = vEnvVersion + if err != nil { + common.Log.Warn("BuildEnv TestDSN: %s:********@%s/%s not available , Error: %s", + vEnv.User, vEnv.Addr, vEnv.Database, err.Error()) + common.Config.TestDSN.Disable = true + } + + // 连接线上环境 + // 如果未配置线上环境线测试环境配置为线上环境 + if common.Config.OnlineDSN.Addr == "" { + 
common.Log.Warn("BuildEnv AllowOnlineAsTest: OnlineDSN not config, use TestDSN: %s:********@%s/%s as OnlineDSN", + vEnv.User, vEnv.Addr, vEnv.Database) + common.Config.OnlineDSN = common.Config.TestDSN + } + conn := &database.Connector{ + Addr: common.Config.OnlineDSN.Addr, + User: common.Config.OnlineDSN.User, + Pass: common.Config.OnlineDSN.Password, + Database: common.Config.OnlineDSN.Schema, + Charset: common.Config.OnlineDSN.Charset, + } + + // 检查线上环境可用性版本 + rEnvVersion, err := vEnv.Version() + common.Config.OnlineDSN.Version = rEnvVersion + if err != nil { + common.Log.Warn("BuildEnv OnlineDSN: %s:********@%s/%s not available , Error: %s", + vEnv.User, vEnv.Addr, vEnv.Database, err.Error()) + common.Config.TestDSN.Disable = true + } + + // 检查是否允许Online和Test一致,防止误操作 + if common.FormatDSN(common.Config.OnlineDSN) == common.FormatDSN(common.Config.TestDSN) && + !common.Config.AllowOnlineAsTest { + common.Log.Warn("BuildEnv AllowOnlineAsTest: %s:********@%s/%s OnlineDSN can't config as TestDSN", + vEnv.User, vEnv.Addr, vEnv.Database) + common.Config.TestDSN.Disable = true + common.Config.OnlineDSN.Disable = true + } + + // 判断测试环境与remote环境版本是否一致 + if vEnvVersion < rEnvVersion { + common.Log.Warning("TestDSN MySQL version older than OnlineDSN, TestDSN will not be used", vEnvVersion, rEnvVersion) + common.Config.TestDSN.Disable = true + } + + return vEnv, conn +} + +// RealDB 从测试环境中获取通过hash后的DB +func (ve VirtualEnv) RealDB(hash string) string { + if _, ok := ve.hash2Db[hash]; ok { + return ve.hash2Db[hash] + } + return hash +} + +// DBHash 从测试环境中根据DB找到对应的hash值 +func (ve VirtualEnv) DBHash(db string) string { + if _, ok := ve.DBRef[db]; ok { + return ve.DBRef[db] + } + return db +} + +// CleanUp 环境清理 +func (ve VirtualEnv) CleanUp() bool { + if !common.Config.TestDSN.Disable && common.Config.DropTestTemporary { + common.Log.Debug("CleanUp ...") + for db := range ve.hash2Db { + ve.Database = db + _, err := ve.Query("drop database %s", db) + if err != nil { + 
common.Log.Error("CleanUp failed Error: %s", err) + return false + } + } + common.Log.Debug("CleanUp, done") + } + return true +} + +// BuildVirtualEnv rEnv为SQL源环境,DB使用的信息从接口获取 +// 注意:如果是USE,DDL等语句,执行完第一条就会返回,后面的SQL不会执行 +func (ve *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string) bool { + var stmt sqlparser.Statement + var err error + + // 置空错误信息 + ve.Error = nil + // 检测是否已经创建初始数据库,如果未创建则创建一个名称hash过的映射数据库 + err = ve.createDatabase(*rEnv, rEnv.Database) + common.LogIfWarn(err, "") + + // 测试环境检测 + if common.Config.TestDSN.Disable { + common.Log.Info("BuildVirtualEnv TestDSN not config") + return true + } + + // 判断rEnv中是否指定了DB + if rEnv.Database == "" { + common.Log.Error("BuildVirtualEnv no database specified, TestDSN init failed") + return false + } + + // 库表提取 + meta := make(map[string]*common.DB) + for _, sql := range SQLs { + + common.Log.Debug("BuildVirtualEnv Database&Table Mapping, SQL: %s", sql) + + stmt, err = sqlparser.Parse(sql) + if err != nil { + common.Log.Error("BuildVirtualEnv Error : %v", err) + return false + } + + // 语句类型判断 + switch stmt := stmt.(type) { + case *sqlparser.Use: + // 如果是use语句,则更改基础环配置 + if _, ok := meta[stmt.DBName.String()]; !ok { + // 如果USE了一个线上环境不存在的数据库,将创建该数据库,字符集默认utf8mb4 + meta[stmt.DBName.String()] = common.NewDB(stmt.DBName.String()) + rEnv.Database = stmt.DBName.String() + + // use DB 后检查 DB是否已经创建,如果没有创建则创建DB + err = ve.createDatabase(*rEnv, rEnv.Database) + common.LogIfWarn(err, "") + } + return true + case *sqlparser.DDL: + // 如果是DDL,则先获取DDL对应的表结构,然后直接在测试环境接执行SQL + // 为不影响其他SQL操作,复制一个Connector对象,将数据库切换到对应的DB上直接执行 + tmpDB := *ve.Connector + tmpDB.Database = ve.DBRef[rEnv.Database] + + // 为了支持并发,需要将DB进行映射,但db.table这种形式无法保证DB的映射是正确的 + // TODO:暂不支持 create db.tableName (id int) 形式的建表语句 + if stmt.Table.Qualifier.String() != "" || stmt.NewName.Qualifier.String() != "" { + common.Log.Error("BuildVirtualEnv DDL Not support '.'") + return false + } + + // 拉取表结构 + table := stmt.Table.Name.String() + if table != "" 
{ + err = ve.createTable(*rEnv, rEnv.Database, table) + if err != nil { + common.Log.Error("BuildVirtualEnv Error : %v", err) + return false + } + } + + _, err = tmpDB.Query(sql) + if err != nil { + switch stmt.Action { + case "create", "alter": + // 如果是创建或者修改语句,且报错信息为如重复建表、重复索引等信息,将错误反馈到上一次层输出建议 + ve.Error = err + default: + common.Log.Error("BuildVirtualEnv DDL Execute Error : %v", err) + } + } + return true + } + + meta := ast.GetMeta(stmt, nil) + + // 由于DB环境可能是变的,所以需要每一次都单独的提取库表结构,整体随着rEnv的变动而发生变化 + for db, table := range meta { + if db == "" { + db = rEnv.Database + } + tmpEnv := *rEnv + tmpEnv.Database = db + + // 创建数据库环境 + for _, tb := range table.Table { + if tb.TableName == "" { + continue + } + + // 视图检查 + common.Log.Debug("BuildVirtualEnv Checking view -- %s.%s", tmpEnv.Database, tb.TableName) + tbStatus, err := tmpEnv.ShowTableStatus(tb.TableName) + if err != nil { + common.Log.Error("BuildVirtualEnv ShowTableStatus Error : %v", err) + return false + } + + // 如果是视图,解析语句 + if len(tbStatus.Rows) > 0 && tbStatus.Rows[0].Comment == "VIEW" { + tmpEnv.Database = db + var viewDDL string + viewDDL, err = tmpEnv.ShowCreateTable(tb.TableName) + if err != nil { + common.Log.Error("BuildVirtualEnv create view failed: %v", err) + return false + } + + startIdx := strings.Index(viewDDL, "AS") + viewDDL = viewDDL[startIdx+2:] + if !ve.BuildVirtualEnv(&tmpEnv, viewDDL) { + return false + } + } + + err = ve.createTable(tmpEnv, db, tb.TableName) + if err != nil { + common.Log.Error("BuildVirtualEnv Error : %v", err) + return false + } + } + } + } + return true +} + +func (ve VirtualEnv) createDatabase(rEnv database.Connector, dbName string) error { + // 生成映射关系 + if _, ok := ve.DBRef[dbName]; ok { + common.Log.Debug("createDatabase, Database `%s` created", dbName) + return nil + } + + dbHash := "optimizer_" + uniuri.New() + common.Log.Debug("createDatabase, mapping `%s` :`%s`-->`%s`", dbName, dbName, dbHash) + ddl, err := rEnv.ShowCreateDatabase(dbName) + if err != nil { + 
common.Log.Warning("createDatabase, rEnv.ShowCreateDatabase Error : %v", err) + ddl = fmt.Sprintf("create database `%s` character set utf8mb4", dbName) + } + + ddl = strings.Replace(ddl, dbName, dbHash, -1) + _, err = ve.Query(ddl) + if err != nil { + common.Log.Warning("createDatabase, Error : %v", err) + return err + } + + // 创建成功,添加映射记录 + ve.DBRef[dbName] = dbHash + ve.hash2Db[dbHash] = dbName + return nil +} + +/* + @input: + database.Connector 为一个线上环境数据库连接句柄的复制,因为在处理SQL时需要对上下文进行关联处理, + 所以存在修改DB连接参数(主要是数据库名称变更)的可能性,为了不影响整体上下文的环境,所以需要一个镜像句柄来做当前环境的操作。 + + dbName, tbName: 需要在环境中操作的库表名称, + + @output: + return 执行过程中的错误 + + NOTE: + 该函数会将线上环境中使用到的库表结构复制到测试环境中,为后续操作提供基础环境。 + 传入的库表名称均来自于对AST的解析,库表名称的获取遵循以下原则: + 如果未在SQL中指定数据库名称,则数据库一定是配置文件(或命令行参数传入DSN)中指定的数据库 + 如果一个SQL中存在多个数据库,则只能有一个数据库是没有在SQL中被显示指定的(即DSN中指定的数据库) + TODO: + 在一些可能的情况下,由于数据库配置的不一致(如SQL_MODE不同)导致remote环境的库表无法正确的在测试环境进行同步, + soar能够做出判断并进行session级别的修改,但是这一阶段可用性保证应该是由用户提供两个完全相同(或测试环境兼容线上环境) + 的数据库环境来实现的。 +*/ +func (ve VirtualEnv) createTable(rEnv database.Connector, dbName, tbName string) error { + + if dbName == "" { + dbName = rEnv.Database + } + // 如果 dbName 不为空,说明指定了DB,临时修改rEnv中DB参数,来确保执行正确性 + rEnv.Database = dbName + + if ve.TableMap[dbName] == nil { + ve.TableMap[dbName] = make(map[string]string) + } + + if strings.ToLower(tbName) == "dual" { + common.Log.Debug("createTable, %s no need create", tbName) + return nil + } + + if ve.TableMap[dbName][tbName] != "" { + common.Log.Debug("createTable, `%s`.`%s` created", dbName, tbName) + return nil + } + + common.Log.Debug("createTable, Database: %s, Table: %s", dbName, tbName) + + // TODO:查看是否有外键关联(done),对外键的支持 (未解决循环依赖的问题) + + // 判断数据库是否已经创建 + if ve.DBRef[dbName] == "" { + // 若没创建,则创建数据库 + err := ve.createDatabase(rEnv, dbName) + if err != nil { + return err + } + } + + // 记录Table创建信息 + ve.TableMap[dbName][tbName] = tbName + + // 生成建表语句 + common.Log.Debug("createTable DSN(%s/%s): generate ddl", rEnv.Addr, rEnv.Database) + + ddl, err := 
rEnv.ShowCreateTable(tbName) + if err != nil { + // 有可能是用户新建表,因此线上环境查不到 + common.Log.Error("createTable, %s DDL Error : %v", tbName, err) + return err + } + + // 改变数据环境 + ve.Database = ve.DBRef[dbName] + _, err = ve.Query(ddl) + if err != nil { + // 有可能是用户新建表,因此线上环境查不到 + common.Log.Error("createTable, %s Error : %v", tbName, err) + return err + } + + // 泵取数据 + if common.Config.Sampling { + common.Log.Debug("createTable, Start Sampling data from %s.%s to %s.%s ...", dbName, tbName, ve.DBRef[dbName], tbName) + err := ve.SamplingData(rEnv, tbName) + if err != nil { + common.Log.Error(" (ve VirtualEnv) createTable SamplingData Error: %v", err) + return err + } + } + return nil +} + +// GenTableColumns 为Rewrite提供的结构体初始化 +func (ve *VirtualEnv) GenTableColumns(meta common.Meta) common.TableColumns { + tableColumns := make(common.TableColumns) + for dbName, db := range meta { + for _, tb := range db.Table { + // 防止传入非预期值 + if tb == nil { + break + } + td, err := ve.Connector.ShowColumns(tb.TableName) + if err != nil { + common.Log.Warn("GenTableColumns, ShowColumns Error: " + err.Error()) + break + } + + // tableColumns 初始化 + if dbName == "" { + dbName = ve.RealDB(ve.Connector.Database) + } + + if _, ok := tableColumns[dbName]; !ok { + tableColumns[dbName] = make(map[string][]*common.Column) + } + + if _, ok := tableColumns[dbName][tb.TableName]; !ok { + tableColumns[dbName][tb.TableName] = make([]*common.Column, 0) + } + + if len(tb.Column) == 0 { + // tb.column为空说明SQL里这个表是用的*来查询 + if err != nil { + common.Log.Error("ast.Rewrite ShowColumns, Error: %v", err) + break + } + + for _, colInfo := range td.DescValues { + tableColumns[dbName][tb.TableName] = append(tableColumns[dbName][tb.TableName], &common.Column{ + Name: colInfo.Field, + DB: dbName, + Table: tb.TableName, + DataType: colInfo.Type, + Character: colInfo.Collation, + Key: colInfo.Key, + Default: colInfo.Default, + Extra: colInfo.Extra, + Comment: colInfo.Comment, + Privileges: colInfo.Privileges, + Null: 
colInfo.Null, + }) + } + } else { + // tb.column如果不为空则需要把使用到的列填写进去 + var columns []*common.Column + for _, col := range tb.Column { + for _, colInfo := range td.DescValues { + if col.Name == colInfo.Field { + // 根据获取的信息将列的信息补全 + col.DB = dbName + col.Table = tb.TableName + col.DataType = colInfo.Type + col.Character = colInfo.Collation + col.Key = colInfo.Key + col.Default = colInfo.Default + col.Extra = colInfo.Extra + col.Comment = colInfo.Comment + col.Privileges = colInfo.Privileges + col.Null = colInfo.Null + + columns = append(columns, col) + break + } + } + } + tableColumns[dbName][tb.TableName] = columns + } + } + } + return tableColumns +} diff --git a/env/env_test.go b/env/env_test.go new file mode 100644 index 00000000..ad511ae7 --- /dev/null +++ b/env/env_test.go @@ -0,0 +1,182 @@ +/* + * Copyright 2018 Xiaomi, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package env + +import ( + "flag" + "testing" + + "github.com/XiaoMi/soar/common" + "github.com/XiaoMi/soar/database" + "github.com/kr/pretty" + "github.com/ziutek/mymysql/mysql" +) + +var connTest *database.Connector +var update = flag.Bool("update", false, "update .golden files") + +func init() { + common.BaseDir = common.DevPath + err := common.ParseConfig("") + common.LogIfError(err, "init ParseConfig") + connTest = &database.Connector{ + Addr: common.Config.TestDSN.Addr, + User: common.Config.TestDSN.User, + Pass: common.Config.TestDSN.Password, + Database: common.Config.TestDSN.Schema, + Charset: common.Config.TestDSN.Charset, + } +} + +func TestNewVirtualEnv(t *testing.T) { + testSQL := []string{ + "create table t(id int,c1 varchar(20),PRIMARY KEY (id));", + "alter table t add index `idx_c1`(c1);", + "alter table t add index `idx_c1`(c1);", + "select * from city where country_id = 44;", + "select * from address where address2 is not null;", + "select * from address where address2 is null;", + "select * from address where address2 >= 44;", + "select * from city where country_id between 44 and 107;", + "select * from city where city like 'Ad%';", + "select * from city where city = 'Aden' and country_id = 107;", + "select * from city where country_id > 31 and city = 'Aden';", + "select * from address where address_id > 8 and city_id < 400 and district = 'Nantou';", + "select * from address where address_id > 8 and city_id < 400;", + "select * from actor where last_update='2006-02-15 04:34:33' and last_name='CHASE' group by first_name;", + "select * from address where last_update >='2014-09-25 22:33:47' group by district;", + "select * from address group by address,district;", + "select * from address where last_update='2014-09-25 22:30:27' group by district,(address_id+city_id);", + "select * from customer where active=1 order by last_name limit 10;", + "select * from customer order by last_name limit 10;", + "select * from customer where address_id > 
224 order by address_id limit 10;", + "select * from customer where address_id < 224 order by address_id limit 10;", + "select * from customer where active=1 order by last_name;", + "select * from customer where address_id > 224 order by address_id;", + "select * from customer where address_id in (224,510) order by last_name;", + "select city from city where country_id = 44;", + "select city,city_id from city where country_id = 44 and last_update='2006-02-15 04:45:25';", + "select city from city where country_id > 44 and last_update > '2006-02-15 04:45:25';", + "select * from city where country_id=1 and city='Kabul' order by last_update;", + "select * from city where country_id>1 and city='Kabul' order by last_update;", + "select * from city where city_id>251 order by last_update; ", + "select * from city i inner join country o on i.country_id=o.country_id;", + "select * from city i left join country o on i.city_id=o.country_id;", + "select * from city i right join country o on i.city_id=o.country_id;", + "select * from city i left join country o on i.city_id=o.country_id where o.country_id is null;", + "select * from city i right join country o on i.city_id=o.country_id where i.city_id is null;", + "select * from city i left join country o on i.city_id=o.country_id union select * from city i right join country o on i.city_id=o.country_id;", + "select * from city i left join country o on i.city_id=o.country_id where o.country_id is null union select * from city i right join country o on i.city_id=o.country_id where i.city_id is null;", + "select first_name,last_name,email from customer natural left join address;", + "select first_name,last_name,email from customer natural left join address;", + "select first_name,last_name,email from customer natural right join address;", + "select first_name,last_name,email from customer STRAIGHT_JOIN address on customer.address_id=address.address_id;", + "select ID,name from (select address from customer_list where SID=1 order by 
phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc;", + } + + rEnv := connTest + + env := NewVirtualEnv(connTest) + defer env.CleanUp() + common.GoldenDiff(func() { + for _, sql := range testSQL { + env.BuildVirtualEnv(rEnv, sql) + switch err := env.Error.(type) { + case nil: + pretty.Println(sql, "OK") + case error: + // unexpected EOF + // 测试环境无法访问,或者被Disable的时候会进入这个分支 + pretty.Println(sql, err) + case *mysql.Error: + if err.Code != 1061 { + t.Error(err) + } + default: + t.Error(err) + } + } + }, t.Name(), update) +} + +func TestGenTableColumns(t *testing.T) { + vEnv, rEnv := BuildEnv() + defer vEnv.CleanUp() + + pretty.Println(common.Config.TestDSN.Disable) + if common.Config.TestDSN.Disable { + common.Log.Warn("common.Config.TestDSN.Disable=true, by pass TestGenTableColumns") + return + } + + // 只能对sakila数据库进行测试 + if rEnv.Database == "sakila" { + testSQL := []string{ + "select * from city where country_id = 44;", + "select country_id from city where country_id = 44;", + "select country_id from city where country_id > 44;", + } + + metaList := []common.Meta{ + { + "": &common.DB{ + Table: map[string]*common.Table{ + "city": common.NewTable("city"), + }, + }, + }, + { + "sakila": &common.DB{ + Table: map[string]*common.Table{ + "city": common.NewTable("city"), + }, + }, + }, + { + "sakila": &common.DB{ + Table: map[string]*common.Table{ + "city": { + TableName: "city", + Column: map[string]*common.Column{ + "country_id": { + Name: "country_id", + }, + }, + }, + }, + }, + }, + } + + for i, sql := range testSQL { + vEnv.BuildVirtualEnv(rEnv, sql) + tFlag := false + columns := vEnv.GenTableColumns(metaList[i]) + if _, ok := columns["sakila"]; ok { + if _, okk := columns["sakila"]["city"]; okk { + if length := len(columns["sakila"]["city"]); length >= 1 { + tFlag = true + } + } + } + + if !tFlag { + t.Errorf("columns: \n%s", pretty.Sprint(columns)) + } + } + } +} diff --git 
a/env/testdata/TestNewVirtualEnv.golden b/env/testdata/TestNewVirtualEnv.golden new file mode 100644 index 00000000..65edf3fd --- /dev/null +++ b/env/testdata/TestNewVirtualEnv.golden @@ -0,0 +1,42 @@ +create table t(id int,c1 varchar(20),PRIMARY KEY (id)); OK +alter table t add index `idx_c1`(c1); OK +alter table t add index `idx_c1`(c1); OK +select * from city where country_id = 44; OK +select * from address where address2 is not null; OK +select * from address where address2 is null; OK +select * from address where address2 >= 44; OK +select * from city where country_id between 44 and 107; OK +select * from city where city like 'Ad%'; OK +select * from city where city = 'Aden' and country_id = 107; OK +select * from city where country_id > 31 and city = 'Aden'; OK +select * from address where address_id > 8 and city_id < 400 and district = 'Nantou'; OK +select * from address where address_id > 8 and city_id < 400; OK +select * from actor where last_update='2006-02-15 04:34:33' and last_name='CHASE' group by first_name; OK +select * from address where last_update >='2014-09-25 22:33:47' group by district; OK +select * from address group by address,district; OK +select * from address where last_update='2014-09-25 22:30:27' group by district,(address_id+city_id); OK +select * from customer where active=1 order by last_name limit 10; OK +select * from customer order by last_name limit 10; OK +select * from customer where address_id > 224 order by address_id limit 10; OK +select * from customer where address_id < 224 order by address_id limit 10; OK +select * from customer where active=1 order by last_name; OK +select * from customer where address_id > 224 order by address_id; OK +select * from customer where address_id in (224,510) order by last_name; OK +select city from city where country_id = 44; OK +select city,city_id from city where country_id = 44 and last_update='2006-02-15 04:45:25'; OK +select city from city where country_id > 44 and last_update > 
'2006-02-15 04:45:25'; OK +select * from city where country_id=1 and city='Kabul' order by last_update; OK +select * from city where country_id>1 and city='Kabul' order by last_update; OK +select * from city where city_id>251 order by last_update; OK +select * from city i inner join country o on i.country_id=o.country_id; OK +select * from city i left join country o on i.city_id=o.country_id; OK +select * from city i right join country o on i.city_id=o.country_id; OK +select * from city i left join country o on i.city_id=o.country_id where o.country_id is null; OK +select * from city i right join country o on i.city_id=o.country_id where i.city_id is null; OK +select * from city i left join country o on i.city_id=o.country_id union select * from city i right join country o on i.city_id=o.country_id; OK +select * from city i left join country o on i.city_id=o.country_id where o.country_id is null union select * from city i right join country o on i.city_id=o.country_id where i.city_id is null; OK +select first_name,last_name,email from customer natural left join address; OK +select first_name,last_name,email from customer natural left join address; OK +select first_name,last_name,email from customer natural right join address; OK +select first_name,last_name,email from customer STRAIGHT_JOIN address on customer.address_id=address.address_id; OK +select ID,name from (select address from customer_list where SID=1 order by phone limit 50,10) a join customer_list l on (a.address=l.address) join city c on (c.city=l.city) order by phone desc; OK diff --git a/etc/soar.blacklist b/etc/soar.blacklist new file mode 100644 index 00000000..270f88de --- /dev/null +++ b/etc/soar.blacklist @@ -0,0 +1,9 @@ +# 这是一个黑名单例子 +## 不评审常见的SET, SHOW, SELECT CONST等完美请求 +^set.* +^show.* +^select \?$ +^\/\*.*\*\/$ +^drop.* +^lock.* +^unlock.* diff --git a/etc/soar.yaml b/etc/soar.yaml new file mode 100644 index 00000000..04cdcdfa --- /dev/null +++ b/etc/soar.yaml @@ -0,0 +1,20 @@ +# 这是一个配置文件例子 
+online-dsn: + addr: 127.0.0.1:3306 + schema: sakila + user: root + password: "1tIsB1g3rt" + disable: false + +test-dsn: + addr: 127.0.0.1:3306 + schema: sakila + user: root + password: "1tIsB1g3rt" + disable: false + +allow-online-as-test: true + +log-level: 7 +log-output: soar.log +sampling: true diff --git a/genver.sh b/genver.sh new file mode 100755 index 00000000..5c199f40 --- /dev/null +++ b/genver.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +## Go version check +GO_VERSION_MIN=$1 +echo "==> Checking that build is using go version >= ${GO_VERSION_MIN}..." + +GO_VERSION=$(go version | grep -o 'go[0-9]\+\.[0-9]\+\(\.[0-9]\+\)\?' | tr -d 'go') + +IFS="." read -r -a GO_VERSION_ARR <<<"$GO_VERSION" +IFS="." read -r -a GO_VERSION_REQ <<<"$GO_VERSION_MIN" + +if [[ ${GO_VERSION_ARR[0]} -lt ${GO_VERSION_REQ[0]} || (${GO_VERSION_ARR[0]} -eq ${GO_VERSION_REQ[0]} && (${GO_VERSION_ARR[1]} -lt ${GO_VERSION_REQ[1]} || (${GO_VERSION_ARR[1]} -eq ${GO_VERSION_REQ[1]} && ${GO_VERSION_ARR[2]} -lt ${GO_VERSION_REQ[2]}))) ]] \ + ; then + echo "requires go $GO_VERSION_MIN to build; found $GO_VERSION." 
+ exit 1 +fi + +## Generate Repository Version +version=$(git log --date=iso --pretty=format:"%cd @%h" -1) +if [ "X${version}" == "X" ]; then + version="not a git repo" +fi + +git_dirty=$(git diff --no-ext-diff 2>/dev/null | wc -l) + +compile="$(date +"%F %T %z") by $(go version)" + +branch=$(git rev-parse --abbrev-ref HEAD) + +dev_path=$( + cd "$(dirname "$0")" || exit + pwd +) + +cat <common/version.go +package common + +// -version输出信息 +const ( + Version = "${version}" + Compile = "${compile}" + Branch = "${branch}" + GitDirty= ${git_dirty} + DevPath = "${dev_path}" +) +EOF diff --git a/retool-install.sh b/retool-install.sh new file mode 100755 index 00000000..23e441e3 --- /dev/null +++ b/retool-install.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -euo pipefail + +# This script generates tools.json +# It helps record what releases/branches are being used +which retool >/dev/null || go get -u github.com/twitchtv/retool + +# This tool can run other checks in a standardized way +retool add gopkg.in/alecthomas/gometalinter.v2 v2.0.5 + +# check spelling +# misspell works with gometalinter +retool add github.com/client9/misspell/cmd/misspell v0.3.4 +# goword adds additional capability to check comments +retool add github.com/chzchzchz/goword a9744cb52b033fe5c269df48eeef2c954526cd79 + +# checks correctness +retool add github.com/gordonklaus/ineffassign 7bae11eba15a3285c75e388f77eb6357a2d73ee2 +retool add honnef.co/go/tools/cmd/megacheck master +retool add github.com/dnephin/govet 4a96d43e39d340b63daa8bc5576985aa599885f6 + +# slow checks +retool add github.com/kisielk/errcheck v1.1.0 +retool add github.com/securego/gosec/cmd/gosec 1.0.0 + +# linter +retool add github.com/mgechev/revive 7773f47324c2bf1c8f7a5500aff2b6c01d3ed73b +retool add github.com/golangci/golangci-lint/cmd/golangci-lint v1.10 diff --git a/revive.toml b/revive.toml new file mode 100644 index 00000000..ebe13ddc --- /dev/null +++ b/revive.toml @@ -0,0 +1,51 @@ +ignoreGeneratedHeader = false +severity = 
"error" +confidence = 0.8 +errorCode = 0 +warningCode = 0 + +[rule.blank-imports] +[rule.context-as-argument] +[rule.dot-imports] +[rule.error-return] +[rule.error-strings] +[rule.error-naming] +[rule.exported] +[rule.if-return] +[rule.var-naming] +[rule.package-comments] +[rule.range] +[rule.receiver-naming] +[rule.indent-error-flow] +[rule.superfluous-else] +[rule.modifies-parameter] + +# This can be checked by other tools like megacheck +#[rule.unreachable-code] + + +# Currently this makes too much noise, but should add it in +# and perhaps ignore it in a few files +#[rule.confusing-naming] +# severity = "warning" +#[rule.confusing-results] +# severity = "warning" +#[rule.unused-parameter] +# severity = "warning" +#[rule.deep-exit] +# severity = "warning" +#[rule.flag-parameter] +# severity = "warning" + + + +# Adding these will slow down the linter +# They are already provided by megacheck +#[rule.unexported-return] +#[rule.time-naming] +#[rule.errorf] + +# Adding these will slow down the linter +# Not sure if they are already provided by megacheck +#[rule.var-declaration] +#[rule.context-keys-type] diff --git a/tools.json b/tools.json new file mode 100644 index 00000000..b9e5b2df --- /dev/null +++ b/tools.json @@ -0,0 +1,45 @@ +{ + "Tools": [ + { + "Repository": "gopkg.in/alecthomas/gometalinter.v2", + "Commit": "46cc1ea3778b247666c2949669a3333c532fa9c6" + }, + { + "Repository": "github.com/client9/misspell/cmd/misspell", + "Commit": "7888c6b6ce89353cd98e196bce3c3f9e4cdf31f6" + }, + { + "Repository": "github.com/chzchzchz/goword", + "Commit": "a9744cb52b033fe5c269df48eeef2c954526cd79" + }, + { + "Repository": "github.com/gordonklaus/ineffassign", + "Commit": "7bae11eba15a3285c75e388f77eb6357a2d73ee2" + }, + { + "Repository": "github.com/dnephin/govet", + "Commit": "4a96d43e39d340b63daa8bc5576985aa599885f6" + }, + { + "Repository": "github.com/securego/gosec/cmd/gosec", + "Commit": "5fb530cda357c16175f2c049577d2030de735b28" + }, + { + "Repository": 
"github.com/kisielk/errcheck", + "Commit": "55d8f507faff4d6eddd0c41a3e713e2567fca4e5" + }, + { + "Repository": "github.com/mgechev/revive", + "Commit": "7773f47324c2bf1c8f7a5500aff2b6c01d3ed73b" + }, + { + "Repository": "github.com/golangci/golangci-lint/cmd/golangci-lint", + "Commit": "a2b901227c37337bce9860499a413db2b464481b" + }, + { + "Repository": "honnef.co/go/tools/cmd/megacheck", + "Commit": "88497007e8588ea5b6baee991f74a1607e809487" + } + ], + "RetoolVersion": "1.3.7" +} \ No newline at end of file diff --git a/vendor/github.com/astaxie/beego/LICENSE b/vendor/github.com/astaxie/beego/LICENSE new file mode 100644 index 00000000..5dbd4243 --- /dev/null +++ b/vendor/github.com/astaxie/beego/LICENSE @@ -0,0 +1,13 @@ +Copyright 2014 astaxie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/astaxie/beego/logs/README.md b/vendor/github.com/astaxie/beego/logs/README.md new file mode 100644 index 00000000..57d7abc3 --- /dev/null +++ b/vendor/github.com/astaxie/beego/logs/README.md @@ -0,0 +1,63 @@ +## logs +logs is a Go logs manager. It can use many logs adapters. The repo is inspired by `database/sql` . + + +## How to install? + + go get github.com/astaxie/beego/logs + + +## What adapters are supported? + +As of now this logs support console, file,smtp and conn. + + +## How to use it? 
+ +First you must import it + + import ( + "github.com/astaxie/beego/logs" + ) + +Then init a Log (example with console adapter) + + log := NewLogger(10000) + log.SetLogger("console", "") + +> the first params stand for how many channel + +Use it like this: + + log.Trace("trace") + log.Info("info") + log.Warn("warning") + log.Debug("debug") + log.Critical("critical") + + +## File adapter + +Configure file adapter like this: + + log := NewLogger(10000) + log.SetLogger("file", `{"filename":"test.log"}`) + + +## Conn adapter + +Configure like this: + + log := NewLogger(1000) + log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`) + log.Info("info") + + +## Smtp adapter + +Configure like this: + + log := NewLogger(10000) + log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`) + log.Critical("sendmail critical") + time.Sleep(time.Second * 30) diff --git a/vendor/github.com/astaxie/beego/logs/accesslog.go b/vendor/github.com/astaxie/beego/logs/accesslog.go new file mode 100644 index 00000000..cf799dc1 --- /dev/null +++ b/vendor/github.com/astaxie/beego/logs/accesslog.go @@ -0,0 +1,86 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logs + +import ( + "bytes" + "encoding/json" + "time" + "fmt" +) + +const ( + apacheFormatPattern = "%s - - [%s] \"%s %d %d\" %f %s %s\n" + apacheFormat = "APACHE_FORMAT" + jsonFormat = "JSON_FORMAT" +) + +// AccessLogRecord struct for holding access log data. +type AccessLogRecord struct { + RemoteAddr string `json:"remote_addr"` + RequestTime time.Time `json:"request_time"` + RequestMethod string `json:"request_method"` + Request string `json:"request"` + ServerProtocol string `json:"server_protocol"` + Host string `json:"host"` + Status int `json:"status"` + BodyBytesSent int64 `json:"body_bytes_sent"` + ElapsedTime time.Duration `json:"elapsed_time"` + HTTPReferrer string `json:"http_referrer"` + HTTPUserAgent string `json:"http_user_agent"` + RemoteUser string `json:"remote_user"` +} + +func (r *AccessLogRecord) json() ([]byte, error) { + buffer := &bytes.Buffer{} + encoder := json.NewEncoder(buffer) + disableEscapeHTML(encoder) + + err := encoder.Encode(r) + return buffer.Bytes(), err +} + +func disableEscapeHTML(i interface{}) { + e, ok := i.(interface { + SetEscapeHTML(bool) + }); + if ok { + e.SetEscapeHTML(false) + } +} + +// AccessLog - Format and print access log. 
+func AccessLog(r *AccessLogRecord, format string) { + var msg string + + switch format { + + case apacheFormat: + timeFormatted := r.RequestTime.Format("02/Jan/2006 03:04:05") + msg = fmt.Sprintf(apacheFormatPattern, r.RemoteAddr, timeFormatted, r.Request, r.Status, r.BodyBytesSent, + r.ElapsedTime.Seconds(), r.HTTPReferrer, r.HTTPUserAgent) + case jsonFormat: + fallthrough + default: + jsonData, err := r.json() + if err != nil { + msg = fmt.Sprintf(`{"Error": "%s"}`, err) + } else { + msg = string(jsonData) + } + } + + beeLogger.Debug(msg) +} diff --git a/vendor/github.com/astaxie/beego/logs/color.go b/vendor/github.com/astaxie/beego/logs/color.go new file mode 100644 index 00000000..41d23638 --- /dev/null +++ b/vendor/github.com/astaxie/beego/logs/color.go @@ -0,0 +1,28 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package logs + +import "io" + +type ansiColorWriter struct { + w io.Writer + mode outputMode +} + +func (cw *ansiColorWriter) Write(p []byte) (int, error) { + return cw.w.Write(p) +} diff --git a/vendor/github.com/astaxie/beego/logs/color_windows.go b/vendor/github.com/astaxie/beego/logs/color_windows.go new file mode 100644 index 00000000..4e28f188 --- /dev/null +++ b/vendor/github.com/astaxie/beego/logs/color_windows.go @@ -0,0 +1,428 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package logs + +import ( + "bytes" + "io" + "strings" + "syscall" + "unsafe" +) + +type ( + csiState int + parseResult int +) + +const ( + outsideCsiCode csiState = iota + firstCsiCode + secondCsiCode +) + +const ( + noConsole parseResult = iota + changedColor + unknown +) + +type ansiColorWriter struct { + w io.Writer + mode outputMode + state csiState + paramStartBuf bytes.Buffer + paramBuf bytes.Buffer +} + +const ( + firstCsiChar byte = '\x1b' + secondeCsiChar byte = '[' + separatorChar byte = ';' + sgrCode byte = 'm' +) + +const ( + foregroundBlue = uint16(0x0001) + foregroundGreen = uint16(0x0002) + foregroundRed = uint16(0x0004) + foregroundIntensity = uint16(0x0008) + backgroundBlue = uint16(0x0010) + backgroundGreen = uint16(0x0020) + backgroundRed = uint16(0x0040) + backgroundIntensity = uint16(0x0080) + underscore = uint16(0x8000) + + foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity + backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity +) + +const ( + ansiReset = "0" + ansiIntensityOn = "1" + ansiIntensityOff = "21" + ansiUnderlineOn = "4" + ansiUnderlineOff = "24" + ansiBlinkOn = "5" + ansiBlinkOff = "25" + + ansiForegroundBlack = "30" + ansiForegroundRed = "31" + ansiForegroundGreen = "32" + ansiForegroundYellow = "33" + ansiForegroundBlue = "34" + ansiForegroundMagenta = "35" + ansiForegroundCyan = 
"36" + ansiForegroundWhite = "37" + ansiForegroundDefault = "39" + + ansiBackgroundBlack = "40" + ansiBackgroundRed = "41" + ansiBackgroundGreen = "42" + ansiBackgroundYellow = "43" + ansiBackgroundBlue = "44" + ansiBackgroundMagenta = "45" + ansiBackgroundCyan = "46" + ansiBackgroundWhite = "47" + ansiBackgroundDefault = "49" + + ansiLightForegroundGray = "90" + ansiLightForegroundRed = "91" + ansiLightForegroundGreen = "92" + ansiLightForegroundYellow = "93" + ansiLightForegroundBlue = "94" + ansiLightForegroundMagenta = "95" + ansiLightForegroundCyan = "96" + ansiLightForegroundWhite = "97" + + ansiLightBackgroundGray = "100" + ansiLightBackgroundRed = "101" + ansiLightBackgroundGreen = "102" + ansiLightBackgroundYellow = "103" + ansiLightBackgroundBlue = "104" + ansiLightBackgroundMagenta = "105" + ansiLightBackgroundCyan = "106" + ansiLightBackgroundWhite = "107" +) + +type drawType int + +const ( + foreground drawType = iota + background +) + +type winColor struct { + code uint16 + drawType drawType +} + +var colorMap = map[string]winColor{ + ansiForegroundBlack: {0, foreground}, + ansiForegroundRed: {foregroundRed, foreground}, + ansiForegroundGreen: {foregroundGreen, foreground}, + ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground}, + ansiForegroundBlue: {foregroundBlue, foreground}, + ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground}, + ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground}, + ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, + ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, + + ansiBackgroundBlack: {0, background}, + ansiBackgroundRed: {backgroundRed, background}, + ansiBackgroundGreen: {backgroundGreen, background}, + ansiBackgroundYellow: {backgroundRed | backgroundGreen, background}, + ansiBackgroundBlue: {backgroundBlue, background}, + ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background}, + 
ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background}, + ansiBackgroundWhite: {backgroundRed | backgroundGreen | backgroundBlue, background}, + ansiBackgroundDefault: {0, background}, + + ansiLightForegroundGray: {foregroundIntensity, foreground}, + ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground}, + ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground}, + ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground}, + ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground}, + ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground}, + ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground}, + ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground}, + + ansiLightBackgroundGray: {backgroundIntensity, background}, + ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background}, + ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background}, + ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background}, + ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background}, + ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background}, + ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background}, + ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background}, +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + defaultAttr *textAttributes +) + +func init() { + screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) + if screenInfo != nil { + 
colorMap[ansiForegroundDefault] = winColor{ + screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue), + foreground, + } + colorMap[ansiBackgroundDefault] = winColor{ + screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue), + background, + } + defaultAttr = convertTextAttr(screenInfo.WAttributes) + } +} + +type coord struct { + X, Y int16 +} + +type smallRect struct { + Left, Top, Right, Bottom int16 +} + +type consoleScreenBufferInfo struct { + DwSize coord + DwCursorPosition coord + WAttributes uint16 + SrWindow smallRect + DwMaximumWindowSize coord +} + +func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo { + var csbi consoleScreenBufferInfo + ret, _, _ := procGetConsoleScreenBufferInfo.Call( + hConsoleOutput, + uintptr(unsafe.Pointer(&csbi))) + if ret == 0 { + return nil + } + return &csbi +} + +func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool { + ret, _, _ := procSetConsoleTextAttribute.Call( + hConsoleOutput, + uintptr(wAttributes)) + return ret != 0 +} + +type textAttributes struct { + foregroundColor uint16 + backgroundColor uint16 + foregroundIntensity uint16 + backgroundIntensity uint16 + underscore uint16 + otherAttributes uint16 +} + +func convertTextAttr(winAttr uint16) *textAttributes { + fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue) + bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue) + fgIntensity := winAttr & foregroundIntensity + bgIntensity := winAttr & backgroundIntensity + underline := winAttr & underscore + otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore) + return &textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes} +} + +func convertWinAttr(textAttr *textAttributes) uint16 { + var winAttr uint16 + winAttr |= textAttr.foregroundColor + winAttr |= textAttr.backgroundColor + winAttr |= textAttr.foregroundIntensity + winAttr |= 
textAttr.backgroundIntensity + winAttr |= textAttr.underscore + winAttr |= textAttr.otherAttributes + return winAttr +} + +func changeColor(param []byte) parseResult { + screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) + if screenInfo == nil { + return noConsole + } + + winAttr := convertTextAttr(screenInfo.WAttributes) + strParam := string(param) + if len(strParam) <= 0 { + strParam = "0" + } + csiParam := strings.Split(strParam, string(separatorChar)) + for _, p := range csiParam { + c, ok := colorMap[p] + switch { + case !ok: + switch p { + case ansiReset: + winAttr.foregroundColor = defaultAttr.foregroundColor + winAttr.backgroundColor = defaultAttr.backgroundColor + winAttr.foregroundIntensity = defaultAttr.foregroundIntensity + winAttr.backgroundIntensity = defaultAttr.backgroundIntensity + winAttr.underscore = 0 + winAttr.otherAttributes = 0 + case ansiIntensityOn: + winAttr.foregroundIntensity = foregroundIntensity + case ansiIntensityOff: + winAttr.foregroundIntensity = 0 + case ansiUnderlineOn: + winAttr.underscore = underscore + case ansiUnderlineOff: + winAttr.underscore = 0 + case ansiBlinkOn: + winAttr.backgroundIntensity = backgroundIntensity + case ansiBlinkOff: + winAttr.backgroundIntensity = 0 + default: + // unknown code + } + case c.drawType == foreground: + winAttr.foregroundColor = c.code + case c.drawType == background: + winAttr.backgroundColor = c.code + } + } + winTextAttribute := convertWinAttr(winAttr) + setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute) + + return changedColor +} + +func parseEscapeSequence(command byte, param []byte) parseResult { + if defaultAttr == nil { + return noConsole + } + + switch command { + case sgrCode: + return changeColor(param) + default: + return unknown + } +} + +func (cw *ansiColorWriter) flushBuffer() (int, error) { + return cw.flushTo(cw.w) +} + +func (cw *ansiColorWriter) resetBuffer() (int, error) { + return cw.flushTo(nil) +} + +func (cw *ansiColorWriter) 
flushTo(w io.Writer) (int, error) { + var n1, n2 int + var err error + + startBytes := cw.paramStartBuf.Bytes() + cw.paramStartBuf.Reset() + if w != nil { + n1, err = cw.w.Write(startBytes) + if err != nil { + return n1, err + } + } else { + n1 = len(startBytes) + } + paramBytes := cw.paramBuf.Bytes() + cw.paramBuf.Reset() + if w != nil { + n2, err = cw.w.Write(paramBytes) + if err != nil { + return n1 + n2, err + } + } else { + n2 = len(paramBytes) + } + return n1 + n2, nil +} + +func isParameterChar(b byte) bool { + return ('0' <= b && b <= '9') || b == separatorChar +} + +func (cw *ansiColorWriter) Write(p []byte) (int, error) { + var r, nw, first, last int + if cw.mode != DiscardNonColorEscSeq { + cw.state = outsideCsiCode + cw.resetBuffer() + } + + var err error + for i, ch := range p { + switch cw.state { + case outsideCsiCode: + if ch == firstCsiChar { + cw.paramStartBuf.WriteByte(ch) + cw.state = firstCsiCode + } + case firstCsiCode: + switch ch { + case firstCsiChar: + cw.paramStartBuf.WriteByte(ch) + break + case secondeCsiChar: + cw.paramStartBuf.WriteByte(ch) + cw.state = secondCsiCode + last = i - 1 + default: + cw.resetBuffer() + cw.state = outsideCsiCode + } + case secondCsiCode: + if isParameterChar(ch) { + cw.paramBuf.WriteByte(ch) + } else { + nw, err = cw.w.Write(p[first:last]) + r += nw + if err != nil { + return r, err + } + first = i + 1 + result := parseEscapeSequence(ch, cw.paramBuf.Bytes()) + if result == noConsole || (cw.mode == OutputNonColorEscSeq && result == unknown) { + cw.paramBuf.WriteByte(ch) + nw, err := cw.flushBuffer() + if err != nil { + return r, err + } + r += nw + } else { + n, _ := cw.resetBuffer() + // Add one more to the size of the buffer for the last ch + r += n + 1 + } + + cw.state = outsideCsiCode + } + default: + cw.state = outsideCsiCode + } + } + + if cw.mode != DiscardNonColorEscSeq || cw.state == outsideCsiCode { + nw, err = cw.w.Write(p[first:]) + r += nw + } + + return r, err +} diff --git 
a/vendor/github.com/astaxie/beego/logs/conn.go b/vendor/github.com/astaxie/beego/logs/conn.go new file mode 100644 index 00000000..6d5bf6bf --- /dev/null +++ b/vendor/github.com/astaxie/beego/logs/conn.go @@ -0,0 +1,117 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logs + +import ( + "encoding/json" + "io" + "net" + "time" +) + +// connWriter implements LoggerInterface. +// it writes messages in keep-live tcp connection. +type connWriter struct { + lg *logWriter + innerWriter io.WriteCloser + ReconnectOnMsg bool `json:"reconnectOnMsg"` + Reconnect bool `json:"reconnect"` + Net string `json:"net"` + Addr string `json:"addr"` + Level int `json:"level"` +} + +// NewConn create new ConnWrite returning as LoggerInterface. +func NewConn() Logger { + conn := new(connWriter) + conn.Level = LevelTrace + return conn +} + +// Init init connection writer with json config. +// json config only need key "level". +func (c *connWriter) Init(jsonConfig string) error { + return json.Unmarshal([]byte(jsonConfig), c) +} + +// WriteMsg write message in connection. +// if connection is down, try to re-connect. 
+func (c *connWriter) WriteMsg(when time.Time, msg string, level int) error { + if level > c.Level { + return nil + } + if c.needToConnectOnMsg() { + err := c.connect() + if err != nil { + return err + } + } + + if c.ReconnectOnMsg { + defer c.innerWriter.Close() + } + + c.lg.println(when, msg) + return nil +} + +// Flush implementing method. empty. +func (c *connWriter) Flush() { + +} + +// Destroy destroy connection writer and close tcp listener. +func (c *connWriter) Destroy() { + if c.innerWriter != nil { + c.innerWriter.Close() + } +} + +func (c *connWriter) connect() error { + if c.innerWriter != nil { + c.innerWriter.Close() + c.innerWriter = nil + } + + conn, err := net.Dial(c.Net, c.Addr) + if err != nil { + return err + } + + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + } + + c.innerWriter = conn + c.lg = newLogWriter(conn) + return nil +} + +func (c *connWriter) needToConnectOnMsg() bool { + if c.Reconnect { + c.Reconnect = false + return true + } + + if c.innerWriter == nil { + return true + } + + return c.ReconnectOnMsg +} + +func init() { + Register(AdapterConn, NewConn) +} diff --git a/vendor/github.com/astaxie/beego/logs/console.go b/vendor/github.com/astaxie/beego/logs/console.go new file mode 100644 index 00000000..e75f2a1b --- /dev/null +++ b/vendor/github.com/astaxie/beego/logs/console.go @@ -0,0 +1,101 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logs + +import ( + "encoding/json" + "os" + "runtime" + "time" +) + +// brush is a color join function +type brush func(string) string + +// newBrush return a fix color Brush +func newBrush(color string) brush { + pre := "\033[" + reset := "\033[0m" + return func(text string) string { + return pre + color + "m" + text + reset + } +} + +var colors = []brush{ + newBrush("1;37"), // Emergency white + newBrush("1;36"), // Alert cyan + newBrush("1;35"), // Critical magenta + newBrush("1;31"), // Error red + newBrush("1;33"), // Warning yellow + newBrush("1;32"), // Notice green + newBrush("1;34"), // Informational blue + newBrush("1;44"), // Debug Background blue +} + +// consoleWriter implements LoggerInterface and writes messages to terminal. +type consoleWriter struct { + lg *logWriter + Level int `json:"level"` + Colorful bool `json:"color"` //this filed is useful only when system's terminal supports color +} + +// NewConsole create ConsoleWriter returning as LoggerInterface. +func NewConsole() Logger { + cw := &consoleWriter{ + lg: newLogWriter(os.Stdout), + Level: LevelDebug, + Colorful: runtime.GOOS != "windows", + } + return cw +} + +// Init init console logger. +// jsonConfig like '{"level":LevelTrace}'. +func (c *consoleWriter) Init(jsonConfig string) error { + if len(jsonConfig) == 0 { + return nil + } + err := json.Unmarshal([]byte(jsonConfig), c) + if runtime.GOOS == "windows" { + c.Colorful = false + } + return err +} + +// WriteMsg write message in console. +func (c *consoleWriter) WriteMsg(when time.Time, msg string, level int) error { + if level > c.Level { + return nil + } + if c.Colorful { + msg = colors[level](msg) + } + c.lg.println(when, msg) + return nil +} + +// Destroy implementing method. empty. +func (c *consoleWriter) Destroy() { + +} + +// Flush implementing method. empty. 
+func (c *consoleWriter) Flush() { + +} + +func init() { + Register(AdapterConsole, NewConsole) +} diff --git a/vendor/github.com/astaxie/beego/logs/file.go b/vendor/github.com/astaxie/beego/logs/file.go new file mode 100644 index 00000000..8e5117d2 --- /dev/null +++ b/vendor/github.com/astaxie/beego/logs/file.go @@ -0,0 +1,335 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logs + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" +) + +// fileLogWriter implements LoggerInterface. +// It writes messages by lines limit, file size limit, or time frequency. 
+type fileLogWriter struct { + sync.RWMutex // write log order by order and atomic incr maxLinesCurLines and maxSizeCurSize + // The opened file + Filename string `json:"filename"` + fileWriter *os.File + + // Rotate at line + MaxLines int `json:"maxlines"` + maxLinesCurLines int + + // Rotate at size + MaxSize int `json:"maxsize"` + maxSizeCurSize int + + // Rotate daily + Daily bool `json:"daily"` + MaxDays int64 `json:"maxdays"` + dailyOpenDate int + dailyOpenTime time.Time + + Rotate bool `json:"rotate"` + + Level int `json:"level"` + + Perm string `json:"perm"` + + RotatePerm string `json:"rotateperm"` + + fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix +} + +// newFileWriter create a FileLogWriter returning as LoggerInterface. +func newFileWriter() Logger { + w := &fileLogWriter{ + Daily: true, + MaxDays: 7, + Rotate: true, + RotatePerm: "0440", + Level: LevelTrace, + Perm: "0660", + } + return w +} + +// Init file logger with json config. +// jsonConfig like: +// { +// "filename":"logs/beego.log", +// "maxLines":10000, +// "maxsize":1024, +// "daily":true, +// "maxDays":15, +// "rotate":true, +// "perm":"0600" +// } +func (w *fileLogWriter) Init(jsonConfig string) error { + err := json.Unmarshal([]byte(jsonConfig), w) + if err != nil { + return err + } + if len(w.Filename) == 0 { + return errors.New("jsonconfig must have filename") + } + w.suffix = filepath.Ext(w.Filename) + w.fileNameOnly = strings.TrimSuffix(w.Filename, w.suffix) + if w.suffix == "" { + w.suffix = ".log" + } + err = w.startLogger() + return err +} + +// start file logger. create log file and set to locker-inside file writer. 
+func (w *fileLogWriter) startLogger() error { + file, err := w.createLogFile() + if err != nil { + return err + } + if w.fileWriter != nil { + w.fileWriter.Close() + } + w.fileWriter = file + return w.initFd() +} + +func (w *fileLogWriter) needRotate(size int, day int) bool { + return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) || + (w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) || + (w.Daily && day != w.dailyOpenDate) + +} + +// WriteMsg write logger message into file. +func (w *fileLogWriter) WriteMsg(when time.Time, msg string, level int) error { + if level > w.Level { + return nil + } + h, d := formatTimeHeader(when) + msg = string(h) + msg + "\n" + if w.Rotate { + w.RLock() + if w.needRotate(len(msg), d) { + w.RUnlock() + w.Lock() + if w.needRotate(len(msg), d) { + if err := w.doRotate(when); err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) + } + } + w.Unlock() + } else { + w.RUnlock() + } + } + + w.Lock() + _, err := w.fileWriter.Write([]byte(msg)) + if err == nil { + w.maxLinesCurLines++ + w.maxSizeCurSize += len(msg) + } + w.Unlock() + return err +} + +func (w *fileLogWriter) createLogFile() (*os.File, error) { + // Open the log file + perm, err := strconv.ParseInt(w.Perm, 8, 64) + if err != nil { + return nil, err + } + fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(perm)) + if err == nil { + // Make sure file perm is user set perm cause of `os.OpenFile` will obey umask + os.Chmod(w.Filename, os.FileMode(perm)) + } + return fd, err +} + +func (w *fileLogWriter) initFd() error { + fd := w.fileWriter + fInfo, err := fd.Stat() + if err != nil { + return fmt.Errorf("get stat err: %s", err) + } + w.maxSizeCurSize = int(fInfo.Size()) + w.dailyOpenTime = time.Now() + w.dailyOpenDate = w.dailyOpenTime.Day() + w.maxLinesCurLines = 0 + if w.Daily { + go w.dailyRotate(w.dailyOpenTime) + } + if fInfo.Size() > 0 && w.MaxLines > 0 { + count, err := w.lines() + if err != nil { + return err 
+ } + w.maxLinesCurLines = count + } + return nil +} + +func (w *fileLogWriter) dailyRotate(openTime time.Time) { + y, m, d := openTime.Add(24 * time.Hour).Date() + nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location()) + tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100)) + <-tm.C + w.Lock() + if w.needRotate(0, time.Now().Day()) { + if err := w.doRotate(time.Now()); err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) + } + } + w.Unlock() +} + +func (w *fileLogWriter) lines() (int, error) { + fd, err := os.Open(w.Filename) + if err != nil { + return 0, err + } + defer fd.Close() + + buf := make([]byte, 32768) // 32k + count := 0 + lineSep := []byte{'\n'} + + for { + c, err := fd.Read(buf) + if err != nil && err != io.EOF { + return count, err + } + + count += bytes.Count(buf[:c], lineSep) + + if err == io.EOF { + break + } + } + + return count, nil +} + +// DoRotate means it need to write file in new file. +// new file name like xx.2013-01-01.log (daily) or xx.001.log (by line or size) +func (w *fileLogWriter) doRotate(logTime time.Time) error { + // file exists + // Find the next available number + num := 1 + fName := "" + rotatePerm, err := strconv.ParseInt(w.RotatePerm, 8, 64) + if err != nil { + return err + } + + _, err = os.Lstat(w.Filename) + if err != nil { + //even if the file is not exist or other ,we should RESTART the logger + goto RESTART_LOGGER + } + + if w.MaxLines > 0 || w.MaxSize > 0 { + for ; err == nil && num <= 999; num++ { + fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format("2006-01-02"), num, w.suffix) + _, err = os.Lstat(fName) + } + } else { + fName = fmt.Sprintf("%s.%s%s", w.fileNameOnly, w.dailyOpenTime.Format("2006-01-02"), w.suffix) + _, err = os.Lstat(fName) + for ; err == nil && num <= 999; num++ { + fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", w.dailyOpenTime.Format("2006-01-02"), num, w.suffix) + _, err = os.Lstat(fName) + } + } + // return 
error if the last file checked still existed + if err == nil { + return fmt.Errorf("Rotate: Cannot find free log number to rename %s", w.Filename) + } + + // close fileWriter before rename + w.fileWriter.Close() + + // Rename the file to its new found name + // even if occurs error,we MUST guarantee to restart new logger + err = os.Rename(w.Filename, fName) + if err != nil { + goto RESTART_LOGGER + } + + err = os.Chmod(fName, os.FileMode(rotatePerm)) + +RESTART_LOGGER: + + startLoggerErr := w.startLogger() + go w.deleteOldLog() + + if startLoggerErr != nil { + return fmt.Errorf("Rotate StartLogger: %s", startLoggerErr) + } + if err != nil { + return fmt.Errorf("Rotate: %s", err) + } + return nil +} + +func (w *fileLogWriter) deleteOldLog() { + dir := filepath.Dir(w.Filename) + filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) { + defer func() { + if r := recover(); r != nil { + fmt.Fprintf(os.Stderr, "Unable to delete old log '%s', error: %v\n", path, r) + } + }() + + if info == nil { + return + } + + if !info.IsDir() && info.ModTime().Add(24*time.Hour*time.Duration(w.MaxDays)).Before(time.Now()) { + if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) && + strings.HasSuffix(filepath.Base(path), w.suffix) { + os.Remove(path) + } + } + return + }) +} + +// Destroy close the file description, close file writer. +func (w *fileLogWriter) Destroy() { + w.fileWriter.Close() +} + +// Flush flush file logger. +// there are no buffering messages in file logger in memory. +// flush file means sync file from disk. 
+func (w *fileLogWriter) Flush() { + w.fileWriter.Sync() +} + +func init() { + Register(AdapterFile, newFileWriter) +} diff --git a/vendor/github.com/astaxie/beego/logs/jianliao.go b/vendor/github.com/astaxie/beego/logs/jianliao.go new file mode 100644 index 00000000..88ba0f9a --- /dev/null +++ b/vendor/github.com/astaxie/beego/logs/jianliao.go @@ -0,0 +1,72 @@ +package logs + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "time" +) + +// JLWriter implements beego LoggerInterface and is used to send jiaoliao webhook +type JLWriter struct { + AuthorName string `json:"authorname"` + Title string `json:"title"` + WebhookURL string `json:"webhookurl"` + RedirectURL string `json:"redirecturl,omitempty"` + ImageURL string `json:"imageurl,omitempty"` + Level int `json:"level"` +} + +// newJLWriter create jiaoliao writer. +func newJLWriter() Logger { + return &JLWriter{Level: LevelTrace} +} + +// Init JLWriter with json config string +func (s *JLWriter) Init(jsonconfig string) error { + return json.Unmarshal([]byte(jsonconfig), s) +} + +// WriteMsg write message in smtp writer. +// it will send an email with subject and only this message. +func (s *JLWriter) WriteMsg(when time.Time, msg string, level int) error { + if level > s.Level { + return nil + } + + text := fmt.Sprintf("%s %s", when.Format("2006-01-02 15:04:05"), msg) + + form := url.Values{} + form.Add("authorName", s.AuthorName) + form.Add("title", s.Title) + form.Add("text", text) + if s.RedirectURL != "" { + form.Add("redirectUrl", s.RedirectURL) + } + if s.ImageURL != "" { + form.Add("imageUrl", s.ImageURL) + } + + resp, err := http.PostForm(s.WebhookURL, form) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode) + } + return nil +} + +// Flush implementing method. empty. +func (s *JLWriter) Flush() { +} + +// Destroy implementing method. empty. 
+func (s *JLWriter) Destroy() { +} + +func init() { + Register(AdapterJianLiao, newJLWriter) +} diff --git a/vendor/github.com/astaxie/beego/logs/log.go b/vendor/github.com/astaxie/beego/logs/log.go new file mode 100644 index 00000000..0e97a70e --- /dev/null +++ b/vendor/github.com/astaxie/beego/logs/log.go @@ -0,0 +1,646 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logs provide a general log interface +// Usage: +// +// import "github.com/astaxie/beego/logs" +// +// log := NewLogger(10000) +// log.SetLogger("console", "") +// +// > the first params stand for how many channel +// +// Use it like this: +// +// log.Trace("trace") +// log.Info("info") +// log.Warn("warning") +// log.Debug("debug") +// log.Critical("critical") +// +// more docs http://beego.me/docs/module/logs.md +package logs + +import ( + "fmt" + "log" + "os" + "path" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +// RFC5424 log message levels. 
+const ( + LevelEmergency = iota + LevelAlert + LevelCritical + LevelError + LevelWarning + LevelNotice + LevelInformational + LevelDebug +) + +// levelLogLogger is defined to implement log.Logger +// the real log level will be LevelEmergency +const levelLoggerImpl = -1 + +// Name for adapter with beego official support +const ( + AdapterConsole = "console" + AdapterFile = "file" + AdapterMultiFile = "multifile" + AdapterMail = "smtp" + AdapterConn = "conn" + AdapterEs = "es" + AdapterJianLiao = "jianliao" + AdapterSlack = "slack" + AdapterAliLS = "alils" +) + +// Legacy log level constants to ensure backwards compatibility. +const ( + LevelInfo = LevelInformational + LevelTrace = LevelDebug + LevelWarn = LevelWarning +) + +type newLoggerFunc func() Logger + +// Logger defines the behavior of a log provider. +type Logger interface { + Init(config string) error + WriteMsg(when time.Time, msg string, level int) error + Destroy() + Flush() +} + +var adapters = make(map[string]newLoggerFunc) +var levelPrefix = [LevelDebug + 1]string{"[M] ", "[A] ", "[C] ", "[E] ", "[W] ", "[N] ", "[I] ", "[D] "} + +// Register makes a log provide available by the provided name. +// If Register is called twice with the same name or if driver is nil, +// it panics. +func Register(name string, log newLoggerFunc) { + if log == nil { + panic("logs: Register provide is nil") + } + if _, dup := adapters[name]; dup { + panic("logs: Register called twice for provider " + name) + } + adapters[name] = log +} + +// BeeLogger is default logger in beego application. +// it can contain several providers and log message into all providers. 
+type BeeLogger struct { + lock sync.Mutex + level int + init bool + enableFuncCallDepth bool + loggerFuncCallDepth int + asynchronous bool + msgChanLen int64 + msgChan chan *logMsg + signalChan chan string + wg sync.WaitGroup + outputs []*nameLogger +} + +const defaultAsyncMsgLen = 1e3 + +type nameLogger struct { + Logger + name string +} + +type logMsg struct { + level int + msg string + when time.Time +} + +var logMsgPool *sync.Pool + +// NewLogger returns a new BeeLogger. +// channelLen means the number of messages in chan(used where asynchronous is true). +// if the buffering chan is full, logger adapters write to file or other way. +func NewLogger(channelLens ...int64) *BeeLogger { + bl := new(BeeLogger) + bl.level = LevelDebug + bl.loggerFuncCallDepth = 2 + bl.msgChanLen = append(channelLens, 0)[0] + if bl.msgChanLen <= 0 { + bl.msgChanLen = defaultAsyncMsgLen + } + bl.signalChan = make(chan string, 1) + bl.setLogger(AdapterConsole) + return bl +} + +// Async set the log to asynchronous and start the goroutine +func (bl *BeeLogger) Async(msgLen ...int64) *BeeLogger { + bl.lock.Lock() + defer bl.lock.Unlock() + if bl.asynchronous { + return bl + } + bl.asynchronous = true + if len(msgLen) > 0 && msgLen[0] > 0 { + bl.msgChanLen = msgLen[0] + } + bl.msgChan = make(chan *logMsg, bl.msgChanLen) + logMsgPool = &sync.Pool{ + New: func() interface{} { + return &logMsg{} + }, + } + bl.wg.Add(1) + go bl.startLogger() + return bl +} + +// SetLogger provides a given logger adapter into BeeLogger with config string. +// config need to be correct JSON as string: {"interval":360}. 
+func (bl *BeeLogger) setLogger(adapterName string, configs ...string) error {
+	config := append(configs, "{}")[0]
+	for _, l := range bl.outputs {
+		if l.name == adapterName {
+			return fmt.Errorf("logs: duplicate adaptername %q (you have set this logger before)", adapterName)
+		}
+	}
+
+	log, ok := adapters[adapterName]
+	if !ok {
+		return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
+	}
+
+	lg := log()
+	err := lg.Init(config)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "logs.BeeLogger.SetLogger: "+err.Error())
+		return err
+	}
+	bl.outputs = append(bl.outputs, &nameLogger{name: adapterName, Logger: lg})
+	return nil
+}
+
+// SetLogger provides a given logger adapter into BeeLogger with config string.
+// config need to be correct JSON as string: {"interval":360}.
+func (bl *BeeLogger) SetLogger(adapterName string, configs ...string) error {
+	bl.lock.Lock()
+	defer bl.lock.Unlock()
+	if !bl.init {
+		bl.outputs = []*nameLogger{}
+		bl.init = true
+	}
+	return bl.setLogger(adapterName, configs...)
+}
+
+// DelLogger remove a logger adapter in BeeLogger.
+func (bl *BeeLogger) DelLogger(adapterName string) error {
+	bl.lock.Lock()
+	defer bl.lock.Unlock()
+	outputs := []*nameLogger{}
+	for _, lg := range bl.outputs {
+		if lg.name == adapterName {
+			lg.Destroy()
+		} else {
+			outputs = append(outputs, lg)
+		}
+	}
+	if len(outputs) == len(bl.outputs) {
+		return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
+	}
+	bl.outputs = outputs
+	return nil
+}
+
+// writeToLoggers fans one message out to every registered adapter; per-adapter
+// write errors are reported to stderr but do not stop the fan-out.
+func (bl *BeeLogger) writeToLoggers(when time.Time, msg string, level int) {
+	for _, l := range bl.outputs {
+		err := l.WriteMsg(when, msg, level)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "unable to WriteMsg to adapter:%v,error:%v\n", l.name, err)
+		}
+	}
+}
+
+// Write implements io.Writer so a BeeLogger can back a standard library
+// log.Logger; the payload is logged unfiltered (at emergency level).
+func (bl *BeeLogger) Write(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+	// writeMsg will always add a '\n' character
+	if p[len(p)-1] == '\n' {
+		p = p[0 : len(p)-1]
+	}
+	// set levelLoggerImpl to ensure all log message will be write out
+	err = bl.writeMsg(levelLoggerImpl, string(p))
+	if err == nil {
+		return len(p), err
+	}
+	return 0, err
+}
+
+// writeMsg formats msg (with optional fmt args), optionally tags it with the
+// caller's file:line and a level prefix, then either queues it (async mode)
+// or writes it synchronously to all adapters.
+func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error {
+	if !bl.init {
+		bl.lock.Lock()
+		bl.setLogger(AdapterConsole)
+		bl.lock.Unlock()
+	}
+
+	if len(v) > 0 {
+		msg = fmt.Sprintf(msg, v...)
+	}
+	when := time.Now()
+	if bl.enableFuncCallDepth {
+		_, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth)
+		if !ok {
+			file = "???"
+			line = 0
+		}
+		_, filename := path.Split(file)
+		msg = "[" + filename + ":" + strconv.Itoa(line) + "] " + msg
+	}
+
+	//set level info in front of filename info
+	if logLevel == levelLoggerImpl {
+		// set to emergency to ensure all log will be print out correctly
+		logLevel = LevelEmergency
+	} else {
+		msg = levelPrefix[logLevel] + msg
+	}
+
+	if bl.asynchronous {
+		lm := logMsgPool.Get().(*logMsg)
+		lm.level = logLevel
+		lm.msg = msg
+		lm.when = when
+		bl.msgChan <- lm
+	} else {
+		bl.writeToLoggers(when, msg, logLevel)
+	}
+	return nil
+}
+
+// SetLevel Set log message level.
+// If message level (such as LevelDebug) is higher than logger level (such as LevelWarning),
+// log providers will not even be sent the message.
+func (bl *BeeLogger) SetLevel(l int) {
+	bl.level = l
+}
+
+// SetLogFuncCallDepth set log funcCallDepth
+func (bl *BeeLogger) SetLogFuncCallDepth(d int) {
+	bl.loggerFuncCallDepth = d
+}
+
+// GetLogFuncCallDepth return log funcCallDepth for wrapper
+func (bl *BeeLogger) GetLogFuncCallDepth() int {
+	return bl.loggerFuncCallDepth
+}
+
+// EnableFuncCallDepth enable log funcCallDepth
+func (bl *BeeLogger) EnableFuncCallDepth(b bool) {
+	bl.enableFuncCallDepth = b
+}
+
+// start logger chan reading.
+// when chan is not empty, write logs.
+func (bl *BeeLogger) startLogger() {
+	gameOver := false
+	for {
+		select {
+		case bm := <-bl.msgChan:
+			bl.writeToLoggers(bm.when, bm.msg, bm.level)
+			logMsgPool.Put(bm)
+		case sg := <-bl.signalChan:
+			// Now should only send "flush" or "close" to bl.signalChan
+			bl.flush()
+			if sg == "close" {
+				for _, l := range bl.outputs {
+					l.Destroy()
+				}
+				bl.outputs = nil
+				gameOver = true
+			}
+			bl.wg.Done()
+		}
+		if gameOver {
+			break
+		}
+	}
+}
+
+// Emergency logs an EMERGENCY level message.
+func (bl *BeeLogger) Emergency(format string, v ...interface{}) {
+	if LevelEmergency > bl.level {
+		return
+	}
+	bl.writeMsg(LevelEmergency, format, v...)
+}
+
+// Alert logs an ALERT level message.
+func (bl *BeeLogger) Alert(format string, v ...interface{}) {
+	if LevelAlert > bl.level {
+		return
+	}
+	bl.writeMsg(LevelAlert, format, v...)
+}
+
+// Critical logs a CRITICAL level message.
+func (bl *BeeLogger) Critical(format string, v ...interface{}) {
+	if LevelCritical > bl.level {
+		return
+	}
+	bl.writeMsg(LevelCritical, format, v...)
+}
+
+// Error logs an ERROR level message.
+func (bl *BeeLogger) Error(format string, v ...interface{}) {
+	if LevelError > bl.level {
+		return
+	}
+	bl.writeMsg(LevelError, format, v...)
+}
+
+// Warning logs a WARNING level message.
+func (bl *BeeLogger) Warning(format string, v ...interface{}) {
+	if LevelWarn > bl.level {
+		return
+	}
+	bl.writeMsg(LevelWarn, format, v...)
+}
+
+// Notice logs a NOTICE level message.
+func (bl *BeeLogger) Notice(format string, v ...interface{}) {
+	if LevelNotice > bl.level {
+		return
+	}
+	bl.writeMsg(LevelNotice, format, v...)
+}
+
+// Informational logs an INFORMATIONAL level message.
+func (bl *BeeLogger) Informational(format string, v ...interface{}) {
+	if LevelInfo > bl.level {
+		return
+	}
+	bl.writeMsg(LevelInfo, format, v...)
+}
+
+// Debug logs a DEBUG level message.
+func (bl *BeeLogger) Debug(format string, v ...interface{}) {
+	if LevelDebug > bl.level {
+		return
+	}
+	bl.writeMsg(LevelDebug, format, v...)
+}
+
+// Warn logs a WARN level message.
+// compatibility alias for Warning()
+func (bl *BeeLogger) Warn(format string, v ...interface{}) {
+	if LevelWarn > bl.level {
+		return
+	}
+	bl.writeMsg(LevelWarn, format, v...)
+}
+
+// Info logs an INFO level message.
+// compatibility alias for Informational()
+func (bl *BeeLogger) Info(format string, v ...interface{}) {
+	if LevelInfo > bl.level {
+		return
+	}
+	bl.writeMsg(LevelInfo, format, v...)
+}
+
+// Trace logs a TRACE level message.
+// compatibility alias for Debug()
+func (bl *BeeLogger) Trace(format string, v ...interface{}) {
+	if LevelDebug > bl.level {
+		return
+	}
+	bl.writeMsg(LevelDebug, format, v...)
+}
+
+// Flush flush all chan data.
+func (bl *BeeLogger) Flush() {
+	if bl.asynchronous {
+		bl.signalChan <- "flush"
+		bl.wg.Wait()
+		// re-arm the WaitGroup so the still-running startLogger goroutine
+		// can acknowledge the next flush/close request
+		bl.wg.Add(1)
+		return
+	}
+	bl.flush()
+}
+
+// Close close logger, flush all chan data and destroy all adapters in BeeLogger.
+func (bl *BeeLogger) Close() {
+	if bl.asynchronous {
+		bl.signalChan <- "close"
+		bl.wg.Wait()
+		close(bl.msgChan)
+	} else {
+		bl.flush()
+		for _, l := range bl.outputs {
+			l.Destroy()
+		}
+		bl.outputs = nil
+	}
+	close(bl.signalChan)
+}
+
+// Reset close all outputs, and set bl.outputs to nil
+func (bl *BeeLogger) Reset() {
+	bl.Flush()
+	for _, l := range bl.outputs {
+		l.Destroy()
+	}
+	bl.outputs = nil
+}
+
+// flush drains any queued async messages, then flushes every adapter.
+func (bl *BeeLogger) flush() {
+	if bl.asynchronous {
+		for {
+			if len(bl.msgChan) > 0 {
+				bm := <-bl.msgChan
+				bl.writeToLoggers(bm.when, bm.msg, bm.level)
+				logMsgPool.Put(bm)
+				continue
+			}
+			break
+		}
+	}
+	for _, l := range bl.outputs {
+		l.Flush()
+	}
+}
+
+// beeLogger references the used application logger.
+var beeLogger = NewLogger()
+
+// GetBeeLogger returns the default BeeLogger
+func GetBeeLogger() *BeeLogger {
+	return beeLogger
+}
+
+// beeLoggerMap caches prefix-specific *log.Logger instances backed by beeLogger.
+var beeLoggerMap = struct {
+	sync.RWMutex
+	logs map[string]*log.Logger
+}{
+	logs: map[string]*log.Logger{},
+}
+
+// GetLogger returns a standard library *log.Logger (cached per prefix) whose
+// output is routed through the default BeeLogger.
+func GetLogger(prefixes ...string) *log.Logger {
+	prefix := append(prefixes, "")[0]
+	if prefix != "" {
+		prefix = fmt.Sprintf(`[%s] `, strings.ToUpper(prefix))
+	}
+	beeLoggerMap.RLock()
+	l, ok := beeLoggerMap.logs[prefix]
+	if ok {
+		beeLoggerMap.RUnlock()
+		return l
+	}
+	beeLoggerMap.RUnlock()
+	beeLoggerMap.Lock()
+	defer beeLoggerMap.Unlock()
+	l, ok = beeLoggerMap.logs[prefix]
+	if !ok {
+		l = log.New(beeLogger, prefix, 0)
+		beeLoggerMap.logs[prefix] = l
+	}
+	return l
+}
+
+// Reset will remove all the adapter
+func Reset() {
+	beeLogger.Reset()
+}
+
+// Async set the beelogger with Async mode and hold msglen messages
+func Async(msgLen ...int64) *BeeLogger {
+	return beeLogger.Async(msgLen...)
+}
+
+// SetLevel sets the global log level used by the simple logger.
+func SetLevel(l int) {
+	beeLogger.SetLevel(l)
+}
+
+// EnableFuncCallDepth enable log funcCallDepth
+func EnableFuncCallDepth(b bool) {
+	beeLogger.enableFuncCallDepth = b
+}
+
+// SetLogFuncCall set the CallDepth, default is 4
+func SetLogFuncCall(b bool) {
+	beeLogger.EnableFuncCallDepth(b)
+	beeLogger.SetLogFuncCallDepth(4)
+}
+
+// SetLogFuncCallDepth set log funcCallDepth
+func SetLogFuncCallDepth(d int) {
+	beeLogger.loggerFuncCallDepth = d
+}
+
+// SetLogger sets a new logger.
+func SetLogger(adapter string, config ...string) error {
+	return beeLogger.SetLogger(adapter, config...)
+}
+
+// Emergency logs a message at emergency level.
+func Emergency(f interface{}, v ...interface{}) {
+	beeLogger.Emergency(formatLog(f, v...))
+}
+
+// Alert logs a message at alert level.
+func Alert(f interface{}, v ...interface{}) {
+	beeLogger.Alert(formatLog(f, v...))
+}
+
+// Critical logs a message at critical level.
+func Critical(f interface{}, v ...interface{}) {
+	beeLogger.Critical(formatLog(f, v...))
+}
+
+// Error logs a message at error level.
+func Error(f interface{}, v ...interface{}) {
+	beeLogger.Error(formatLog(f, v...))
+}
+
+// Warning logs a message at warning level.
+func Warning(f interface{}, v ...interface{}) {
+	beeLogger.Warn(formatLog(f, v...))
+}
+
+// Warn compatibility alias for Warning()
+func Warn(f interface{}, v ...interface{}) {
+	beeLogger.Warn(formatLog(f, v...))
+}
+
+// Notice logs a message at notice level.
+func Notice(f interface{}, v ...interface{}) {
+	beeLogger.Notice(formatLog(f, v...))
+}
+
+// Informational logs a message at info level.
+func Informational(f interface{}, v ...interface{}) {
+	beeLogger.Info(formatLog(f, v...))
+}
+
+// Info compatibility alias for Informational()
+func Info(f interface{}, v ...interface{}) {
+	beeLogger.Info(formatLog(f, v...))
+}
+
+// Debug logs a message at debug level.
+func Debug(f interface{}, v ...interface{}) {
+	beeLogger.Debug(formatLog(f, v...))
+}
+
+// Trace logs a message at trace level.
+// compatibility alias for Debug()
+func Trace(f interface{}, v ...interface{}) {
+	beeLogger.Trace(formatLog(f, v...))
+}
+
+// formatLog renders f (a format string or arbitrary value) together with v
+// into the final message text; extra args not covered by format verbs are
+// appended space-separated via %v.
+func formatLog(f interface{}, v ...interface{}) string {
+	var msg string
+	switch f.(type) {
+	case string:
+		msg = f.(string)
+		if len(v) == 0 {
+			return msg
+		}
+		if strings.Contains(msg, "%") && !strings.Contains(msg, "%%") {
+			//format string
+		} else {
+			//do not contain format char
+			msg += strings.Repeat(" %v", len(v))
+		}
+	default:
+		msg = fmt.Sprint(f)
+		if len(v) == 0 {
+			return msg
+		}
+		msg += strings.Repeat(" %v", len(v))
+	}
+	return fmt.Sprintf(msg, v...)
+}
diff --git a/vendor/github.com/astaxie/beego/logs/logger.go b/vendor/github.com/astaxie/beego/logs/logger.go
new file mode 100644
index 00000000..1700901f
--- /dev/null
+++ b/vendor/github.com/astaxie/beego/logs/logger.go
@@ -0,0 +1,208 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logs
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"sync"
+	"time"
+)
+
+// logWriter serializes writes of timestamp-prefixed lines to an io.Writer.
+type logWriter struct {
+	sync.Mutex
+	writer io.Writer
+}
+
+func newLogWriter(wr io.Writer) *logWriter {
+	return &logWriter{writer: wr}
+}
+
+// println writes one time-header-prefixed message line under the lock.
+func (lg *logWriter) println(when time.Time, msg string) {
+	lg.Lock()
+	h, _ := formatTimeHeader(when)
+	lg.writer.Write(append(append(h, msg...), '\n'))
+	lg.Unlock()
+}
+
+type outputMode int
+
+// DiscardNonColorEscSeq supports the divided color escape sequence.
+// But non-color escape sequence is not output.
+// Please use the OutputNonColorEscSeq If you want to output a non-color
+// escape sequences such as ncurses. However, it does not support the divided
+// color escape sequence.
+const (
+	_ outputMode = iota
+	DiscardNonColorEscSeq
+	OutputNonColorEscSeq
+)
+
+// NewAnsiColorWriter creates and initializes a new ansiColorWriter
+// using io.Writer w as its initial contents.
+// In the console of Windows, which change the foreground and background
+// colors of the text by the escape sequence.
+// In the console of other systems, which writes to w all text.
+func NewAnsiColorWriter(w io.Writer) io.Writer {
+	return NewModeAnsiColorWriter(w, DiscardNonColorEscSeq)
+}
+
+// NewModeAnsiColorWriter create and initializes a new ansiColorWriter
+// by specifying the outputMode.
+func NewModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer {
+	if _, ok := w.(*ansiColorWriter); !ok {
+		return &ansiColorWriter{
+			w:    w,
+			mode: mode,
+		}
+	}
+	return w
+}
+
+// Digit lookup tables used by formatTimeHeader to render a timestamp without
+// calling fmt or strconv.
+const (
+	y1  = `0123456789`
+	y2  = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
+	y3  = `0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999`
+	y4  = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
+	mo1 = `000000000111`
+	mo2 = `123456789012`
+	d1  = `0000000001111111111222222222233`
+	d2  = `1234567890123456789012345678901`
+	h1  = `000000000011111111112222`
+	h2  = `012345678901234567890123`
+	mi1 = `000000000011111111112222222222333333333344444444445555555555`
+	mi2 = `012345678901234567890123456789012345678901234567890123456789`
+	s1  = `000000000011111111112222222222333333333344444444445555555555`
+	s2  = `012345678901234567890123456789012345678901234567890123456789`
+	ns1 = `0123456789`
+)
+
+// formatTimeHeader renders when as "2006/01/02 15:04:05.123 " using the digit
+// tables above. It also returns the day of month — presumably consumed by the
+// file writer's daily-rotation check; confirm against the file adapter.
+func formatTimeHeader(when time.Time) ([]byte, int) {
+	y, mo, d := when.Date()
+	h, mi, s := when.Clock()
+	ns := when.Nanosecond() / 1000000
+	//len("2006/01/02 15:04:05.123 ")==24
+	var buf [24]byte
+
+	buf[0] = y1[y/1000%10]
+	buf[1] = y2[y/100]
+	buf[2] = y3[y-y/100*100]
+	buf[3] = y4[y-y/100*100]
+	buf[4] = '/'
+	buf[5] = mo1[mo-1]
+	buf[6] = mo2[mo-1]
+	buf[7] = '/'
+	buf[8] = d1[d-1]
+	buf[9] = d2[d-1]
+	buf[10] = ' '
+	buf[11] = h1[h]
+	buf[12] = h2[h]
+	buf[13] = ':'
+	buf[14] = mi1[mi]
+	buf[15] = mi2[mi]
+	buf[16] = ':'
+	buf[17] = s1[s]
+	buf[18] = s2[s]
+	buf[19] = '.'
+	buf[20] = ns1[ns/100]
+	buf[21] = ns1[ns%100/10]
+	buf[22] = ns1[ns%10]
+
+	buf[23] = ' '
+
+	return buf[0:], d
+}
+
+// ANSI color escape sequences (and their Windows console counterparts).
+var (
+	green   = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
+	white   = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
+	yellow  = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
+	red     = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
+	blue    = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
+	magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
+	cyan    = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
+
+	w32Green   = string([]byte{27, 91, 52, 50, 109})
+	w32White   = string([]byte{27, 91, 52, 55, 109})
+	w32Yellow  = string([]byte{27, 91, 52, 51, 109})
+	w32Red     = string([]byte{27, 91, 52, 49, 109})
+	w32Blue    = string([]byte{27, 91, 52, 52, 109})
+	w32Magenta = string([]byte{27, 91, 52, 53, 109})
+	w32Cyan    = string([]byte{27, 91, 52, 54, 109})
+
+	reset = string([]byte{27, 91, 48, 109})
+)
+
+// ColorByStatus return color by http code
+// 2xx return Green
+// 3xx return White
+// 4xx return Yellow
+// 5xx return Red
+func ColorByStatus(cond bool, code int) string {
+	switch {
+	case code >= 200 && code < 300:
+		return map[bool]string{true: green, false: w32Green}[cond]
+	case code >= 300 && code < 400:
+		return map[bool]string{true: white, false: w32White}[cond]
+	case code >= 400 && code < 500:
+		return map[bool]string{true: yellow, false: w32Yellow}[cond]
+	default:
+		return map[bool]string{true: red, false: w32Red}[cond]
+	}
+}
+
+// ColorByMethod return color by http code
+// GET return Blue
+// POST return Cyan
+// PUT return Yellow
+// DELETE return Red
+// PATCH return Green
+// HEAD return Magenta
+// OPTIONS return WHITE
+func ColorByMethod(cond bool, method string) string {
+	switch method {
+	case "GET":
+		return map[bool]string{true: blue, false: w32Blue}[cond]
+	case "POST":
+		return map[bool]string{true: cyan, false: w32Cyan}[cond]
+	case "PUT":
+		return map[bool]string{true: yellow, false: w32Yellow}[cond]
+	case "DELETE":
+		return map[bool]string{true: red, false: w32Red}[cond]
+	case "PATCH":
+		return map[bool]string{true: green, false: w32Green}[cond]
+	case "HEAD":
+		return map[bool]string{true: magenta, false: w32Magenta}[cond]
+	case "OPTIONS":
+		return map[bool]string{true: white, false: w32White}[cond]
+	default:
+		return reset
+	}
+}
+
+// Guard Mutex to guarantee atomic of W32Debug(string) function
+var mu sync.Mutex
+
+// W32Debug Helper method to output colored logs in Windows terminals
+func W32Debug(msg string) {
+	mu.Lock()
+	defer mu.Unlock()
+
+	current := time.Now()
+	w := NewAnsiColorWriter(os.Stdout)
+
+	fmt.Fprintf(w, "[beego] %v %s\n", current.Format("2006/01/02 - 15:04:05"), msg)
+}
diff --git a/vendor/github.com/astaxie/beego/logs/multifile.go b/vendor/github.com/astaxie/beego/logs/multifile.go
new file mode 100644
index 00000000..63204e17
--- /dev/null
+++ b/vendor/github.com/astaxie/beego/logs/multifile.go
@@ -0,0 +1,116 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logs
+
+import (
+	"encoding/json"
+	"time"
+)
+
+// A filesLogWriter manages several fileLogWriter
+// filesLogWriter will write logs to the file in json configuration and write the same level log to correspond file
+// means if the file name in configuration is project.log filesLogWriter will create project.error.log/project.debug.log
+// and write the error-level logs to project.error.log and write the debug-level logs to project.debug.log
+// the rotate attribute also acts like fileLogWriter
+type multiFileLogWriter struct {
+	writers       [LevelDebug + 1 + 1]*fileLogWriter // the last one for fullLogWriter
+	fullLogWriter *fileLogWriter
+	Separate      []string `json:"separate"`
+}
+
+// levelNames maps each log level index to its file-name suffix.
+var levelNames = [...]string{"emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"}
+
+// Init file logger with json config.
+// jsonConfig like:
+// {
+// "filename":"logs/beego.log",
+// "maxLines":0,
+// "maxsize":0,
+// "daily":true,
+// "maxDays":15,
+// "rotate":true,
+// "perm":0600,
+// "separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"],
+// }
+
+func (f *multiFileLogWriter) Init(config string) error {
+	writer := newFileWriter().(*fileLogWriter)
+	err := writer.Init(config)
+	if err != nil {
+		return err
+	}
+	f.fullLogWriter = writer
+	f.writers[LevelDebug+1] = writer
+
+	//unmarshal "separate" field to f.Separate
+	json.Unmarshal([]byte(config), f)
+
+	jsonMap := map[string]interface{}{}
+	json.Unmarshal([]byte(config), &jsonMap)
+
+	// spawn one derived fileLogWriter per level listed in "separate",
+	// reusing the base config but overriding filename and level
+	for i := LevelEmergency; i < LevelDebug+1; i++ {
+		for _, v := range f.Separate {
+			if v == levelNames[i] {
+				jsonMap["filename"] = f.fullLogWriter.fileNameOnly + "." + levelNames[i] + f.fullLogWriter.suffix
+				jsonMap["level"] = i
+				bs, _ := json.Marshal(jsonMap)
+				writer = newFileWriter().(*fileLogWriter)
+				writer.Init(string(bs))
+				f.writers[i] = writer
+			}
+		}
+	}
+
+	return nil
+}
+
+func (f *multiFileLogWriter) Destroy() {
+	for i := 0; i < len(f.writers); i++ {
+		if f.writers[i] != nil {
+			f.writers[i].Destroy()
+		}
+	}
+}
+
+// WriteMsg writes to the full log file and, when a per-level writer exists
+// for this exact level, to that file as well.
+func (f *multiFileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
+	if f.fullLogWriter != nil {
+		f.fullLogWriter.WriteMsg(when, msg, level)
+	}
+	for i := 0; i < len(f.writers)-1; i++ {
+		if f.writers[i] != nil {
+			if level == f.writers[i].Level {
+				f.writers[i].WriteMsg(when, msg, level)
+			}
+		}
+	}
+	return nil
+}
+
+func (f *multiFileLogWriter) Flush() {
+	for i := 0; i < len(f.writers); i++ {
+		if f.writers[i] != nil {
+			f.writers[i].Flush()
+		}
+	}
+}
+
+// newFilesWriter create a FileLogWriter returning as LoggerInterface.
+func newFilesWriter() Logger {
+	return &multiFileLogWriter{}
+}
+
+func init() {
+	Register(AdapterMultiFile, newFilesWriter)
+}
diff --git a/vendor/github.com/astaxie/beego/logs/slack.go b/vendor/github.com/astaxie/beego/logs/slack.go
new file mode 100644
index 00000000..1cd2e5ae
--- /dev/null
+++ b/vendor/github.com/astaxie/beego/logs/slack.go
@@ -0,0 +1,60 @@
+package logs
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// SLACKWriter implements beego Logger and posts messages to a Slack
+// incoming-webhook URL.
+type SLACKWriter struct {
+	WebhookURL string `json:"webhookurl"`
+	Level      int    `json:"level"`
+}
+
+// newSLACKWriter creates a slack writer.
+func newSLACKWriter() Logger {
+	return &SLACKWriter{Level: LevelTrace}
+}
+
+// Init SLACKWriter with json config string
+func (s *SLACKWriter) Init(jsonconfig string) error {
+	return json.Unmarshal([]byte(jsonconfig), s)
+}
+
+// WriteMsg posts the timestamped message to the configured Slack webhook.
+// Messages above the configured level are dropped.
+func (s *SLACKWriter) WriteMsg(when time.Time, msg string, level int) error {
+	if level > s.Level {
+		return nil
+	}
+
+	// NOTE(review): msg is interpolated into the JSON payload without
+	// escaping; quotes or newlines in msg can produce an invalid payload.
+	text := fmt.Sprintf("{\"text\": \"%s %s\"}", when.Format("2006-01-02 15:04:05"), msg)
+
+	form := url.Values{}
+	form.Add("payload", text)
+
+	resp, err := http.PostForm(s.WebhookURL, form)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode)
+	}
+	return nil
+}
+
+// Flush implementing method. empty.
+func (s *SLACKWriter) Flush() {
+}
+
+// Destroy implementing method. empty.
+func (s *SLACKWriter) Destroy() {
+}
+
+func init() {
+	Register(AdapterSlack, newSLACKWriter)
+}
diff --git a/vendor/github.com/astaxie/beego/logs/smtp.go b/vendor/github.com/astaxie/beego/logs/smtp.go
new file mode 100644
index 00000000..6208d7b8
--- /dev/null
+++ b/vendor/github.com/astaxie/beego/logs/smtp.go
@@ -0,0 +1,149 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logs
+
+import (
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/smtp"
+	"strings"
+	"time"
+)
+
+// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server.
+type SMTPWriter struct {
+	Username           string   `json:"username"`
+	Password           string   `json:"password"`
+	Host               string   `json:"host"`
+	Subject            string   `json:"subject"`
+	FromAddress        string   `json:"fromAddress"`
+	RecipientAddresses []string `json:"sendTos"`
+	Level              int      `json:"level"`
+}
+
+// newSMTPWriter create smtp writer.
+func newSMTPWriter() Logger {
+	return &SMTPWriter{Level: LevelTrace}
+}
+
+// Init smtp writer with json config.
+// config like:
+// {
+// "username":"example@gmail.com",
+// "password":"password",
+// "host":"smtp.gmail.com:465",
+// "subject":"email title",
+// "fromAddress":"from@example.com",
+// "sendTos":["email1","email2"],
+// "level":LevelError
+// }
+func (s *SMTPWriter) Init(jsonconfig string) error {
+	return json.Unmarshal([]byte(jsonconfig), s)
+}
+
+// getSMTPAuth returns PLAIN auth for the given host, or nil when no
+// credentials are configured (anonymous relay).
+func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth {
+	if len(strings.Trim(s.Username, " ")) == 0 && len(strings.Trim(s.Password, " ")) == 0 {
+		return nil
+	}
+	return smtp.PlainAuth(
+		"",
+		s.Username,
+		s.Password,
+		host,
+	)
+}
+
+// sendMail performs the full SMTP conversation: dial, STARTTLS, optional
+// auth, envelope (MAIL FROM / RCPT TO), DATA, and QUIT.
+func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAddress string, recipients []string, msgContent []byte) error {
+	client, err := smtp.Dial(hostAddressWithPort)
+	if err != nil {
+		return err
+	}
+
+	host, _, _ := net.SplitHostPort(hostAddressWithPort)
+	// NOTE(review): certificate verification is disabled here
+	// (InsecureSkipVerify: true); the server's identity is not checked.
+	tlsConn := &tls.Config{
+		InsecureSkipVerify: true,
+		ServerName:         host,
+	}
+	if err = client.StartTLS(tlsConn); err != nil {
+		return err
+	}
+
+	if auth != nil {
+		if err = client.Auth(auth); err != nil {
+			return err
+		}
+	}
+
+	if err = client.Mail(fromAddress); err != nil {
+		return err
+	}
+
+	for _, rec := range recipients {
+		if err = client.Rcpt(rec); err != nil {
+			return err
+		}
+	}
+
+	w, err := client.Data()
+	if err != nil {
+		return err
+	}
+	_, err = w.Write(msgContent)
+	if err != nil {
+		return err
+	}
+
+	err = w.Close()
+	if err != nil {
+		return err
+	}
+
+	return client.Quit()
+}
+
+// WriteMsg write message in smtp writer.
+// it will send an email with subject and only this message.
+func (s *SMTPWriter) WriteMsg(when time.Time, msg string, level int) error {
+	if level > s.Level {
+		return nil
+	}
+
+	hp := strings.Split(s.Host, ":")
+
+	// Set up authentication information.
+	auth := s.getSMTPAuth(hp[0])
+
+	// Connect to the server, authenticate, set the sender and recipient,
+	// and send the email all in one step.
+	contentType := "Content-Type: text/plain" + "; charset=UTF-8"
+	mailmsg := []byte("To: " + strings.Join(s.RecipientAddresses, ";") + "\r\nFrom: " + s.FromAddress + "<" + s.FromAddress +
+		">\r\nSubject: " + s.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", when.Format("2006-01-02 15:04:05")) + msg)
+
+	return s.sendMail(s.Host, auth, s.FromAddress, s.RecipientAddresses, mailmsg)
+}
+
+// Flush implementing method. empty.
+func (s *SMTPWriter) Flush() {
+}
+
+// Destroy implementing method. empty.
+func (s *SMTPWriter) Destroy() {
+}
+
+func init() {
+	Register(AdapterMail, newSMTPWriter)
+}
diff --git a/vendor/github.com/dchest/uniuri/README.md b/vendor/github.com/dchest/uniuri/README.md
new file mode 100644
index 00000000..b321a5fa
--- /dev/null
+++ b/vendor/github.com/dchest/uniuri/README.md
@@ -0,0 +1,97 @@
+Package uniuri
+=====================
+
+[![Build Status](https://travis-ci.org/dchest/uniuri.svg)](https://travis-ci.org/dchest/uniuri)
+
+```go
+import "github.com/dchest/uniuri"
+```
+
+Package uniuri generates random strings good for use in URIs to identify
+unique objects.
+
+Example usage:
+
+```go
+s := uniuri.New() // s is now "apHCJBl7L1OmC57n"
+```
+
+A standard string created by New() is 16 bytes in length and consists of
+Latin upper and lowercase letters, and numbers (from the set of 62 allowed
+characters), which means that it has ~95 bits of entropy. To get more
+entropy, you can use NewLen(UUIDLen), which returns 20-byte string, giving
+~119 bits of entropy, or any other desired length.
+
+Functions read from crypto/rand random source, and panic if they fail to
+read from it.
+
+
+Constants
+---------
+
+```go
+const (
+	// StdLen is a standard length of uniuri string to achieve ~95 bits of entropy.
+	StdLen = 16
+	// UUIDLen is a length of uniuri string to achieve ~119 bits of entropy, closest
+	// to what can be losslessly converted to UUIDv4 (122 bits).
+	UUIDLen = 20
+)
+
+```
+
+
+
+Variables
+---------
+
+```go
+var StdChars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
+```
+
+
+StdChars is a set of standard characters allowed in uniuri string.
+
+
+Functions
+---------
+
+### func New
+
+```go
+func New() string
+```
+
+New returns a new random string of the standard length, consisting of
+standard characters.
+
+### func NewLen
+
+```go
+func NewLen(length int) string
+```
+
+NewLen returns a new random string of the provided length, consisting of
+standard characters.
+
+### func NewLenChars
+
+```go
+func NewLenChars(length int, chars []byte) string
+```
+
+NewLenChars returns a new random string of the provided length, consisting
+of the provided byte slice of allowed characters (maximum 256).
+
+
+
+Public domain dedication
+------------------------
+
+Written in 2011-2014 by Dmitry Chestnykh
+
+The author(s) have dedicated all copyright and related and
+neighboring rights to this software to the public domain
+worldwide. Distributed without any warranty.
+http://creativecommons.org/publicdomain/zero/1.0/
+
diff --git a/vendor/github.com/dchest/uniuri/uniuri.go b/vendor/github.com/dchest/uniuri/uniuri.go
new file mode 100644
index 00000000..6393446c
--- /dev/null
+++ b/vendor/github.com/dchest/uniuri/uniuri.go
@@ -0,0 +1,81 @@
+// Written in 2011-2014 by Dmitry Chestnykh
+//
+// The author(s) have dedicated all copyright and related and
+// neighboring rights to this software to the public domain
+// worldwide. Distributed without any warranty.
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+// Package uniuri generates random strings good for use in URIs to identify
+// unique objects.
+//
+// Example usage:
+//
+// s := uniuri.New() // s is now "apHCJBl7L1OmC57n"
+//
+// A standard string created by New() is 16 bytes in length and consists of
+// Latin upper and lowercase letters, and numbers (from the set of 62 allowed
+// characters), which means that it has ~95 bits of entropy. To get more
+// entropy, you can use NewLen(UUIDLen), which returns 20-byte string, giving
+// ~119 bits of entropy, or any other desired length.
+//
+// Functions read from crypto/rand random source, and panic if they fail to
+// read from it.
+package uniuri
+
+import "crypto/rand"
+
+const (
+	// StdLen is a standard length of uniuri string to achieve ~95 bits of entropy.
+	StdLen = 16
+	// UUIDLen is a length of uniuri string to achieve ~119 bits of entropy, closest
+	// to what can be losslessly converted to UUIDv4 (122 bits).
+	UUIDLen = 20
+)
+
+// StdChars is a set of standard characters allowed in uniuri string.
+var StdChars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
+
+// New returns a new random string of the standard length, consisting of
+// standard characters.
+func New() string {
+	return NewLenChars(StdLen, StdChars)
+}
+
+// NewLen returns a new random string of the provided length, consisting of
+// standard characters.
+func NewLen(length int) string {
+	return NewLenChars(length, StdChars)
+}
+
+// NewLenChars returns a new random string of the provided length, consisting
+// of the provided byte slice of allowed characters (maximum 256).
+func NewLenChars(length int, chars []byte) string {
+	if length == 0 {
+		return ""
+	}
+	clen := len(chars)
+	if clen < 2 || clen > 256 {
+		panic("uniuri: wrong charset length for NewLenChars")
+	}
+	// maxrb is the largest random byte value that maps uniformly onto chars;
+	// bytes above it are rejected to avoid modulo bias.
+	maxrb := 255 - (256 % clen)
+	b := make([]byte, length)
+	r := make([]byte, length+(length/4)) // storage for random bytes.
+	i := 0
+	for {
+		if _, err := rand.Read(r); err != nil {
+			panic("uniuri: error reading random bytes: " + err.Error())
+		}
+		for _, rb := range r {
+			c := int(rb)
+			if c > maxrb {
+				// Skip this number to avoid modulo bias.
+				continue
+			}
+			b[i] = chars[c%clen]
+			i++
+			if i == length {
+				return string(b)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/gedex/inflector/CakePHP_LICENSE.txt b/vendor/github.com/gedex/inflector/CakePHP_LICENSE.txt
new file mode 100644
index 00000000..414ab1e7
--- /dev/null
+++ b/vendor/github.com/gedex/inflector/CakePHP_LICENSE.txt
@@ -0,0 +1,28 @@
+The MIT License
+
+CakePHP(tm) : The Rapid Development PHP Framework (http://cakephp.org)
+Copyright (c) 2005-2013, Cake Software Foundation, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+Cake Software Foundation, Inc.
+1785 E. Sahara Avenue,
+Suite 490-204
+Las Vegas, Nevada 89104,
+United States of America.
\ No newline at end of file diff --git a/vendor/github.com/gedex/inflector/LICENSE.md b/vendor/github.com/gedex/inflector/LICENSE.md new file mode 100644 index 00000000..cdbc2176 --- /dev/null +++ b/vendor/github.com/gedex/inflector/LICENSE.md @@ -0,0 +1,29 @@ +Copyright (c) 2013 Akeda Bagus . All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------- + +Much of this library was inspired from CakePHP's inflector, a PHP +framework licensed under MIT license (see CakePHP_LICENSE.txt). 
diff --git a/vendor/github.com/gedex/inflector/README.md b/vendor/github.com/gedex/inflector/README.md new file mode 100644 index 00000000..45c7b266 --- /dev/null +++ b/vendor/github.com/gedex/inflector/README.md @@ -0,0 +1,25 @@ +Inflector +========= + +Inflector pluralizes and singularizes English nouns. + +[![Build Status](https://travis-ci.org/gedex/inflector.png?branch=master)](https://travis-ci.org/gedex/inflector) +[![Coverage Status](https://coveralls.io/repos/gedex/inflector/badge.png?branch=master)](https://coveralls.io/r/gedex/inflector?branch=master) +[![GoDoc](https://godoc.org/github.com/gedex/inflector?status.svg)](https://godoc.org/github.com/gedex/inflector) + +## Basic Usage + +There are only two exported functions: `Pluralize` and `Singularize`. + +~~~go +fmt.Println(inflector.Singularize("People")) // will print "Person" +fmt.Println(inflector.Pluralize("octopus")) // will print "octopuses" +~~~ + +## Credits + +* [CakePHP's Inflector](https://github.com/cakephp/cakephp/blob/master/lib/Cake/Utility/Inflector.php) + +## License + +This library is distributed under the BSD-style license found in the LICENSE.md file. diff --git a/vendor/github.com/gedex/inflector/inflector.go b/vendor/github.com/gedex/inflector/inflector.go new file mode 100644 index 00000000..319f936c --- /dev/null +++ b/vendor/github.com/gedex/inflector/inflector.go @@ -0,0 +1,355 @@ +// Copyright 2013 Akeda Bagus . All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package inflector pluralizes and singularizes English nouns. + +There are only two exported functions: `Pluralize` and `Singularize`. 
+ + s := "People" + fmt.Println(inflector.Singularize(s)) // will print "Person" + + s2 := "octopus" + fmt.Println(inflector.Pluralize(s2)) // will print "octopuses" + +*/ +package inflector + +import ( + "bytes" + "fmt" + "regexp" + "strings" + "sync" +) + +// Rule represents name of the inflector rule, can be +// Plural or Singular +type Rule int + +const ( + Plural = iota + Singular +) + +// InflectorRule represents inflector rule +type InflectorRule struct { + Rules []*ruleItem + Irregular []*irregularItem + Uninflected []string + compiledIrregular *regexp.Regexp + compiledUninflected *regexp.Regexp + compiledRules []*compiledRule +} + +type ruleItem struct { + pattern string + replacement string +} + +type irregularItem struct { + word string + replacement string +} + +// compiledRule represents compiled version of Inflector.Rules. +type compiledRule struct { + replacement string + *regexp.Regexp +} + +// threadsafe access to rules and caches +var mutex sync.Mutex +var rules = make(map[Rule]*InflectorRule) + +// Words that should not be inflected +var uninflected = []string{ + `Amoyese`, `bison`, `Borghese`, `bream`, `breeches`, `britches`, `buffalo`, + `cantus`, `carp`, `chassis`, `clippers`, `cod`, `coitus`, `Congoese`, + `contretemps`, `corps`, `debris`, `diabetes`, `djinn`, `eland`, `elk`, + `equipment`, `Faroese`, `flounder`, `Foochowese`, `gallows`, `Genevese`, + `Genoese`, `Gilbertese`, `graffiti`, `headquarters`, `herpes`, `hijinks`, + `Hottentotese`, `information`, `innings`, `jackanapes`, `Kiplingese`, + `Kongoese`, `Lucchese`, `mackerel`, `Maltese`, `.*?media`, `mews`, `moose`, + `mumps`, `Nankingese`, `news`, `nexus`, `Niasese`, `Pekingese`, + `Piedmontese`, `pincers`, `Pistoiese`, `pliers`, `Portuguese`, `proceedings`, + `rabies`, `rice`, `rhinoceros`, `salmon`, `Sarawakese`, `scissors`, + `sea[- ]bass`, `series`, `Shavese`, `shears`, `siemens`, `species`, `swine`, + `testes`, `trousers`, `trout`, `tuna`, `Vermontese`, `Wenchowese`, `whiting`, + 
`wildebeest`, `Yengeese`, +} + +// Plural words that should not be inflected +var uninflectedPlurals = []string{ + `.*[nrlm]ese`, `.*deer`, `.*fish`, `.*measles`, `.*ois`, `.*pox`, `.*sheep`, + `people`, +} + +// Singular words that should not be inflected +var uninflectedSingulars = []string{ + `.*[nrlm]ese`, `.*deer`, `.*fish`, `.*measles`, `.*ois`, `.*pox`, `.*sheep`, + `.*ss`, +} + +type cache map[string]string + +// Inflected words that already cached for immediate retrieval from a given Rule +var caches = make(map[Rule]cache) + +// map of irregular words where its key is a word and its value is the replacement +var irregularMaps = make(map[Rule]cache) + +func init() { + + rules[Plural] = &InflectorRule{ + Rules: []*ruleItem{ + {`(?i)(s)tatus$`, `${1}${2}tatuses`}, + {`(?i)(quiz)$`, `${1}zes`}, + {`(?i)^(ox)$`, `${1}${2}en`}, + {`(?i)([m|l])ouse$`, `${1}ice`}, + {`(?i)(matr|vert|ind)(ix|ex)$`, `${1}ices`}, + {`(?i)(x|ch|ss|sh)$`, `${1}es`}, + {`(?i)([^aeiouy]|qu)y$`, `${1}ies`}, + {`(?i)(hive)$`, `$1s`}, + {`(?i)(?:([^f])fe|([lre])f)$`, `${1}${2}ves`}, + {`(?i)sis$`, `ses`}, + {`(?i)([ti])um$`, `${1}a`}, + {`(?i)(p)erson$`, `${1}eople`}, + {`(?i)(m)an$`, `${1}en`}, + {`(?i)(c)hild$`, `${1}hildren`}, + {`(?i)(buffal|tomat)o$`, `${1}${2}oes`}, + {`(?i)(alumn|bacill|cact|foc|fung|nucle|radi|stimul|syllab|termin|vir)us$`, `${1}i`}, + {`(?i)us$`, `uses`}, + {`(?i)(alias)$`, `${1}es`}, + {`(?i)(ax|cris|test)is$`, `${1}es`}, + {`s$`, `s`}, + {`^$`, ``}, + {`$`, `s`}, + }, + Irregular: []*irregularItem{ + {`atlas`, `atlases`}, + {`beef`, `beefs`}, + {`brother`, `brothers`}, + {`cafe`, `cafes`}, + {`child`, `children`}, + {`cookie`, `cookies`}, + {`corpus`, `corpuses`}, + {`cow`, `cows`}, + {`ganglion`, `ganglions`}, + {`genie`, `genies`}, + {`genus`, `genera`}, + {`graffito`, `graffiti`}, + {`hoof`, `hoofs`}, + {`loaf`, `loaves`}, + {`man`, `men`}, + {`money`, `monies`}, + {`mongoose`, `mongooses`}, + {`move`, `moves`}, + {`mythos`, `mythoi`}, + {`niche`, `niches`}, + 
{`numen`, `numina`}, + {`occiput`, `occiputs`}, + {`octopus`, `octopuses`}, + {`opus`, `opuses`}, + {`ox`, `oxen`}, + {`penis`, `penises`}, + {`person`, `people`}, + {`sex`, `sexes`}, + {`soliloquy`, `soliloquies`}, + {`testis`, `testes`}, + {`trilby`, `trilbys`}, + {`turf`, `turfs`}, + {`potato`, `potatoes`}, + {`hero`, `heroes`}, + {`tooth`, `teeth`}, + {`goose`, `geese`}, + {`foot`, `feet`}, + }, + } + prepare(Plural) + + rules[Singular] = &InflectorRule{ + Rules: []*ruleItem{ + {`(?i)(s)tatuses$`, `${1}${2}tatus`}, + {`(?i)^(.*)(menu)s$`, `${1}${2}`}, + {`(?i)(quiz)zes$`, `$1`}, + {`(?i)(matr)ices$`, `${1}ix`}, + {`(?i)(vert|ind)ices$`, `${1}ex`}, + {`(?i)^(ox)en`, `$1`}, + {`(?i)(alias)(es)*$`, `$1`}, + {`(?i)(alumn|bacill|cact|foc|fung|nucle|radi|stimul|syllab|termin|viri?)i$`, `${1}us`}, + {`(?i)([ftw]ax)es`, `$1`}, + {`(?i)(cris|ax|test)es$`, `${1}is`}, + {`(?i)(shoe|slave)s$`, `$1`}, + {`(?i)(o)es$`, `$1`}, + {`ouses$`, `ouse`}, + {`([^a])uses$`, `${1}us`}, + {`(?i)([m|l])ice$`, `${1}ouse`}, + {`(?i)(x|ch|ss|sh)es$`, `$1`}, + {`(?i)(m)ovies$`, `${1}${2}ovie`}, + {`(?i)(s)eries$`, `${1}${2}eries`}, + {`(?i)([^aeiouy]|qu)ies$`, `${1}y`}, + {`(?i)(tive)s$`, `$1`}, + {`(?i)([lre])ves$`, `${1}f`}, + {`(?i)([^fo])ves$`, `${1}fe`}, + {`(?i)(hive)s$`, `$1`}, + {`(?i)(drive)s$`, `$1`}, + {`(?i)(^analy)ses$`, `${1}sis`}, + {`(?i)(analy|diagno|^ba|(p)arenthe|(p)rogno|(s)ynop|(t)he)ses$`, `${1}${2}sis`}, + {`(?i)([ti])a$`, `${1}um`}, + {`(?i)(p)eople$`, `${1}${2}erson`}, + {`(?i)(m)en$`, `${1}an`}, + {`(?i)(c)hildren$`, `${1}${2}hild`}, + {`(?i)(n)ews$`, `${1}${2}ews`}, + {`eaus$`, `eau`}, + {`^(.*us)$`, `$1`}, + {`(?i)s$`, ``}, + }, + Irregular: []*irregularItem{ + {`foes`, `foe`}, + {`waves`, `wave`}, + {`curves`, `curve`}, + {`atlases`, `atlas`}, + {`beefs`, `beef`}, + {`brothers`, `brother`}, + {`cafes`, `cafe`}, + {`children`, `child`}, + {`cookies`, `cookie`}, + {`corpuses`, `corpus`}, + {`cows`, `cow`}, + {`ganglions`, `ganglion`}, + {`genies`, `genie`}, + 
{`genera`, `genus`}, + {`graffiti`, `graffito`}, + {`hoofs`, `hoof`}, + {`loaves`, `loaf`}, + {`men`, `man`}, + {`monies`, `money`}, + {`mongooses`, `mongoose`}, + {`moves`, `move`}, + {`mythoi`, `mythos`}, + {`niches`, `niche`}, + {`numina`, `numen`}, + {`occiputs`, `occiput`}, + {`octopuses`, `octopus`}, + {`opuses`, `opus`}, + {`oxen`, `ox`}, + {`penises`, `penis`}, + {`people`, `person`}, + {`sexes`, `sex`}, + {`soliloquies`, `soliloquy`}, + {`testes`, `testis`}, + {`trilbys`, `trilby`}, + {`turfs`, `turf`}, + {`potatoes`, `potato`}, + {`heroes`, `hero`}, + {`teeth`, `tooth`}, + {`geese`, `goose`}, + {`feet`, `foot`}, + }, + } + prepare(Singular) +} + +// prepare rule, e.g., compile the pattern. +func prepare(r Rule) error { + var reString string + + switch r { + case Plural: + // Merge global uninflected with singularsUninflected + rules[r].Uninflected = merge(uninflected, uninflectedPlurals) + case Singular: + // Merge global uninflected with singularsUninflected + rules[r].Uninflected = merge(uninflected, uninflectedSingulars) + } + + // Set InflectorRule.compiledUninflected by joining InflectorRule.Uninflected into + // a single string then compile it. + reString = fmt.Sprintf(`(?i)(^(?:%s))$`, strings.Join(rules[r].Uninflected, `|`)) + rules[r].compiledUninflected = regexp.MustCompile(reString) + + // Prepare irregularMaps + irregularMaps[r] = make(cache, len(rules[r].Irregular)) + + // Set InflectorRule.compiledIrregular by joining the irregularItem.word of Inflector.Irregular + // into a single string then compile it. 
+ vIrregulars := make([]string, len(rules[r].Irregular)) + for i, item := range rules[r].Irregular { + vIrregulars[i] = item.word + irregularMaps[r][item.word] = item.replacement + } + reString = fmt.Sprintf(`(?i)(.*)\b((?:%s))$`, strings.Join(vIrregulars, `|`)) + rules[r].compiledIrregular = regexp.MustCompile(reString) + + // Compile all patterns in InflectorRule.Rules + rules[r].compiledRules = make([]*compiledRule, len(rules[r].Rules)) + for i, item := range rules[r].Rules { + rules[r].compiledRules[i] = &compiledRule{item.replacement, regexp.MustCompile(item.pattern)} + } + + // Prepare caches + caches[r] = make(cache) + + return nil +} + +// merge slice a and slice b +func merge(a []string, b []string) []string { + result := make([]string, len(a)+len(b)) + copy(result, a) + copy(result[len(a):], b) + + return result +} + +// Pluralize returns string s in plural form. +func Pluralize(s string) string { + return getInflected(Plural, s) +} + +// Singularize returns string s in singular form. 
+func Singularize(s string) string {
+	return getInflected(Singular, s)
+}
+
+func getInflected(r Rule, s string) string {
+	mutex.Lock() // rules, caches and irregularMaps are shared package state
+	defer mutex.Unlock()
+	if v, ok := caches[r][s]; ok {
+		return v
+	}
+
+	// Check for irregular words first; they take priority over the rules
+	if res := rules[r].compiledIrregular.FindStringSubmatch(s); len(res) >= 3 {
+		var buf bytes.Buffer
+
+		buf.WriteString(res[1])
+		buf.WriteString(s[0:1]) // preserve the case of the first character of s
+		buf.WriteString(irregularMaps[r][strings.ToLower(res[2])][1:]) // replacement minus its first letter
+
+		// Cache the result, then return it
+		caches[r][s] = buf.String()
+		return caches[r][s]
+	}
+
+	// Check for uninflected words, which are returned unchanged
+	if rules[r].compiledUninflected.MatchString(s) {
+		caches[r][s] = s
+		return caches[r][s]
+	}
+
+	// Apply the first compiled rule whose pattern matches
+	for _, re := range rules[r].compiledRules {
+		if re.MatchString(s) {
+			caches[r][s] = re.ReplaceAllString(s, re.replacement)
+			return caches[r][s]
+		}
+	}
+
+	// Nothing matched; cache and return s unaltered
+	caches[r][s] = s
+	return caches[r][s]
+}
diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE
new file mode 100644
index 00000000..37ec93a1
--- /dev/null
+++ b/vendor/github.com/golang/glog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. 
If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README new file mode 100644 index 00000000..387b4eb6 --- /dev/null +++ b/vendor/github.com/golang/glog/README @@ -0,0 +1,44 @@ +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. + + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. 
The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go new file mode 100644 index 00000000..54bd7afd --- /dev/null +++ b/vendor/github.com/golang/glog/glog.go @@ -0,0 +1,1180 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. 
+// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. 
+const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. 
+var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. 
It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. 
+func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. + t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. 
+type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +func init() { + flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + // Default stderrThreshold is ERROR. + logging.stderrThreshold = errorLog + + logging.setVState(0, nil, false) + go logging.flushDaemon() +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. + mu sync.Mutex + // file holds writer for each of the log types. 
+	file [numSeverity]flushSyncWriter
+	// pcs is used in V to avoid an allocation when computing the caller's PC.
+	pcs [1]uintptr
+	// vmap is a cache of the V Level for each V() call site, identified by PC.
+	// It is wiped whenever the vmodule flag changes state.
+	vmap map[uintptr]Level
+	// filterLength stores the length of the vmodule filter chain. If greater
+	// than zero, it means vmodule is enabled. It may be read safely
+	// using atomic.LoadInt32, but is only modified under mu.
+	filterLength int32
+	// traceLocation is the state of the -log_backtrace_at flag.
+	traceLocation traceLocation
+	// These flags are modified only under lock, although verbosity may be fetched
+	// safely using atomic.LoadInt32.
+	vmodule   moduleSpec // The state of the -vmodule flag.
+	verbosity Level      // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+	bytes.Buffer
+	tmp  [64]byte // temporary byte array for creating headers.
+	next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+	// Turn verbosity off so V will not fire while we are in transition.
+	logging.verbosity.set(0)
+	// Ditto for filter length.
+	atomic.StoreInt32(&logging.filterLength, 0)
+
+	// Set the new filters and wipe the pc->Level map if the filter has changed.
+	if setFilter {
+		logging.vmodule.filter = filter
+		logging.vmap = make(map[uintptr]Level)
+	}
+
+	// Things are consistent now, so enable filtering and verbosity.
+	// They are enabled in order opposite to that in V.
+	atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+	logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + + // Avoid Fprintf, for speed. 
The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) 
+ l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if !flag.Parsed() { + os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) + os.Stderr.Write(data) + } else if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. 
+ l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. + if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. 
+func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. 
+type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. 
+ for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). 
+func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. 
+// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) 
+ } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
+func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). +func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) 
+} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go new file mode 100644 index 00000000..65075d28 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. +var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". 
+func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 00000000..0f646931 --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 00000000..3cd3249f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,253 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. 
+ Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). 
+// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. 
+ + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 00000000..d9aa3c42 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,428 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. 
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. 
+func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. 
+func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. 
+ // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 00000000..dea2617c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. 
+func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? 
+ dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 00000000..4c35d337 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,218 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. 
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. 
+func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. 
+// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 00000000..d4db5a1c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. 
+ - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := 
v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. 
+ // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 00000000..816a3b9d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,543 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. 
+type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. 
+type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. 
When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. 
+type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. 
+ return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. + value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. 
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. 
+ if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 00000000..0e2191b8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,921 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. 
+ - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. 
+ +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m 
*Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. 
+ if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
+func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. 
+func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. 
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != 
nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero 
default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + 
case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 00000000..3b6ca41d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,314 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "sync" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. 
We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. 
+ +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + + case map[int32]Extension: + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, + } + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + + default: + return nil, errors.New("proto: not an extension map") + } +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
	var m map[int32]Extension
	switch exts := exts.(type) {
	case *XXX_InternalExtensions:
		m = exts.extensionsWrite()
	case map[int32]Extension:
		m = exts
	default:
		return errors.New("proto: not an extension map")
	}

	// Decode buf as a messageSet, then re-encode each item as a regular
	// length-delimited extension field keyed by its type ID.
	ms := new(messageSet)
	if err := Unmarshal(buf, ms); err != nil {
		return err
	}
	for _, item := range ms.Item {
		id := *item.TypeId
		msg := item.Message

		// Restore wire type and field number varint, plus length varint.
		// Be careful to preserve duplicate items.
		b := EncodeVarint(uint64(id)<<3 | WireBytes)
		if ext, ok := m[id]; ok {
			// Existing data; rip off the tag and length varint
			// so we join the new data correctly.
			// We can assume that ext.enc is set because we are unmarshaling.
			o := ext.enc[len(b):]   // skip wire type and field number
			_, n := DecodeVarint(o) // calculate length of length varint
			o = o[n:]               // skip length varint
			msg = append(o, msg...) // join old data and new data
		}
		b = append(b, EncodeVarint(uint64(len(msg)))...)
		b = append(b, msg...)

		m[id] = Extension{enc: b}
	}
	return nil
}

// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
	var m map[int32]Extension
	switch exts := exts.(type) {
	case *XXX_InternalExtensions:
		var mu sync.Locker
		m, mu = exts.extensionsRead()
		if m != nil {
			// Keep the extensions map locked until we're done marshaling to prevent
			// races between marshaling and unmarshaling the lazily-{en,de}coded
			// values.
			mu.Lock()
			defer mu.Unlock()
		}
	case map[int32]Extension:
		m = exts
	default:
		return nil, errors.New("proto: not an extension map")
	}
	var b bytes.Buffer
	b.WriteByte('{')

	// Process the map in key order for deterministic output.
	ids := make([]int32, 0, len(m))
	for id := range m {
		ids = append(ids, id)
	}
	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go

	for i, id := range ids {
		ext := m[id]
		msd, ok := messageSetMap[id]
		if !ok {
			// Unknown type; we can't render it, so skip it.
			continue
		}

		// Only emit a separator once something has been written; an earlier
		// id may have been skipped as unknown, leaving only the '{' in b.
		if i > 0 && b.Len() > 1 {
			b.WriteByte(',')
		}

		fmt.Fprintf(&b, `"[%s]":`, msd.name)

		// Lazily decode the stored bytes if the value has not been
		// materialized yet.
		x := ext.value
		if x == nil {
			x = reflect.New(msd.t.Elem()).Interface()
			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
				return nil, err
			}
		}
		d, err := json.Marshal(x)
		if err != nil {
			return nil, err
		}
		b.Write(d)
	}
	b.WriteByte('}')
	return b.Bytes(), nil
}

// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
	// Common-case fast path.
	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
		return nil
	}

	// This is fairly tricky, and it's not clear that it is needed.
	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}

// A global registry of types that can be used in a MessageSet.

var messageSetMap = make(map[int32]messageSetDesc)

// messageSetDesc pairs a registered MessageSet element type with its name.
type messageSetDesc struct {
	t    reflect.Type // pointer to struct
	name string
}

// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 00000000..b6cad908 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. 
+func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. 
+/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p 
pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 00000000..d55a335d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. 
// The implementation here uses unsafe.Pointer to create a generic pointer.
// In pointer_reflect.go we use reflect instead of unsafe to implement
// the same (but slower) interface.
type pointer struct {
	p unsafe.Pointer
}

// ptrSize is the size in bytes of a machine pointer, used for
// interface-word arithmetic below.
var ptrSize = unsafe.Sizeof(uintptr(0))

// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
	// Super-tricky - read pointer out of data word of interface value.
	// Saves ~25ns over the equivalent:
	//	return valToPointer(reflect.ValueOf(*i))
	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}

// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr bool) pointer {
	// Super-tricky - read or get the address of data word of interface value.
	if isptr {
		// The interface is of pointer type, thus it is a direct interface.
		// The data word is the pointer data itself. We take its address.
		return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
	}
	// The interface is not of pointer type. The data word is the pointer
	// to the data.
	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}

// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
	return pointer{p: unsafe.Pointer(v.Pointer())}
}

// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
	// For safety, we should panic if !f.IsValid, however calling panic causes
	// this to no longer be inlineable, which is a serious performance cost.
	/*
		if !f.IsValid() {
			panic("invalid field")
		}
	*/
	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}

// isNil reports whether the raw pointer is nil.
func (p pointer) isNil() bool {
	return p.p == nil
}

// The to* accessors below reinterpret the raw pointer as a pointer to a
// specific Go type; callers must guarantee the pointed-to value really
// has that type.

func (p pointer) toInt64() *int64 {
	return (*int64)(p.p)
}
func (p pointer) toInt64Ptr() **int64 {
	return (**int64)(p.p)
}
func (p pointer) toInt64Slice() *[]int64 {
	return (*[]int64)(p.p)
}
func (p pointer) toInt32() *int32 {
	return (*int32)(p.p)
}

// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
/*
	func (p pointer) toInt32Ptr() **int32 {
		return (**int32)(p.p)
	}
	func (p pointer) toInt32Slice() *[]int32 {
		return (*[]int32)(p.p)
	}
*/
func (p pointer) getInt32Ptr() *int32 {
	return *(**int32)(p.p)
}
func (p pointer) setInt32Ptr(v int32) {
	// Store the address of a fresh copy of v.
	*(**int32)(p.p) = &v
}

// getInt32Slice loads a []int32 from p.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getInt32Slice() []int32 {
	return *(*[]int32)(p.p)
}

// setInt32Slice stores a []int32 to p.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setInt32Slice(v []int32) {
	*(*[]int32)(p.p) = v
}

// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
// appendInt32Slice appends v to the []int32 that p points to.
func (p pointer) appendInt32Slice(v int32) {
	s := (*[]int32)(p.p)
	*s = append(*s, v)
}

// The accessors below reinterpret the raw pointer as a pointer to a
// specific Go type; callers must guarantee the pointed-to value really
// has that type.

func (p pointer) toUint64() *uint64 {
	return (*uint64)(p.p)
}
func (p pointer) toUint64Ptr() **uint64 {
	return (**uint64)(p.p)
}
func (p pointer) toUint64Slice() *[]uint64 {
	return (*[]uint64)(p.p)
}
func (p pointer) toUint32() *uint32 {
	return (*uint32)(p.p)
}
func (p pointer) toUint32Ptr() **uint32 {
	return (**uint32)(p.p)
}
func (p pointer) toUint32Slice() *[]uint32 {
	return (*[]uint32)(p.p)
}
func (p pointer) toBool() *bool {
	return (*bool)(p.p)
}
func (p pointer) toBoolPtr() **bool {
	return (**bool)(p.p)
}
func (p pointer) toBoolSlice() *[]bool {
	return (*[]bool)(p.p)
}
func (p pointer) toFloat64() *float64 {
	return (*float64)(p.p)
}
func (p pointer) toFloat64Ptr() **float64 {
	return (**float64)(p.p)
}
func (p pointer) toFloat64Slice() *[]float64 {
	return (*[]float64)(p.p)
}
func (p pointer) toFloat32() *float32 {
	return (*float32)(p.p)
}
func (p pointer) toFloat32Ptr() **float32 {
	return (**float32)(p.p)
}
func (p pointer) toFloat32Slice() *[]float32 {
	return (*[]float32)(p.p)
}
func (p pointer) toString() *string {
	return (*string)(p.p)
}
func (p pointer) toStringPtr() **string {
	return (**string)(p.p)
}
func (p pointer) toStringSlice() *[]string {
	return (*[]string)(p.p)
}
func (p pointer) toBytes() *[]byte {
	return (*[]byte)(p.p)
}
func (p pointer) toBytesSlice() *[][]byte {
	return (*[][]byte)(p.p)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
	return (*XXX_InternalExtensions)(p.p)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
	return (*map[int32]Extension)(p.p)
}

// getPointerSlice loads []*T from p as a []pointer.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getPointerSlice() []pointer {
	// Super-tricky - p should point to a []*T where T is a
	// message type. We load it as []pointer.
	return *(*[]pointer)(p.p)
}

// setPointerSlice stores []pointer into p as a []*T.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setPointerSlice(v []pointer) {
	// Super-tricky - p should point to a []*T where T is a
	// message type. We store it as []pointer.
	*(*[]pointer)(p.p) = v
}

// getPointer loads the pointer at p and returns it.
func (p pointer) getPointer() pointer {
	return pointer{p: *(*unsafe.Pointer)(p.p)}
}

// setPointer stores the pointer q at p.
func (p pointer) setPointer(q pointer) {
	*(*unsafe.Pointer)(p.p) = q.p
}

// appendPointer appends q to the slice of pointers that p points to.
func (p pointer) appendPointer(q pointer) {
	s := (*[]unsafe.Pointer)(p.p)
	*s = append(*s, q.p)
}

// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
	// Super-tricky - read pointer out of data word of interface value.
	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
}

// asPointerTo returns a reflect.Value that is a pointer to an
// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 00000000..f710adab --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,544 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
type Properties struct {
	Name     string // name of the field, for error messages
	OrigName string // original name before protocol compiler (always set)
	JSONName string // name to use for JSON; determined by protoc
	Wire     string
	WireType int
	Tag      int
	Required bool
	Optional bool
	Repeated bool
	Packed   bool   // relevant for repeated primitives only
	Enum     string // set for enum types only
	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
	oneof    bool   // whether this is a oneof field

	Default    string // default value
	HasDefault bool   // whether an explicit default was provided

	stype reflect.Type      // set for struct types only
	sprop *StructProperties // set for struct types only

	mtype    reflect.Type // set for map types only
	mkeyprop *Properties  // set for map types only
	mvalprop *Properties  // set for map types only
}

// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
	s := p.Wire
	s += ","
	s += strconv.Itoa(p.Tag)
	if p.Required {
		s += ",req"
	}
	if p.Optional {
		s += ",opt"
	}
	if p.Repeated {
		s += ",rep"
	}
	if p.Packed {
		s += ",packed"
	}
	s += ",name=" + p.OrigName
	if p.JSONName != p.OrigName {
		s += ",json=" + p.JSONName
	}
	if p.proto3 {
		s += ",proto3"
	}
	if p.oneof {
		s += ",oneof"
	}
	if len(p.Enum) > 0 {
		s += ",enum=" + p.Enum
	}
	if p.HasDefault {
		s += ",def=" + p.Default
	}
	return s
}

// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
	// "bytes,49,opt,name=foo,def=hello!"
	fields := strings.Split(s, ",") // breaks def=, but handled below.
	if len(fields) < 2 {
		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
		return
	}

	// Field 0: the wire encoding.
	p.Wire = fields[0]
	switch p.Wire {
	case "varint":
		p.WireType = WireVarint
	case "fixed32":
		p.WireType = WireFixed32
	case "fixed64":
		p.WireType = WireFixed64
	case "zigzag32":
		p.WireType = WireVarint
	case "zigzag64":
		p.WireType = WireVarint
	case "bytes", "group":
		p.WireType = WireBytes
		// no numeric converter for non-numeric types
	default:
		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
		return
	}

	// Field 1: the tag number.
	var err error
	p.Tag, err = strconv.Atoi(fields[1])
	if err != nil {
		return
	}

	// Remaining fields: flags and key=value options.
outer:
	for i := 2; i < len(fields); i++ {
		f := fields[i]
		switch {
		case f == "req":
			p.Required = true
		case f == "opt":
			p.Optional = true
		case f == "rep":
			p.Repeated = true
		case f == "packed":
			p.Packed = true
		case strings.HasPrefix(f, "name="):
			p.OrigName = f[5:]
		case strings.HasPrefix(f, "json="):
			p.JSONName = f[5:]
		case strings.HasPrefix(f, "enum="):
			p.Enum = f[5:]
		case f == "proto3":
			p.proto3 = true
		case f == "oneof":
			p.oneof = true
		case strings.HasPrefix(f, "def="):
			p.HasDefault = true
			p.Default = f[4:] // rest of string
			if i+1 < len(fields) {
				// Commas aren't escaped, and def is always last.
				p.Default += "," + strings.Join(fields[i+1:], ",")
				break outer
			}
		}
	}
}

// protoMessageType is the reflect.Type of the Message interface.
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()

// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + switch t1 := typ; t1.Kind() { + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + + case reflect.Slice: + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() + } + + case reflect.Map: + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. 
// GetProperties returns the list of properties for the type represented by t,
// computing and caching them on first use.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
	if t.Kind() != reflect.Struct {
		panic("proto: type must have kind struct")
	}

	// Most calls to GetProperties in a long-running program will be
	// retrieving details for types we have seen before.
	propertiesMu.RLock()
	sprop, ok := propertiesMap[t]
	propertiesMu.RUnlock()
	if ok {
		if collectStats {
			stats.Chit++
		}
		return sprop
	}

	// Slow path: take the write lock and (re-check then) build the entry.
	propertiesMu.Lock()
	sprop = getPropertiesLocked(t)
	propertiesMu.Unlock()
	return sprop
}

// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
	if prop, ok := propertiesMap[t]; ok {
		if collectStats {
			stats.Chit++
		}
		return prop
	}
	if collectStats {
		stats.Cmiss++
	}

	prop := new(StructProperties)
	// in case of recursive protos, fill this in now.
	propertiesMap[t] = prop

	// build properties
	prop.Prop = make([]*Properties, t.NumField())
	prop.order = make([]int, t.NumField())

	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		p := new(Properties)
		name := f.Name
		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)

		oneof := f.Tag.Get("protobuf_oneof") // special case
		if oneof != "" {
			// Oneof fields don't use the traditional protobuf tag.
			p.OrigName = oneof
		}
		prop.Prop[i] = p
		prop.order[i] = i
		if debug {
			print(i, " ", f.Name, " ", t.String(), " ")
			if p.Tag > 0 {
				print(p.String())
			}
			print("\n")
		}
	}

	// Re-order prop.order.
	sort.Sort(prop)

	type oneofMessage interface {
		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
	}
	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
		var oots []interface{}
		_, _, _, oots = om.XXX_OneofFuncs()

		// Interpret oneof metadata.
		prop.OneofTypes = make(map[string]*OneofProperties)
		for _, oot := range oots {
			oop := &OneofProperties{
				Type: reflect.ValueOf(oot).Type(), // *T
				Prop: new(Properties),
			}
			sft := oop.Type.Elem().Field(0)
			oop.Prop.Name = sft.Name
			oop.Prop.Parse(sft.Tag.Get("protobuf"))
			// There will be exactly one interface field that
			// this new value is assignable to.
			for i := 0; i < t.NumField(); i++ {
				f := t.Field(i)
				if f.Type.Kind() != reflect.Interface {
					continue
				}
				if !oop.Type.AssignableTo(f.Type) {
					continue
				}
				oop.Field = i
				break
			}
			prop.OneofTypes[oop.Prop.OrigName] = oop
		}
	}

	// build required counts
	// build tags
	reqCount := 0
	prop.decoderOrigNames = make(map[string]int)
	for i, p := range prop.Prop {
		if strings.HasPrefix(p.Name, "XXX_") {
			// Internal fields should not appear in tags/origNames maps.
			// They are handled specially when encoding and decoding.
			continue
		}
		if p.Required {
			reqCount++
		}
		prop.decoderTags.put(p.Tag, i)
		prop.decoderOrigNames[p.OrigName] = i
	}
	prop.reqCount = reqCount

	return prop
}

// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.

var enumValueMaps = make(map[string]map[string]int32)

// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
// Registering the same enum name twice panics.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
	if _, ok := enumValueMaps[typeName]; ok {
		panic("proto: duplicate enum registered: " + typeName)
	}
	enumValueMaps[typeName] = valueMap
}

// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or a nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
	return enumValueMaps[enumType]
}

// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. 
// MessageType returns the message type (pointer to struct) for a named message,
// consulting the struct registry first, then the map-entry registry.
// The type is not guaranteed to implement proto.Message if the name refers to a
// map entry.
func MessageType(name string) reflect.Type {
	if t, ok := protoTypedNils[name]; ok {
		return reflect.TypeOf(t)
	}
	return protoMapTypes[name]
}

// A registry of all linked proto files.
var (
	protoFiles = make(map[string][]byte) // file name => fileDescriptor
)

// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
	protoFiles[filename] = fileDescriptor
}

// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 00000000..be7b2428
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2685 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import (
	"errors"
	"fmt"
	"math"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"unicode/utf8"
)

// a sizer takes a pointer to a field and the size of its tag, computes the size of
// the encoded data.
type sizer func(pointer, int) int

// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
// marshals the field to the end of the slice, returns the slice and error (if any).
type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)

// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. 
// It computes the size of encoded data of msg.
// a is a pointer to a place to store cached marshal info.
func (a *InternalMessageInfo) Size(msg Message) int {
	u := getMessageMarshalInfo(msg, a)
	ptr := toPointer(&msg)
	if ptr.isNil() {
		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
		// so it satisfies the interface, and msg == nil wouldn't
		// catch it. We don't want crash in this case.
		return 0
	}
	return u.size(ptr)
}

// Marshal is the entry point from generated code,
// and should be ONLY called by generated code.
// It marshals msg to the end of b.
// a is a pointer to a place to store cached marshal info.
func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
	u := getMessageMarshalInfo(msg, a)
	ptr := toPointer(&msg)
	if ptr.isNil() {
		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
		// so it satisfies the interface, and msg == nil wouldn't
		// catch it. We don't want crash in this case.
		return b, ErrNil
	}
	return u.marshal(b, ptr, deterministic)
}

// getMessageMarshalInfo returns the (possibly not-yet-initialized) marshal
// info for msg, caching it in a via atomic load/store so concurrent callers
// see a consistent value.
func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
	// u := a.marshal, but atomically.
	// We use an atomic here to ensure memory consistency.
	u := atomicLoadMarshalInfo(&a.marshal)
	if u == nil {
		// Get marshal information from type of message.
		t := reflect.ValueOf(msg).Type()
		if t.Kind() != reflect.Ptr {
			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
		}
		u = getMarshalInfo(t.Elem())
		// Store it in the cache for later users.
		// a.marshal = u, but atomically.
		atomicStoreMarshalInfo(&a.marshal, u)
	}
	return u
}

// size is the main function to compute the size of the encoded data of a message.
// ptr is the pointer to the message.
func (u *marshalInfo) size(ptr pointer) int {
	if atomic.LoadInt32(&u.initialized) == 0 {
		u.computeMarshalInfo()
	}

	// If the message can marshal itself, let it do it, for compatibility.
	// NOTE: This is not efficient.
	if u.hasmarshaler {
		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
		b, _ := m.Marshal()
		return len(b)
	}

	n := 0
	for _, f := range u.fields {
		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
			// nil pointer always marshals to nothing
			continue
		}
		n += f.sizer(ptr.offset(f.field), f.tagsize)
	}
	if u.extensions.IsValid() {
		e := ptr.offset(u.extensions).toExtensions()
		if u.messageset {
			n += u.sizeMessageSet(e)
		} else {
			n += u.sizeExtensions(e)
		}
	}
	if u.v1extensions.IsValid() {
		m := *ptr.offset(u.v1extensions).toOldExtensions()
		n += u.sizeV1Extensions(m)
	}
	if u.unrecognized.IsValid() {
		s := *ptr.offset(u.unrecognized).toBytes()
		n += len(s)
	}
	// cache the result for use in marshal
	if u.sizecache.IsValid() {
		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
	}
	return n
}

// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
// fall back to compute the size.
func (u *marshalInfo) cachedsize(ptr pointer) int {
	if u.sizecache.IsValid() {
		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
	}
	return u.size(ptr)
}

// marshal is the main function to marshal a message. It takes a byte slice and appends
// the encoded data to the end of the slice, returns the slice and error (if any).
// ptr is the pointer to the message.
// If deterministic is true, map is marshaled in deterministic order.
// Missing required fields are recorded in errreq (first one wins) but do not
// stop marshaling, so the output is as complete as possible.
func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
	if atomic.LoadInt32(&u.initialized) == 0 {
		u.computeMarshalInfo()
	}

	// If the message can marshal itself, let it do it, for compatibility.
	// NOTE: This is not efficient.
	if u.hasmarshaler {
		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
		b1, err := m.Marshal()
		b = append(b, b1...)
		return b, err
	}

	var err, errreq error
	// The old marshaler encodes extensions at beginning.
	if u.extensions.IsValid() {
		e := ptr.offset(u.extensions).toExtensions()
		if u.messageset {
			b, err = u.appendMessageSet(b, e, deterministic)
		} else {
			b, err = u.appendExtensions(b, e, deterministic)
		}
		if err != nil {
			return b, err
		}
	}
	if u.v1extensions.IsValid() {
		m := *ptr.offset(u.v1extensions).toOldExtensions()
		b, err = u.appendV1Extensions(b, m, deterministic)
		if err != nil {
			return b, err
		}
	}
	for _, f := range u.fields {
		if f.required && errreq == nil {
			if ptr.offset(f.field).getPointer().isNil() {
				// Required field is not set.
				// We record the error but keep going, to give a complete marshaling.
				errreq = &RequiredNotSetError{f.name}
				continue
			}
		}
		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
			// nil pointer always marshals to nothing
			continue
		}
		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
		if err != nil {
			if err1, ok := err.(*RequiredNotSetError); ok {
				// Required field in submessage is not set.
				// We record the error but keep going, to give a complete marshaling.
				if errreq == nil {
					errreq = &RequiredNotSetError{f.name + "." + err1.field}
				}
				continue
			}
			if err == errRepeatedHasNil {
				err = errors.New("proto: repeated field " + f.name + " has nil element")
			}
			if err == errInvalidUTF8 {
				fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
				err = fmt.Errorf("proto: string field %q contains invalid UTF-8", fullName)
			}
			return b, err
		}
	}
	if u.unrecognized.IsValid() {
		s := *ptr.offset(u.unrecognized).toBytes()
		b = append(b, s...)
	}
	return b, errreq
}

// computeMarshalInfo initializes the marshal info.
// computeMarshalInfo initializes the marshal info: it scans the struct fields,
// records offsets of the special XXX_ bookkeeping fields, and builds the
// per-field sizer/marshaler table sorted by wire tag.
func (u *marshalInfo) computeMarshalInfo() {
	u.Lock()
	defer u.Unlock()
	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
		return
	}

	t := u.typ
	u.unrecognized = invalidField
	u.extensions = invalidField
	u.v1extensions = invalidField
	u.sizecache = invalidField

	// If the message can marshal itself, let it do it, for compatibility.
	// NOTE: This is not efficient.
	if reflect.PtrTo(t).Implements(marshalerType) {
		u.hasmarshaler = true
		atomic.StoreInt32(&u.initialized, 1)
		return
	}

	// get oneof implementers
	var oneofImplementers []interface{}
	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
	}

	n := t.NumField()

	// deal with XXX fields first
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if !strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		switch f.Name {
		case "XXX_sizecache":
			u.sizecache = toField(&f)
		case "XXX_unrecognized":
			u.unrecognized = toField(&f)
		case "XXX_InternalExtensions":
			u.extensions = toField(&f)
			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
		case "XXX_extensions":
			u.v1extensions = toField(&f)
		case "XXX_NoUnkeyedLiteral":
			// nothing to do
		default:
			panic("unknown XXX field: " + f.Name)
		}
		// XXX_ fields are bookkeeping only; shrink the normal-field count.
		n--
	}

	// normal fields
	fields := make([]marshalFieldInfo, n) // batch allocation
	u.fields = make([]*marshalFieldInfo, 0, n)
	for i, j := 0, 0; i < t.NumField(); i++ {
		f := t.Field(i)

		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		field := &fields[j]
		j++
		field.name = f.Name
		u.fields = append(u.fields, field)
		if f.Tag.Get("protobuf_oneof") != "" {
			field.computeOneofFieldInfo(&f, oneofImplementers)
			continue
		}
		if f.Tag.Get("protobuf") == "" {
			// field has no tag (not in generated message), ignore it
			u.fields = u.fields[:len(u.fields)-1]
			j--
			continue
		}
		field.computeMarshalFieldInfo(&f)
	}

	// fields are marshaled in tag order on the wire.
	sort.Sort(byTag(u.fields))

	atomic.StoreInt32(&u.initialized, 1)
}

// helper for sorting fields by tag
type byTag []*marshalFieldInfo

func (a byTag) Len() int           { return len(a) }
func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }

// getExtElemInfo returns the information to marshal an extension element.
// The info it returns is initialized.
func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
	// get from cache first
	u.RLock()
	e, ok := u.extElems[desc.Field]
	u.RUnlock()
	if ok {
		return e
	}

	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
	tags := strings.Split(desc.Tag, ",")
	tag, err := strconv.Atoi(tags[1])
	if err != nil {
		panic("tag is not an integer")
	}
	wt := wiretype(tags[0])
	sizer, marshaler := typeMarshaler(t, tags, false, false)
	e = &marshalElemInfo{
		wiretag:   uint64(tag)<<3 | wt,
		tagsize:   SizeVarint(uint64(tag) << 3),
		sizer:     sizer,
		marshaler: marshaler,
		isptr:     t.Kind() == reflect.Ptr,
	}

	// update cache
	u.Lock()
	if u.extElems == nil {
		u.extElems = make(map[int32]*marshalElemInfo)
	}
	u.extElems[desc.Field] = e
	u.Unlock()
	return e
}

// computeMarshalFieldInfo fills up the information to marshal a field.
func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
	// parse protobuf tag of the field.
	// tag has format of "bytes,49,opt,name=foo,def=hello!"
	tags := strings.Split(f.Tag.Get("protobuf"), ",")
	if tags[0] == "" {
		return
	}
	tag, err := strconv.Atoi(tags[1])
	if err != nil {
		panic("tag is not an integer")
	}
	wt := wiretype(tags[0])
	if tags[2] == "req" {
		fi.required = true
	}
	fi.setTag(f, tag, wt)
	fi.setMarshaler(f, tags)
}

// computeOneofFieldInfo fills up the information to marshal a oneof
// interface field, building one marshalElemInfo per implementing type.
func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
	fi.field = toField(f)
	fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
	fi.isPointer = true
	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)

	ityp := f.Type // interface type
	for _, o := range oneofImplementers {
		t := reflect.TypeOf(o)
		if !t.Implements(ityp) {
			continue
		}
		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
		tag, err := strconv.Atoi(tags[1])
		if err != nil {
			panic("tag is not an integer")
		}
		wt := wiretype(tags[0])
		sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
		fi.oneofElems[t.Elem()] = &marshalElemInfo{
			wiretag:   uint64(tag)<<3 | wt,
			tagsize:   SizeVarint(uint64(tag) << 3),
			sizer:     sizer,
			marshaler: marshaler,
		}
	}
}

type oneofMessage interface {
	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}

// wiretype returns the wire encoding of the type.
func wiretype(encoding string) uint64 {
	switch encoding {
	case "fixed32":
		return WireFixed32
	case "fixed64":
		return WireFixed64
	case "varint", "zigzag32", "zigzag64":
		return WireVarint
	case "bytes":
		return WireBytes
	case "group":
		return WireStartGroup
	}
	panic("unknown wire type " + encoding)
}

// setTag fills up the tag (in wire format) and its size in the info of a field.
// setTag fills up the tag (in wire format) and its size in the info of a field.
func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
	fi.field = toField(f)
	fi.wiretag = uint64(tag)<<3 | wt
	fi.tagsize = SizeVarint(uint64(tag) << 3)
}

// setMarshaler fills up the sizer and marshaler in the info of a field.
func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
	switch f.Type.Kind() {
	case reflect.Map:
		// map field
		fi.isPointer = true
		fi.sizer, fi.marshaler = makeMapMarshaler(f)
		return
	case reflect.Ptr, reflect.Slice:
		fi.isPointer = true
	}
	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
}

// typeMarshaler returns the sizer and marshaler of a given field.
// t is the type of the field.
// tags is the generated "protobuf" tag of the field.
// If nozero is true, zero value is not marshaled to the wire.
// If oneof is true, it is a oneof field.
// It dispatches on (Go kind, wire encoding, pointer/slice/packed shape) to one
// of the hand-written sizer/appender pairs defined later in this file.
func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
	encoding := tags[0]

	pointer := false
	slice := false
	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
		slice = true
		t = t.Elem()
	}
	if t.Kind() == reflect.Ptr {
		pointer = true
		t = t.Elem()
	}

	packed := false
	proto3 := false
	for i := 2; i < len(tags); i++ {
		if tags[i] == "packed" {
			packed = true
		}
		if tags[i] == "proto3" {
			proto3 = true
		}
	}

	switch t.Kind() {
	case reflect.Bool:
		if pointer {
			return sizeBoolPtr, appendBoolPtr
		}
		if slice {
			if packed {
				return sizeBoolPackedSlice, appendBoolPackedSlice
			}
			return sizeBoolSlice, appendBoolSlice
		}
		if nozero {
			return sizeBoolValueNoZero, appendBoolValueNoZero
		}
		return sizeBoolValue, appendBoolValue
	case reflect.Uint32:
		switch encoding {
		case "fixed32":
			if pointer {
				return sizeFixed32Ptr, appendFixed32Ptr
			}
			if slice {
				if packed {
					return sizeFixed32PackedSlice, appendFixed32PackedSlice
				}
				return sizeFixed32Slice, appendFixed32Slice
			}
			if nozero {
				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
			}
			return sizeFixed32Value, appendFixed32Value
		case "varint":
			if pointer {
				return sizeVarint32Ptr, appendVarint32Ptr
			}
			if slice {
				if packed {
					return sizeVarint32PackedSlice, appendVarint32PackedSlice
				}
				return sizeVarint32Slice, appendVarint32Slice
			}
			if nozero {
				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
			}
			return sizeVarint32Value, appendVarint32Value
		}
	case reflect.Int32:
		switch encoding {
		case "fixed32":
			if pointer {
				return sizeFixedS32Ptr, appendFixedS32Ptr
			}
			if slice {
				if packed {
					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
				}
				return sizeFixedS32Slice, appendFixedS32Slice
			}
			if nozero {
				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
			}
			return sizeFixedS32Value, appendFixedS32Value
		case "varint":
			if pointer {
				return sizeVarintS32Ptr, appendVarintS32Ptr
			}
			if slice {
				if packed {
					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
				}
				return sizeVarintS32Slice, appendVarintS32Slice
			}
			if nozero {
				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
			}
			return sizeVarintS32Value, appendVarintS32Value
		case "zigzag32":
			if pointer {
				return sizeZigzag32Ptr, appendZigzag32Ptr
			}
			if slice {
				if packed {
					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
				}
				return sizeZigzag32Slice, appendZigzag32Slice
			}
			if nozero {
				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
			}
			return sizeZigzag32Value, appendZigzag32Value
		}
	case reflect.Uint64:
		switch encoding {
		case "fixed64":
			if pointer {
				return sizeFixed64Ptr, appendFixed64Ptr
			}
			if slice {
				if packed {
					return sizeFixed64PackedSlice, appendFixed64PackedSlice
				}
				return sizeFixed64Slice, appendFixed64Slice
			}
			if nozero {
				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
			}
			return sizeFixed64Value, appendFixed64Value
		case "varint":
			if pointer {
				return sizeVarint64Ptr, appendVarint64Ptr
			}
			if slice {
				if packed {
					return sizeVarint64PackedSlice, appendVarint64PackedSlice
				}
				return sizeVarint64Slice, appendVarint64Slice
			}
			if nozero {
				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
			}
			return sizeVarint64Value, appendVarint64Value
		}
	case reflect.Int64:
		switch encoding {
		case "fixed64":
			if pointer {
				return sizeFixedS64Ptr, appendFixedS64Ptr
			}
			if slice {
				if packed {
					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
				}
				return sizeFixedS64Slice, appendFixedS64Slice
			}
			if nozero {
				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
			}
			return sizeFixedS64Value, appendFixedS64Value
		case "varint":
			if pointer {
				return sizeVarintS64Ptr, appendVarintS64Ptr
			}
			if slice {
				if packed {
					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
				}
				return sizeVarintS64Slice, appendVarintS64Slice
			}
			if nozero {
				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
			}
			return sizeVarintS64Value, appendVarintS64Value
		case "zigzag64":
			if pointer {
				return sizeZigzag64Ptr, appendZigzag64Ptr
			}
			if slice {
				if packed {
					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
				}
				return sizeZigzag64Slice, appendZigzag64Slice
			}
			if nozero {
				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
			}
			return sizeZigzag64Value, appendZigzag64Value
		}
	case reflect.Float32:
		if pointer {
			return sizeFloat32Ptr, appendFloat32Ptr
		}
		if slice {
			if packed {
				return sizeFloat32PackedSlice, appendFloat32PackedSlice
			}
			return sizeFloat32Slice, appendFloat32Slice
		}
		if nozero {
			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
		}
		return sizeFloat32Value, appendFloat32Value
	case reflect.Float64:
		if pointer {
			return sizeFloat64Ptr, appendFloat64Ptr
		}
		if slice {
			if packed {
				return sizeFloat64PackedSlice, appendFloat64PackedSlice
			}
			return sizeFloat64Slice, appendFloat64Slice
		}
		if nozero {
			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
		}
		return sizeFloat64Value, appendFloat64Value
	case reflect.String:
		if pointer {
			return sizeStringPtr, appendStringPtr
		}
		if slice {
			return sizeStringSlice, appendStringSlice
		}
		if nozero {
			return sizeStringValueNoZero, appendStringValueNoZero
		}
		return sizeStringValue, appendStringValue
	case reflect.Slice:
		if slice {
			return sizeBytesSlice, appendBytesSlice
		}
		if oneof {
			// Oneof bytes field may also have "proto3" tag.
			// We want to marshal it as a oneof field. Do this
			// check before the proto3 check.
			return sizeBytesOneof, appendBytesOneof
		}
		if proto3 {
			return sizeBytes3, appendBytes3
		}
		return sizeBytes, appendBytes
	case reflect.Struct:
		switch encoding {
		case "group":
			if slice {
				return makeGroupSliceMarshaler(getMarshalInfo(t))
			}
			return makeGroupMarshaler(getMarshalInfo(t))
		case "bytes":
			if slice {
				return makeMessageSliceMarshaler(getMarshalInfo(t))
			}
			return makeMessageMarshaler(getMarshalInfo(t))
		}
	}
	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
}

// Below are functions to size/marshal a specific type of a field.
// They are stored in the field's info, and called by function pointers.
// They have type sizer or marshaler.
// Sizers (size*) and encoders (append*) for every scalar field shape.
// Naming convention used throughout:
//   *Value       - plain field, always emitted
//   *ValueNoZero - field omitted entirely when it holds the zero value
//   *Ptr         - optional field behind a pointer, omitted when nil
//   *Slice       - repeated field, one tag per element
//   *PackedSlice - repeated field, one tag + length-prefixed payload
// Each sizer returns the full on-wire size including tagsize; 0 means
// "field not emitted at all".

// Fixed32/FixedS32/Float32: 4 data bytes per value.
func sizeFixed32Value(_ pointer, tagsize int) int {
	return 4 + tagsize
}
func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toUint32()
	if v == 0 {
		return 0
	}
	return 4 + tagsize
}
func sizeFixed32Ptr(ptr pointer, tagsize int) int {
	p := *ptr.toUint32Ptr()
	if p == nil {
		return 0
	}
	return 4 + tagsize
}
func sizeFixed32Slice(ptr pointer, tagsize int) int {
	s := *ptr.toUint32Slice()
	return (4 + tagsize) * len(s)
}
func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toUint32Slice()
	if len(s) == 0 {
		return 0
	}
	// payload + varint length prefix + one tag
	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
}
func sizeFixedS32Value(_ pointer, tagsize int) int {
	return 4 + tagsize
}
func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toInt32()
	if v == 0 {
		return 0
	}
	return 4 + tagsize
}
func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
	p := ptr.getInt32Ptr()
	if p == nil {
		return 0
	}
	return 4 + tagsize
}
func sizeFixedS32Slice(ptr pointer, tagsize int) int {
	s := ptr.getInt32Slice()
	return (4 + tagsize) * len(s)
}
func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
	s := ptr.getInt32Slice()
	if len(s) == 0 {
		return 0
	}
	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
}
func sizeFloat32Value(_ pointer, tagsize int) int {
	return 4 + tagsize
}
func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
	// Compare the bit pattern, not the float value, so that negative
	// zero (bits != 0) is still emitted.
	v := math.Float32bits(*ptr.toFloat32())
	if v == 0 {
		return 0
	}
	return 4 + tagsize
}
func sizeFloat32Ptr(ptr pointer, tagsize int) int {
	p := *ptr.toFloat32Ptr()
	if p == nil {
		return 0
	}
	return 4 + tagsize
}
func sizeFloat32Slice(ptr pointer, tagsize int) int {
	s := *ptr.toFloat32Slice()
	return (4 + tagsize) * len(s)
}
func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toFloat32Slice()
	if len(s) == 0 {
		return 0
	}
	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
}

// Fixed64/FixedS64/Float64: 8 data bytes per value.
func sizeFixed64Value(_ pointer, tagsize int) int {
	return 8 + tagsize
}
func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toUint64()
	if v == 0 {
		return 0
	}
	return 8 + tagsize
}
func sizeFixed64Ptr(ptr pointer, tagsize int) int {
	p := *ptr.toUint64Ptr()
	if p == nil {
		return 0
	}
	return 8 + tagsize
}
func sizeFixed64Slice(ptr pointer, tagsize int) int {
	s := *ptr.toUint64Slice()
	return (8 + tagsize) * len(s)
}
func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toUint64Slice()
	if len(s) == 0 {
		return 0
	}
	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
}
func sizeFixedS64Value(_ pointer, tagsize int) int {
	return 8 + tagsize
}
func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toInt64()
	if v == 0 {
		return 0
	}
	return 8 + tagsize
}
func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
	p := *ptr.toInt64Ptr()
	if p == nil {
		return 0
	}
	return 8 + tagsize
}
func sizeFixedS64Slice(ptr pointer, tagsize int) int {
	s := *ptr.toInt64Slice()
	return (8 + tagsize) * len(s)
}
func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toInt64Slice()
	if len(s) == 0 {
		return 0
	}
	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
}
func sizeFloat64Value(_ pointer, tagsize int) int {
	return 8 + tagsize
}
func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
	// Bit-pattern comparison: negative zero is not treated as zero.
	v := math.Float64bits(*ptr.toFloat64())
	if v == 0 {
		return 0
	}
	return 8 + tagsize
}
func sizeFloat64Ptr(ptr pointer, tagsize int) int {
	p := *ptr.toFloat64Ptr()
	if p == nil {
		return 0
	}
	return 8 + tagsize
}
func sizeFloat64Slice(ptr pointer, tagsize int) int {
	s := *ptr.toFloat64Slice()
	return (8 + tagsize) * len(s)
}
func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toFloat64Slice()
	if len(s) == 0 {
		return 0
	}
	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
}

// Varint32/VarintS32: variable-length encoding of (u)int32 values.
func sizeVarint32Value(ptr pointer, tagsize int) int {
	v := *ptr.toUint32()
	return SizeVarint(uint64(v)) + tagsize
}
func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toUint32()
	if v == 0 {
		return 0
	}
	return SizeVarint(uint64(v)) + tagsize
}
func sizeVarint32Ptr(ptr pointer, tagsize int) int {
	p := *ptr.toUint32Ptr()
	if p == nil {
		return 0
	}
	return SizeVarint(uint64(*p)) + tagsize
}
func sizeVarint32Slice(ptr pointer, tagsize int) int {
	s := *ptr.toUint32Slice()
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v)) + tagsize
	}
	return n
}
func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toUint32Slice()
	if len(s) == 0 {
		return 0
	}
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v))
	}
	return n + SizeVarint(uint64(n)) + tagsize
}
func sizeVarintS32Value(ptr pointer, tagsize int) int {
	v := *ptr.toInt32()
	return SizeVarint(uint64(v)) + tagsize
}
func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toInt32()
	if v == 0 {
		return 0
	}
	return SizeVarint(uint64(v)) + tagsize
}
func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
	p := ptr.getInt32Ptr()
	if p == nil {
		return 0
	}
	return SizeVarint(uint64(*p)) + tagsize
}
func sizeVarintS32Slice(ptr pointer, tagsize int) int {
	s := ptr.getInt32Slice()
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v)) + tagsize
	}
	return n
}
func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
	s := ptr.getInt32Slice()
	if len(s) == 0 {
		return 0
	}
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v))
	}
	return n + SizeVarint(uint64(n)) + tagsize
}

// Varint64/VarintS64: variable-length encoding of (u)int64 values.
func sizeVarint64Value(ptr pointer, tagsize int) int {
	v := *ptr.toUint64()
	return SizeVarint(v) + tagsize
}
func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toUint64()
	if v == 0 {
		return 0
	}
	return SizeVarint(v) + tagsize
}
func sizeVarint64Ptr(ptr pointer, tagsize int) int {
	p := *ptr.toUint64Ptr()
	if p == nil {
		return 0
	}
	return SizeVarint(*p) + tagsize
}
func sizeVarint64Slice(ptr pointer, tagsize int) int {
	s := *ptr.toUint64Slice()
	n := 0
	for _, v := range s {
		n += SizeVarint(v) + tagsize
	}
	return n
}
func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toUint64Slice()
	if len(s) == 0 {
		return 0
	}
	n := 0
	for _, v := range s {
		n += SizeVarint(v)
	}
	return n + SizeVarint(uint64(n)) + tagsize
}
func sizeVarintS64Value(ptr pointer, tagsize int) int {
	v := *ptr.toInt64()
	return SizeVarint(uint64(v)) + tagsize
}
func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toInt64()
	if v == 0 {
		return 0
	}
	return SizeVarint(uint64(v)) + tagsize
}
func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
	p := *ptr.toInt64Ptr()
	if p == nil {
		return 0
	}
	return SizeVarint(uint64(*p)) + tagsize
}
func sizeVarintS64Slice(ptr pointer, tagsize int) int {
	s := *ptr.toInt64Slice()
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v)) + tagsize
	}
	return n
}
func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toInt64Slice()
	if len(s) == 0 {
		return 0
	}
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v))
	}
	return n + SizeVarint(uint64(n)) + tagsize
}

// Zigzag32/Zigzag64 (sint32/sint64): the value is zigzag-mapped
// ((v << 1) ^ (v >> 31|63)) before varint sizing so that small negative
// numbers stay small on the wire.
func sizeZigzag32Value(ptr pointer, tagsize int) int {
	v := *ptr.toInt32()
	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
}
func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toInt32()
	if v == 0 {
		return 0
	}
	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
}
func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
	p := ptr.getInt32Ptr()
	if p == nil {
		return 0
	}
	v := *p
	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
}
func sizeZigzag32Slice(ptr pointer, tagsize int) int {
	s := ptr.getInt32Slice()
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
	}
	return n
}
func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
	s := ptr.getInt32Slice()
	if len(s) == 0 {
		return 0
	}
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
	}
	return n + SizeVarint(uint64(n)) + tagsize
}
func sizeZigzag64Value(ptr pointer, tagsize int) int {
	v := *ptr.toInt64()
	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
}
func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toInt64()
	if v == 0 {
		return 0
	}
	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
}
func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
	p := *ptr.toInt64Ptr()
	if p == nil {
		return 0
	}
	v := *p
	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
}
func sizeZigzag64Slice(ptr pointer, tagsize int) int {
	s := *ptr.toInt64Slice()
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
	}
	return n
}
func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toInt64Slice()
	if len(s) == 0 {
		return 0
	}
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
	}
	return n + SizeVarint(uint64(n)) + tagsize
}

// Bool: always exactly one data byte.
func sizeBoolValue(_ pointer, tagsize int) int {
	return 1 + tagsize
}
func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toBool()
	if !v {
		return 0
	}
	return 1 + tagsize
}
func sizeBoolPtr(ptr pointer, tagsize int) int {
	p := *ptr.toBoolPtr()
	if p == nil {
		return 0
	}
	return 1 + tagsize
}
func sizeBoolSlice(ptr pointer, tagsize int) int {
	s := *ptr.toBoolSlice()
	return (1 + tagsize) * len(s)
}
func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
	s := *ptr.toBoolSlice()
	if len(s) == 0 {
		return 0
	}
	return len(s) + SizeVarint(uint64(len(s))) + tagsize
}

// String/Bytes: length-prefixed payloads. Note the nil-vs-empty
// distinction: sizeBytes (proto2) emits an empty non-nil slice, while
// sizeBytes3 (proto3) omits any zero-length slice.
func sizeStringValue(ptr pointer, tagsize int) int {
	v := *ptr.toString()
	return len(v) + SizeVarint(uint64(len(v))) + tagsize
}
func sizeStringValueNoZero(ptr pointer, tagsize int) int {
	v := *ptr.toString()
	if v == "" {
		return 0
	}
	return len(v) + SizeVarint(uint64(len(v))) + tagsize
}
func sizeStringPtr(ptr pointer, tagsize int) int {
	p := *ptr.toStringPtr()
	if p == nil {
		return 0
	}
	v := *p
	return len(v) + SizeVarint(uint64(len(v))) + tagsize
}
func sizeStringSlice(ptr pointer, tagsize int) int {
	s := *ptr.toStringSlice()
	n := 0
	for _, v := range s {
		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
	}
	return n
}
func sizeBytes(ptr pointer, tagsize int) int {
	v := *ptr.toBytes()
	if v == nil {
		return 0
	}
	return len(v) + SizeVarint(uint64(len(v))) + tagsize
}
func sizeBytes3(ptr pointer, tagsize int) int {
	v := *ptr.toBytes()
	if len(v) == 0 {
		return 0
	}
	return len(v) + SizeVarint(uint64(len(v))) + tagsize
}
func sizeBytesOneof(ptr pointer, tagsize int) int {
	v := *ptr.toBytes()
	return len(v) + SizeVarint(uint64(len(v))) + tagsize
}
func sizeBytesSlice(ptr pointer, tagsize int) int {
	s := *ptr.toBytesSlice()
	n := 0
	for _, v := range s {
		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
	}
	return n
}

// appendFixed32 appends an encoded fixed32 to b.
// Little-endian, 4 bytes.
func appendFixed32(b []byte, v uint32) []byte {
	b = append(b,
		byte(v),
		byte(v>>8),
		byte(v>>16),
		byte(v>>24))
	return b
}

// appendFixed64 appends an encoded fixed64 to b.
// Little-endian, 8 bytes.
func appendFixed64(b []byte, v uint64) []byte {
	b = append(b,
		byte(v),
		byte(v>>8),
		byte(v>>16),
		byte(v>>24),
		byte(v>>32),
		byte(v>>40),
		byte(v>>48),
		byte(v>>56))
	return b
}

// appendVarint appends an encoded varint to b.
// The switch is unrolled per output length (1-10 bytes) instead of a
// loop, so each case is a single append of known size.
func appendVarint(b []byte, v uint64) []byte {
	// TODO: make 1-byte (maybe 2-byte) case inline-able, once we
	// have non-leaf inliner.
	switch {
	case v < 1<<7:
		b = append(b, byte(v))
	case v < 1<<14:
		b = append(b,
			byte(v&0x7f|0x80),
			byte(v>>7))
	case v < 1<<21:
		b = append(b,
			byte(v&0x7f|0x80),
			byte((v>>7)&0x7f|0x80),
			byte(v>>14))
	case v < 1<<28:
		b = append(b,
			byte(v&0x7f|0x80),
			byte((v>>7)&0x7f|0x80),
			byte((v>>14)&0x7f|0x80),
			byte(v>>21))
	case v < 1<<35:
		b = append(b,
			byte(v&0x7f|0x80),
			byte((v>>7)&0x7f|0x80),
			byte((v>>14)&0x7f|0x80),
			byte((v>>21)&0x7f|0x80),
			byte(v>>28))
	case v < 1<<42:
		b = append(b,
			byte(v&0x7f|0x80),
			byte((v>>7)&0x7f|0x80),
			byte((v>>14)&0x7f|0x80),
			byte((v>>21)&0x7f|0x80),
			byte((v>>28)&0x7f|0x80),
			byte(v>>35))
	case v < 1<<49:
		b = append(b,
			byte(v&0x7f|0x80),
			byte((v>>7)&0x7f|0x80),
			byte((v>>14)&0x7f|0x80),
			byte((v>>21)&0x7f|0x80),
			byte((v>>28)&0x7f|0x80),
			byte((v>>35)&0x7f|0x80),
			byte(v>>42))
	case v < 1<<56:
		b = append(b,
			byte(v&0x7f|0x80),
			byte((v>>7)&0x7f|0x80),
			byte((v>>14)&0x7f|0x80),
			byte((v>>21)&0x7f|0x80),
			byte((v>>28)&0x7f|0x80),
			byte((v>>35)&0x7f|0x80),
			byte((v>>42)&0x7f|0x80),
			byte(v>>49))
	case v < 1<<63:
		b = append(b,
			byte(v&0x7f|0x80),
			byte((v>>7)&0x7f|0x80),
			byte((v>>14)&0x7f|0x80),
			byte((v>>21)&0x7f|0x80),
			byte((v>>28)&0x7f|0x80),
			byte((v>>35)&0x7f|0x80),
			byte((v>>42)&0x7f|0x80),
			byte((v>>49)&0x7f|0x80),
			byte(v>>56))
	default:
		b = append(b,
			byte(v&0x7f|0x80),
			byte((v>>7)&0x7f|0x80),
			byte((v>>14)&0x7f|0x80),
			byte((v>>21)&0x7f|0x80),
			byte((v>>28)&0x7f|0x80),
			byte((v>>35)&0x7f|0x80),
			byte((v>>42)&0x7f|0x80),
			byte((v>>49)&0x7f|0x80),
			byte((v>>56)&0x7f|0x80),
			1)
	}
	return b
}

// Encoders (marshaler-typed): append wiretag then payload to b.
// Packed variants rewrite the wire type of the tag to WireBytes
// (wiretag&^7|WireBytes) and prefix the payload with its byte length.

func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toUint32()
	b = appendVarint(b, wiretag)
	b = appendFixed32(b, v)
	return b, nil
}
func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toUint32()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed32(b, v)
	return b, nil
}
func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toUint32Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed32(b, *p)
	return b, nil
}
func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toUint32Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendFixed32(b, v)
	}
	return b, nil
}
func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toUint32Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	b = appendVarint(b, uint64(4*len(s)))
	for _, v := range s {
		b = appendFixed32(b, v)
	}
	return b, nil
}
func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt32()
	b = appendVarint(b, wiretag)
	b = appendFixed32(b, uint32(v))
	return b, nil
}
func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt32()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed32(b, uint32(v))
	return b, nil
}
func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := ptr.getInt32Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed32(b, uint32(*p))
	return b, nil
}
func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := ptr.getInt32Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendFixed32(b, uint32(v))
	}
	return b, nil
}
func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := ptr.getInt32Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	b = appendVarint(b, uint64(4*len(s)))
	for _, v := range s {
		b = appendFixed32(b, uint32(v))
	}
	return b, nil
}
func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := math.Float32bits(*ptr.toFloat32())
	b = appendVarint(b, wiretag)
	b = appendFixed32(b, v)
	return b, nil
}
func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := math.Float32bits(*ptr.toFloat32())
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed32(b, v)
	return b, nil
}
func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toFloat32Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed32(b, math.Float32bits(*p))
	return b, nil
}
func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toFloat32Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendFixed32(b, math.Float32bits(v))
	}
	return b, nil
}
func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toFloat32Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	b = appendVarint(b, uint64(4*len(s)))
	for _, v := range s {
		b = appendFixed32(b, math.Float32bits(v))
	}
	return b, nil
}
func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toUint64()
	b = appendVarint(b, wiretag)
	b = appendFixed64(b, v)
	return b, nil
}
func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toUint64()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed64(b, v)
	return b, nil
}
func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toUint64Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed64(b, *p)
	return b, nil
}
func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toUint64Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendFixed64(b, v)
	}
	return b, nil
}
func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toUint64Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	b = appendVarint(b, uint64(8*len(s)))
	for _, v := range s {
		b = appendFixed64(b, v)
	}
	return b, nil
}
func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt64()
	b = appendVarint(b, wiretag)
	b = appendFixed64(b, uint64(v))
	return b, nil
}
func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt64()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed64(b, uint64(v))
	return b, nil
}
func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toInt64Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed64(b, uint64(*p))
	return b, nil
}
func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toInt64Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendFixed64(b, uint64(v))
	}
	return b, nil
}
func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toInt64Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	b = appendVarint(b, uint64(8*len(s)))
	for _, v := range s {
		b = appendFixed64(b, uint64(v))
	}
	return b, nil
}
func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := math.Float64bits(*ptr.toFloat64())
	b = appendVarint(b, wiretag)
	b = appendFixed64(b, v)
	return b, nil
}
func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := math.Float64bits(*ptr.toFloat64())
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed64(b, v)
	return b, nil
}
func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toFloat64Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendFixed64(b, math.Float64bits(*p))
	return b, nil
}
func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toFloat64Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendFixed64(b, math.Float64bits(v))
	}
	return b, nil
}
func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toFloat64Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	b = appendVarint(b, uint64(8*len(s)))
	for _, v := range s {
		b = appendFixed64(b, math.Float64bits(v))
	}
	return b, nil
}
func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toUint32()
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(v))
	return b, nil
}
func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toUint32()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(v))
	return b, nil
}
func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toUint32Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(*p))
	return b, nil
}
func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toUint32Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendVarint(b, uint64(v))
	}
	return b, nil
}
func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toUint32Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	// compute size
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v))
	}
	b = appendVarint(b, uint64(n))
	for _, v := range s {
		b = appendVarint(b, uint64(v))
	}
	return b, nil
}
func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt32()
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(v))
	return b, nil
}
func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt32()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(v))
	return b, nil
}
func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := ptr.getInt32Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(*p))
	return b, nil
}
func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := ptr.getInt32Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendVarint(b, uint64(v))
	}
	return b, nil
}
func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := ptr.getInt32Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	// compute size
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v))
	}
	b = appendVarint(b, uint64(n))
	for _, v := range s {
		b = appendVarint(b, uint64(v))
	}
	return b, nil
}
func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toUint64()
	b = appendVarint(b, wiretag)
	b = appendVarint(b, v)
	return b, nil
}
func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toUint64()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, v)
	return b, nil
}
func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toUint64Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, *p)
	return b, nil
}
func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toUint64Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendVarint(b, v)
	}
	return b, nil
}
func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toUint64Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	// compute size
	n := 0
	for _, v := range s {
		n += SizeVarint(v)
	}
	b = appendVarint(b, uint64(n))
	for _, v := range s {
		b = appendVarint(b, v)
	}
	return b, nil
}
func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt64()
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(v))
	return b, nil
}
func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt64()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(v))
	return b, nil
}
func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toInt64Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(*p))
	return b, nil
}
func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toInt64Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendVarint(b, uint64(v))
	}
	return b, nil
}
func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toInt64Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	// compute size
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v))
	}
	b = appendVarint(b, uint64(n))
	for _, v := range s {
		b = appendVarint(b, uint64(v))
	}
	return b, nil
}
func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt32()
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
	return b, nil
}
func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt32()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
	return b, nil
}
func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := ptr.getInt32Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	v := *p
	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
	return b, nil
}
func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := ptr.getInt32Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
	}
	return b, nil
}
func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := ptr.getInt32Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	// compute size
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
	}
	b = appendVarint(b, uint64(n))
	for _, v := range s {
		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
	}
	return b, nil
}
func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt64()
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
	return b, nil
}
func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toInt64()
	if v == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
	return b, nil
}
func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toInt64Ptr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	v := *p
	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
	return b, nil
}
func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toInt64Slice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
	}
	return b, nil
}
func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toInt64Slice()
	if len(s) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag&^7|WireBytes)
	// compute size
	n := 0
	for _, v := range s {
		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
	}
	b = appendVarint(b, uint64(n))
	for _, v := range s {
		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
	}
	return b, nil
}
func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toBool()
	b = appendVarint(b, wiretag)
	if v {
		b = append(b, 1)
	} else {
		b = append(b, 0)
	}
	return b, nil
}
func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toBool()
	if !v {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = append(b, 1)
	return b, nil
}

func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toBoolPtr()
	if p == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	if *p {
		b = append(b, 1)
	} else {
		b = append(b, 0)
	}
	return b, nil
}
func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toBoolSlice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		if v {
			b = append(b, 1)
		} else {
			b = append(b, 0)
		}
	}
	return b, nil
}
func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toBoolSlice()
	if len(s) == 0 {
		return b, nil
	}
	// Packed: rewrite the tag's wire type to WireBytes and prefix the
	// payload with its length (one byte per bool).
	b = appendVarint(b, wiretag&^7|WireBytes)
	b = appendVarint(b, uint64(len(s)))
	for _, v := range s {
		if v {
			b = append(b, 1)
		} else {
			b = append(b, 0)
		}
	}
	return b, nil
}
// String encoders validate UTF-8 before emitting; invalid strings abort
// the marshal with errInvalidUTF8 (bytes fields have no such check).
func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toString()
	if !utf8.ValidString(v) {
		return nil, errInvalidUTF8
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(len(v)))
	b = append(b, v...)
	return b, nil
}
func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toString()
	if v == "" {
		return b, nil
	}
	if !utf8.ValidString(v) {
		return nil, errInvalidUTF8
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(len(v)))
	b = append(b, v...)
	return b, nil
}
func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	p := *ptr.toStringPtr()
	if p == nil {
		return b, nil
	}
	v := *p
	if !utf8.ValidString(v) {
		return nil, errInvalidUTF8
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(len(v)))
	b = append(b, v...)
	return b, nil
}
func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toStringSlice()
	for _, v := range s {
		if !utf8.ValidString(v) {
			return nil, errInvalidUTF8
		}
		b = appendVarint(b, wiretag)
		b = appendVarint(b, uint64(len(v)))
		b = append(b, v...)
	}
	return b, nil
}
// appendBytes (proto2) skips only nil; appendBytes3 (proto3) skips any
// empty slice; appendBytesOneof always emits (presence is the oneof's).
func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toBytes()
	if v == nil {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(len(v)))
	b = append(b, v...)
	return b, nil
}
func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toBytes()
	if len(v) == 0 {
		return b, nil
	}
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(len(v)))
	b = append(b, v...)
	return b, nil
}
func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	v := *ptr.toBytes()
	b = appendVarint(b, wiretag)
	b = appendVarint(b, uint64(len(v)))
	b = append(b, v...)
	return b, nil
}
func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
	s := *ptr.toBytesSlice()
	for _, v := range s {
		b = appendVarint(b, wiretag)
		b = appendVarint(b, uint64(len(v)))
		b = append(b, v...)
	}
	return b, nil
}

// makeGroupMarshaler returns the sizer and marshaler for a group.
// u is the marshal info of the underlying message.
// Groups are delimited by start/end tags rather than a length prefix,
// hence 2*tagsize in the sizer.
func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
	return func(ptr pointer, tagsize int) int {
			p := ptr.getPointer()
			if p.isNil() {
				return 0
			}
			return u.size(p) + 2*tagsize
		},
		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
			p := ptr.getPointer()
			if p.isNil() {
				return b, nil
			}
			var err error
			b = appendVarint(b, wiretag) // start group
			b, err = u.marshal(b, p, deterministic)
			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
			return b, err
		}
}

// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
// u is the marshal info of the underlying message.
func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
	return func(ptr pointer, tagsize int) int {
			s := ptr.getPointerSlice()
			n := 0
			for _, v := range s {
				if v.isNil() {
					continue
				}
				n += u.size(v) + 2*tagsize
			}
			return n
		},
		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
			s := ptr.getPointerSlice()
			var err, errreq error
			for _, v := range s {
				if v.isNil() {
					return b, errRepeatedHasNil
				}
				b = appendVarint(b, wiretag) // start group
				b, err = u.marshal(b, v, deterministic)
				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
				if err != nil {
					if _, ok := err.(*RequiredNotSetError); ok {
						// Required field in submessage is not set.
						// We record the error but keep going, to give a complete marshaling.
						if errreq == nil {
							errreq = err
						}
						continue
					}
					if err == ErrNil {
						err = errRepeatedHasNil
					}
					return b, err
				}
			}
			// errreq holds the first RequiredNotSetError seen, if any.
			return b, errreq
		}
}

// makeMessageMarshaler returns the sizer and marshaler for a message field.
// u is the marshal info of the message.
// Messages are length-prefixed; the marshaler reuses the size computed
// during the sizing pass (u.cachedsize) to write the prefix.
func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
	return func(ptr pointer, tagsize int) int {
			p := ptr.getPointer()
			if p.isNil() {
				return 0
			}
			siz := u.size(p)
			return siz + SizeVarint(uint64(siz)) + tagsize
		},
		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
			p := ptr.getPointer()
			if p.isNil() {
				return b, nil
			}
			b = appendVarint(b, wiretag)
			siz := u.cachedsize(p)
			b = appendVarint(b, uint64(siz))
			return u.marshal(b, p, deterministic)
		}
}

// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. 
+ valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. 
+ t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. 
+ if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. 
+ keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. 
+ keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. 
+ // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) 
+} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 00000000..5525def6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. 
+ } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. 
+ if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
+ /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? 
+ mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 00000000..96764347 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,1981 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. 
+ u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. + err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + rnse = r + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + err = fmt.Errorf("proto: string field %q contains invalid UTF-8", fullName) + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. 
+ if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + panic("no extensions field available") + } + } + + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if rnse != nil { + // A required field of a submessage/group is missing. Return that error. + return rnse + } + if reqMask != u.reqMask { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + return &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return nil +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. 
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). 
+ unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } + } + + // Get extension ranges, if any. 
+ fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + // We'll never have both pointer and slice for basic types. 
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr 
+ } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n 
:= decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, 
error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + 
return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | 
uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, 
errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, 
io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func 
unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + 
return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + 
if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = 
append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. 
+// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. 
+// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 00000000..2205fdaa --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,843 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. 
+type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + 
} + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return 
true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. 
+ if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. 
+ if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. 
+func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = 
w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 00000000..0685bae3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,880 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) 
bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. 
+ simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid 
Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
	i, ok := sprops.decoderOrigNames[name]
	if ok {
		return i, sprops.Prop[i], true
	}
	return -1, nil, false
}

// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value != ":" {
		// Colon is optional when the field is a group or message.
		needColon := true
		switch props.Wire {
		case "group":
			needColon = false
		case "bytes":
			// A "bytes" field is either a message, a string, or a repeated field;
			// those three become *T, *string and []T respectively, so we can check for
			// this field being a pointer to a non-string.
			if typ.Kind() == reflect.Ptr {
				// *T or *string
				if typ.Elem().Kind() == reflect.String {
					break
				}
			} else if typ.Kind() == reflect.Slice {
				// []T or []*T
				if typ.Elem().Kind() != reflect.Ptr {
					break
				}
			} else if typ.Kind() == reflect.String {
				// The proto3 exception is for a string field,
				// which requires a colon.
				break
			}
			needColon = false
		}
		if needColon {
			return p.errorf("expected ':', found %q", tok.value)
		}
		// Not a colon and none required: push the token back for the caller.
		p.back()
	}
	return nil
}

// readStruct parses a sequence of "name: value" pairs into the message
// struct sv until the given terminator token ('>' or '}', or "" meaning
// end of input at top level) is read. It handles extensions ("[ext.name]"),
// expanded Any messages ("[type/url] {...}"), oneof fields, and map fields.
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
	st := sv.Type()
	sprops := GetProperties(st)
	reqCount := sprops.reqCount
	var reqFieldErr error
	fieldSet := make(map[string]bool)
	// A struct is a sequence of "name: value", terminated by one of
	// '>' or '}', or the end of the input. A name may also be
	// "[extension]" or "[type/url]".
	//
	// The whole struct can also be an expanded Any message, like:
	// [type/url] < ... struct contents ... >
	for {
		tok := p.next()
		if tok.err != nil {
			return tok.err
		}
		if tok.value == terminator {
			break
		}
		if tok.value == "[" {
			// Looks like an extension or an Any.
			//
			// TODO: Check whether we need to handle
			// namespace rooted names (e.g. ".something.Foo").
			extName, err := p.consumeExtName()
			if err != nil {
				return err
			}

			if s := strings.LastIndex(extName, "/"); s >= 0 {
				// If it contains a slash, it's an Any type URL.
				messageName := extName[s+1:]
				mt := MessageType(messageName)
				if mt == nil {
					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
				}
				tok = p.next()
				if tok.err != nil {
					return tok.err
				}
				// consume an optional colon
				if tok.value == ":" {
					tok = p.next()
					if tok.err != nil {
						return tok.err
					}
				}
				var terminator string
				switch tok.value {
				case "<":
					terminator = ">"
				case "{":
					terminator = "}"
				default:
					return p.errorf("expected '{' or '<', found %q", tok.value)
				}
				// Parse the embedded message, then re-serialize it into the
				// Any's Value bytes field.
				v := reflect.New(mt.Elem())
				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
					return pe
				}
				b, err := Marshal(v.Interface().(Message))
				if err != nil {
					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
				}
				if fieldSet["type_url"] {
					return p.errorf(anyRepeatedlyUnpacked, "type_url")
				}
				if fieldSet["value"] {
					return p.errorf(anyRepeatedlyUnpacked, "value")
				}
				sv.FieldByName("TypeUrl").SetString(extName)
				sv.FieldByName("Value").SetBytes(b)
				fieldSet["type_url"] = true
				fieldSet["value"] = true
				continue
			}

			var desc *ExtensionDesc
			// This could be faster, but it's functional.
			// TODO: Do something smarter than a linear scan.
			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
				if d.Name == extName {
					desc = d
					break
				}
			}
			if desc == nil {
				return p.errorf("unrecognized extension %q", extName)
			}

			props := &Properties{}
			props.Parse(desc.Tag)

			typ := reflect.TypeOf(desc.ExtensionType)
			if err := p.checkForColon(props, typ); err != nil {
				return err
			}

			rep := desc.repeated()

			// Read the extension structure, and set it in
			// the value we're constructing.
			var ext reflect.Value
			if !rep {
				ext = reflect.New(typ).Elem()
			} else {
				ext = reflect.New(typ.Elem()).Elem()
			}
			if err := p.readAny(ext, props); err != nil {
				if _, ok := err.(*RequiredNotSetError); !ok {
					return err
				}
				// RequiredNotSetError is remembered but does not abort the parse.
				reqFieldErr = err
			}
			ep := sv.Addr().Interface().(Message)
			if !rep {
				SetExtension(ep, desc, ext.Interface())
			} else {
				old, err := GetExtension(ep, desc)
				var sl reflect.Value
				if err == nil {
					sl = reflect.ValueOf(old) // existing slice
				} else {
					sl = reflect.MakeSlice(typ, 0, 1)
				}
				sl = reflect.Append(sl, ext)
				SetExtension(ep, desc, sl.Interface())
			}
			if err := p.consumeOptionalSeparator(); err != nil {
				return err
			}
			continue
		}

		// This is a normal, non-extension field.
		name := tok.value
		var dst reflect.Value
		fi, props, ok := structFieldByName(sprops, name)
		if ok {
			dst = sv.Field(fi)
		} else if oop, ok := sprops.OneofTypes[name]; ok {
			// It is a oneof.
			props = oop.Prop
			nv := reflect.New(oop.Type.Elem())
			dst = nv.Elem().Field(0)
			field := sv.Field(oop.Field)
			if !field.IsNil() {
				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
			}
			field.Set(nv)
		}
		if !dst.IsValid() {
			return p.errorf("unknown field name %q in %v", name, st)
		}

		if dst.Kind() == reflect.Map {
			// Consume any colon.
			if err := p.checkForColon(props, dst.Type()); err != nil {
				return err
			}

			// Construct the map if it doesn't already exist.
			if dst.IsNil() {
				dst.Set(reflect.MakeMap(dst.Type()))
			}
			key := reflect.New(dst.Type().Key()).Elem()
			val := reflect.New(dst.Type().Elem()).Elem()

			// The map entry should be this sequence of tokens:
			//	< key : KEY value : VALUE >
			// However, implementations may omit key or value, and technically
			// we should support them in any order. See b/28924776 for a time
			// this went wrong.

			tok := p.next()
			var terminator string
			switch tok.value {
			case "<":
				terminator = ">"
			case "{":
				terminator = "}"
			default:
				return p.errorf("expected '{' or '<', found %q", tok.value)
			}
			for {
				tok := p.next()
				if tok.err != nil {
					return tok.err
				}
				if tok.value == terminator {
					break
				}
				switch tok.value {
				case "key":
					if err := p.consumeToken(":"); err != nil {
						return err
					}
					if err := p.readAny(key, props.mkeyprop); err != nil {
						return err
					}
					if err := p.consumeOptionalSeparator(); err != nil {
						return err
					}
				case "value":
					if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
						return err
					}
					if err := p.readAny(val, props.mvalprop); err != nil {
						return err
					}
					if err := p.consumeOptionalSeparator(); err != nil {
						return err
					}
				default:
					p.back()
					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
				}
			}
			// Omitted key or value fields keep their zero values.
			dst.SetMapIndex(key, val)
			continue
		}

		// Check that it's not already set if it's not a repeated field.
		if !props.Repeated && fieldSet[name] {
			return p.errorf("non-repeated field %q was repeated", name)
		}

		if err := p.checkForColon(props, dst.Type()); err != nil {
			return err
		}

		// Parse into the field.
		fieldSet[name] = true
		if err := p.readAny(dst, props); err != nil {
			if _, ok := err.(*RequiredNotSetError); !ok {
				return err
			}
			reqFieldErr = err
		}
		if props.Required {
			reqCount--
		}

		if err := p.consumeOptionalSeparator(); err != nil {
			return err
		}

	}

	if reqCount > 0 {
		return p.missingRequiredFieldError(sv)
	}
	return reqFieldErr
}

// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
	tok := p.next()
	if tok.err != nil {
		return "", tok.err
	}

	// If extension name or type url is quoted, it's a single token.
	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
		if err != nil {
			return "", err
		}
		return name, p.consumeToken("]")
	}

	// Consume everything up to "]"
	var parts []string
	for tok.value != "]" {
		parts = append(parts, tok.value)
		tok = p.next()
		if tok.err != nil {
			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
		}
		if p.done && tok.value != "]" {
			return "", p.errorf("unclosed type_url or extension name")
		}
	}
	return strings.Join(parts, ""), nil
}

// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value != ";" && tok.value != "," {
		// No separator present; push the token back for the caller.
		p.back()
	}
	return nil
}

// readAny parses one field value of any supported Go kind into v, guided by
// the field's parsed tag properties (wire type, enum name, map sub-props).
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value == "" {
		return p.errorf("unexpected EOF")
	}

	switch fv := v; fv.Kind() {
	case reflect.Slice:
		at := v.Type()
		if at.Elem().Kind() == reflect.Uint8 {
			// Special case for []byte
			if tok.value[0] != '"' && tok.value[0] != '\'' {
				// Deliberately written out here, as the error after
				// this switch statement would write "invalid []byte: ...",
				// which is not as user-friendly.
				return p.errorf("invalid string: %v", tok.value)
			}
			bytes := []byte(tok.unquoted)
			fv.Set(reflect.ValueOf(bytes))
			return nil
		}
		// Repeated field.
		if tok.value == "[" {
			// Repeated field with list notation, like [1,2,3].
			for {
				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
				err := p.readAny(fv.Index(fv.Len()-1), props)
				if err != nil {
					return err
				}
				tok := p.next()
				if tok.err != nil {
					return tok.err
				}
				if tok.value == "]" {
					break
				}
				if tok.value != "," {
					return p.errorf("Expected ']' or ',' found %q", tok.value)
				}
			}
			return nil
		}
		// One value of the repeated field.
		p.back()
		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
		return p.readAny(fv.Index(fv.Len()-1), props)
	case reflect.Bool:
		// true/1/t/True or false/f/0/False.
		switch tok.value {
		case "true", "1", "t", "True":
			fv.SetBool(true)
			return nil
		case "false", "0", "f", "False":
			fv.SetBool(false)
			return nil
		}
	case reflect.Float32, reflect.Float64:
		v := tok.value
		// Ignore 'f' for compatibility with output generated by C++, but don't
		// remove 'f' when the value is "-inf" or "inf".
		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
			v = v[:len(v)-1]
		}
		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
			fv.SetFloat(f)
			return nil
		}
	case reflect.Int32:
		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
			fv.SetInt(x)
			return nil
		}

		// Not a number: try resolving the token as a named enum value.
		if len(props.Enum) == 0 {
			break
		}
		m, ok := enumValueMaps[props.Enum]
		if !ok {
			break
		}
		x, ok := m[tok.value]
		if !ok {
			break
		}
		fv.SetInt(int64(x))
		return nil
	case reflect.Int64:
		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
			fv.SetInt(x)
			return nil
		}

	case reflect.Ptr:
		// A basic field (indirected through pointer), or a repeated message/group
		p.back()
		fv.Set(reflect.New(fv.Type().Elem()))
		return p.readAny(fv.Elem(), props)
	case reflect.String:
		if tok.value[0] == '"' || tok.value[0] == '\'' {
			fv.SetString(tok.unquoted)
			return nil
		}
	case reflect.Struct:
		var terminator string
		switch tok.value {
		case "{":
			terminator = "}"
		case "<":
			terminator = ">"
		default:
			return p.errorf("expected '{' or '<', found %q", tok.value)
		}
		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
		return p.readStruct(fv, terminator)
	case reflect.Uint32:
		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
			fv.SetUint(uint64(x))
			return nil
		}
	case reflect.Uint64:
		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
			fv.SetUint(x)
			return nil
		}
	}
	// Fell through every case: the token did not match the field's kind.
	return p.errorf("invalid %v: %v", v.Type(), tok.value)
}

// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
	// A message may implement its own text unmarshaling; defer to it.
	if um, ok := pb.(encoding.TextUnmarshaler); ok {
		return um.UnmarshalText([]byte(s))
	}
	pb.Reset()
	v := reflect.ValueOf(pb)
	return newTextParser(s).readStruct(v.Elem(), "")
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 00000000..70276e8f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,141 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package ptypes

// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
)

// googleApis is the standard type-URL prefix used by MarshalAny.
const googleApis = "type.googleapis.com/"

// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
	if any == nil {
		return "", fmt.Errorf("message is nil")
	}
	// The message name is everything after the last '/' of the type URL.
	slash := strings.LastIndex(any.TypeUrl, "/")
	if slash < 0 {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return any.TypeUrl[slash+1:], nil
}

// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
	value, err := proto.Marshal(pb)
	if err != nil {
		return nil, err
	}
	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}

// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
//   var x ptypes.DynamicAny
//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
//   fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
	proto.Message
}

// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
	aname, err := AnyMessageName(any)
	if err != nil {
		return nil, err
	}

	// Look up the Go type registered under this message name.
	t := proto.MessageType(aname)
	if t == nil {
		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
	}
	return reflect.New(t.Elem()).Interface().(proto.Message), nil
}

// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
	if d, ok := pb.(*DynamicAny); ok {
		// Allocate the concrete message type on first use, then recurse
		// with the allocated message as the target.
		if d.Message == nil {
			var err error
			d.Message, err = Empty(any)
			if err != nil {
				return err
			}
		}
		return UnmarshalAny(any, d.Message)
	}

	aname, err := AnyMessageName(any)
	if err != nil {
		return err
	}

	mname := proto.MessageName(pb)
	if aname != mname {
		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
	}
	return proto.Unmarshal(any.Value, pb)
}

// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
	// but it avoids scanning TypeUrl for the slash.
	if any == nil {
		return false
	}
	name := proto.MessageName(pb)
	prefix := len(any.TypeUrl) - len(name)
	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 00000000..f67edc7d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,191 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto

package any // import "github.com/golang/protobuf/ptypes/any"

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
//     Foo foo = ...;
//     Any any;
//     any.PackFrom(foo);
//     ...
//     if (any.UnpackTo(&foo)) {
//       ...
//     }
//
// Example 2: Pack and unpack a message in Java.
//
//     Foo foo = ...;
//     Any any = Any.pack(foo);
//     ...
//     if (any.is(Foo.class)) {
//       foo = any.unpack(Foo.class);
//     }
//
//  Example 3: Pack and unpack a message in Python.
//
//     foo = Foo(...)
//     any = Any()
//     any.Pack(foo)
//     ...
//     if any.Is(Foo.DESCRIPTOR):
//       any.Unpack(foo)
//       ...
+// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. 
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } + +var 
fileDescriptor_any_744b9ca530f228db = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 00000000..c7486676 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,149 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... 
+// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. 
+ // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 00000000..c0d595da --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 00000000..65cb0f8e --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. 
+ maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. 
+func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000..4d75473b --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package duration // import "github.com/golang/protobuf/ptypes/duration" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. 
Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + 
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} + +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 00000000..975fce41 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. 
+// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 00000000..47f10dbc --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,134 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. 
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. 
+ var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *tspb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000..e9c22228 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,175 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. 
+// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", 
fileDescriptor_timestamp_b826e8e5fba671a8) +} + +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 00000000..06750ab1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,133 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) +// to obtain a formatter capable of generating timestamps in this format. 
+// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/kr/pretty/License b/vendor/github.com/kr/pretty/License new file mode 100644 index 00000000..05c783cc --- /dev/null +++ b/vendor/github.com/kr/pretty/License @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/kr/pretty/Readme b/vendor/github.com/kr/pretty/Readme new file mode 100644 index 00000000..c589fc62 --- /dev/null +++ b/vendor/github.com/kr/pretty/Readme @@ -0,0 +1,9 @@ +package pretty + + import "github.com/kr/pretty" + + Package pretty provides pretty-printing for Go values. + +Documentation + + http://godoc.org/github.com/kr/pretty diff --git a/vendor/github.com/kr/pretty/diff.go b/vendor/github.com/kr/pretty/diff.go new file mode 100644 index 00000000..6aa7f743 --- /dev/null +++ b/vendor/github.com/kr/pretty/diff.go @@ -0,0 +1,265 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" +) + +type sbuf []string + +func (p *sbuf) Printf(format string, a ...interface{}) { + s := fmt.Sprintf(format, a...) + *p = append(*p, s) +} + +// Diff returns a slice where each element describes +// a difference between a and b. +func Diff(a, b interface{}) (desc []string) { + Pdiff((*sbuf)(&desc), a, b) + return desc +} + +// wprintfer calls Fprintf on w for each Printf call +// with a trailing newline. +type wprintfer struct{ w io.Writer } + +func (p *wprintfer) Printf(format string, a ...interface{}) { + fmt.Fprintf(p.w, format+"\n", a...) +} + +// Fdiff writes to w a description of the differences between a and b. +func Fdiff(w io.Writer, a, b interface{}) { + Pdiff(&wprintfer{w}, a, b) +} + +type Printfer interface { + Printf(format string, a ...interface{}) +} + +// Pdiff prints to p a description of the differences between a and b. +// It calls Printf once for each difference, with no trailing newline. +// The standard library log.Logger is a Printfer. +func Pdiff(p Printfer, a, b interface{}) { + diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b)) +} + +type Logfer interface { + Logf(format string, a ...interface{}) +} + +// logprintfer calls Fprintf on w for each Printf call +// with a trailing newline. 
+type logprintfer struct{ l Logfer } + +func (p *logprintfer) Printf(format string, a ...interface{}) { + p.l.Logf(format, a...) +} + +// Ldiff prints to l a description of the differences between a and b. +// It calls Logf once for each difference, with no trailing newline. +// The standard library testing.T and testing.B are Logfers. +func Ldiff(l Logfer, a, b interface{}) { + Pdiff(&logprintfer{l}, a, b) +} + +type diffPrinter struct { + w Printfer + l string // label +} + +func (w diffPrinter) printf(f string, a ...interface{}) { + var l string + if w.l != "" { + l = w.l + ": " + } + w.w.Printf(l+f, a...) +} + +func (w diffPrinter) diff(av, bv reflect.Value) { + if !av.IsValid() && bv.IsValid() { + w.printf("nil != %# v", formatter{v: bv, quote: true}) + return + } + if av.IsValid() && !bv.IsValid() { + w.printf("%# v != nil", formatter{v: av, quote: true}) + return + } + if !av.IsValid() && !bv.IsValid() { + return + } + + at := av.Type() + bt := bv.Type() + if at != bt { + w.printf("%v != %v", at, bt) + return + } + + switch kind := at.Kind(); kind { + case reflect.Bool: + if a, b := av.Bool(), bv.Bool(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if a, b := av.Int(), bv.Int(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + if a, b := av.Uint(), bv.Uint(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Float32, reflect.Float64: + if a, b := av.Float(), bv.Float(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Complex64, reflect.Complex128: + if a, b := av.Complex(), bv.Complex(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Array: + n := av.Len() + for i := 0; i < n; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + if a, b := av.Pointer(), bv.Pointer(); a != b { + 
w.printf("%#x != %#x", a, b) + } + case reflect.Interface: + w.diff(av.Elem(), bv.Elem()) + case reflect.Map: + ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys()) + for _, k := range ak { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("%q != (missing)", av.MapIndex(k)) + } + for _, k := range both { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.diff(av.MapIndex(k), bv.MapIndex(k)) + } + for _, k := range bk { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("(missing) != %q", bv.MapIndex(k)) + } + case reflect.Ptr: + switch { + case av.IsNil() && !bv.IsNil(): + w.printf("nil != %# v", formatter{v: bv, quote: true}) + case !av.IsNil() && bv.IsNil(): + w.printf("%# v != nil", formatter{v: av, quote: true}) + case !av.IsNil() && !bv.IsNil(): + w.diff(av.Elem(), bv.Elem()) + } + case reflect.Slice: + lenA := av.Len() + lenB := bv.Len() + if lenA != lenB { + w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB) + break + } + for i := 0; i < lenA; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.String: + if a, b := av.String(), bv.String(); a != b { + w.printf("%q != %q", a, b) + } + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i)) + } + default: + panic("unknown reflect Kind: " + kind.String()) + } +} + +func (d diffPrinter) relabel(name string) (d1 diffPrinter) { + d1 = d + if d.l != "" && name[0] != '[' { + d1.l += "." + } + d1.l += name + return d1 +} + +// keyEqual compares a and b for equality. +// Both a and b must be valid map keys. 
+func keyEqual(av, bv reflect.Value) bool { + if !av.IsValid() && !bv.IsValid() { + return true + } + if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() { + return false + } + switch kind := av.Kind(); kind { + case reflect.Bool: + a, b := av.Bool(), bv.Bool() + return a == b + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + a, b := av.Int(), bv.Int() + return a == b + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + a, b := av.Uint(), bv.Uint() + return a == b + case reflect.Float32, reflect.Float64: + a, b := av.Float(), bv.Float() + return a == b + case reflect.Complex64, reflect.Complex128: + a, b := av.Complex(), bv.Complex() + return a == b + case reflect.Array: + for i := 0; i < av.Len(); i++ { + if !keyEqual(av.Index(i), bv.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.UnsafePointer, reflect.Ptr: + a, b := av.Pointer(), bv.Pointer() + return a == b + case reflect.Interface: + return keyEqual(av.Elem(), bv.Elem()) + case reflect.String: + a, b := av.String(), bv.String() + return a == b + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + if !keyEqual(av.Field(i), bv.Field(i)) { + return false + } + } + return true + default: + panic("invalid map key type " + av.Type().String()) + } +} + +func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) { + for _, av := range a { + inBoth := false + for _, bv := range b { + if keyEqual(av, bv) { + inBoth = true + both = append(both, av) + break + } + } + if !inBoth { + ak = append(ak, av) + } + } + for _, bv := range b { + inBoth := false + for _, av := range a { + if keyEqual(av, bv) { + inBoth = true + break + } + } + if !inBoth { + bk = append(bk, bv) + } + } + return +} diff --git a/vendor/github.com/kr/pretty/formatter.go b/vendor/github.com/kr/pretty/formatter.go new file mode 100644 index 00000000..a317d7b8 --- /dev/null +++ b/vendor/github.com/kr/pretty/formatter.go 
@@ -0,0 +1,328 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" + "strconv" + "text/tabwriter" + + "github.com/kr/text" +) + +type formatter struct { + v reflect.Value + force bool + quote bool +} + +// Formatter makes a wrapper, f, that will format x as go source with line +// breaks and tabs. Object f responds to the "%v" formatting verb when both the +// "#" and " " (space) flags are set, for example: +// +// fmt.Sprintf("%# v", Formatter(x)) +// +// If one of these two flags is not set, or any other verb is used, f will +// format x according to the usual rules of package fmt. +// In particular, if x satisfies fmt.Formatter, then x.Format will be called. +func Formatter(x interface{}) (f fmt.Formatter) { + return formatter{v: reflect.ValueOf(x), quote: true} +} + +func (fo formatter) String() string { + return fmt.Sprint(fo.v.Interface()) // unwrap it +} + +func (fo formatter) passThrough(f fmt.State, c rune) { + s := "%" + for i := 0; i < 128; i++ { + if f.Flag(i) { + s += string(i) + } + } + if w, ok := f.Width(); ok { + s += fmt.Sprintf("%d", w) + } + if p, ok := f.Precision(); ok { + s += fmt.Sprintf(".%d", p) + } + s += string(c) + fmt.Fprintf(f, s, fo.v.Interface()) +} + +func (fo formatter) Format(f fmt.State, c rune) { + if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') { + w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0) + p := &printer{tw: w, Writer: w, visited: make(map[visit]int)} + p.printValue(fo.v, true, fo.quote) + w.Flush() + return + } + fo.passThrough(f, c) +} + +type printer struct { + io.Writer + tw *tabwriter.Writer + visited map[visit]int + depth int +} + +func (p *printer) indent() *printer { + q := *p + q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0) + q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'}) + return &q +} + +func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) { + if showType { + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, "(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } 
+} + +// printValue must keep track of already-printed pointer values to avoid +// infinite recursion. +type visit struct { + v uintptr + typ reflect.Type +} + +func (p *printer) printValue(v reflect.Value, showType, quote bool) { + if p.depth > 10 { + io.WriteString(p, "!%v(DEPTH EXCEEDED)") + return + } + + switch v.Kind() { + case reflect.Bool: + p.printInline(v, v.Bool(), showType) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.printInline(v, v.Int(), showType) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.printInline(v, v.Uint(), showType) + case reflect.Float32, reflect.Float64: + p.printInline(v, v.Float(), showType) + case reflect.Complex64, reflect.Complex128: + fmt.Fprintf(p, "%#v", v.Complex()) + case reflect.String: + p.fmtString(v.String(), quote) + case reflect.Map: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + keys := v.MapKeys() + for i := 0; i < v.Len(); i++ { + showTypeInStruct := true + k := keys[i] + mv := v.MapIndex(k) + pp.printValue(k, false, true) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') + } + showTypeInStruct = t.Elem().Kind() == reflect.Interface + pp.printValue(mv, showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Struct: + t := v.Type() + if v.CanAddr() { + addr := v.UnsafeAddr() + vis := visit{addr, t} + if vd, ok := p.visited[vis]; ok && vd < p.depth { + p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false) + break // don't print v again + } + p.visited[vis] = p.depth + } + + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { 
+ writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.NumField(); i++ { + showTypeInStruct := true + if f := t.Field(i); f.Name != "" { + io.WriteString(pp, f.Name) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') + } + showTypeInStruct = labelType(f.Type) + } + pp.printValue(getField(v, i), showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.NumField()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Interface: + switch e := v.Elem(); { + case e.Kind() == reflect.Invalid: + io.WriteString(p, "nil") + case e.IsValid(): + pp := *p + pp.depth++ + pp.printValue(e, showType, true) + default: + io.WriteString(p, v.Type().String()) + io.WriteString(p, "(nil)") + } + case reflect.Array, reflect.Slice: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + if v.Kind() == reflect.Slice && v.IsNil() && showType { + io.WriteString(p, "(nil)") + break + } + if v.Kind() == reflect.Slice && v.IsNil() { + io.WriteString(p, "nil") + break + } + writeByte(p, '{') + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.Len(); i++ { + showTypeInSlice := t.Elem().Kind() == reflect.Interface + pp.printValue(v.Index(i), showTypeInSlice, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + writeByte(p, '}') + case reflect.Ptr: + e := v.Elem() + if !e.IsValid() { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + io.WriteString(p, ")(nil)") + } else { + pp := *p + pp.depth++ + writeByte(pp, '&') + pp.printValue(e, true, true) + } + case reflect.Chan: + x := v.Pointer() + if showType { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, ")(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } + case reflect.Func: + io.WriteString(p, v.Type().String()) + io.WriteString(p, " 
{...}") + case reflect.UnsafePointer: + p.printInline(v, v.Pointer(), showType) + case reflect.Invalid: + io.WriteString(p, "nil") + } +} + +func canInline(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map: + return !canExpand(t.Elem()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if canExpand(t.Field(i).Type) { + return false + } + } + return true + case reflect.Interface: + return false + case reflect.Array, reflect.Slice: + return !canExpand(t.Elem()) + case reflect.Ptr: + return false + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + return false + } + return true +} + +func canExpand(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map, reflect.Struct, + reflect.Interface, reflect.Array, reflect.Slice, + reflect.Ptr: + return true + } + return false +} + +func labelType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Interface, reflect.Struct: + return true + } + return false +} + +func (p *printer) fmtString(s string, quote bool) { + if quote { + s = strconv.Quote(s) + } + io.WriteString(p, s) +} + +func writeByte(w io.Writer, b byte) { + w.Write([]byte{b}) +} + +func getField(v reflect.Value, i int) reflect.Value { + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + return val +} diff --git a/vendor/github.com/kr/pretty/go.mod b/vendor/github.com/kr/pretty/go.mod new file mode 100644 index 00000000..1e295331 --- /dev/null +++ b/vendor/github.com/kr/pretty/go.mod @@ -0,0 +1,3 @@ +module "github.com/kr/pretty" + +require "github.com/kr/text" v0.1.0 diff --git a/vendor/github.com/kr/pretty/pretty.go b/vendor/github.com/kr/pretty/pretty.go new file mode 100644 index 00000000..49423ec7 --- /dev/null +++ b/vendor/github.com/kr/pretty/pretty.go @@ -0,0 +1,108 @@ +// Package pretty provides pretty-printing for Go values. This is +// useful during debugging, to avoid wrapping long output lines in +// the terminal. 
+// +// It provides a function, Formatter, that can be used with any +// function that accepts a format string. It also provides +// convenience wrappers for functions in packages fmt and log. +package pretty + +import ( + "fmt" + "io" + "log" + "reflect" +) + +// Errorf is a convenience wrapper for fmt.Errorf. +// +// Calling Errorf(f, x, y) is equivalent to +// fmt.Errorf(f, Formatter(x), Formatter(y)). +func Errorf(format string, a ...interface{}) error { + return fmt.Errorf(format, wrap(a, false)...) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf. +// +// Calling Fprintf(w, f, x, y) is equivalent to +// fmt.Fprintf(w, f, Formatter(x), Formatter(y)). +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) { + return fmt.Fprintf(w, format, wrap(a, false)...) +} + +// Log is a convenience wrapper for log.Printf. +// +// Calling Log(x, y) is equivalent to +// log.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Log(a ...interface{}) { + log.Print(wrap(a, true)...) +} + +// Logf is a convenience wrapper for log.Printf. +// +// Calling Logf(f, x, y) is equivalent to +// log.Printf(f, Formatter(x), Formatter(y)). +func Logf(format string, a ...interface{}) { + log.Printf(format, wrap(a, false)...) +} + +// Logln is a convenience wrapper for log.Printf. +// +// Calling Logln(x, y) is equivalent to +// log.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Logln(a ...interface{}) { + log.Println(wrap(a, true)...) +} + +// Print pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Print(a ...interface{}) (n int, errno error) { + return fmt.Print(wrap(a, true)...) +} + +// Printf is a convenience wrapper for fmt.Printf. +// +// Calling Printf(f, x, y) is equivalent to +// fmt.Printf(f, Formatter(x), Formatter(y)). 
+func Printf(format string, a ...interface{}) (n int, errno error) { + return fmt.Printf(format, wrap(a, false)...) +} + +// Println pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Println(a ...interface{}) (n int, errno error) { + return fmt.Println(wrap(a, true)...) +} + +// Sprint is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprint(x, y) is equivalent to +// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Sprint(a ...interface{}) string { + return fmt.Sprint(wrap(a, true)...) +} + +// Sprintf is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprintf(f, x, y) is equivalent to +// fmt.Sprintf(f, Formatter(x), Formatter(y)). +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, wrap(a, false)...) +} + +func wrap(a []interface{}, force bool) []interface{} { + w := make([]interface{}, len(a)) + for i, x := range a { + w[i] = formatter{v: reflect.ValueOf(x), force: force} + } + return w +} diff --git a/vendor/github.com/kr/pretty/zero.go b/vendor/github.com/kr/pretty/zero.go new file mode 100644 index 00000000..abb5b6fc --- /dev/null +++ b/vendor/github.com/kr/pretty/zero.go @@ -0,0 +1,41 @@ +package pretty + +import ( + "reflect" +) + +func nonzero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() != 0 + case reflect.Float32, reflect.Float64: + return v.Float() != 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() != complex(0, 0) + case reflect.String: + return v.String() != "" + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if 
nonzero(getField(v, i)) { + return true + } + } + return false + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if nonzero(v.Index(i)) { + return true + } + } + return false + case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func: + return !v.IsNil() + case reflect.UnsafePointer: + return v.Pointer() != 0 + } + return true +} diff --git a/vendor/github.com/kr/text/License b/vendor/github.com/kr/text/License new file mode 100644 index 00000000..480a3280 --- /dev/null +++ b/vendor/github.com/kr/text/License @@ -0,0 +1,19 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/kr/text/Readme b/vendor/github.com/kr/text/Readme new file mode 100644 index 00000000..7e6e7c06 --- /dev/null +++ b/vendor/github.com/kr/text/Readme @@ -0,0 +1,3 @@ +This is a Go package for manipulating paragraphs of text. + +See http://go.pkgdoc.org/github.com/kr/text for full documentation. 
diff --git a/vendor/github.com/kr/text/doc.go b/vendor/github.com/kr/text/doc.go new file mode 100644 index 00000000..cf4c198f --- /dev/null +++ b/vendor/github.com/kr/text/doc.go @@ -0,0 +1,3 @@ +// Package text provides rudimentary functions for manipulating text in +// paragraphs. +package text diff --git a/vendor/github.com/kr/text/go.mod b/vendor/github.com/kr/text/go.mod new file mode 100644 index 00000000..fa0528b9 --- /dev/null +++ b/vendor/github.com/kr/text/go.mod @@ -0,0 +1,3 @@ +module "github.com/kr/text" + +require "github.com/kr/pty" v1.1.1 diff --git a/vendor/github.com/kr/text/indent.go b/vendor/github.com/kr/text/indent.go new file mode 100644 index 00000000..4ebac45c --- /dev/null +++ b/vendor/github.com/kr/text/indent.go @@ -0,0 +1,74 @@ +package text + +import ( + "io" +) + +// Indent inserts prefix at the beginning of each non-empty line of s. The +// end-of-line marker is NL. +func Indent(s, prefix string) string { + return string(IndentBytes([]byte(s), []byte(prefix))) +} + +// IndentBytes inserts prefix at the beginning of each non-empty line of b. +// The end-of-line marker is NL. +func IndentBytes(b, prefix []byte) []byte { + var res []byte + bol := true + for _, c := range b { + if bol && c != '\n' { + res = append(res, prefix...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// Writer indents each line of its input. +type indentWriter struct { + w io.Writer + bol bool + pre [][]byte + sel int + off int +} + +// NewIndentWriter makes a new write filter that indents the input +// lines. Each line is prefixed in order with the corresponding +// element of pre. If there are more lines than elements, the last +// element of pre is repeated for each subsequent line. +func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer { + return &indentWriter{ + w: w, + pre: pre, + bol: true, + } +} + +// The only errors returned are from the underlying indentWriter. 
+func (w *indentWriter) Write(p []byte) (n int, err error) { + for _, c := range p { + if w.bol { + var i int + i, err = w.w.Write(w.pre[w.sel][w.off:]) + w.off += i + if err != nil { + return n, err + } + } + _, err = w.w.Write([]byte{c}) + if err != nil { + return n, err + } + n++ + w.bol = c == '\n' + if w.bol { + w.off = 0 + if w.sel < len(w.pre)-1 { + w.sel++ + } + } + } + return n, nil +} diff --git a/vendor/github.com/kr/text/wrap.go b/vendor/github.com/kr/text/wrap.go new file mode 100644 index 00000000..b09bb037 --- /dev/null +++ b/vendor/github.com/kr/text/wrap.go @@ -0,0 +1,86 @@ +package text + +import ( + "bytes" + "math" +) + +var ( + nl = []byte{'\n'} + sp = []byte{' '} +) + +const defaultPenalty = 1e5 + +// Wrap wraps s into a paragraph of lines of length lim, with minimal +// raggedness. +func Wrap(s string, lim int) string { + return string(WrapBytes([]byte(s), lim)) +} + +// WrapBytes wraps b into a paragraph of lines of length lim, with minimal +// raggedness. +func WrapBytes(b []byte, lim int) []byte { + words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp) + var lines [][]byte + for _, line := range WrapWords(words, 1, lim, defaultPenalty) { + lines = append(lines, bytes.Join(line, sp)) + } + return bytes.Join(lines, nl) +} + +// WrapWords is the low-level line-breaking algorithm, useful if you need more +// control over the details of the text wrapping process. For most uses, either +// Wrap or WrapBytes will be sufficient and more convenient. +// +// WrapWords splits a list of words into lines with minimal "raggedness", +// treating each byte as one unit, accounting for spc units between adjacent +// words on each line, and attempting to limit lines to lim units. Raggedness +// is the total error over all lines, where error is the square of the +// difference of the length of the line and lim. Too-long lines (which only +// happen when a single word is longer than lim units) have pen penalty units +// added to the error. 
+func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte { + n := len(words) + + length := make([][]int, n) + for i := 0; i < n; i++ { + length[i] = make([]int, n) + length[i][i] = len(words[i]) + for j := i + 1; j < n; j++ { + length[i][j] = length[i][j-1] + spc + len(words[j]) + } + } + + nbrk := make([]int, n) + cost := make([]int, n) + for i := range cost { + cost[i] = math.MaxInt32 + } + for i := n - 1; i >= 0; i-- { + if length[i][n-1] <= lim || i == n-1 { + cost[i] = 0 + nbrk[i] = n + } else { + for j := i + 1; j < n; j++ { + d := lim - length[i][j-1] + c := d*d + cost[j] + if length[i][j-1] > lim { + c += pen // too-long lines get a worse penalty + } + if c < cost[i] { + cost[i] = c + nbrk[i] = j + } + } + } + } + + var lines [][][]byte + i := 0 + for i < n { + lines = append(lines, words[i:nbrk[i]]) + i = nbrk[i] + } + return lines +} diff --git a/vendor/github.com/percona/go-mysql/LICENSE b/vendor/github.com/percona/go-mysql/LICENSE new file mode 100644 index 00000000..dbbe3558 --- /dev/null +++ b/vendor/github.com/percona/go-mysql/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/vendor/github.com/percona/go-mysql/query/query.go b/vendor/github.com/percona/go-mysql/query/query.go new file mode 100644 index 00000000..4f8bfcd0 --- /dev/null +++ b/vendor/github.com/percona/go-mysql/query/query.go @@ -0,0 +1,804 @@ +/* + Copyright (c) 2014-2015, Percona LLC and/or its affiliates. All rights reserved. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see +*/ + +// Package query provides functions to transform queries. +package query + +/* + Fingerprint is highly-specialized and operates on a single principle: + + 1. We only replace numbers, quoted strings, and value lists. + + Although we must handle a lot of details, exceptions, and special cases, + that principle distills the problem to its essence which makes it + possible to solve without a true SQL syntax parser (or regex). It means + that we can simply copy and ignore the vast majority of the query because + if it's not a number, quoted string, or value list then there's nothing + to do. 
With a regex solution we can stop there and simply do transformations + like s/\d+/?/g and s/'[^']*'/?/g but the details, exceptions, and specials + cases make that only a partial, crude solution because, for example, with + col = 'It\s' an escaped quote char.' now the regex needs to handle inner, + escaped quotes so it becomes more complicated and slower. There are many + more difficult problems like this. Consequently, neither regex nor this + solution can be simple, but at least this solution is very fast compared + to regex because it makes a single pass through the query whereas regex + makes as many passes as there are regexes and, worst case, a single regex + can make several passes due to backtracking. To handle problems like these, + this solution is a simple state machine. In the previous example problem, + once the first quote char (') is seen we enter the inQuote state. In this + state, if an escape char (\) is seen, this is remembered and the next char + is ignored. This allows us to correctly detect the real ending quote char: + it's the first unescaped quote char. Consequently, we can simply ignore + everything else in the quoted value. The same basic logic applies to numbers + and value lists. + + With that principle and basic logic in mind, you'll notice three major code + blocks: + 1. Skip parts of the query for certain states. + 2. Change state based on rune and current state. + 3. Copy a slice of the query into the fingerprint. + The order is important because it enforces the basic logic: once we enter + certain states we must process and finish them first because in these states + we ignore everything but whatever ends the state. As mentioned above, the + inQuote state is handled in the first block. When in this state, only the + real ending quote char ends it. Consequently nothing will trick the parser; + for example the quoted value in col="INSERT INTO t VALUES ('no problem')" + will not be mistaken for another query and another quoted value. 
-- + The second block is primarily where cpFromOffset and cpToOffset are set + which are used by the third block to copy that range of the query into the + fingerprint. The second block stops, switches state, copies, and lets + the code enter the first block which skips through number, quoted value, or + value list that the second block found. +*/ + +import ( + "crypto/md5" + "fmt" + "io" + "strings" +) + +const ( + unknown byte = iota + inWord // \S+ + inNumber // [0-9a-fA-Fx.-] + inSpace // space, tab, \r, \n + inOp // [=<>!] (usually precedes a number) + opOrNumber // + in 2 + 2 or +3e-9 + inQuote // '...' or "..." + subOrOLC // - or start of -- comment + inDash // -- begins a one-line comment if followed by space + inOLC // -- comment (at least one space after dash is required) + divOrMLC // / operator or start of /* comment */ + mlcOrMySQLCode // /* comment */ or /*! MySQL-specific code */ + inMLC // /* comment */ + inValues // VALUES (1), ..., (N) + moreValuesOrUnknown // , (2nd+) or ON DUPLICATE KEY or end of query + orderBy // ORDER BY + onDupeKeyUpdate // ON DUPLICATE KEY UPDATE + inNumberInWord // e.g. db23 + inBackticks // `table-1` + inMySQLCode // /*! MySQL-specific code */ +) + +var stateName map[byte]string = map[byte]string{ + 0: "unknown", + 1: "inWord", + 2: "inNumber", + 3: "inSpace", + 4: "inOp", + 5: "opOrNumber", + 6: "inQuote", + 7: "subOrOLC", + 8: "inDash", + 9: "inOLC", + 10: "divOrMLC", + 11: "mlcOrMySQLCode", + 12: "inMLC", + 13: "inValues", + 14: "moreValuesOrUnknown", + 15: "orderBy", + 16: "onDupeKeyUpdate", + 17: "inNumberInWord", + 18: "inBackTicks", + 19: "inMySQLCode", +} + +// Debug prints very verbose tracing information to STDOUT. +var Debug bool = false + +// ReplaceNumbersInWords enables replacing numbers in words. For example: +// `SELECT c FROM org235.t` -> `SELECT c FROM org?.t`. For more examples +// look at test query_test.go/TestFingerprintWithNumberInDbName. 
+var ReplaceNumbersInWords = false + +// Fingerprint returns the canonical form of q. The primary transformations are: +// - Replace values with ? +// - Collapse whitespace +// - Remove comments +// - Lowercase everything +// Additional trasnformations are performed which change the syntax of the +// original query without affecting its performance characteristics. For +// example, "ORDER BY col ASC" is the same as "ORDER BY col", so "ASC" in the +// fingerprint is removed. +func Fingerprint(q string) string { + q += " " // need range to run off end of original query + prevWord := "" + f := make([]byte, len(q)) + fi := 0 + pr := rune(0) // previous rune + s := unknown // current state + sqlState := unknown + quoteChar := rune(0) + cpFromOffset := 0 + cpToOffset := 0 + addSpace := false + escape := false + parOpen := 0 + parOpenTotal := 0 + valueNo := 0 + firstPar := 0 + + for qi, r := range q { + if Debug { + fmt.Printf("\n%d:%d %s/%s [%d:%d] %x %q\n", qi, fi, stateName[s], stateName[sqlState], cpFromOffset, cpToOffset, r, r) + } + + /** + * 1. Skip parts of the query for certain states. + */ + + if s == inQuote { + // We're in a 'quoted value' or "quoted value". The quoted value + // ends at the first non-escaped matching quote character (' or "). + if r != quoteChar { + // The only char inside a quoted value we need to track is \, + // the escape char. This allows us to tell that the 2nd ' in + // '\'' is escaped, not the ending quote char. + if escape { + if Debug { + fmt.Println("Ignore quoted literal") + } + escape = false + } else if r == '\\' { + if Debug { + fmt.Println("Escape") + } + escape = true + } else { + if Debug { + fmt.Println("Ignore quoted value") + } + } + } else if escape { + // \' or \" + if Debug { + fmt.Println("Quote literal") + } + escape = false + } else { + // 'foo' -> ? + // "foo" -> ? 
+ if Debug { + fmt.Println("Quote end") + } + escape = false + + // qi = the closing quote char, so +1 to ensure we don't copy + // anything before this, i.e. quoted value is done, move on. + cpFromOffset = qi + 1 + + if sqlState == inValues { + // ('Hello world!', ...) -> VALUES (, ...) + // The inValues state uses this state to skip quoted values, + // so we don't replace them with ?; the inValues blocks will + // replace the entire value list with ?+. + s = inValues + } else { + f[fi] = '?' + fi++ + s = unknown + } + } + continue + } else if s == inBackticks { + if r != '`' { + // The only char inside a quoted value we need to track is \, + // the escape char. This allows us to tell that the 2nd ' in + // '\`' is escaped, not the ending quote char. + if escape { + if Debug { + fmt.Println("Ignore backtick literal") + } + escape = false + } else if r == '\\' { + if Debug { + fmt.Println("Escape") + } + escape = true + } else { + if Debug { + fmt.Println("Ignore quoted value") + } + } + } else if escape { + // \` + if Debug { + fmt.Println("Quote literal") + } + escape = false + } else { + if Debug { + fmt.Println("Quote end") + } + escape = false + + // qi = the closing backtick, so +1 to ensure we don't copy + // anything before this, i.e. quoted value is done, move on. + //cpFromOffset = qi + 1 + cpToOffset = qi + 1 + + s = inWord + } + continue + + } else if s == inNumberInWord { + // Replaces number in words with ? + // e.g. `db37` to `db?` + // Parser can fall into inNumberInWord only if + // option ReplaceNumbersInWords is turned on + if r >= '0' && r <= '9' { + if Debug { + fmt.Println("Ignore digit in word") + } + continue + } + // 123 -> ?, 0xff -> ?, 1e-9 -> ?, etc. + if Debug { + fmt.Println("Number in word end") + } + f[fi] = '?' + fi++ + cpFromOffset = qi + if isSpace(r) { + s = unknown + } else { + s = inWord + } + } else if s == inNumber { + // We're in a number which can be something simple like 123 or + // something trickier like 1e-9 or 0xFF. 
The pathological case is + // like 12ff: this is valid hex number and a valid ident (e.g. table + // name). We can't detect this; the best we can do is realize that + // 12ffz is not a number because of the z. + if (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') || r == '.' || r == 'x' || r == '-' { + if Debug { + fmt.Println("Ignore digit") + } + continue + } + if (r >= 'g' && r <= 'z') || (r >= 'G' && r <= 'Z') || r == '_' { + if Debug { + fmt.Println("Not a number") + } + cpToOffset = qi + s = inWord + } else if sqlState == inMySQLCode { + // If we are in /*![version] ... */, keep the version number + cpToOffset = qi + s = inWord + sqlState = unknown + } else { + // 123 -> ?, 0xff -> ?, 1e-9 -> ?, etc. + if Debug { + fmt.Println("Number end") + } + f[fi] = '?' + fi++ + cpFromOffset = qi + cpToOffset = qi + s = unknown + } + } else if s == inValues { + // We're in the (val1),...,(valN) after IN or VALUE[S]. A single + // () value ends when the parenthesis are balanced, but... + if r == ')' { + parOpen-- + parOpenTotal++ + if Debug { + fmt.Println("Close parenthesis", parOpen) + } + } else if r == '(' { + parOpen++ + if Debug { + fmt.Println("Open parenthesis", parOpen) + } + if parOpen == 1 { + firstPar = qi + } + } else if r == '\'' || r == '"' { + // VALUES ('Hello world!') -> enter inQuote state to skip + // the quoted value so ')' in 'This ) is a trick' doesn't + // balance an outer parenthesis. + if Debug { + fmt.Println("Quote begin") + } + s = inQuote + quoteChar = r + continue + } else if isSpace(r) { + if Debug { + fmt.Println("Space") + } + continue + } + if parOpen > 0 { + // Parenthesis are not balanced yet; i.e. haven't reached + // closing ) for this value. 
+ continue + } + if parOpenTotal == 0 { + // SELECT value FROM t + if Debug { + fmt.Println("Literal values not VALUES()") + } + s = inWord + continue + } + // () -> (?+) only for first value + if Debug { + fmt.Println("Values end") + } + valueNo++ + if valueNo == 1 { + if qi-firstPar > 1 { + copy(f[fi:fi+4], "(?+)") + fi += 4 + } else { + // INSERT INTO t VALUES () + copy(f[fi:fi+2], "()") + fi += 2 + } + firstPar = 0 + } + // ... the difficult part is that there may be other values, e.g. + // (1), (2), (3). So we enter the following state. The values list + // ends when the next char is not a comma. + s = moreValuesOrUnknown + pr = r + cpFromOffset = qi + 1 + parOpenTotal = 0 + continue + } else if s == inMLC { + // We're in a /* mutli-line comments */. Skip and ignore it all. + if pr == '*' && r == '/' { + // /* foo */ -> (nothing) + if Debug { + fmt.Printf("Multi-line comment end. pr: %s\n", string(pr)) + } + s = unknown + } else { + pr = r + if Debug { + fmt.Println("Ignore multi-line comment content") + } + } + continue + } else if s == mlcOrMySQLCode { + // We're at the start of either a /* multi-line comment */ or some + // /*![version] some MySQL-specific code */. The ! after the /* + // determines which one. + if r != '!' { + if Debug { + fmt.Println("Multi-line comment") + } + s = inMLC + continue + } else { + // /*![version] SQL_NO_CACHE */ -> /*![version] SQL_NO_CACHE */ (no change) + if Debug { + fmt.Println("MySQL-specific code") + } + s = inWord + sqlState = inMySQLCode + } + } else if s == inOLC { + // We're in a -- one line comment. A space after -- is required. + // It ends at the end of the line, but there can be more query after + // it like: + // SELECT * -- comment + // FROM t + // is really "SELECT * FROM t". 
+ if r == 0x0A { // newline + if Debug { + fmt.Println("One-line comment end") + } + s = unknown + } + continue + } else if isSpace(r) && isSpace(pr) { + // All space is collapsed into a single space, so if this char is + // a space and the previous was too, then skip the extra space. + if Debug { + fmt.Println("Skip space") + } + // +1 here ensures we actually skip the extra space in certain + // cases like "select \n-- bar\n foo". When a part of the query + // triggers a copy of preceding chars, if the only preceding char + // is a space then it's incorrectly copied, but +1 sets cpFromOffset + // to the same offset as the trigger char, thus avoiding the copy. + // For example in that ^ query, the offsets are: + // 0 's' + // 1 'e' + // 2 'l' + // 3 'e' + // 4 'c' + // 5 't' + // 6 ' ' + // 7 '\n' + // 8 '-' + // After copying 'select ', we are here @ 7 and intend to skip the + // newline. Next, the '-' @ 8 triggers a copy of any preceding + // chars. So here if we set cpFromOffset = 7 then 7:8 is copied, + // the newline, but setting cpFromOffset = 7 + 1 is 8:8 and so + // nothing is copied as we want. Actually, cpToOffset is still 6 + // in this case, but 8:6 avoids the copy too. + cpFromOffset = qi + 1 + pr = r + continue + } + + /** + * 2. Change state based on rune and current state. 
+ */ + + switch { + case r >= 0x30 && r <= 0x39: // 0-9 + switch s { + case opOrNumber: + if Debug { + fmt.Println("+/-First digit") + } + cpToOffset = qi - 1 + s = inNumber + case inOp: + if Debug { + fmt.Println("First digit after operator") + } + cpToOffset = qi + s = inNumber + case inWord: + if pr == '(' { + if Debug { + fmt.Println("Number in function") + } + cpToOffset = qi + s = inNumber + } else if pr == ',' { + // foo,4 -- 4 may be a number literal or a word/ident + if Debug { + fmt.Println("Number or word") + } + s = inNumber + cpToOffset = qi + } else { + if Debug { + fmt.Println("Number in word") + } + if ReplaceNumbersInWords { + s = inNumberInWord + cpToOffset = qi + } + } + default: + if Debug { + fmt.Println("Number literal") + } + s = inNumber + cpToOffset = qi + } + case isSpace(r): + if s == unknown { + if Debug { + fmt.Println("Lost in space") + } + if fi > 0 && !isSpace(rune(f[fi-1])) { + if Debug { + fmt.Println("Add space") + } + f[fi] = ' ' + fi++ + // This is a common case: a space after skipping something, + // e.g. col = 'foo'. We want only the first space, + // so advance cpFromOffset to whatever is after the space + // and if it's more space then space skipping block will + // handle it. + cpFromOffset = qi + 1 + } + } else if s == inDash { + if Debug { + fmt.Println("One-line comment begin") + } + s = inOLC + if cpToOffset > 2 { + cpToOffset = qi - 2 + } + } else if s == moreValuesOrUnknown { + if Debug { + fmt.Println("Space after values") + } + if valueNo == 1 { + f[fi] = ' ' + fi++ + } + } else { + if Debug { + fmt.Println("Word end") + } + word := strings.ToLower(q[cpFromOffset:qi]) + // Only match USE if it is the first word in the query, otherwise, + // it could be a USE INDEX + if word == "use" && prevWord == "" { + return "use ?" + } else if (word == "null" && (prevWord != "is" && prevWord != "not")) || word == "null," { + if Debug { + fmt.Println("NULL as value") + } + f[fi] = '?' 
+ fi++ + if word[len(word)-1] == ',' { + f[fi] = ',' + fi++ + } + f[fi] = ' ' + fi++ + cpFromOffset = qi + 1 + } else if prevWord == "order" && word == "by" { + if Debug { + fmt.Println("ORDER BY begin") + } + sqlState = orderBy + } else if sqlState == orderBy && wordIn(word, "asc", "asc,", "asc ") { + if Debug { + fmt.Println("ORDER BY ASC") + } + cpFromOffset = qi + if word[len(word)-1] == ',' { + fi-- + f[fi] = ',' + f[fi+1] = ' ' + fi += 2 + } + } else if prevWord == "key" && word == "update" { + if Debug { + fmt.Println("ON DUPLICATE KEY UPDATE begin") + } + sqlState = onDupeKeyUpdate + } + s = inSpace + cpToOffset = qi + addSpace = true + } + case r == '\'' || r == '"': + if pr != '\\' { + if s != inQuote { + if Debug { + fmt.Println("Quote begin") + } + s = inQuote + quoteChar = r + cpToOffset = qi + if pr == 'x' || pr == 'b' { + if Debug { + fmt.Println("Hex/binary value") + } + // We're at the first quote char of x'0F' + // (or b'0101', etc.), so -2 for the quote char and + // the x or b char to copy anything before and up to + // this value. 
+						cpToOffset = -2
+					}
+				}
+			}
+		case r == '`':
+			if pr != '\\' {
+				if s != inBackticks {
+					if Debug {
+						fmt.Println("Backticks begin")
+					}
+					s = inBackticks
+				}
+
+			}
+		case r == '=' || r == '<' || r == '>' || r == '!':
+			if Debug {
+				fmt.Println("Operator")
+			}
+			if s != inWord && s != inOp {
+				cpFromOffset = qi
+			}
+			s = inOp
+		case r == '/':
+			if Debug {
+				fmt.Println("Op or multi-line comment")
+			}
+			s = divOrMLC
+		case r == '*' && s == divOrMLC:
+			if Debug {
+				fmt.Println("Multi-line comment or MySQL-specific code")
+			}
+			s = mlcOrMySQLCode
+		case r == '+':
+			if Debug {
+				fmt.Println("Operator or number")
+			}
+			s = opOrNumber
+		case r == '-':
+			if pr == '-' {
+				if Debug {
+					fmt.Println("Dash")
+				}
+				s = inDash
+			} else {
+				if Debug {
+					fmt.Println("Operator or number")
+				}
+				s = opOrNumber
+			}
+		case r == '.':
+			if s == inNumber || s == inOp {
+				if Debug {
+					fmt.Println("Floating point number")
+				}
+				s = inNumber
+				cpToOffset = qi
+			}
+		case r == '(':
+			if prevWord == "call" {
+				// 'CALL foo(...)' -> 'call foo'
+				if Debug {
+					fmt.Println("CALL sp_name")
+				}
+				return "call " + q[cpFromOffset:qi]
+			} else if sqlState != onDupeKeyUpdate && (((s == inSpace || s == moreValuesOrUnknown) && (prevWord == "value" || prevWord == "values" || prevWord == "in")) || wordIn(q[cpFromOffset:qi], "value", "values", "in")) {
+				// VALUE(, VALUE (, VALUES(, VALUES (, IN(, or IN (
+				// but not after ON DUPLICATE KEY UPDATE
+				if Debug {
+					fmt.Println("Values begin")
+				}
+				s = inValues
+				sqlState = inValues
+				parOpen = 1
+				firstPar = qi
+				if valueNo == 0 {
+					cpToOffset = qi
+				}
+			} else if s != inWord {
+				if Debug {
+					fmt.Println("Random (")
+				}
+				valueNo = 0
+				cpFromOffset = qi
+				s = inWord
+			}
+		case r == ',' && s == moreValuesOrUnknown:
+			if Debug {
+				fmt.Println("More values")
+			}
+		case r == ':' && prevWord == "administrator":
+			// 'administrator command: Init DB' -> 'administrator command: Init DB' (no change)
+			if Debug {
+				fmt.Println("Admin cmd")
+			}
+			return q[0 : len(q)-1] // original query minus the trailing space we added
+		case r == '#':
+			if Debug {
+				fmt.Println("One-line comment begin")
+			}
+			addSpace = false
+			s = inOLC
+		default:
+			if s != inWord && s != inOp {
+				// If in a word or operator then keep copying the query, else
+				// previous chars were being ignored for some reason but now
+				// we should start copying again, so set cpFromOffset. Example:
+				// col=NOW(). 'col' will be set to copy, but then '=' will put
+				// us in inOp state which, if a value follows, will trigger a
+				// copy of "col=", but "NOW()" is not a value so "N" is caught
+				// here and since s=inOp still we do not copy yet (this block
+				// is not entered).
+				if Debug {
+					fmt.Println("Random character")
+				}
+				valueNo = 0
+				cpFromOffset = qi
+
+				if sqlState == inValues {
+					// Values are comma-separated, so the first random char
+					// marks the end of the VALUE() or IN() list.
+					if Debug {
+						fmt.Println("No more values")
+					}
+					sqlState = unknown
+				}
+			}
+			s = inWord
+		}
+
+		/**
+		 * 3. Copy a slice of the query into the fingerprint.
+		 */
+
+		if cpToOffset > cpFromOffset {
+			l := cpToOffset - cpFromOffset
+			prevWord = strings.ToLower(q[cpFromOffset:cpToOffset])
+			if Debug {
+				fmt.Printf("copy '%s' (%d:%d, %d:%d) %d\n", prevWord, fi, fi+l, cpFromOffset, cpToOffset, l)
+			}
+			copy(f[fi:fi+l], prevWord)
+			fi += l
+			cpFromOffset = cpToOffset
+			if wordIn(prevWord, "in", "value", "values") && sqlState != onDupeKeyUpdate {
+				// IN () -> in(?+)
+				// VALUES () -> values(?+)
+				addSpace = false
+				s = inValues
+				sqlState = inValues
+			} else if addSpace {
+				if Debug {
+					fmt.Println("Add space")
+				}
+				f[fi] = ' '
+				fi++
+				cpFromOffset++
+				addSpace = false
+			}
+		}
+		pr = r
+	}
+
+	// Remove trailing spaces.
+ for fi > 0 && isSpace(rune(f[fi-1])) { + fi-- + } + + // Clean up control characters, and return the fingerprint + return strings.Replace(string(f[0:fi]), "\x00", "", -1) +} + +func isSpace(r rune) bool { + return r == 0x20 || r == 0x09 || r == 0x0D || r == 0x0A +} + +func wordIn(q string, words ...string) bool { + q = strings.ToLower(q) + for _, word := range words { + if q == word { + return true + } + } + return false +} + +// Id returns the right-most 16 characters of the MD5 checksum of fingerprint. +// Query IDs are the shortest way to uniquely identify queries. +func Id(fingerprint string) string { + id := md5.New() + io.WriteString(id, fingerprint) + h := fmt.Sprintf("%x", id.Sum(nil)) + return strings.ToUpper(h[16:32]) +} diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt new file mode 100644 index 00000000..2885af36 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md new file mode 100644 index 00000000..e0066b0f --- /dev/null +++ b/vendor/github.com/russross/blackfriday/README.md @@ -0,0 +1,363 @@ +Blackfriday +[![Build Status][BuildSVG]][BuildURL] +[![Godoc][GodocV2SVG]][GodocV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with any modern Go release. With Go and git installed: + + go get -u gopkg.in/russross/blackfriday.v2 + +will download, compile, and install the package into your `$GOPATH` directory +hierarchy. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://godoc.org/gopkg.in/russross/blackfriday.v2. 
+ +It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, +but we highly recommend using package management tool like [dep][7] or +[Glide][8] and make use of semantic versioning. With package management you +should import `github.com/russross/blackfriday` and specify that you're using +version 2.0.0. + +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://godoc.org/github.com/russross/blackfriday + +### Known issue with `dep` + +There is a known problem with using Blackfriday v1 _transitively_ and `dep`. +Currently `dep` prioritizes semver versions over anything else, and picks the +latest one, plus it does not apply a `[[constraint]]` specifier to transitively +pulled in packages. So if you're using something that uses Blackfriday v1, but +that something does not use `dep` yet, you will get Blackfriday v2 pulled in and +your first dependency will fail to build. 
+ +There are couple of fixes for it, documented here: +https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version + +Meanwhile, `dep` team is working on a more general solution to the constraints +on transitive dependencies problem: https://github.com/golang/dep/issues/1124. + + +Usage +----- + +### v1 + +For basic usage, it is as simple as getting your input into a byte +slice and calling: + + output := blackfriday.MarkdownBasic(input) + +This renders it with no extensions enabled. To get a more useful +feature set, use this instead: + + output := blackfriday.MarkdownCommon(input) + +### v2 + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "gopkg.in/russross/blackfriday.v2" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options, v1 + +If you want to customize the set of options, first get a renderer +(currently only the HTML output engine), then use it to +call the more general `Markdown` function. For examples, see the +implementations of `MarkdownBasic` and `MarkdownCommon` in +`markdown.go`. 
+ +### Custom options, v2 + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. + +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. + +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. 
+ + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself against JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. 
Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). Just + mark it like this: + + ``` go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. + + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ``` go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + +* **Definition lists**. A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled (it is off by + default in the `MarkdownBasic` and `MarkdownCommon` convenience + functions), newlines in the input translate into line breaks in + the output. + +* **Smart quotes**. 
Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. + +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable heading anchor links. + + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): + renders output as LaTeX. + + +TODO +---- + +* More unit testing +* Improve Unicode support. It does not understand all Unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all UTF-8 input. 
+ + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + [6]: https://labix.org/gopkg.in "gopkg.in" + [7]: https://github.com/golang/dep/ "dep" + [8]: https://github.com/Masterminds/glide "Glide" + + [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master + [BuildURL]: https://travis-ci.org/russross/blackfriday + [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg + [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go new file mode 100644 index 00000000..929638aa --- /dev/null +++ b/vendor/github.com/russross/blackfriday/block.go @@ -0,0 +1,1451 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "strings" + "unicode" +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *parser) block(out *bytes.Buffer, data []byte) { + if len(data) == 0 || data[len(data)-1] != '\n' { + panic("block input is missing terminating newline") + } + + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed header: + // + // # Header 1 + // ## Header 2 + // ... 
+ // ###### Header 6 + if p.isPrefixHeader(data) { + data = data[p.prefixHeader(out, data):] + continue + } + + // block of preformatted HTML: + // + //

+ // ... + //
+ if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go info string here + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, 
' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(out, data, doRender); size > 0 { + return size + } + + // check for HTML CDATA + if size := p.htmlCDATA(out, data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + + return i +} + +func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { + // html block needs to end with a blank line + if i := p.isEmpty(data[start:]); i > 0 { + size := start + i + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + return 0 +} + +// HTML comment, lax form +func (p *parser) htmlComment(out 
*bytes.Buffer, data []byte, doRender bool) int { + i := p.inlineHTMLComment(out, data) + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HTML CDATA section +func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { + const cdataTag = "') { + i++ + } + i++ + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HR, which is the only self-closing block tag considered +func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + + i := 3 + for data[i] != '>' && data[i] != '\n' { + i++ + } + + if data[i] == '>' { + return p.renderHTMLBlock(out, data, i+1, doRender) + } + + return 0 +} + +func (p *parser) htmlFindTag(data []byte) (string, bool) { + i := 0 + for isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + return i + 1 +} + +func (*parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. 
+// If syntax is not nil, it gets set to the syntax specified in the fence line. +// A final newline is mandatory to recognize the fence line, unless newlineOptional is true. +func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + + i++ + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { + var infoString string + beg, marker := isFenceLine(data, &infoString, "", false) + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + newlineOptional := !doRender + fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), infoString) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col 
< len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // regardless of any contents inside it + for data[end] != '\n' { + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + end++ + + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + + var cooked bytes.Buffer + p.block(&cooked, raw.Bytes()) + p.r.BlockQuote(out, cooked.Bytes()) + return end +} + +// returns prefix length for block code +func (p *parser) codePrefix(data []byte) int { + if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *parser) code(out *bytes.Buffer, data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for data[i] != '\n' { + i++ + } + i++ + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffer + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + p.r.BlockCode(out, work.Bytes(), "") + + return i +} + +// returns unordered list item prefix +func (p *parser) uliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // need a *, +, or - followed by a space + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + data[i+1] != 
' ' { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *parser) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for data[i] >= '0' && data[i] <= '9' { + i++ + } + + // we need >= 1 digits followed by a dot and a space + if start == i || data[i] != '.' || data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *parser) dliPrefix(data []byte) int { + i := 0 + + // need a : followed by a spaces + if data[i] != ':' || data[i+1] != ' ' { + return 0 + } + for data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { + i := 0 + flags |= LIST_ITEM_BEGINNING_OF_LIST + work := func() bool { + for i < len(data) { + skip := p.listItem(out, data[i:], &flags) + i += skip + + if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { + break + } + flags &= ^LIST_ITEM_BEGINNING_OF_LIST + } + return true + } + + p.r.List(out, work, flags) + return i +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in definition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + raw.Write(data[line:i]) + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + // evaluate how this line fits in + switch { + // is this a nested list item? 
+ case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + // end the list if the type changed after a blank line + if indent <= itemIndent && + ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || + (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { + + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? + case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? + next := i + for data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= LIST_ITEM_END_OF_LIST + } + } else { + *flags |= LIST_ITEM_END_OF_LIST + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + containsBlankLine = false + + // add the line into the working buffer without prefix + raw.Write(data[line+indent : i]) + + line = i + } + + // If reached end of data, the Renderer.ListItem call we're going to make below + // is definitely the last in the list. 
+ if line >= len(data) { + *flags |= LIST_ITEM_END_OF_LIST + } + + rawBytes := raw.Bytes() + + // render the contents of the list item + var cooked bytes.Buffer + if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.block(&cooked, rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + p.inline(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.inline(&cooked, rawBytes) + } + } + + // render the actual list item + cookedBytes := cooked.Bytes() + parsedEnd := len(cookedBytes) + + // strip trailing newlines + for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { + parsedEnd-- + } + p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) + + return line +} + +// render a single paragraph that has already been parsed out +func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + // trim trailing newline + end := len(data) - 1 + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + work := func() bool { + p.inline(out, data[beg:end]) + return true + } + p.r.Paragraph(out, work) +} + +func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // is this blank line followed by a definition list item? 
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.flags&EXTENSION_FENCED_CODE != 0 { + if p.fencedCodeBlock(out, current, false) > 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list 
after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go new file mode 100644 index 00000000..9656c42a --- /dev/null +++ b/vendor/github.com/russross/blackfriday/doc.go @@ -0,0 +1,32 @@ +// Package blackfriday is a Markdown processor. +// +// It translates plain text with simple formatting rules into HTML or LaTeX. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. This algorithm is used to create +// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. +// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. 
They are mapped to +// lower case, and included in the output. All other runes are considered +// invalid characters. Invalid characters that preceed the first valid character, +// as well as invalid character that follow the last valid character +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need full functionality of blackfriday. +package blackfriday + +// NOTE: Keep Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday. diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go new file mode 100644 index 00000000..e0a6c69c --- /dev/null +++ b/vendor/github.com/russross/blackfriday/html.go @@ -0,0 +1,938 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Html renderer configuration options. +const ( + HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks + HTML_SKIP_STYLE // skip embedded