From d76b1357da21f777ee1cdab8bfd907ed66c7b3bd Mon Sep 17 00:00:00 2001 From: Ryan Leckey Date: Fri, 27 Dec 2019 17:31:01 -0800 Subject: [PATCH] Audit MySql and Postgres protocols --- .gitignore | 3 - Cargo.toml | 31 +- LICENSE-APACHE | 201 ++++ LICENSE-MIT | 25 + README.md | 53 +- benches/postgres-protocol.rs | 37 - .../Cargo.toml | 4 +- .../schema.sql} | 0 .../setup.sh | 4 +- .../src/main.rs | 2 +- rustfmt.toml | 2 - sqlx-core/Cargo.toml | 26 +- sqlx-core/src/arguments.rs | 160 +++ sqlx-core/src/backend.rs | 28 - sqlx-core/src/cache.rs | 57 +- sqlx-core/src/connection.rs | 24 +- sqlx-core/src/database.rs | 19 + sqlx-core/src/decode.rs | 84 +- sqlx-core/src/describe.rs | 57 +- sqlx-core/src/encode.rs | 100 +- sqlx-core/src/error.rs | 101 +- sqlx-core/src/executor.rs | 95 +- sqlx-core/src/io/buf.rs | 50 +- sqlx-core/src/io/buf_mut.rs | 26 +- sqlx-core/src/io/buf_stream.rs | 5 +- sqlx-core/src/io/mod.rs | 22 +- sqlx-core/src/lib.rs | 83 +- sqlx-core/src/macros.rs | 14 - sqlx-core/src/mysql/arguments.rs | 51 + sqlx-core/src/mysql/backend.rs | 34 - sqlx-core/src/mysql/connection.rs | 384 +++---- sqlx-core/src/mysql/database.rs | 12 + sqlx-core/src/mysql/error.rs | 58 +- sqlx-core/src/mysql/establish.rs | 46 - sqlx-core/src/mysql/executor.rs | 490 ++++++--- sqlx-core/src/mysql/io/buf_ext.rs | 33 +- sqlx-core/src/mysql/io/buf_mut_ext.rs | 43 +- sqlx-core/src/mysql/io/mod.rs | 4 +- sqlx-core/src/mysql/mod.rs | 39 +- .../mysql/protocol/binary/com_stmt_close.rs | 38 - .../mysql/protocol/binary/com_stmt_exec.rs | 89 -- .../mysql/protocol/binary/com_stmt_fetch.rs | 44 - .../mysql/protocol/binary/com_stmt_prepare.rs | 39 - .../protocol/binary/com_stmt_prepare_ok.rs | 81 -- .../mysql/protocol/binary/com_stmt_reset.rs | 34 - sqlx-core/src/mysql/protocol/binary/mod.rs | 21 - sqlx-core/src/mysql/protocol/capabilities.rs | 87 +- sqlx-core/src/mysql/protocol/column_count.rs | 18 + sqlx-core/src/mysql/protocol/column_def.rs | 77 ++ sqlx-core/src/mysql/protocol/com_query.rs | 21 + .../src/mysql/protocol/com_set_option.rs | 29 + .../src/mysql/protocol/com_stmt_execute.rs | 62 ++ .../src/mysql/protocol/com_stmt_prepare.rs | 21 + .../src/mysql/protocol/com_stmt_prepare_ok.rs | 49 + .../protocol/connect/auth_switch_request.rs | 21 - .../src/mysql/protocol/connect/initial.rs | 164 --- sqlx-core/src/mysql/protocol/connect/mod.rs | 9 - .../src/mysql/protocol/connect/response.rs | 86 -- .../src/mysql/protocol/connect/ssl_request.rs | 40 - sqlx-core/src/mysql/protocol/decode.rs | 7 + sqlx-core/src/mysql/protocol/encode.rs | 2 +- sqlx-core/src/mysql/protocol/eof.rs | 52 + sqlx-core/src/mysql/protocol/err.rs | 55 + sqlx-core/src/mysql/protocol/error_code.rs | 997 ------------------ sqlx-core/src/mysql/protocol/field.rs | 89 +- sqlx-core/src/mysql/protocol/handshake.rs | 159 +++ .../src/mysql/protocol/handshake_response.rs | 57 + sqlx-core/src/mysql/protocol/mod.rs | 72 +- sqlx-core/src/mysql/protocol/ok.rs | 64 ++ .../mysql/protocol/response/column_count.rs | 43 - .../src/mysql/protocol/response/column_def.rs | 123 --- sqlx-core/src/mysql/protocol/response/eof.rs | 58 - sqlx-core/src/mysql/protocol/response/err.rs | 116 -- sqlx-core/src/mysql/protocol/response/mod.rs | 13 - sqlx-core/src/mysql/protocol/response/ok.rs | 109 -- sqlx-core/src/mysql/protocol/response/row.rs | 87 -- sqlx-core/src/mysql/protocol/row.rs | 129 +++ sqlx-core/src/mysql/protocol/server_status.rs | 45 - sqlx-core/src/mysql/protocol/status.rs | 49 + .../src/mysql/protocol/text/com_debug.rs | 28 - .../src/mysql/protocol/text/com_init_db.rs | 36 - 
sqlx-core/src/mysql/protocol/text/com_ping.rs | 28 - .../mysql/protocol/text/com_process_kill.rs | 35 - .../src/mysql/protocol/text/com_query.rs | 36 - sqlx-core/src/mysql/protocol/text/com_quit.rs | 29 - .../src/mysql/protocol/text/com_reset_conn.rs | 30 - .../src/mysql/protocol/text/com_set_option.rs | 45 - .../src/mysql/protocol/text/com_sleep.rs | 27 - .../src/mysql/protocol/text/com_statistics.rs | 28 - sqlx-core/src/mysql/protocol/text/mod.rs | 41 - sqlx-core/src/mysql/protocol/type.rs | 39 + sqlx-core/src/mysql/protocol/types.rs | 8 - sqlx-core/src/mysql/query.rs | 41 - sqlx-core/src/mysql/row.rs | 61 +- sqlx-core/src/mysql/types/binary.rs | 42 - sqlx-core/src/mysql/types/bool.rs | 25 + sqlx-core/src/mysql/types/boolean.rs | 34 - sqlx-core/src/mysql/types/character.rs | 60 -- sqlx-core/src/mysql/types/chrono.rs | 174 +-- sqlx-core/src/mysql/types/float.rs | 47 + sqlx-core/src/mysql/types/int.rs | 87 ++ sqlx-core/src/mysql/types/mod.rs | 44 +- sqlx-core/src/mysql/types/numeric.rs | 272 ----- sqlx-core/src/mysql/types/str.rs | 41 + sqlx-core/src/mysql/types/uint.rs | 85 ++ sqlx-core/src/params.rs | 140 --- sqlx-core/src/pool/executor.rs | 90 +- sqlx-core/src/pool/inner.rs | 55 +- sqlx-core/src/pool/mod.rs | 68 +- sqlx-core/src/pool/options.rs | 6 +- sqlx-core/src/postgres/arguments.rs | 60 ++ sqlx-core/src/postgres/backend.rs | 48 - sqlx-core/src/postgres/connection.rs | 225 ++-- sqlx-core/src/postgres/database.rs | 12 + sqlx-core/src/postgres/error.rs | 49 +- sqlx-core/src/postgres/executor.rs | 448 +++++--- sqlx-core/src/postgres/mod.rs | 46 +- .../src/postgres/protocol/authentication.rs | 6 +- .../src/postgres/protocol/backend_key_data.rs | 24 +- sqlx-core/src/postgres/protocol/bind.rs | 18 +- .../src/postgres/protocol/cancel_request.rs | 4 +- sqlx-core/src/postgres/protocol/close.rs | 44 +- .../src/postgres/protocol/command_complete.rs | 24 +- sqlx-core/src/postgres/protocol/copy_data.rs | 29 - sqlx-core/src/postgres/protocol/copy_done.rs | 15 - sqlx-core/src/postgres/protocol/copy_fail.rs | 16 - sqlx-core/src/postgres/protocol/data_row.rs | 54 +- sqlx-core/src/postgres/protocol/decode.rs | 2 +- sqlx-core/src/postgres/protocol/describe.rs | 61 +- sqlx-core/src/postgres/protocol/execute.rs | 4 +- sqlx-core/src/postgres/protocol/flush.rs | 3 +- sqlx-core/src/postgres/protocol/message.rs | 5 +- sqlx-core/src/postgres/protocol/mod.rs | 90 +- .../protocol/notification_response.rs | 65 +- .../protocol/parameter_description.rs | 2 +- .../src/postgres/protocol/parameter_status.rs | 63 +- sqlx-core/src/postgres/protocol/parse.rs | 18 +- .../src/postgres/protocol/password_message.rs | 10 +- sqlx-core/src/postgres/protocol/query.rs | 2 +- .../src/postgres/protocol/ready_for_query.rs | 16 +- sqlx-core/src/postgres/protocol/response.rs | 218 +--- .../src/postgres/protocol/row_description.rs | 38 +- .../src/postgres/protocol/startup_message.rs | 2 +- sqlx-core/src/postgres/protocol/statement.rs | 18 + sqlx-core/src/postgres/protocol/sync.rs | 2 +- sqlx-core/src/postgres/protocol/terminate.rs | 2 +- sqlx-core/src/postgres/query.rs | 51 - sqlx-core/src/postgres/raw.rs | 0 sqlx-core/src/postgres/row.rs | 60 +- sqlx-core/src/postgres/types/binary.rs | 44 - sqlx-core/src/postgres/types/bool.rs | 24 + sqlx-core/src/postgres/types/boolean.rs | 36 - sqlx-core/src/postgres/types/character.rs | 61 -- sqlx-core/src/postgres/types/chrono.rs | 156 +-- sqlx-core/src/postgres/types/float.rs | 45 + sqlx-core/src/postgres/types/int.rs | 60 ++ sqlx-core/src/postgres/types/mod.rs | 113 +- 
sqlx-core/src/postgres/types/numeric.rs | 138 --- sqlx-core/src/postgres/types/str.rs | 44 + sqlx-core/src/postgres/types/uuid.rs | 31 +- sqlx-core/src/query.rs | 155 +-- sqlx-core/src/row.rs | 158 +-- sqlx-core/src/types.rs | 48 +- sqlx-core/src/url.rs | 65 +- sqlx-macros/Cargo.toml | 40 +- sqlx-macros/src/{backend => database}/mod.rs | 28 +- .../src/{backend => database}/mysql.rs | 2 +- .../src/{backend => database}/postgres.rs | 2 +- sqlx-macros/src/lib.rs | 21 +- sqlx-macros/src/query.rs | 77 +- src/lib.rs | 20 +- test.sh | 11 + tests/mysql-types.rs | 12 +- tests/mysql.rs | 50 + tests/postgres-types.rs | 22 +- tests/postgres.rs | 74 ++ 176 files changed, 4629 insertions(+), 6407 deletions(-) create mode 100644 LICENSE-APACHE create mode 100644 LICENSE-MIT delete mode 100644 benches/postgres-protocol.rs rename examples/{realworld => realworld-postgres}/Cargo.toml (66%) rename examples/{realworld/schema/1_users.sql => realworld-postgres/schema.sql} (100%) rename examples/{realworld => realworld-postgres}/setup.sh (62%) mode change 100755 => 100644 rename examples/{realworld => realworld-postgres}/src/main.rs (99%) delete mode 100644 rustfmt.toml create mode 100644 sqlx-core/src/arguments.rs delete mode 100644 sqlx-core/src/backend.rs create mode 100644 sqlx-core/src/database.rs delete mode 100644 sqlx-core/src/macros.rs create mode 100644 sqlx-core/src/mysql/arguments.rs delete mode 100644 sqlx-core/src/mysql/backend.rs create mode 100644 sqlx-core/src/mysql/database.rs delete mode 100644 sqlx-core/src/mysql/establish.rs delete mode 100644 sqlx-core/src/mysql/protocol/binary/com_stmt_close.rs delete mode 100644 sqlx-core/src/mysql/protocol/binary/com_stmt_exec.rs delete mode 100644 sqlx-core/src/mysql/protocol/binary/com_stmt_fetch.rs delete mode 100644 sqlx-core/src/mysql/protocol/binary/com_stmt_prepare.rs delete mode 100644 sqlx-core/src/mysql/protocol/binary/com_stmt_prepare_ok.rs delete mode 100644 sqlx-core/src/mysql/protocol/binary/com_stmt_reset.rs delete mode 100644 sqlx-core/src/mysql/protocol/binary/mod.rs create mode 100644 sqlx-core/src/mysql/protocol/column_count.rs create mode 100644 sqlx-core/src/mysql/protocol/column_def.rs create mode 100644 sqlx-core/src/mysql/protocol/com_query.rs create mode 100644 sqlx-core/src/mysql/protocol/com_set_option.rs create mode 100644 sqlx-core/src/mysql/protocol/com_stmt_execute.rs create mode 100644 sqlx-core/src/mysql/protocol/com_stmt_prepare.rs create mode 100644 sqlx-core/src/mysql/protocol/com_stmt_prepare_ok.rs delete mode 100644 sqlx-core/src/mysql/protocol/connect/auth_switch_request.rs delete mode 100644 sqlx-core/src/mysql/protocol/connect/initial.rs delete mode 100644 sqlx-core/src/mysql/protocol/connect/mod.rs delete mode 100644 sqlx-core/src/mysql/protocol/connect/response.rs delete mode 100644 sqlx-core/src/mysql/protocol/connect/ssl_request.rs create mode 100644 sqlx-core/src/mysql/protocol/decode.rs create mode 100644 sqlx-core/src/mysql/protocol/eof.rs create mode 100644 sqlx-core/src/mysql/protocol/err.rs delete mode 100644 sqlx-core/src/mysql/protocol/error_code.rs create mode 100644 sqlx-core/src/mysql/protocol/handshake.rs create mode 100644 sqlx-core/src/mysql/protocol/handshake_response.rs create mode 100644 sqlx-core/src/mysql/protocol/ok.rs delete mode 100644 sqlx-core/src/mysql/protocol/response/column_count.rs delete mode 100644 sqlx-core/src/mysql/protocol/response/column_def.rs delete mode 100644 sqlx-core/src/mysql/protocol/response/eof.rs delete mode 100644 sqlx-core/src/mysql/protocol/response/err.rs 
delete mode 100644 sqlx-core/src/mysql/protocol/response/mod.rs delete mode 100644 sqlx-core/src/mysql/protocol/response/ok.rs delete mode 100644 sqlx-core/src/mysql/protocol/response/row.rs create mode 100644 sqlx-core/src/mysql/protocol/row.rs delete mode 100644 sqlx-core/src/mysql/protocol/server_status.rs create mode 100644 sqlx-core/src/mysql/protocol/status.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_debug.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_init_db.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_ping.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_process_kill.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_query.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_quit.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_reset_conn.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_set_option.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_sleep.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/com_statistics.rs delete mode 100644 sqlx-core/src/mysql/protocol/text/mod.rs create mode 100644 sqlx-core/src/mysql/protocol/type.rs delete mode 100644 sqlx-core/src/mysql/protocol/types.rs delete mode 100644 sqlx-core/src/mysql/query.rs delete mode 100644 sqlx-core/src/mysql/types/binary.rs create mode 100644 sqlx-core/src/mysql/types/bool.rs delete mode 100644 sqlx-core/src/mysql/types/boolean.rs delete mode 100644 sqlx-core/src/mysql/types/character.rs create mode 100644 sqlx-core/src/mysql/types/float.rs create mode 100644 sqlx-core/src/mysql/types/int.rs delete mode 100644 sqlx-core/src/mysql/types/numeric.rs create mode 100644 sqlx-core/src/mysql/types/str.rs create mode 100644 sqlx-core/src/mysql/types/uint.rs delete mode 100644 sqlx-core/src/params.rs create mode 100644 sqlx-core/src/postgres/arguments.rs delete mode 100644 sqlx-core/src/postgres/backend.rs create mode 100644 sqlx-core/src/postgres/database.rs delete mode 100644 sqlx-core/src/postgres/protocol/copy_data.rs delete mode 100644 sqlx-core/src/postgres/protocol/copy_done.rs delete mode 100644 sqlx-core/src/postgres/protocol/copy_fail.rs create mode 100644 sqlx-core/src/postgres/protocol/statement.rs delete mode 100644 sqlx-core/src/postgres/query.rs delete mode 100644 sqlx-core/src/postgres/raw.rs delete mode 100644 sqlx-core/src/postgres/types/binary.rs create mode 100644 sqlx-core/src/postgres/types/bool.rs delete mode 100644 sqlx-core/src/postgres/types/boolean.rs delete mode 100644 sqlx-core/src/postgres/types/character.rs create mode 100644 sqlx-core/src/postgres/types/float.rs create mode 100644 sqlx-core/src/postgres/types/int.rs delete mode 100644 sqlx-core/src/postgres/types/numeric.rs create mode 100644 sqlx-core/src/postgres/types/str.rs rename sqlx-macros/src/{backend => database}/mod.rs (55%) rename sqlx-macros/src/{backend => database}/mysql.rs (87%) rename sqlx-macros/src/{backend => database}/postgres.rs (96%) create mode 100755 test.sh create mode 100644 tests/mysql.rs create mode 100644 tests/postgres.rs diff --git a/.gitignore b/.gitignore index 11759fd1..ca1cc0c5 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,3 @@ Cargo.lock # Environment .env - -# rustfmt backup files -**/*.rs.bk diff --git a/Cargo.toml b/Cargo.toml index 619f5f80..611c5692 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,13 +3,16 @@ members = [ ".", "sqlx-core", "sqlx-macros", - "examples/realworld" + "examples/realworld-postgres" ] [package] name = "sqlx" version = "0.1.1-pre" license = "MIT OR Apache-2.0" 
+readme = "README.md" +repository = "https://github.com/launchbadge/sqlx" +documentation = "https://docs.rs/sqlx" description = "The Rust SQL Toolkit." edition = "2018" authors = [ @@ -19,11 +22,14 @@ authors = [ [features] default = [ "macros" ] -unstable = [ "sqlx-core/unstable" ] +macros = [ "sqlx-macros", "proc-macro-hack" ] + +# database postgres = [ "sqlx-core/postgres", "sqlx-macros/postgres" ] mysql = [ "sqlx-core/mysql", "sqlx-macros/mysql" ] -macros = [ "sqlx-macros", "proc-macro-hack" ] -chrono = ["sqlx-core/chrono", "sqlx-macros/chrono"] + +# types +chrono = [ "sqlx-core/chrono", "sqlx-macros/chrono" ] uuid = [ "sqlx-core/uuid", "sqlx-macros/uuid" ] [dependencies] @@ -32,15 +38,23 @@ sqlx-macros = { version = "0.1.0-pre", path = "sqlx-macros", optional = true } proc-macro-hack = { version = "0.5.11", optional = true } [dev-dependencies] +anyhow = "1.0.25" +futures = "0.3.1" async-std = { version = "1.2.0", features = [ "attributes" ] } dotenv = "0.15.0" -matches = "0.1.8" -criterion = "0.3.0" [[test]] name = "macros" required-features = [ "postgres", "uuid", "macros" ] +[[test]] +name = "mysql" +required-features = [ "mysql" ] + +[[test]] +name = "postgres" +required-features = [ "postgres" ] + [[test]] name = "postgres-types" required-features = [ "postgres" ] @@ -48,8 +62,3 @@ required-features = [ "postgres" ] [[test]] name = "mysql-types" required-features = [ "mysql" ] - -[[bench]] -name = "postgres-protocol" -required-features = [ "postgres", "unstable" ] -harness = false diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 00000000..a488a491 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,201 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright 2019 LaunchBadge, LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 00000000..13735bd1 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2019 LaunchBadge, LLC + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index d988e3c3..3262c1e4 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,47 @@ -# SQLx +

+SQLx
+
+ 🧰 The Rust SQL Toolkit
+
-The Rust SQL Toolkit.
-
- * **Asynchronous**. Handle thousands of database connections from a single thread.
-
- * **Fast**. _TO BE WRITTEN_
-
- * **Native**. SQLx is a pure Rust toolkit for SQL. Where possible, drivers are written from scratch, in Rust, utilizing the modern ecosystem for asynchronous network services development.
-
- * **Agnostic**. SQLx is agnostic over the database engine and can operate against a variety of database backends with the backend chosen **at compile-time** through generic constraints **or at runtime** with a slight performance loss (due to dynamic dispatch).
+
-
-† The SQLite driver (which does not yet exist) will use the libsqlite3 C library as SQLite is an embedded database (the only way we could be pure Rust for SQLite is by porting _all_ of SQLite to Rust).
+
+ [badge: Crates.io version]
+ [badge: Download]
+ [badge: docs.rs docs]
+
+ Built with ❤️ by The LaunchBadge team
+
+
+SQLx is a modern SQL client built from the ground up for Rust, in Rust.
+
+ * **Asynchronous**.
+
+ * **Native**. SQLx is a pure Rust toolkit for SQL. Where possible, drivers are written from scratch, in Rust, utilizing the modern ecosystem for asynchronous network services development.
+
+ * **Type-safe**. SQLx is built upon the novel idea of preparing SQL statements before or during compilation to provide strong type safety while not getting in your way with a custom DSL.
+
+## Safety
+
+This crate uses `#[deny(unsafe_code)]` to ensure everything is implemented in 100% Safe Rust.
 
 ## License
diff --git a/benches/postgres-protocol.rs b/benches/postgres-protocol.rs
deleted file mode 100644
index 552372d7..00000000
--- a/benches/postgres-protocol.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use sqlx::postgres::protocol::{Bind, DataRow, Decode, Encode, RowDescription};
-
-fn bench(c: &mut Criterion) {
-    c.bench_function("decode_data_row", |b| {
-        b.iter(|| {
-            let _ = DataRow::decode(&black_box(b"\0\x03\0\0\0\x011\0\0\0\x012\0\0\0\x013")[..]);
-        });
-    });
-
-    c.bench_function( "decode_row_description",|b| {
-        b.iter(|| {
-            let _ = RowDescription::decode(&black_box(b"\0\x02user_id\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0number_of_pages\0\0\0\0\0\0\0\0\0\x05\0\0\0\0\0\0\0\0\0")[..]);
-        });
-    });
-
-    c.bench_function("encode_bind", |b| {
-        let mut buf = Vec::new();
-
-        b.iter(|| {
-            black_box(Bind {
-                portal: "__sqlx_portal_5121",
-                statement: "__sqlx_statement_5121",
-                formats: &[1],
-                values_len: 2,
-                values: &[(-1_i8) as _, 0, 0, 0, 1, 0, 0, 0, 25],
-                result_formats: &[1],
-            })
-            .encode(&mut buf);
-
-            buf.clear();
-        });
-    });
-}
-
-criterion_group!(benches, bench);
-criterion_main!(benches);
diff --git a/examples/realworld/Cargo.toml b/examples/realworld-postgres/Cargo.toml
similarity index 66%
rename from examples/realworld/Cargo.toml
rename to examples/realworld-postgres/Cargo.toml
index d6cebd73..73d9dec4 100644
--- a/examples/realworld/Cargo.toml
+++ b/examples/realworld-postgres/Cargo.toml
@@ -7,8 +7,8 @@
[dependencies] anyhow = "1.0.25" dotenv = "0.15.0" -async-std = "1.2.0" +async-std = { version = "1.2.0", features = [ "attributes" ] } tide = "0.4.0" sqlx = { path = "../..", features = [ "postgres" ] } -serde = { version = "1.0.103", features = [ "derive"] } +serde = { version = "1.0.103", features = [ "derive" ] } futures = "0.3.1" diff --git a/examples/realworld/schema/1_users.sql b/examples/realworld-postgres/schema.sql similarity index 100% rename from examples/realworld/schema/1_users.sql rename to examples/realworld-postgres/schema.sql diff --git a/examples/realworld/setup.sh b/examples/realworld-postgres/setup.sh old mode 100755 new mode 100644 similarity index 62% rename from examples/realworld/setup.sh rename to examples/realworld-postgres/setup.sh index 7d6096a8..68581759 --- a/examples/realworld/setup.sh +++ b/examples/realworld-postgres/setup.sh @@ -3,5 +3,5 @@ # Get current directory (of this script) DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -# Run SQL files in schema/ directory -psql -d "$DATABASE_URL" -f $DIR/schema/*.sql +# Run schema file +psql -d "$DATABASE_URL" -f schema.sql diff --git a/examples/realworld/src/main.rs b/examples/realworld-postgres/src/main.rs similarity index 99% rename from examples/realworld/src/main.rs rename to examples/realworld-postgres/src/main.rs index 270cdaff..cd4dcbb3 100644 --- a/examples/realworld/src/main.rs +++ b/examples/realworld-postgres/src/main.rs @@ -52,4 +52,4 @@ async fn register(mut req: Request>) -> Response { Response::new(200) .body_json(&RegisterResponseBody { id: user_id }) .unwrap() -} +} \ No newline at end of file diff --git a/rustfmt.toml b/rustfmt.toml deleted file mode 100644 index 4afdfbd8..00000000 --- a/rustfmt.toml +++ /dev/null @@ -1,2 +0,0 @@ -unstable_features = true -merge_imports = true diff --git a/sqlx-core/Cargo.toml b/sqlx-core/Cargo.toml index a810fdcf..e003fdeb 100644 --- a/sqlx-core/Cargo.toml +++ b/sqlx-core/Cargo.toml @@ -2,7 +2,6 @@ name = "sqlx-core" version = "0.1.0-pre" license = "MIT OR Apache-2.0" -description = "The Rust SQL Toolkit." 
edition = "2018" authors = [ "Ryan Leckey ", @@ -16,21 +15,18 @@ postgres = [] mysql = [] [dependencies] +async-stream = { version = "0.2.0", default-features = false } async-std = { version = "1.2.0", default-features = false, features = [ "unstable" ] } -async-stream = "0.2.0" -bitflags = "1.2.1" -byteorder = { version = "1.3.2", default-features = false } -chrono = { version = "0.4", optional = true } -futures-channel = "0.3.1" -futures-core = "0.3.1" -futures-util = "0.3.1" -log = "0.4.8" -md-5 = "0.8.0" -memchr = "2.2.1" -url = "2.1.0" -uuid = { version = "0.8.1", optional = true } +bitflags = { version = "1.2.1", default-features = false } +futures-core = { version = "0.3.1", default-features = false } +futures-util = { version = "0.3.1", default-features = false } +log = { version = "0.4", default-features = false } +url = { version = "2.1.0", default-features = false } +byteorder = { version ="1.3.2", default-features = false } +memchr = { version = "2.2.1", default-features = false } +md-5 = { version = "0.8.0", default-features = false } +uuid = { version = "0.8.1", default-features = false, optional = true } +chrono = { version = "0.4.10", default-features = false, features = [ "clock" ], optional = true } [dev-dependencies] matches = "0.1.8" -bytes = "0.5.2" -async-std = { version = "1.2.0", default-features = false, features = [ "attributes" ] } diff --git a/sqlx-core/src/arguments.rs b/sqlx-core/src/arguments.rs new file mode 100644 index 00000000..b756bbd1 --- /dev/null +++ b/sqlx-core/src/arguments.rs @@ -0,0 +1,160 @@ +//! Traits for passing arguments to SQL queries. + +use crate::database::Database; +use crate::encode::Encode; +use crate::types::HasSqlType; + +/// A tuple of arguments to be sent to the database. +pub trait Arguments: Send + Sized + Default + 'static { + type Database: Database + ?Sized; + + /// Returns `true` if there are no values. + #[inline] + fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of values. + fn len(&self) -> usize; + + /// Returns the size of the arguments, in bytes. + fn size(&self) -> usize; + + /// Reserves the capacity for at least `len` more values (of `size` bytes) to + /// be added to the arguments without a reallocation. + fn reserve(&mut self, len: usize, size: usize); + + /// Add the value to the end of the arguments. + fn add(&mut self, value: T) + where + Self::Database: HasSqlType, + T: Encode; +} + +pub trait IntoArguments +where + DB: Database, +{ + fn into_arguments(self) -> DB::Arguments; +} + +impl IntoArguments for DB::Arguments +where + DB: Database, +{ + #[inline] + fn into_arguments(self) -> DB::Arguments { + self + } +} + +#[allow(unused)] +macro_rules! impl_into_arguments { + ($B:ident: $( ($idx:tt) -> $T:ident );+;) => { + impl<$($T,)+> crate::arguments::IntoArguments<$B> for ($($T,)+) + where + $($B: crate::types::HasSqlType<$T>,)+ + $($T: crate::encode::Encode<$B>,)+ + { + fn into_arguments(self) -> <$B as crate::database::Database>::Arguments { + use crate::arguments::Arguments; + + let mut arguments = <$B as crate::database::Database>::Arguments::default(); + + let binds = 0 $(+ { $idx; 1 } )+; + let bytes = 0 $(+ crate::encode::Encode::size_hint(&self.$idx))+; + + arguments.reserve(binds, bytes); + + $(crate::arguments::Arguments::bind(&mut arguments, self.$idx);)+ + + arguments + } + } + }; +} + +#[allow(unused)] +macro_rules! 
impl_into_arguments_for_database { + ($B:ident) => { + impl crate::arguments::IntoArguments<$B> for () + { + #[inline] + fn into_arguments(self) -> <$B as crate::database::Database>::Arguments { + Default::default() + } + } + + impl_into_arguments!($B: + (0) -> T1; + ); + + impl_into_arguments!($B: + (0) -> T1; + (1) -> T2; + ); + + impl_into_arguments!($B: + (0) -> T1; + (1) -> T2; + (2) -> T3; + ); + + impl_into_arguments!($B: + (0) -> T1; + (1) -> T2; + (2) -> T3; + (3) -> T4; + ); + + impl_into_arguments!($B: + (0) -> T1; + (1) -> T2; + (2) -> T3; + (3) -> T4; + (4) -> T5; + ); + + impl_into_arguments!($B: + (0) -> T1; + (1) -> T2; + (2) -> T3; + (3) -> T4; + (4) -> T5; + (5) -> T6; + ); + + impl_into_arguments!($B: + (0) -> T1; + (1) -> T2; + (2) -> T3; + (3) -> T4; + (4) -> T5; + (5) -> T6; + (6) -> T7; + ); + + impl_into_arguments!($B: + (0) -> T1; + (1) -> T2; + (2) -> T3; + (3) -> T4; + (4) -> T5; + (5) -> T6; + (6) -> T7; + (7) -> T8; + ); + + impl_into_arguments!($B: + (0) -> T1; + (1) -> T2; + (2) -> T3; + (3) -> T4; + (4) -> T5; + (5) -> T6; + (6) -> T7; + (7) -> T8; + (8) -> T9; + ); + } +} diff --git a/sqlx-core/src/backend.rs b/sqlx-core/src/backend.rs deleted file mode 100644 index d83d7251..00000000 --- a/sqlx-core/src/backend.rs +++ /dev/null @@ -1,28 +0,0 @@ -use crate::{ - describe::Describe, executor::Executor, params::QueryParameters, row::Row, - types::HasTypeMetadata, -}; -use futures_core::future::BoxFuture; - -/// A database backend. -/// -/// Represents a connection to the database and further provides auxillary but -/// important related traits as associated types. -/// -/// This trait is not intended to be used directly. -pub trait Backend: HasTypeMetadata + Send + Sync + Sized + 'static { - type Connection: crate::Connection; - - /// The concrete `QueryParameters` implementation for this backend. - type QueryParameters: QueryParameters; - - /// The concrete `Row` implementation for this backend. - type Row: Row; - - /// The identifier for tables; in Postgres this is an `oid` while - /// in MySQL/MariaDB this is the qualified name of the table. - type TableIdent; - - /// Establish a new connection to the database server. - fn connect(url: &str) -> BoxFuture<'static, crate::Result>; -} diff --git a/sqlx-core/src/cache.rs b/sqlx-core/src/cache.rs index f7420819..0920df99 100644 --- a/sqlx-core/src/cache.rs +++ b/sqlx-core/src/cache.rs @@ -1,6 +1,6 @@ -use std::collections::hash_map::{HashMap, Entry}; -use std::cmp::Ordering; -use futures_core::Future; +use std::collections::HashMap; +use std::hash::Hash; +use std::sync::Arc; // TODO: figure out a cache eviction strategy // we currently naively cache all prepared statements which could live-leak memory @@ -11,44 +11,39 @@ use futures_core::Future; /// Per-connection prepared statement cache. 
pub struct StatementCache { - statements: HashMap + statements: HashMap, + columns: HashMap, usize>>>, } -impl StatementCache { +impl StatementCache +where + Id: Eq + Hash, +{ pub fn new() -> Self { StatementCache { statements: HashMap::with_capacity(10), + columns: HashMap::with_capacity(10), } } - #[cfg(feature = "mysql")] - pub async fn get_or_compute<'a, E, Fut>(&'a mut self, query: &str, compute: impl FnOnce() -> Fut) - -> Result<&'a Id, E> - where - Fut: Future> - { - match self.statements.entry(query.to_string()) { - Entry::Occupied(occupied) => Ok(occupied.into_mut()), - Entry::Vacant(vacant) => { - Ok(vacant.insert(compute().await?)) - } - } + pub fn has_columns(&self, id: Id) -> bool { + self.columns.contains_key(&id) } - // for Postgres so it can return the synthetic statement name instead of formatting twice - #[cfg(feature = "postgres")] - pub async fn map_or_compute(&mut self, query: &str, map: impl FnOnce(&Id) -> R, compute: impl FnOnce() -> Fut) - -> Result - where - Fut: Future> { + pub fn get(&self, query: &str) -> Option<&Id> { + self.statements.get(query) + } - match self.statements.entry(query.to_string()) { - Entry::Occupied(occupied) => Ok(map(occupied.get())), - Entry::Vacant(vacant) => { - let (id, ret) = compute().await?; - vacant.insert(id); - Ok(ret) - } - } + // It is a logical error to call this without first calling [put_columns] + pub fn get_columns(&self, id: Id) -> Arc, usize>> { + Arc::clone(&self.columns[&id]) + } + + pub fn put(&mut self, query: String, id: Id) { + self.statements.insert(query, id); + } + + pub fn put_columns(&mut self, id: Id, columns: HashMap, usize>) { + self.columns.insert(id, Arc::new(columns)); } } diff --git a/sqlx-core/src/connection.rs b/sqlx-core/src/connection.rs index 78d1ea7a..266791ca 100644 --- a/sqlx-core/src/connection.rs +++ b/sqlx-core/src/connection.rs @@ -1,7 +1,25 @@ +use crate::executor::Executor; +use crate::url::Url; use futures_core::future::BoxFuture; -use crate::Executor; +use futures_util::TryFutureExt; +use std::convert::TryInto; -pub trait Connection: Executor + Sized { - /// Gracefully close the connection. +/// Represents a single database connection rather than a pool of database connections. +/// +/// Prefer running queries from [Pool] unless there is a specific need for a single, continuous +/// connection. +pub trait Connection: Executor + Send + 'static { + /// Establish a new database connection. + fn open(url: T) -> BoxFuture<'static, crate::Result> + where + T: TryInto, + Self: Sized; + + /// Close this database connection. fn close(self) -> BoxFuture<'static, crate::Result<()>>; + + /// Verifies a connection to the database is still alive. + fn ping(&mut self) -> BoxFuture> { + Box::pin(self.execute("SELECT 1", Default::default()).map_ok(|_| ())) + } } diff --git a/sqlx-core/src/database.rs b/sqlx-core/src/database.rs new file mode 100644 index 00000000..878db648 --- /dev/null +++ b/sqlx-core/src/database.rs @@ -0,0 +1,19 @@ +use crate::arguments::Arguments; +use crate::connection::Connection; +use crate::row::Row; +use crate::types::HasTypeMetadata; + +/// A database driver. +/// +/// This trait encapsulates a complete driver implementation to a specific +/// database (e.g., MySQL, Postgres). +pub trait Database: HasTypeMetadata + 'static { + /// The concrete `Connection` implementation for this database. + type Connection: Connection; + + /// The concrete `Arguments` implementation for this database. + type Arguments: Arguments; + + /// The concrete `Row` implementation for this database. 
+ type Row: Row; +} diff --git a/sqlx-core/src/decode.rs b/sqlx-core/src/decode.rs index a9efaf19..a46e3aa4 100644 --- a/sqlx-core/src/decode.rs +++ b/sqlx-core/src/decode.rs @@ -1,18 +1,86 @@ -//! Types and traits related to deserializing values from the database. -use crate::{backend::Backend, types::HasSqlType}; +//! Types and traits for decoding values from the database. -// TODO: Allow decode to return an error (that can be unified) +use std::error::Error as StdError; +use std::fmt::{self, Display}; -pub trait Decode { - fn decode(raw: Option<&[u8]>) -> Self; +use crate::database::Database; +use crate::types::HasSqlType; + +pub enum DecodeError { + /// An unexpected `NULL` was encountered while decoding. + UnexpectedNull, + + Message(Box), + + Other(Box), +} + +/// Decode a single value from the database. +pub trait Decode: Sized +where + DB: Database + ?Sized, +{ + fn decode(raw: &[u8]) -> Result; + + /// Creates a new value of this type from a `NULL` SQL value. + /// + /// The default implementation returns [DecodeError::UnexpectedNull]. + fn decode_null() -> Result { + return Err(DecodeError::UnexpectedNull); + } + + fn decode_nullable(raw: Option<&[u8]>) -> Result { + if let Some(raw) = raw { + Self::decode(raw) + } else { + Self::decode_null() + } + } } impl Decode for Option where - DB: Backend + HasSqlType, + DB: Database + HasSqlType, T: Decode, { - fn decode(raw: Option<&[u8]>) -> Self { - Some(T::decode(Some(raw?))) + fn decode(buf: &[u8]) -> Result { + T::decode(buf).map(Some) + } + + fn decode_null() -> Result { + Ok(None) + } +} + +impl fmt::Debug for DecodeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("DecodeError(")?; + + match self { + DecodeError::UnexpectedNull => write!(f, "unexpected null for non-null column")?, + DecodeError::Message(err) => write!(f, "{}", err)?, + DecodeError::Other(err) => write!(f, "{:?}", err)?, + } + + f.write_str(")") + } +} + +impl fmt::Display for DecodeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + DecodeError::UnexpectedNull => f.write_str("unexpected null for non-null column"), + DecodeError::Message(err) => write!(f, "{}", err), + DecodeError::Other(err) => write!(f, "{}", err), + } + } +} + +impl From for DecodeError +where + E: StdError + Send + Sync + 'static, +{ + fn from(err: E) -> DecodeError { + DecodeError::Other(Box::new(err)) } } diff --git a/sqlx-core/src/describe.rs b/sqlx-core/src/describe.rs index 92b5eb10..dd8ea043 100644 --- a/sqlx-core/src/describe.rs +++ b/sqlx-core/src/describe.rs @@ -1,46 +1,59 @@ -use crate::Backend; +//! Types for returning SQL type information about queries. use crate::types::HasTypeMetadata; +use crate::Database; +use std::fmt::{self, Debug}; -use std::fmt; +/// The return type of [Executor::describe]. +pub struct Describe +where + DB: Database + ?Sized, +{ + /// The expected types for the parameters of the query. + pub param_types: Box<[::TypeId]>, -/// The result of running prepare + describe for the given backend. -pub struct Describe { - /// The expected type IDs of bind parameters. - pub param_types: Vec<::TypeId>, - /// - pub result_fields: Vec>, - pub(crate) _backcompat: (), + /// The type and table information, if any for the results of the query. 
+ pub result_columns: Box<[Column]>, + + // TODO: Remove and use #[non_exhaustive] when we can + pub(crate) _non_exhaustive: (), } -impl fmt::Debug for Describe +impl Debug for Describe where - ::TypeId: fmt::Debug, - ResultField: fmt::Debug, + DB: Database, + ::TypeId: Debug, + Column: Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Describe") .field("param_types", &self.param_types) - .field("result_fields", &self.result_fields) + .field("result_columns", &self.result_columns) .finish() } } -pub struct ResultField { - pub name: Option, - pub table_id: Option<::TableIdent>, - /// The type ID of this result column. +/// A single column of a result set. +pub struct Column +where + DB: Database + ?Sized, +{ + pub name: Option>, + pub table_id: Option<::TableId>, pub type_id: ::TypeId, - pub(crate) _backcompat: (), + + // TODO: Remove and use #[non_exhaustive] when we can + pub(crate) _non_exhaustive: (), } -impl fmt::Debug for ResultField +impl Debug for Column where - ::TableIdent: fmt::Debug, - ::TypeId: fmt::Debug, + DB: Database + ?Sized, + ::TableId: Debug, + ::TypeId: Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("ResultField") + f.debug_struct("Column") .field("name", &self.name) .field("table_id", &self.table_id) .field("type_id", &self.type_id) diff --git a/sqlx-core/src/encode.rs b/sqlx-core/src/encode.rs index 0367f8c5..b3ab8cad 100644 --- a/sqlx-core/src/encode.rs +++ b/sqlx-core/src/encode.rs @@ -1,72 +1,82 @@ -//! Types and traits related to serializing values for the database. -use crate::{backend::Backend, types::HasSqlType}; +//! Types and traits for encoding values to the database. +use crate::database::Database; +use crate::types::HasSqlType; use std::mem; -/// Annotates the result of [Encode] to differentiate between an empty value and a null value. +/// The return type of [Encode::encode]. pub enum IsNull { - /// The value was null (and no data was written to the buffer). + /// The value is null; no data was written. Yes, - /// The value was not null. + /// The value is not null. /// - /// This does not necessarily mean that any data was written to the buffer. + /// This does not mean that data was written. No, } -/// Serializes a single value to be sent to the database. -/// -/// The data must be written to the buffer in the expected format -/// for the given backend. -/// -/// When possible, implementations of this trait should prefer using an -/// existing implementation, rather than writing to `buf` directly. -pub trait Encode { - /// Writes the value of `self` into `buf` as the expected format - /// for the given backend. - /// - /// The return value indicates if this value should be represented as `NULL`. - /// If this is the case, implementations **must not** write anything to `out`. - fn encode(&self, buf: &mut Vec) -> IsNull; +/// Encode a single value to be sent to the database. +pub trait Encode +where + DB: Database + ?Sized, +{ + /// Writes the value of `self` into `buf` in the expected format for the database. + fn encode(&self, buf: &mut Vec); + + fn encode_nullable(&self, buf: &mut Vec) -> IsNull { + self.encode(buf); + + IsNull::No + } - /// Calculate the number of bytes this type will use when encoded. fn size_hint(&self) -> usize { mem::size_of_val(self) } } -/// [Encode] is implemented for `Option` where `T` implements `Encode`. An `Option` -/// represents a nullable SQL value. 
-impl Encode for Option -where - DB: Backend + HasSqlType, - T: Encode, -{ - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - if let Some(self_) = self { - self_.encode(buf) - } else { - IsNull::Yes - } - } - - fn size_hint(&self) -> usize { - if self.is_some() { mem::size_of::() } else { 0 } - } -} - impl Encode for &'_ T where - DB: Backend + HasSqlType, + DB: Database + HasSqlType, T: Encode, { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { + fn encode(&self, buf: &mut Vec) { (*self).encode(buf) } + fn encode_nullable(&self, buf: &mut Vec) -> IsNull { + (*self).encode_nullable(buf) + } + fn size_hint(&self) -> usize { (*self).size_hint() } } + +impl Encode for Option +where + DB: Database + HasSqlType, + T: Encode, +{ + fn encode(&self, buf: &mut Vec) { + // Forward to [encode_nullable] and ignore the result + let _ = self.encode_nullable(buf); + } + + fn encode_nullable(&self, buf: &mut Vec) -> IsNull { + if let Some(self_) = self { + self_.encode(buf); + + IsNull::No + } else { + IsNull::Yes + } + } + + fn size_hint(&self) -> usize { + if self.is_some() { + mem::size_of::() + } else { + 0 + } + } +} diff --git a/sqlx-core/src/error.rs b/sqlx-core/src/error.rs index 08c59e53..8a458c94 100644 --- a/sqlx-core/src/error.rs +++ b/sqlx-core/src/error.rs @@ -1,49 +1,51 @@ -use std::{ - error::Error as StdError, - fmt::{self, Debug, Display}, - io, -}; +//! Error and Result types. -use async_std::future::TimeoutError; +use crate::decode::DecodeError; +use std::error::Error as StdError; +use std::fmt::{self, Debug, Display}; +use std::io; -/// A convenient Result instantiation appropriate for SQLx. -pub type Result = std::result::Result; +/// A specialized `Result` type for SQLx. +pub type Result = std::result::Result; /// A generic error that represents all the ways a method can fail inside of SQLx. #[derive(Debug)] pub enum Error { - /// Error communicating with the database backend. - /// - /// Some reasons for this to be caused: - /// - /// - [io::ErrorKind::ConnectionRefused] - Database backend is most likely behind a firewall. - /// - /// - [io::ErrorKind::ConnectionReset] - Database backend dropped the client connection (perhaps from an administrator action). + /// Error communicating with the database. Io(io::Error), - /// An error was returned by the database backend. - Database(Box), + /// Connection URL was malformed. + UrlParse(url::ParseError), - /// No rows were returned by a query expected to return at least one row. + /// An error was returned by the database. + Database(Box), + + /// No rows were returned by a query that expected to return at least one row. NotFound, - /// More than one row was returned by a query expected to return exactly one row. + /// More than one row was returned by a query that expected to return exactly one row. FoundMoreThanOne, - /// Unexpected or invalid data was encountered. This would indicate that we received data that we were not - /// expecting or it was in a format we did not understand. This generally means either there is a programming error in a SQLx driver or - /// something with the connection or the database backend itself is corrupted. + /// Column was not found in Row during [Row::try_get]. + ColumnNotFound(Box), + + /// Unexpected or invalid data was encountered. This would indicate that we received + /// data that we were not expecting or it was in a format we did not understand. 
This + /// generally means either there is a programming error in a SQLx driver or + /// something with the connection or the database database itself is corrupted. /// /// Context is provided by the included error message. Protocol(Box), - /// A `Pool::acquire()` timed out due to connections not becoming available or + /// A [Pool::acquire] timed out due to connections not becoming available or /// because another task encountered too many errors while trying to open a new connection. - TimedOut, + PoolTimedOut, - /// `Pool::close()` was called while we were waiting in `Pool::acquire()`. + /// [Pool::close] was called while we were waiting in [Pool::acquire]. PoolClosed, + Decode(DecodeError), + // TODO: Remove and replace with `#[non_exhaustive]` when possible #[doc(hidden)] __Nonexhaustive, @@ -54,6 +56,10 @@ impl StdError for Error { match self { Error::Io(error) => Some(error), + Error::UrlParse(error) => Some(error), + + Error::Decode(DecodeError::Other(error)) => Some(&**error), + _ => None, } } @@ -64,17 +70,25 @@ impl Display for Error { match self { Error::Io(error) => write!(f, "{}", error), + Error::UrlParse(error) => write!(f, "{}", error), + + Error::Decode(error) => write!(f, "{}", error), + Error::Database(error) => Display::fmt(error, f), Error::NotFound => f.write_str("found no rows when we expected at least one"), + Error::ColumnNotFound(ref name) => { + write!(f, "no column found with the name {:?}", name) + } + Error::FoundMoreThanOne => { f.write_str("found more than one row when we expected exactly one") } Error::Protocol(ref err) => f.write_str(err), - Error::TimedOut => f.write_str("timed out while waiting for an open connection"), + Error::PoolTimedOut => f.write_str("timed out while waiting for an open connection"), Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"), @@ -90,9 +104,24 @@ impl From for Error { } } -impl From for Error { - fn from(_: TimeoutError) -> Self { - Error::TimedOut +impl From for Error { + #[inline] + fn from(err: io::ErrorKind) -> Self { + Error::Io(err.into()) + } +} + +impl From for Error { + #[inline] + fn from(err: DecodeError) -> Self { + Error::Decode(err) + } +} + +impl From for Error { + #[inline] + fn from(err: url::ParseError) -> Self { + Error::UrlParse(err) } } @@ -113,9 +142,20 @@ where } } -/// An error that was returned by the database backend. +/// An error that was returned by the database. pub trait DatabaseError: Display + Debug + Send + Sync { + /// The primary, human-readable error message. fn message(&self) -> &str; + + fn details(&self) -> Option<&str>; + + fn hint(&self) -> Option<&str>; + + fn table_name(&self) -> Option<&str>; + + fn column_name(&self) -> Option<&str>; + + fn constraint_name(&self) -> Option<&str>; } /// Used by the `protocol_error!()` macro for a lazily evaluated conversion to @@ -124,6 +164,7 @@ pub(crate) struct ProtocolError<'a> { pub args: fmt::Arguments<'a>, } +#[cfg(any(feature = "mysql", feature = "postgres"))] macro_rules! 
protocol_err ( ($($args:tt)*) => { $crate::error::ProtocolError { args: format_args!($($args)*) } diff --git a/sqlx-core/src/executor.rs b/sqlx-core/src/executor.rs index 3e137432..0f50dac3 100644 --- a/sqlx-core/src/executor.rs +++ b/sqlx-core/src/executor.rs @@ -1,81 +1,60 @@ -use crate::{ - backend::Backend, - describe::Describe, - error::Error, - params::{IntoQueryParameters, QueryParameters}, - row::FromRow, -}; -use futures_core::{future::BoxFuture, stream::BoxStream}; -use futures_util::{TryFutureExt, TryStreamExt}; +use crate::database::Database; +use crate::describe::Describe; +use futures_core::future::BoxFuture; +use futures_core::stream::BoxStream; +use futures_util::TryStreamExt; -pub trait Executor: Send { - type Backend: Backend; +/// Encapsulates query execution on the database. +/// +/// Implemented by [Pool], [Connection], and [Transaction]. +pub trait Executor { + type Database: Database + ?Sized; - /// Verifies a connection to the database is still alive. - fn ping<'e>(&'e mut self) -> BoxFuture<'e, crate::Result<()>> { - Box::pin( - self.execute( - "SELECT 1", - Default::default(), - ) - .map_ok(|_| ()), - ) - } + /// Send a raw SQL command to the database. + /// + /// This is intended for queries that cannot or should not be prepared (ex. `BEGIN`). + /// + /// Does not support fetching results. + fn send<'e, 'q: 'e>(&'e mut self, command: &'q str) -> BoxFuture<'e, crate::Result<()>>; + /// Execute the query, returning the number of rows affected. fn execute<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: ::QueryParameters, + args: ::Arguments, ) -> BoxFuture<'e, crate::Result>; - fn fetch<'e, 'q: 'e, T: 'e>( + /// Executes the query and returns a [Stream] of [Row]. + fn fetch<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: ::QueryParameters, - ) -> BoxStream<'e, crate::Result> - where - T: FromRow + Send + Unpin; + args: ::Arguments, + ) -> BoxStream<'e, crate::Result<::Row>>; - fn fetch_all<'e, 'q: 'e, T: 'e>( + /// Executes the query and returns up to resulting record. + /// * `Error::FoundMoreThanOne` will be returned if the query produced more than 1 row. + fn fetch_optional<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: ::QueryParameters, - ) -> BoxFuture<'e, crate::Result>> - where - T: FromRow + Send + Unpin, - { - Box::pin(self.fetch(query, params).try_collect()) + args: ::Arguments, + ) -> BoxFuture<'e, crate::Result::Row>>> { + let mut s = self.fetch(query, args); + Box::pin(async move { s.try_next().await }) } - fn fetch_optional<'e, 'q: 'e, T: 'e>( + /// Execute the query and return at most one resulting record. + fn fetch_one<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: ::QueryParameters, - ) -> BoxFuture<'e, crate::Result>> - where - T: FromRow + Send; - - fn fetch_one<'e, 'q: 'e, T: 'e>( - &'e mut self, - query: &'q str, - params: ::QueryParameters, - ) -> BoxFuture<'e, crate::Result> - where - T: FromRow + Send, - { - let fut = self.fetch_optional(query, params); - Box::pin(async move { fut.await?.ok_or(Error::NotFound) }) + args: ::Arguments, + ) -> BoxFuture<'e, crate::Result<::Row>> { + let mut s = self.fetch(query, args); + Box::pin(async move { s.try_next().await?.ok_or(crate::Error::NotFound) }) } - /// Analyze the SQL statement and report the inferred bind parameter types and returned - /// columns. + /// Analyze the SQL query and report the inferred bind parameter types and returned columns. 
fn describe<'e, 'q: 'e>( &'e mut self, query: &'q str, - ) -> BoxFuture<'e, crate::Result>>; - - /// Send a semicolon-delimited series of arbitrary SQL commands to the server. - /// - /// Does not support fetching results. - fn send<'e, 'q: 'e>(&'e mut self, commands: &'q str) -> BoxFuture<'e, crate::Result<()>>; + ) -> BoxFuture<'e, crate::Result>>; } diff --git a/sqlx-core/src/io/buf.rs b/sqlx-core/src/io/buf.rs index 424f7c07..e80fe446 100644 --- a/sqlx-core/src/io/buf.rs +++ b/sqlx-core/src/io/buf.rs @@ -5,6 +5,8 @@ use std::{io, slice, str}; pub trait Buf { fn advance(&mut self, cnt: usize); + fn get_uint(&mut self, n: usize) -> io::Result; + fn get_u8(&mut self) -> io::Result; fn get_u16(&mut self) -> io::Result; @@ -22,6 +24,8 @@ pub trait Buf { fn get_str(&mut self, len: usize) -> io::Result<&str>; fn get_str_nul(&mut self) -> io::Result<&str>; + + fn get_bytes(&mut self, len: usize) -> io::Result<&[u8]>; } impl<'a> Buf for &'a [u8] { @@ -29,9 +33,15 @@ impl<'a> Buf for &'a [u8] { *self = &self[cnt..]; } + fn get_uint(&mut self, n: usize) -> io::Result { + let val = T::read_uint(*self, n); + self.advance(n); + + Ok(val) + } + fn get_u8(&mut self) -> io::Result { let val = self[0]; - self.advance(1); Ok(val) @@ -51,16 +61,16 @@ impl<'a> Buf for &'a [u8] { Ok(val) } - fn get_i32(&mut self) -> io::Result { - let val = T::read_i32(*self); - self.advance(4); + fn get_u24(&mut self) -> io::Result { + let val = T::read_u24(*self); + self.advance(3); Ok(val) } - fn get_u24(&mut self) -> io::Result { - let val = T::read_u24(*self); - self.advance(3); + fn get_i32(&mut self) -> io::Result { + let val = T::read_i32(*self); + self.advance(4); Ok(val) } @@ -80,15 +90,8 @@ impl<'a> Buf for &'a [u8] { } fn get_str(&mut self, len: usize) -> io::Result<&str> { - let buf = &self[..len]; - - self.advance(len); - - if cfg!(debug_asserts) { - str::from_utf8(buf).map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) - } else { - Ok(unsafe { str::from_utf8_unchecked(buf) }) - } + str::from_utf8(self.get_bytes(len)?) 
+ .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) } fn get_str_nul(&mut self) -> io::Result<&str> { @@ -97,6 +100,13 @@ impl<'a> Buf for &'a [u8] { Ok(s) } + + fn get_bytes(&mut self, len: usize) -> io::Result<&[u8]> { + let buf = &self[..len]; + self.advance(len); + + Ok(buf) + } } pub trait ToBuf { @@ -104,9 +114,13 @@ pub trait ToBuf { } impl ToBuf for [u8] { - fn to_buf(&self) -> &[u8] { self } + fn to_buf(&self) -> &[u8] { + self + } } impl ToBuf for u8 { - fn to_buf(&self) -> &[u8] { slice::from_ref(self) } + fn to_buf(&self) -> &[u8] { + slice::from_ref(self) + } } diff --git a/sqlx-core/src/io/buf_mut.rs b/sqlx-core/src/io/buf_mut.rs index 486e7c89..8ebe8891 100644 --- a/sqlx-core/src/io/buf_mut.rs +++ b/sqlx-core/src/io/buf_mut.rs @@ -18,6 +18,10 @@ pub trait BufMut { fn put_u64(&mut self, val: u64); + fn put_bytes(&mut self, val: &[u8]); + + fn put_str(&mut self, val: &str); + fn put_str_nul(&mut self, val: &str); } @@ -30,18 +34,18 @@ impl BufMut for Vec { self.push(val); } - fn put_i16(&mut self, val: i16) { - let mut buf = [0; 2]; - T::write_i16(&mut buf, val); - self.extend_from_slice(&buf); - } - fn put_u16(&mut self, val: u16) { let mut buf = [0; 2]; T::write_u16(&mut buf, val); self.extend_from_slice(&buf); } + fn put_i16(&mut self, val: i16) { + let mut buf = [0; 2]; + T::write_i16(&mut buf, val); + self.extend_from_slice(&buf); + } + fn put_u24(&mut self, val: u32) { let mut buf = [0; 3]; T::write_u24(&mut buf, val); @@ -66,8 +70,16 @@ impl BufMut for Vec { self.extend_from_slice(&buf); } - fn put_str_nul(&mut self, val: &str) { + fn put_bytes(&mut self, val: &[u8]) { + self.extend_from_slice(val); + } + + fn put_str(&mut self, val: &str) { self.extend_from_slice(val.as_bytes()); + } + + fn put_str_nul(&mut self, val: &str) { + self.put_str(val); self.push(0); } } diff --git a/sqlx-core/src/io/buf_stream.rs b/sqlx-core/src/io/buf_stream.rs index 04e74407..f5607925 100644 --- a/sqlx-core/src/io/buf_stream.rs +++ b/sqlx-core/src/io/buf_stream.rs @@ -2,7 +2,6 @@ use async_std::io::{ prelude::{ReadExt, WriteExt}, Read, Write, }; -use std::mem::MaybeUninit; use std::io; pub struct BufStream { @@ -66,7 +65,9 @@ where // If we have enough bytes in our read buffer, // return immediately if self.rbuf_windex >= (self.rbuf_rindex + cnt) { - return Ok(Some(&self.rbuf[self.rbuf_rindex..(self.rbuf_rindex + cnt)])); + let buf = &self.rbuf[self.rbuf_rindex..(self.rbuf_rindex + cnt)]; + + return Ok(Some(buf)); } // If we are out of space to write to in the read buffer, diff --git a/sqlx-core/src/io/mod.rs b/sqlx-core/src/io/mod.rs index 922a1af0..2995d857 100644 --- a/sqlx-core/src/io/mod.rs +++ b/sqlx-core/src/io/mod.rs @@ -5,4 +5,24 @@ mod buf; mod buf_mut; mod byte_str; -pub use self::{buf::{Buf, ToBuf}, buf_mut::BufMut, buf_stream::BufStream, byte_str::ByteStr}; +pub use self::{ + buf::{Buf, ToBuf}, + buf_mut::BufMut, + buf_stream::BufStream, + byte_str::ByteStr, +}; + +#[cfg(test)] +#[doc(hidden)] +macro_rules! 
bytes ( + ($($b: expr), *) => {{ + use $crate::io::ToBuf; + + let mut buf = Vec::new(); + $( + buf.extend_from_slice($b.to_buf()); + )* + + buf + }} +); diff --git a/sqlx-core/src/lib.rs b/sqlx-core/src/lib.rs index 9691d3c3..6d28af68 100644 --- a/sqlx-core/src/lib.rs +++ b/sqlx-core/src/lib.rs @@ -1,73 +1,60 @@ #![recursion_limit = "256"] -#![allow(unused_imports)] - -#[macro_use] -mod macros; +#![deny(unsafe_code)] #[macro_use] pub mod error; -#[cfg(any(feature = "postgres", feature = "mysql"))] +#[cfg(any(feature = "mysql", feature = "postgres"))] #[macro_use] mod io; -mod backend; -pub mod decode; - -#[cfg(any(feature = "postgres", feature = "mysql"))] -mod url; - -#[macro_use] -mod row; - -mod connection; -mod executor; -mod pool; - -#[macro_use] -pub mod params; - -pub mod encode; -mod query; -pub mod types; - -mod describe; - +#[cfg(any(feature = "mysql", feature = "postgres"))] mod cache; -#[doc(inline)] -pub use self::{ - backend::Backend, - connection::Connection, - decode::Decode, - encode::Encode, - error::{Error, Result}, - executor::Executor, - pool::Pool, - query::{query, Query}, - row::{FromRow, Row}, - types::HasSqlType, -}; +mod connection; +mod database; +mod executor; +mod query; +mod url; -#[doc(hidden)] -pub use types::HasTypeMetadata; +pub mod arguments; +pub mod decode; +pub mod describe; +pub mod encode; +pub mod pool; +pub mod types; -#[doc(hidden)] -pub use describe::{Describe, ResultField}; +#[macro_use] +pub mod row; #[cfg(feature = "mysql")] pub mod mysql; +#[cfg(feature = "postgres")] +pub mod postgres; + +pub use database::Database; + +#[doc(inline)] +pub use error::{Error, Result}; + +pub use connection::Connection; +pub use executor::Executor; +pub use query::{query, Query}; + +#[doc(inline)] +pub use pool::Pool; + +#[doc(inline)] +pub use row::{FromRow, Row}; + #[cfg(feature = "mysql")] #[doc(inline)] pub use mysql::MySql; -#[cfg(feature = "postgres")] -pub mod postgres; - #[cfg(feature = "postgres")] #[doc(inline)] -pub use self::postgres::Postgres; +pub use postgres::Postgres; use std::marker::PhantomData; diff --git a/sqlx-core/src/macros.rs b/sqlx-core/src/macros.rs deleted file mode 100644 index e13567c7..00000000 --- a/sqlx-core/src/macros.rs +++ /dev/null @@ -1,14 +0,0 @@ -#[cfg(test)] -#[doc(hidden)] -#[macro_export] -macro_rules! 
__bytes_builder ( - ($($b: expr), *) => {{ - use $crate::io::ToBuf; - - let mut buf = Vec::new(); - $( - buf.extend_from_slice($b.to_buf()); - )* - buf - }} -); diff --git a/sqlx-core/src/mysql/arguments.rs b/sqlx-core/src/mysql/arguments.rs new file mode 100644 index 00000000..3cb1bd42 --- /dev/null +++ b/sqlx-core/src/mysql/arguments.rs @@ -0,0 +1,51 @@ +use crate::arguments::Arguments; +use crate::encode::{Encode, IsNull}; +use crate::mysql::types::MySqlTypeMetadata; +use crate::mysql::MySql; +use crate::types::HasSqlType; + +#[derive(Default)] +pub struct MySqlArguments { + pub(crate) param_types: Vec, + pub(crate) params: Vec, + pub(crate) null_bitmap: Vec, +} + +impl Arguments for MySqlArguments { + type Database = MySql; + + fn len(&self) -> usize { + self.param_types.len() + } + + fn size(&self) -> usize { + self.params.len() + } + + fn reserve(&mut self, len: usize, size: usize) { + self.param_types.reserve(len); + self.params.reserve(size); + + // ensure we have enough size in the bitmap to hold at least `len` extra bits + // the second `& 7` gives us 0 spare bits when param_types.len() is a multiple of 8 + let spare_bits = (8 - (self.param_types.len()) & 7) & 7; + // ensure that if there are no spare bits left, `len = 1` reserves another byte + self.null_bitmap.reserve((len + 7 - spare_bits) / 8); + } + + fn add(&mut self, value: T) + where + Self::Database: HasSqlType, + T: Encode, + { + let metadata = >::metadata(); + let index = self.param_types.len(); + + self.param_types.push(metadata); + self.null_bitmap.resize((index / 8) + 1, 0); + + if let IsNull::Yes = value.encode_nullable(&mut self.params) { + self.null_bitmap[index / 8] &= (1 << index % 8) as u8; + } + } +} diff --git a/sqlx-core/src/mysql/backend.rs b/sqlx-core/src/mysql/backend.rs deleted file mode 100644 index db325074..00000000 --- a/sqlx-core/src/mysql/backend.rs +++ /dev/null @@ -1,34 +0,0 @@ -use futures_core::{future::BoxFuture, stream::BoxStream}; - -use crate::{ - backend::Backend, - describe::{Describe, ResultField}, - mysql::{protocol::ResultRow, query::MySqlDbParameters}, - url::Url, -}; - -use super::{Connection, RawConnection}; -use super::MySql; -use crate::cache::StatementCache; - -impl Backend for MySql { - type Connection = Connection; - type QueryParameters = MySqlDbParameters; - type Row = ResultRow; - type TableIdent = String; - - fn connect(url: &str) -> BoxFuture<'static, crate::Result> { - let url = Url::parse(url); - - Box::pin(async move { - let url = url?; - Ok(Connection { - conn: RawConnection::open(url).await?, - cache: StatementCache::new(), - }) - }) - } -} - -impl_from_row_for_backend!(MySql, ResultRow); -impl_into_query_parameters_for_backend!(MySql); diff --git a/sqlx-core/src/mysql/connection.rs b/sqlx-core/src/mysql/connection.rs index f7c35e1b..3bb6c0c5 100644 --- a/sqlx-core/src/mysql/connection.rs +++ b/sqlx-core/src/mysql/connection.rs @@ -1,121 +1,41 @@ -use std::{ - io, - net::{IpAddr, SocketAddr}, -}; -use std::net::Shutdown; +use std::convert::TryInto; +use std::io; -use async_std::net::TcpStream; +use async_std::net::{Shutdown, TcpStream}; use byteorder::{ByteOrder, LittleEndian}; -use futures_util::AsyncWriteExt; +use futures_core::future::BoxFuture; -use crate::{Describe, Error, io::{Buf, BufMut, BufStream}, mysql::{ - protocol::{ - Capabilities, ColumnCountPacket, ColumnDefinitionPacket, ComPing, ComQuit, - ComSetOption, ComStmtExecute, - ComStmtPrepare, ComStmtPrepareOk, Encode, EofPacket, ErrPacket, OkPacket, - ResultRow, SetOptionOptions, StmtExecFlag, - }, - 
query::MySqlDbParameters, -}, Result, ResultField, url::Url}; -use crate::mysql::MySql; -use crate::mysql::protocol::ComQuery; +use crate::cache::StatementCache; +use crate::connection::Connection; +use crate::io::{Buf, BufMut, BufStream, ByteStr}; +use crate::mysql::error::MySqlError; +use crate::mysql::protocol::{ + Capabilities, Decode, Encode, EofPacket, ErrPacket, Handshake, HandshakeResponse, OkPacket, +}; +use crate::url::Url; -use super::establish; +pub struct MySqlConnection { + pub(super) stream: BufStream, -pub type StatementId = u32; + pub(super) capabilities: Capabilities, + + pub(super) statement_cache: StatementCache, + + rbuf: Vec, -pub struct Connection { - pub(crate) stream: BufStream, - pub(crate) rbuf: Vec, - pub(crate) capabilities: Capabilities, next_seq_no: u8, + + pub(super) ready: bool, } -impl Connection { - pub async fn open(url: Url) -> Result { - // TODO: Handle errors - let host = url.host(); - let port = url.port(3306); - - // TODO: handle errors - let host: IpAddr = host.parse().unwrap(); - let addr: SocketAddr = (host, port).into(); - - let stream = TcpStream::connect(&addr).await?; - - let mut conn = Self { - stream: BufStream::new(stream), - rbuf: Vec::with_capacity(8 * 1024), - capabilities: Capabilities::empty(), - next_seq_no: 0, - }; - - establish::establish(&mut conn, &url).await?; - - Ok(conn) - } - - pub async fn close(mut self) -> Result<()> { - // Send the quit command - - self.start_sequence(); - self.write(ComQuit); - - self.stream.flush().await?; - self.stream.stream.shutdown(Shutdown::Both)?; - - Ok(()) - } - - pub async fn ping(&mut self) -> Result<()> { - // Send the ping command and wait for (and drop) an OK packet - - self.start_sequence(); - self.write(ComPing); - - self.stream.flush().await?; - - let _ = self.receive_ok_or_err().await?; - - Ok(()) - } - - pub(crate) async fn receive(&mut self) -> Result<&[u8]> { - Ok(self - .try_receive() - .await? - .ok_or(Error::Io(io::ErrorKind::UnexpectedEof.into()))?) - } - - async fn try_receive(&mut self) -> Result> { - // Read the packet header which contains the length and the sequence number - // https://mariadb.com/kb/en/library/0-packet/#standard-packet - let mut header = ret_if_none!(self.stream.peek(4).await?); - let len = header.get_u24::()? as usize; - self.next_seq_no = header.get_u8()? + 1; - self.stream.consume(4); - - // Read the packet body and copy it into our internal buf - // We must have a separate buffer around the stream as we can't operate directly - // on bytes returend from the stream. We have compression, split, etc. to - // unpack. - let body = ret_if_none!(self.stream.peek(len).await?); - self.rbuf.clear(); - self.rbuf.extend_from_slice(body); - self.stream.consume(len); - - Ok(Some(&self.rbuf[..len])) - } - - pub(super) fn start_sequence(&mut self) { - // At the start of a command sequence we reset our understanding - // of [next_seq_no]. 
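Both the old try_receive and its replacement peel off the same 4-byte packet header before touching the payload: a 3-byte little-endian length followed by a 1-byte sequence number, which the next outgoing packet must increment. A standalone sketch of that framing, for illustration only (the parse_packet_header name is not from this patch):

// Split a raw MySQL packet header into (payload length, sequence id).
// https://mariadb.com/kb/en/library/0-packet/#standard-packet
fn parse_packet_header(header: [u8; 4]) -> (usize, u8) {
    let payload_len = header[0] as usize
        | (header[1] as usize) << 8
        | (header[2] as usize) << 16;

    (payload_len, header[3])
}

// [0x2c, 0x00, 0x00, 0x01] => a 44-byte payload carrying sequence id 1;
// the reply we send next should carry sequence id 2.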
In a sequence our initial command must be 0, followed - // by the server response that is 1, then our response to that response (if any), - // would be 2 +impl MySqlConnection { + pub(super) fn begin_command_phase(&mut self) { + // At the start of the *command phase*, the sequence ID sent from the client + // must be 0 self.next_seq_no = 0; } - pub(crate) fn write(&mut self, packet: T) { + pub(super) fn write(&mut self, packet: impl Encode + std::fmt::Debug) { let buf = self.stream.buffer_mut(); // Allocate room for the header that we write after the packet; @@ -137,19 +57,16 @@ impl Connection { // Take the last sequence number received, if any, and increment by 1 // If there was no sequence number, we only increment if we split packets header[3] = self.next_seq_no; - self.next_seq_no += 1; + self.next_seq_no = self.next_seq_no.wrapping_add(1); } - // Decode an OK packet or bubble an ERR packet as an error - // to terminate immediately - pub(crate) async fn receive_ok_or_err(&mut self) -> Result { - let capabilities = self.capabilities; - let buf = self.receive().await?; - Ok(match buf[0] { - 0xfe | 0x00 => OkPacket::decode(buf, capabilities)?, + async fn receive_ok(&mut self) -> crate::Result { + let packet = self.receive().await?; + Ok(match packet[0] { + 0xfe | 0x00 => OkPacket::decode(packet)?, 0xff => { - return ErrPacket::decode(buf)?.expect_error(); + return Err(MySqlError(ErrPacket::decode(packet)?).into()); } id => { @@ -163,185 +80,124 @@ impl Connection { }) } - async fn check_eof(&mut self) -> Result<()> { + pub(super) async fn receive_eof(&mut self) -> crate::Result<()> { // When (legacy) EOFs are enabled, the fixed number column definitions are further // terminated by an EOF packet - if !self - .capabilities - .contains(Capabilities::CLIENT_DEPRECATE_EOF) - { + if !self.capabilities.contains(Capabilities::DEPRECATE_EOF) { let _eof = EofPacket::decode(self.receive().await?)?; } Ok(()) } - async fn send_prepare<'c>( - &'c mut self, - statement: &'c str, - ) -> Result { - self.stream.flush().await?; - - self.start_sequence(); - self.write(ComStmtPrepare { statement }); - - self.stream.flush().await?; - - // COM_STMT_PREPARE returns COM_STMT_PREPARE_OK (0x00) or ERR (0xFF) - let packet = self.receive().await?; - - if packet[0] == 0xFF { - return ErrPacket::decode(packet)?.expect_error(); - } - - let ok = ComStmtPrepareOk::decode(packet)?; - - Ok(ok) + pub(super) async fn receive(&mut self) -> crate::Result<&[u8]> { + Ok(self + .try_receive() + .await? + .ok_or(io::ErrorKind::UnexpectedEof)?) } - // MySQL/Mysql responds with statement metadata for every PREPARE command - // sometimes we care, sometimes we don't - pub(super) async fn prepare_ignore_describe(&mut self, statement: &str) -> Result { - let ok = self.send_prepare(statement).await?; + pub(super) async fn try_receive(&mut self) -> crate::Result> { + self.rbuf.clear(); - if ok.params > 0 { - // Input parameters - for _ in 0..ok.params { - // TODO: Maybe do something with this data ? - let _column = ColumnDefinitionPacket::decode(self.receive().await?)?; - } + // Read the packet header which contains the length and the sequence number + // https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_basic_packets.html + // https://mariadb.com/kb/en/library/0-packet/#standard-packet + let mut header = ret_if_none!(self.stream.peek(4).await?); + let payload_len = header.get_uint::(3)? 
as usize; + self.next_seq_no = header.get_u8()?.wrapping_add(1); + self.stream.consume(4); - self.check_eof().await?; - } + // Read the packet body and copy it into our internal buf + // We must have a separate buffer around the stream as we can't operate directly + // on bytes returned from the stream. We have various kinds of payload manipulation + // that must be handled before decoding. + let mut payload = ret_if_none!(self.stream.peek(payload_len).await?); + self.rbuf.extend_from_slice(payload); + self.stream.consume(payload_len); - if ok.columns > 0 { - // Output parameters - for _ in 0..ok.columns { - // TODO: Maybe do something with this data ? - let _column = ColumnDefinitionPacket::decode(self.receive().await?)?; - } + // TODO: Implement packet compression + // TODO: Implement packet joining - self.check_eof().await?; - } - - Ok(ok.statement_id) + Ok(Some(&self.rbuf[..payload_len])) } +} - pub(super) async fn prepare_describe(&mut self, statement: &str) -> Result> { - let ok = self.send_prepare(statement).await?; +impl MySqlConnection { + // TODO: Authentication ?! + async fn open(url: crate::Result) -> crate::Result { + let url = url?; + let stream = TcpStream::connect((url.host(), url.port(3306))).await?; - let mut param_types = Vec::with_capacity(ok.params as usize); - let mut result_fields= Vec::with_capacity(ok.columns as usize); + let mut self_ = Self { + stream: BufStream::new(stream), + capabilities: Capabilities::empty(), + rbuf: Vec::with_capacity(8192), + next_seq_no: 0, + statement_cache: StatementCache::new(), + ready: true, + }; - // Input parameters - for _ in 0..ok.params { - let param = ColumnDefinitionPacket::decode(self.receive().await?)?; - param_types.push(param.field_type.0); - } + // https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_connection_phase.html + // https://mariadb.com/kb/en/connection/ - self.check_eof().await?; + // First, we receive the Handshake - // Output parameters - for _ in 0..ok.columns { - let column = ColumnDefinitionPacket::decode(self.receive().await?)?; - result_fields.push(ResultField { - name: column.column_alias.or(column.column), - table_id: column.table_alias.or(column.table), - type_id: column.field_type.0, - _backcompat: () - }); - } + let handshake_packet = self_.receive().await?; + let handshake = Handshake::decode(handshake_packet)?; - self.check_eof().await?; + // TODO: Capabilities::SECURE_CONNECTION + // TODO: Capabilities::CONNECT_ATTRS + // TODO: Capabilities::PLUGIN_AUTH + // TODO: Capabilities::PLUGIN_AUTH_LENENC_CLIENT_DATA + // TODO: Capabilities::TRANSACTIONS + // TODO: Capabilities::CLIENT_DEPRECATE_EOF + // TODO: Capabilities::COMPRESS + // TODO: Capabilities::ZSTD_COMPRESSION_ALGORITHM + let client_capabilities = Capabilities::PROTOCOL_41 + | Capabilities::IGNORE_SPACE + | Capabilities::FOUND_ROWS + | Capabilities::CONNECT_WITH_DB; - Ok(Describe { - param_types, - result_fields, - _backcompat: (), - }) - } + // Fails if [Capabilities::PROTOCOL_41] is not in [server_capabilities] + self_.capabilities = + (client_capabilities & handshake.server_capabilities) | Capabilities::PROTOCOL_41; - pub(super) async fn result_column_defs(&mut self) -> Result> { - let packet = self.receive().await?; + // Next we send the response - // A Resultset starts with a [ColumnCountPacket] which is a single field that encodes - // how many columns we can expect when fetching rows from this statement - - if packet[0] == 255 { - ErrPacket::decode(packet)?.expect_error()?; - } - - let column_count: u64 = 
ColumnCountPacket::decode(packet)?.columns; - - // Next we have a [ColumnDefinitionPacket] which verbosely explains each minute - // detail about the column in question including table, aliasing, and type - // TODO: This information was *already* returned by PREPARE .., is there a way to suppress generation - let mut columns = vec![]; - for _ in 0..column_count { - let column = ColumnDefinitionPacket::decode(self.receive().await?)?; - columns.push(column); - } - - self.check_eof().await?; - - Ok(columns) - } - - pub(super) async fn send_execute( - &mut self, - statement_id: u32, - params: MySqlDbParameters, - ) -> Result<()> { - // TODO: EXECUTE(READ_ONLY) => FETCH instead of EXECUTE(NO) - - // SEND ================ - self.start_sequence(); - self.write(ComStmtExecute { - statement_id, - params: ¶ms.params, - null: ¶ms.null_bitmap, - flags: StmtExecFlag::NO_CURSOR, - param_types: ¶ms.param_types, + self_.write(HandshakeResponse { + client_collation: 192, // utf8_unicode_ci + max_packet_size: 1024, + username: url.username().unwrap_or("root"), + // TODO: Remove the panic! + database: url.database().expect("required database"), }); - self.stream.flush().await?; - // ===================== - Ok(()) + self_.stream.flush().await?; + + let _ok = self_.receive_ok().await?; + + Ok(self_) } - async fn expect_eof_or_err(&mut self) -> crate::Result<()> { - let packet = self.receive().await?; - - match packet[0] { - 0xFE => { EofPacket::decode(packet)?; }, - 0xFF => { ErrPacket::decode(packet)?.expect_error()?; }, - _ => return Err(protocol_err!("expected EOF or ERR, got {:02X}", packet[0]).into()), - } - - Ok(()) - } - - pub(super) async fn send_raw( - &mut self, - commands: &str - ) -> Result<()> { + async fn close(mut self) -> crate::Result<()> { self.stream.flush().await?; - self.start_sequence(); - // enable multi-statement only for this query - self.write(ComSetOption { option: SetOptionOptions::MySqlOptionMultiStatementsOn }); - self.write(ComQuery { sql_statement: commands }); - self.write(ComSetOption { option: SetOptionOptions::MySqlOptionMultiStatementsOff }); - self.stream.flush().await?; - - self.expect_eof_or_err().await?; - - let packet = self.receive().await?; - - if packet[0] == 0xFF { return ErrPacket::decode(packet)?.expect_error() } - // otherwise ignore packet - - self.expect_eof_or_err().await?; + self.stream.stream.shutdown(Shutdown::Both)?; Ok(()) } } + +impl Connection for MySqlConnection { + fn open(url: T) -> BoxFuture<'static, crate::Result> + where + T: TryInto, + Self: Sized, + { + Box::pin(MySqlConnection::open(url.try_into())) + } + + fn close(self) -> BoxFuture<'static, crate::Result<()>> { + Box::pin(self.close()) + } +} diff --git a/sqlx-core/src/mysql/database.rs b/sqlx-core/src/mysql/database.rs new file mode 100644 index 00000000..ddd753ba --- /dev/null +++ b/sqlx-core/src/mysql/database.rs @@ -0,0 +1,12 @@ +use crate::Database; + +/// **MySQL** database driver. 
+pub struct MySql; + +impl Database for MySql { + type Connection = super::MySqlConnection; + + type Arguments = super::MySqlArguments; + + type Row = super::MySqlRow; +} diff --git a/sqlx-core/src/mysql/error.rs b/sqlx-core/src/mysql/error.rs index af7c5bca..0c5a00dc 100644 --- a/sqlx-core/src/mysql/error.rs +++ b/sqlx-core/src/mysql/error.rs @@ -1,25 +1,53 @@ -use crate::{error::DatabaseError, mysql::protocol::ErrorCode}; +use std::fmt::{self, Debug, Display}; -use std::fmt; +use crate::error::DatabaseError; +use crate::mysql::protocol::ErrPacket; -#[derive(Debug)] -pub struct Error { - pub code: ErrorCode, - pub message: Box, -} +pub struct MySqlError(pub(super) ErrPacket); -impl DatabaseError for Error { +impl DatabaseError for MySqlError { fn message(&self) -> &str { - &self.message + &*self.0.error_message + } + + fn details(&self) -> Option<&str> { + None + } + + fn hint(&self) -> Option<&str> { + None + } + + fn table_name(&self) -> Option<&str> { + None + } + + fn column_name(&self) -> Option<&str> { + None + } + + fn constraint_name(&self) -> Option<&str> { + None } } -impl fmt::Display for Error { +// TODO: De-duplicate these two impls with Postgres (macro?) + +impl Debug for MySqlError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "Mysql returned an error: {}; {}", - self.code, self.message - ) + f.debug_struct("DatabaseError") + .field("message", &self.message()) + .field("details", &self.details()) + .field("hint", &self.hint()) + .field("table_name", &self.table_name()) + .field("column_name", &self.column_name()) + .field("constraint_name", &self.constraint_name()) + .finish() + } +} + +impl Display for MySqlError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad(self.message()) } } diff --git a/sqlx-core/src/mysql/establish.rs b/sqlx-core/src/mysql/establish.rs deleted file mode 100644 index 1d911172..00000000 --- a/sqlx-core/src/mysql/establish.rs +++ /dev/null @@ -1,46 +0,0 @@ -use crate::{ - mysql::{ - connection::Connection, - protocol::{Capabilities, HandshakeResponsePacket, InitialHandshakePacket}, - }, - url::Url, - Result, -}; - -pub(crate) async fn establish(conn: &mut Connection, url: &Url) -> Result<()> { - let initial = InitialHandshakePacket::decode(conn.receive().await?)?; - - // TODO: Capabilities::SECURE_CONNECTION - // TODO: Capabilities::CONNECT_ATTRS - // TODO: Capabilities::PLUGIN_AUTH - // TODO: Capabilities::PLUGIN_AUTH_LENENC_CLIENT_DATA - // TODO: Capabilities::TRANSACTIONS - // TODO: Capabilities::CLIENT_DEPRECATE_EOF - // TODO?: Capabilities::CLIENT_SESSION_TRACK - let capabilities = Capabilities::CLIENT_PROTOCOL_41 | Capabilities::CONNECT_WITH_DB; - - let response = HandshakeResponsePacket { - // TODO: Find a good value for [max_packet_size] - capabilities, - max_packet_size: 1024, - client_collation: 192, // utf8_unicode_ci - username: url.username(), - database: &url.database(), - auth_data: None, - auth_plugin_name: None, - connection_attrs: &[], - }; - - // The AND between our supported capabilities and the servers' is - // what we can use so remember it on the connection - conn.capabilities = capabilities & initial.capabilities; - - conn.write(response); - conn.stream.flush().await?; - - let _ = conn.receive_ok_or_err().await?; - - // TODO: If CONNECT_WITH_DB is not supported we need to send an InitDb command just after establish - - Ok(()) -} diff --git a/sqlx-core/src/mysql/executor.rs b/sqlx-core/src/mysql/executor.rs index 7c7e9070..46fbb124 100644 --- a/sqlx-core/src/mysql/executor.rs +++ 
b/sqlx-core/src/mysql/executor.rs @@ -1,159 +1,375 @@ -use super::{MySql, Connection}; -use crate::{backend::Backend, describe::{Describe, ResultField}, executor::Executor, mysql::{ - protocol::{ - Capabilities, ColumnCountPacket, ColumnDefinitionPacket, ComStmtExecute, EofPacket, - ErrPacket, OkPacket, ResultRow, StmtExecFlag, - }, - query::MySqlDbParameters, -}, params::{IntoQueryParameters, QueryParameters}, row::FromRow, url::Url, Error}; -use futures_core::{future::BoxFuture, stream::BoxStream, Future}; -use std::pin::Pin; +use std::collections::HashMap; +use std::sync::Arc; -impl Connection { - async fn prepare_cached(&mut self, query: &str) -> crate::Result { - let conn = &mut self.conn; - Ok(*(self.cache.get_or_compute(query, || conn.prepare_ignore_describe(query)).await?)) +use futures_core::future::BoxFuture; +use futures_core::stream::BoxStream; + +use crate::describe::{Column, Describe}; +use crate::executor::Executor; +use crate::mysql::error::MySqlError; +use crate::mysql::protocol::{ + Capabilities, ColumnCount, ColumnDefinition, ComQuery, ComSetOption, ComStmtExecute, + ComStmtPrepare, ComStmtPrepareOk, Cursor, Decode, EofPacket, ErrPacket, OkPacket, Row, + SetOption, Type, +}; +use crate::mysql::{MySql, MySqlArguments, MySqlConnection, MySqlRow}; + +enum Step { + Command(u64), + Row(Row), +} + +enum OkOrResultSet { + Ok(OkPacket), + ResultSet(ColumnCount), +} + +impl MySqlConnection { + async fn ignore_columns(&mut self, count: usize) -> crate::Result<()> { + for _ in 0..count { + let _column = ColumnDefinition::decode(self.receive().await?)?; + } + + if count > 0 { + self.receive_eof().await?; + } + + Ok(()) + } + + async fn receive_ok_or_column_count(&mut self) -> crate::Result { + let packet = self.receive().await?; + + match packet[0] { + 0xfe if packet.len() < 0xffffff => { + let ok = OkPacket::decode(packet)?; + self.ready = true; + + Ok(OkOrResultSet::Ok(ok)) + } + + 0x00 => { + let ok = OkPacket::decode(packet)?; + self.ready = true; + + Ok(OkOrResultSet::Ok(ok)) + } + + 0xff => { + let err = ErrPacket::decode(packet)?; + self.ready = true; + + Err(MySqlError(err).into()) + } + + _ => { + let cc = ColumnCount::decode(packet)?; + + Ok(OkOrResultSet::ResultSet(cc)) + } + } + } + + async fn receive_column_types(&mut self, count: usize) -> crate::Result> { + let mut columns: Vec = Vec::with_capacity(count); + + for _ in 0..count { + let packet = self.receive().await?; + let column: ColumnDefinition = ColumnDefinition::decode(packet)?; + + columns.push(column.r#type); + } + + if count > 0 { + self.receive_eof().await?; + } + + Ok(columns.into_boxed_slice()) + } + + async fn wait_for_ready(&mut self) -> crate::Result<()> { + if !self.ready { + while let Some(_step) = self.step(&[], true).await? 
{ + // Drain steps until we hit the end + } + } + + Ok(()) + } + + async fn prepare(&mut self, query: &str) -> crate::Result { + // Start by sending a COM_STMT_PREPARE + self.begin_command_phase(); + self.write(ComStmtPrepare { query }); + self.stream.flush().await?; + + // https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_stmt_prepare.html + + // First we should receive a COM_STMT_PREPARE_OK + let packet = self.receive().await?; + + if packet[0] == 0xff { + // Oops, there was an error in the prepare command + return Err(MySqlError(ErrPacket::decode(packet)?).into()); + } + + ComStmtPrepareOk::decode(packet) + } + + async fn prepare_with_cache(&mut self, query: &str) -> crate::Result { + if let Some(&id) = self.statement_cache.get(query) { + Ok(id) + } else { + let prepare_ok = self.prepare(query).await?; + + // Remember our statement ID, so we do'd do this again the next time + self.statement_cache + .put(query.to_owned(), prepare_ok.statement_id); + + // Ignore input parameters + self.ignore_columns(prepare_ok.params as usize).await?; + + // Collect output parameter names + let mut columns = HashMap::with_capacity(prepare_ok.columns as usize); + let mut index = 0_usize; + for _ in 0..prepare_ok.columns { + let column = ColumnDefinition::decode(self.receive().await?)?; + + if let Some(name) = column.column_alias.or(column.column) { + columns.insert(name, index); + } + + index += 1; + } + + if prepare_ok.columns > 0 { + self.receive_eof().await?; + } + + // Remember our column map in the statement cache + self.statement_cache + .put_columns(prepare_ok.statement_id, columns); + + Ok(prepare_ok.statement_id) + } + } + + // [COM_STMT_EXECUTE] + async fn execute_statement(&mut self, id: u32, args: MySqlArguments) -> crate::Result<()> { + self.begin_command_phase(); + self.ready = false; + + self.write(ComStmtExecute { + cursor: Cursor::NO_CURSOR, + statement_id: id, + params: &args.params, + null_bitmap: &args.null_bitmap, + param_types: &args.param_types, + }); + + self.stream.flush().await?; + + Ok(()) + } + + async fn step(&mut self, columns: &[Type], binary: bool) -> crate::Result> { + let capabilities = self.capabilities; + let packet = ret_if_none!(self.try_receive().await?); + + match packet[0] { + 0xfe if packet.len() < 0xffffff => { + // Resultset row can begin with 0xfe byte (when using text protocol + // with a field length > 0xffffff) + + if !capabilities.contains(Capabilities::DEPRECATE_EOF) { + let _eof = EofPacket::decode(packet)?; + self.ready = true; + + return Ok(None); + } else { + let ok = OkPacket::decode(packet)?; + self.ready = true; + + return Ok(Some(Step::Command(ok.affected_rows))); + } + } + + 0xff => { + let err = ErrPacket::decode(packet)?; + self.ready = true; + + return Err(MySqlError(err).into()); + } + + _ => { + return Ok(Some(Step::Row(Row::decode(packet, columns, binary)?))); + } + } } } -impl Executor for Connection { - type Backend = MySql; +impl MySqlConnection { + async fn send(&mut self, query: &str) -> crate::Result<()> { + self.wait_for_ready().await?; - fn ping(&mut self) -> BoxFuture> { - Box::pin(self.conn.ping()) + self.begin_command_phase(); + self.ready = false; + + // enable multi-statement only for this query + self.write(ComQuery { query }); + + self.stream.flush().await?; + + // COM_QUERY can terminate before the result set with an ERR or OK packet + let num_columns = match self.receive_ok_or_column_count().await? 
{ + OkOrResultSet::Ok(_) => { + return Ok(()); + } + + OkOrResultSet::ResultSet(cc) => cc.columns as usize, + }; + + let columns = self.receive_column_types(num_columns as usize).await?; + + while let Some(step) = self.step(&columns, false).await? { + // Drop all responses + } + + Ok(()) + } + + async fn execute(&mut self, query: &str, args: MySqlArguments) -> crate::Result { + self.wait_for_ready().await?; + + let statement_id = self.prepare_with_cache(query).await?; + + self.execute_statement(statement_id, args).await?; + + // COM_STMT_EXECUTE can terminate before the result set with an ERR or OK packet + let num_columns = match self.receive_ok_or_column_count().await? { + OkOrResultSet::Ok(ok) => { + return Ok(ok.affected_rows); + } + + OkOrResultSet::ResultSet(cc) => cc.columns as usize, + }; + + self.ignore_columns(num_columns).await?; + + let mut res = 0; + + while let Some(step) = self.step(&[], true).await? { + if let Step::Command(affected) = step { + res = affected; + } + } + + Ok(res) + } + + async fn describe(&mut self, query: &str) -> crate::Result> { + self.wait_for_ready().await?; + + let prepare_ok = self.prepare(query).await?; + + let mut param_types = Vec::with_capacity(prepare_ok.params as usize); + let mut result_columns = Vec::with_capacity(prepare_ok.columns as usize); + + for _ in 0..prepare_ok.params { + let param = ColumnDefinition::decode(self.receive().await?)?; + param_types.push(param.r#type.0); + } + + if prepare_ok.params > 0 { + self.receive_eof().await?; + } + + for _ in 0..prepare_ok.columns { + let column = ColumnDefinition::decode(self.receive().await?)?; + result_columns.push(Column:: { + name: column.column_alias.or(column.column), + + table_id: column.table_alias.or(column.table), + + type_id: column.r#type.0, + + _non_exhaustive: (), + }); + } + + if prepare_ok.columns > 0 { + self.receive_eof().await?; + } + + Ok(Describe { + param_types: param_types.into_boxed_slice(), + result_columns: result_columns.into_boxed_slice(), + + _non_exhaustive: (), + }) + } + + fn fetch<'e, 'q: 'e>( + &'e mut self, + query: &'q str, + args: MySqlArguments, + ) -> BoxStream<'e, crate::Result> { + Box::pin(async_stream::try_stream! { + self.wait_for_ready().await?; + + let statement_id = self.prepare_with_cache(query).await?; + + let columns = self.statement_cache.get_columns(statement_id); + + self.execute_statement(statement_id, args).await?; + + // COM_STMT_EXECUTE can terminate before the result set with an ERR or OK packet + let num_columns = match self.receive_ok_or_column_count().await? { + OkOrResultSet::Ok(_) => { + return; + } + + OkOrResultSet::ResultSet(cc) => { + cc.columns as usize + } + }; + + let column_types = self.receive_column_types(num_columns).await?; + + while let Some(Step::Row(row)) = self.step(&column_types, true).await? { + yield MySqlRow { row, columns: Arc::clone(&columns) }; + } + }) + } +} + +impl Executor for MySqlConnection { + type Database = super::MySql; + + fn send<'e, 'q: 'e>(&'e mut self, query: &'q str) -> BoxFuture<'e, crate::Result<()>> { + Box::pin(self.send(query)) } fn execute<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: MySqlDbParameters, + args: MySqlArguments, ) -> BoxFuture<'e, crate::Result> { - Box::pin(async move { - let statement_id = self.prepare_cached(query).await?; - self.conn.send_execute(statement_id, params).await?; - - let columns = self.conn.result_column_defs().await?; - let capabilities = self.conn.capabilities; - - // For each row in the result set we will receive a ResultRow packet. 
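Whether a statement goes over the text protocol (COM_QUERY) or the binary protocol (COM_STMT_EXECUTE), receive_ok_or_column_count classifies the first response packet the same way: OK, ERR, or the start of a result set. A compact sketch of that dispatch on the first payload byte; the FirstResponse and classify_first_packet names are mine, not the patch's:

// What the first byte of the first response packet tells us.
enum FirstResponse {
    Ok,        // 0x00, or 0xfe when the payload is small enough to be OK/EOF
    Err,       // 0xff
    ResultSet, // anything else: a length-encoded column count follows
}

fn classify_first_packet(payload: &[u8]) -> FirstResponse {
    match payload[0] {
        0x00 => FirstResponse::Ok,
        0xfe if payload.len() < 0xff_ff_ff => FirstResponse::Ok,
        0xff => FirstResponse::Err,
        _ => FirstResponse::ResultSet,
    }
}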
- // We may receive an [OkPacket], [EofPacket], or [ErrPacket] (depending on if EOFs are enabled) to finalize the iteration. - let mut rows = 0u64; - loop { - let packet = self.conn.receive().await?; - if packet[0] == 0xFE && packet.len() < 0xFF_FF_FF { - // NOTE: It's possible for a ResultRow to start with 0xFE (which would normally signify end-of-rows) - // but it's not possible for an Ok/Eof to be larger than 0xFF_FF_FF. - if !capabilities.contains(Capabilities::CLIENT_DEPRECATE_EOF) { - let _eof = EofPacket::decode(packet)?; - } else { - let _ok = OkPacket::decode(packet, capabilities)?; - } - - break; - } else if packet[0] == 0xFF { - let err = ErrPacket::decode(packet)?; - panic!("received db err = {:?}", err); - } else { - // Ignore result rows; exec only returns number of affected rows; - let _ = ResultRow::decode(packet, &columns)?; - - // For every row we decode we increment counter - rows = rows + 1; - } - } - - Ok(rows) - }) + Box::pin(self.execute(query, args)) } - fn fetch<'e, 'q: 'e, T: 'e>( + fn fetch<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: MySqlDbParameters, - ) -> BoxStream<'e, crate::Result> - where - T: FromRow + Send + Unpin, - { - Box::pin(async_stream::try_stream! { - let prepare = self.prepare_cached(query).await?; - self.conn.send_execute(prepare, params).await?; - - let columns = self.conn.result_column_defs().await?; - let capabilities = self.conn.capabilities; - - loop { - let packet = self.conn.receive().await?; - if packet[0] == 0xFE && packet.len() < 0xFF_FF_FF { - // NOTE: It's possible for a ResultRow to start with 0xFE (which would normally signify end-of-rows) - // but it's not possible for an Ok/Eof to be larger than 0xFF_FF_FF. - if !capabilities.contains(Capabilities::CLIENT_DEPRECATE_EOF) { - let _eof = EofPacket::decode(packet)?; - } else { - let _ok = OkPacket::decode(packet, capabilities)?; - } - - break; - } else if packet[0] == 0xFF { - let _err = ErrPacket::decode(packet)?; - panic!("ErrPacket received"); - } else { - let row = ResultRow::decode(packet, &columns)?; - yield FromRow::from_row(row); - } - } - }) - } - - fn fetch_optional<'e, 'q: 'e, T: 'e>( - &'e mut self, - query: &'q str, - params: MySqlDbParameters, - ) -> BoxFuture<'e, crate::Result>> - where - T: FromRow + Send, - { - Box::pin(async move { - let statement_id = self.prepare_cached(query).await?; - self.conn.send_execute(statement_id, params).await?; - - let columns = self.conn.result_column_defs().await?; - let capabilities = self.conn.capabilities; - - let mut row = None; - - loop { - let packet = self.conn.receive().await?; - - if packet[0] == 0xFE && packet.len() < 0xFF_FF_FF { - // NOTE: It's possible for a ResultRow to start with 0xFE (which would normally signify end-of-rows) - // but it's not possible for an Ok/Eof to be larger than 0xFF_FF_FF. 
- if !capabilities.contains(Capabilities::CLIENT_DEPRECATE_EOF) { - let _eof = EofPacket::decode(packet)?; - } else { - let _ok = OkPacket::decode(packet, capabilities)?; - } - - break; - } else if packet[0] == 0xFF { - let _err = ErrPacket::decode(packet)?; - panic!("Received error packet: {:?}", _err); - } else { - row = Some(FromRow::from_row(ResultRow::decode(packet, &columns)?)); - } - } - - Ok(row) - }) + args: MySqlArguments, + ) -> BoxStream<'e, crate::Result> { + self.fetch(query, args) } fn describe<'e, 'q: 'e>( &'e mut self, query: &'q str, - ) -> BoxFuture<'e, crate::Result>> { - Box::pin(self.conn.prepare_describe(query)) - } - - fn send<'e, 'q: 'e>(&'e mut self, commands: &'q str) -> BoxFuture<'e, crate::Result<()>> { - Box::pin(self.conn.send_raw(commands)) + ) -> BoxFuture<'e, crate::Result>> { + Box::pin(self.describe(query)) } } diff --git a/sqlx-core/src/mysql/io/buf_ext.rs b/sqlx-core/src/mysql/io/buf_ext.rs index 55786d4b..fc01624b 100644 --- a/sqlx-core/src/mysql/io/buf_ext.rs +++ b/sqlx-core/src/mysql/io/buf_ext.rs @@ -1,52 +1,35 @@ -use crate::io::Buf; -use byteorder::ByteOrder; use std::io; +use byteorder::ByteOrder; + +use crate::io::Buf; + pub trait BufExt { - fn get_uint(&mut self, n: usize) -> io::Result; fn get_uint_lenenc(&mut self) -> io::Result>; - fn get_str_eof(&mut self) -> io::Result<&str>; + fn get_str_lenenc(&mut self) -> io::Result>; - fn get_bytes(&mut self, n: usize) -> io::Result<&[u8]>; + fn get_bytes_lenenc(&mut self) -> io::Result>; } -impl<'a> BufExt for &'a [u8] { - fn get_uint(&mut self, n: usize) -> io::Result { - let val = T::read_uint(*self, n); - self.advance(n); - - Ok(val) - } - +impl BufExt for &'_ [u8] { fn get_uint_lenenc(&mut self) -> io::Result> { Ok(match self.get_u8()? { 0xFB => None, 0xFC => Some(u64::from(self.get_u16::()?)), 0xFD => Some(u64::from(self.get_u24::()?)), 0xFE => Some(self.get_u64::()?), - // ? 0xFF => panic!("int unprocessable first byte 0xFF"), + value => Some(u64::from(value)), }) } - fn get_str_eof(&mut self) -> io::Result<&str> { - self.get_str(self.len()) - } - fn get_str_lenenc(&mut self) -> io::Result> { self.get_uint_lenenc::()? .map(move |len| self.get_str(len as usize)) .transpose() } - fn get_bytes(&mut self, n: usize) -> io::Result<&[u8]> { - let buf = &self[..n]; - self.advance(n); - - Ok(buf) - } - fn get_bytes_lenenc(&mut self) -> io::Result> { self.get_uint_lenenc::()? 
.map(move |len| self.get_bytes(len as usize)) diff --git a/sqlx-core/src/mysql/io/buf_mut_ext.rs b/sqlx-core/src/mysql/io/buf_mut_ext.rs index 4c1e2f34..332c6f8c 100644 --- a/sqlx-core/src/mysql/io/buf_mut_ext.rs +++ b/sqlx-core/src/mysql/io/buf_mut_ext.rs @@ -1,16 +1,14 @@ -use crate::io::BufMut; -use byteorder::ByteOrder; use std::{u16, u32, u64, u8}; +use byteorder::ByteOrder; + +use crate::io::BufMut; + pub trait BufMutExt { fn put_uint_lenenc>>(&mut self, val: U); fn put_str_lenenc(&mut self, val: &str); - fn put_str(&mut self, val: &str); - - fn put_bytes(&mut self, val: &[u8]); - fn put_bytes_lenenc(&mut self, val: &[u8]); } @@ -49,23 +47,11 @@ impl BufMutExt for Vec { } } - #[inline] - fn put_str(&mut self, val: &str) { - self.put_bytes(val.as_bytes()); - } - - #[inline] fn put_str_lenenc(&mut self, val: &str) { self.put_uint_lenenc::(val.len() as u64); self.extend_from_slice(val.as_bytes()); } - #[inline] - fn put_bytes(&mut self, val: &[u8]) { - self.extend_from_slice(val); - } - - #[inline] fn put_bytes_lenenc(&mut self, val: &[u8]) { self.put_uint_lenenc::(val.len() as u64); self.extend_from_slice(val); @@ -74,28 +60,9 @@ impl BufMutExt for Vec { #[cfg(test)] mod tests { - use super::BufMutExt; - use crate::io::BufMut; + use super::{BufMut, BufMutExt}; use byteorder::LittleEndian; - // [X] it_encodes_int_lenenc_u64 - // [X] it_encodes_int_lenenc_u32 - // [X] it_encodes_int_lenenc_u24 - // [X] it_encodes_int_lenenc_u16 - // [X] it_encodes_int_lenenc_u8 - // [X] it_encodes_int_u64 - // [X] it_encodes_int_u32 - // [X] it_encodes_int_u24 - // [X] it_encodes_int_u16 - // [X] it_encodes_int_u8 - // [X] it_encodes_string_lenenc - // [X] it_encodes_string_fix - // [X] it_encodes_string_null - // [X] it_encodes_string_eof - // [X] it_encodes_byte_lenenc - // [X] it_encodes_byte_fix - // [X] it_encodes_byte_eof - #[test] fn it_encodes_int_lenenc_none() { let mut buf = Vec::with_capacity(1024); diff --git a/sqlx-core/src/mysql/io/mod.rs b/sqlx-core/src/mysql/io/mod.rs index 11f11f39..a8867b15 100644 --- a/sqlx-core/src/mysql/io/mod.rs +++ b/sqlx-core/src/mysql/io/mod.rs @@ -1,5 +1,5 @@ -pub mod buf_ext; -pub mod buf_mut_ext; +mod buf_ext; +mod buf_mut_ext; pub use buf_ext::BufExt; pub use buf_mut_ext::BufMutExt; diff --git a/sqlx-core/src/mysql/mod.rs b/sqlx-core/src/mysql/mod.rs index 98f83f4c..3b8b00da 100644 --- a/sqlx-core/src/mysql/mod.rs +++ b/sqlx-core/src/mysql/mod.rs @@ -1,36 +1,17 @@ -mod backend; +//! **MySQL** database and connection types. + +mod arguments; mod connection; +mod database; mod error; -mod establish; mod executor; mod io; mod protocol; -mod query; mod row; -pub mod types; +mod types; -use self::connection::Connection as RawConnection; -use crate::cache::StatementCache; -use futures_core::future::BoxFuture; -use crate::Backend; - -/// Backend for MySQL. 
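The BufExt/BufMutExt pair round-trips MySQL's length-encoded integers: values below 0xFB fit in a single byte, while a 0xFC, 0xFD or 0xFE marker introduces a 2-, 3- or 8-byte little-endian value (a lone 0xFB encodes NULL). A small worked sketch, independent of the patch's traits (the put_lenenc name is mine):

// Encode a length-encoded integer the way put_uint_lenenc does.
fn put_lenenc(buf: &mut Vec<u8>, value: u64) {
    match value {
        0..=0xfa => buf.push(value as u8),
        0xfb..=0xffff => {
            buf.push(0xfc);
            buf.extend_from_slice(&(value as u16).to_le_bytes());
        }
        0x1_0000..=0xff_ffff => {
            buf.push(0xfd);
            buf.extend_from_slice(&(value as u32).to_le_bytes()[..3]);
        }
        _ => {
            buf.push(0xfe);
            buf.extend_from_slice(&value.to_le_bytes());
        }
    }
}

// 5     => [0x05]
// 300   => [0xfc, 0x2c, 0x01]
// 70000 => [0xfd, 0x70, 0x11, 0x01]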
-pub enum MySql {} - -impl MySql { - /// An alias for [Backend::connect()](../trait.Backend.html#method.connect) - pub async fn connect(url: &str) -> crate::Result { - ::connect(url).await - } -} - -pub struct Connection { - conn: RawConnection, - cache: StatementCache, -} - -impl crate::Connection for Connection { - fn close(self) -> BoxFuture<'static, crate::Result<()>> { - Box::pin(self.conn.close()) - } -} +pub use arguments::MySqlArguments; +pub use connection::MySqlConnection; +pub use database::MySql; +// pub use error::DatabaseError; +pub use row::MySqlRow; diff --git a/sqlx-core/src/mysql/protocol/binary/com_stmt_close.rs b/sqlx-core/src/mysql/protocol/binary/com_stmt_close.rs deleted file mode 100644 index 4eef1fed..00000000 --- a/sqlx-core/src/mysql/protocol/binary/com_stmt_close.rs +++ /dev/null @@ -1,38 +0,0 @@ -use crate::{ - io::BufMut, - mysql::{ - io::BufMutExt, - protocol::{binary::BinaryProtocol, Capabilities, Encode}, - }, -}; -use byteorder::LittleEndian; - -/// Closes a previously prepared statement. -#[derive(Debug)] -pub struct ComStmtClose { - statement_id: i32, -} - -impl Encode for ComStmtClose { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_STMT_CLOSE : int<1> - buf.put_u8(BinaryProtocol::ComStmtClose as u8); - - // statement_id : int<4> - buf.put_i32::(self.statement_id); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_stmt_close() { - let mut buf = Vec::new(); - - ComStmtClose { statement_id: 1 }.encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x19\x01\0\0\0"); - } -} diff --git a/sqlx-core/src/mysql/protocol/binary/com_stmt_exec.rs b/sqlx-core/src/mysql/protocol/binary/com_stmt_exec.rs deleted file mode 100644 index 18ece8c7..00000000 --- a/sqlx-core/src/mysql/protocol/binary/com_stmt_exec.rs +++ /dev/null @@ -1,89 +0,0 @@ -use crate::{ - io::BufMut, - mysql::{ - io::BufMutExt, - protocol::{binary::BinaryProtocol, Capabilities, Encode}, - types::MySqlTypeMetadata, - }, -}; -use byteorder::LittleEndian; - -bitflags::bitflags! { - // https://mariadb.com/kb/en/library/com_stmt_execute/#flag - pub struct StmtExecFlag: u8 { - const NO_CURSOR = 0; - const READ_ONLY = 1; - const CURSOR_FOR_UPDATE = 2; - const SCROLLABLE_CURSOR = 4; - } -} - -// https://mariadb.com/kb/en/library/com_stmt_execute -/// Executes a previously prepared statement. 
-#[derive(Debug)] -pub struct ComStmtExecute<'a> { - pub statement_id: u32, - pub flags: StmtExecFlag, - pub params: &'a [u8], - pub null: &'a [u8], - pub param_types: &'a [MySqlTypeMetadata], -} - -impl Encode for ComStmtExecute<'_> { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_STMT_EXECUTE : int<1> - buf.put_u8(BinaryProtocol::ComStmtExec as u8); - - // statement id : int<4> - buf.put_u32::(self.statement_id); - - // flags : int<1> - buf.put_u8(self.flags.bits()); - - // Iteration count (always 1) : int<4> - buf.put_u32::(1); - - // if (param_count > 0) - if self.param_types.len() > 0 { - // null bitmap : byte<(param_count + 7)/8> - buf.put_bytes(self.null); - - // send type to server (0 / 1) : byte<1> - buf.put_u8(1); - - // for each parameter : - for param_type in self.param_types { - // field type : byte<1> - buf.put_u8(param_type.field_type.0); - - // parameter flag : byte<1> - buf.put_u8(param_type.param_flag.bits()); - } - - // for each parameter (i.e param_count times) - // byte binary parameter value - buf.put_bytes(self.params); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_stmt_exec() { - let mut buf = Vec::new(); - - ComStmtExecute { - statement_id: 1, - flags: StmtExecFlag::NO_CURSOR, - null: &vec![], - params: &vec![], - param_types: &vec![], - } - .encode(&mut buf, Capabilities::empty()); - - // TODO: Add a regression test - } -} diff --git a/sqlx-core/src/mysql/protocol/binary/com_stmt_fetch.rs b/sqlx-core/src/mysql/protocol/binary/com_stmt_fetch.rs deleted file mode 100644 index db62f329..00000000 --- a/sqlx-core/src/mysql/protocol/binary/com_stmt_fetch.rs +++ /dev/null @@ -1,44 +0,0 @@ -use crate::{ - io::BufMut, - mysql::protocol::{binary::BinaryProtocol, Capabilities, Encode}, -}; -use byteorder::LittleEndian; - -// https://mariadb.com/kb/en/library/com_stmt_fetch/ -/// Fetch rows from a prepared statement. 
-#[derive(Debug)] -pub struct ComStmtFetch { - pub statement_id: u32, - pub rows: u32, -} - -impl Encode for ComStmtFetch { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_STMT_FETCH : int<1> - buf.put_u8(BinaryProtocol::ComStmtFetch as u8); - - // statement id : int<4> - buf.put_u32::(self.statement_id); - - // number of rows to fetch : int<4> - buf.put_u32::(self.rows); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_stmt_fetch() { - let mut buf = Vec::new(); - - ComStmtFetch { - statement_id: 1, - rows: 10, - } - .encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x1C\x01\0\0\0\x0A\0\0\0"); - } -} diff --git a/sqlx-core/src/mysql/protocol/binary/com_stmt_prepare.rs b/sqlx-core/src/mysql/protocol/binary/com_stmt_prepare.rs deleted file mode 100644 index 31c5611a..00000000 --- a/sqlx-core/src/mysql/protocol/binary/com_stmt_prepare.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::{ - io::BufMut, - mysql::{ - io::BufMutExt, - protocol::{Capabilities, Encode}, - }, -}; - -#[derive(Debug)] -pub struct ComStmtPrepare<'a> { - pub statement: &'a str, -} - -impl Encode for ComStmtPrepare<'_> { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_STMT_PREPARE : int<1> - buf.put_u8(super::BinaryProtocol::ComStmtPrepare as u8); - - // SQL Statement : string - buf.put_str(&self.statement); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_stmt_prepare() { - let mut buf = Vec::new(); - - ComStmtPrepare { - statement: "SELECT * FROM users WHERE username = ?", - } - .encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], &b"\x16SELECT * FROM users WHERE username = ?"[..]); - } -} diff --git a/sqlx-core/src/mysql/protocol/binary/com_stmt_prepare_ok.rs b/sqlx-core/src/mysql/protocol/binary/com_stmt_prepare_ok.rs deleted file mode 100644 index a6c0578d..00000000 --- a/sqlx-core/src/mysql/protocol/binary/com_stmt_prepare_ok.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::io::Buf; -use byteorder::LittleEndian; -use std::io; - -// https://mariadb.com/kb/en/library/com_stmt_prepare/#com_stmt_prepare_ok -#[derive(Debug)] -pub struct ComStmtPrepareOk { - pub statement_id: u32, - - /// Number of columns in the returned result set (or 0 if statement does not return result set). - pub columns: u16, - - /// Number of prepared statement parameters ('?' placeholders). - pub params: u16, - - /// Number of warnings. - pub warnings: u16, -} - -impl ComStmtPrepareOk { - pub(crate) fn decode(mut buf: &[u8]) -> crate::Result { - let header = buf.get_u8()?; - - if header != 0x00 { - return Err( - protocol_err!("expected COM_STMT_PREPARE_OK (0x00); received {}", header).into(), - ); - } - - let statement_id = buf.get_u32::()?; - let columns = buf.get_u16::()?; - let params = buf.get_u16::()?; - - // Skip 1 unused byte - // -not used- : string<1> - buf.advance(1); - - let warnings = buf.get_u16::()?; - - Ok(Self { - statement_id, - columns, - params, - warnings, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::__bytes_builder; - - #[test] - fn it_decodes_com_stmt_prepare_ok() -> crate::Result<()> { - #[rustfmt::skip] - let buf = &__bytes_builder!( - // int<1> 0x00 COM_STMT_PREPARE_OK header - 0u8, - // int<4> statement id - 1u8, 0u8, 0u8, 0u8, - // int<2> number of columns in the returned result set (or 0 if statement does not return result set) - 10u8, 0u8, - // int<2> number of prepared statement parameters ('?' 
placeholders) - 1u8, 0u8, - // string<1> -not used- - 0u8, - // int<2> number of warnings - 0u8, 0u8 - )[..]; - - let message = ComStmtPrepareOk::decode(&buf)?; - - assert_eq!(message.statement_id, 1); - assert_eq!(message.columns, 10); - assert_eq!(message.params, 1); - assert_eq!(message.warnings, 0); - - Ok(()) - } -} diff --git a/sqlx-core/src/mysql/protocol/binary/com_stmt_reset.rs b/sqlx-core/src/mysql/protocol/binary/com_stmt_reset.rs deleted file mode 100644 index 04659779..00000000 --- a/sqlx-core/src/mysql/protocol/binary/com_stmt_reset.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::{ - io::BufMut, - mysql::protocol::{binary::BinaryProtocol, Capabilities, Encode}, -}; -use byteorder::LittleEndian; - -#[derive(Debug)] -pub struct ComStmtReset { - pub statement_id: u32, -} - -impl Encode for ComStmtReset { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_STMT_RESET : int<1> - buf.put_u8(BinaryProtocol::ComStmtReset as u8); - - // statement_id : int<4> - buf.put_u32::(self.statement_id); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_stmt_reset() { - let mut buf = Vec::new(); - - ComStmtReset { statement_id: 1 }.encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x1A\x01\0\0\0"); - } -} diff --git a/sqlx-core/src/mysql/protocol/binary/mod.rs b/sqlx-core/src/mysql/protocol/binary/mod.rs deleted file mode 100644 index 6c3e6bec..00000000 --- a/sqlx-core/src/mysql/protocol/binary/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -pub mod com_stmt_close; -pub mod com_stmt_exec; -pub mod com_stmt_fetch; -pub mod com_stmt_prepare; -pub mod com_stmt_prepare_ok; -pub mod com_stmt_reset; - -pub use com_stmt_close::ComStmtClose; -pub use com_stmt_exec::{ComStmtExecute, StmtExecFlag}; -pub use com_stmt_fetch::ComStmtFetch; -pub use com_stmt_prepare::ComStmtPrepare; -pub use com_stmt_prepare_ok::ComStmtPrepareOk; -pub use com_stmt_reset::ComStmtReset; - -pub enum BinaryProtocol { - ComStmtPrepare = 0x16, - ComStmtExec = 0x17, - ComStmtClose = 0x19, - ComStmtReset = 0x1A, - ComStmtFetch = 0x1C, -} diff --git a/sqlx-core/src/mysql/protocol/capabilities.rs b/sqlx-core/src/mysql/protocol/capabilities.rs index e40da3de..39a6970f 100644 --- a/sqlx-core/src/mysql/protocol/capabilities.rs +++ b/sqlx-core/src/mysql/protocol/capabilities.rs @@ -1,65 +1,86 @@ +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/group__group__cs__capabilities__flags.html // https://mariadb.com/kb/en/library/connection/#capabilities bitflags::bitflags! { - pub struct Capabilities: u128 { - const CLIENT_MYSQL = 1; + pub struct Capabilities: u64 { + // [MariaDB] MySQL compatibility + const MYSQL = 1; + + // [*] Send found rows instead of affected rows in EOF_Packet. const FOUND_ROWS = 2; - // One can specify db on connect + // Get all column flags. + const LONG_FLAG = 4; + + // [*] Database (schema) name can be specified on connect in Handshake Response Packet. const CONNECT_WITH_DB = 8; - // Can use compression protocol + // Don't allow database.table.column + const NO_SCHEMA = 16; + + // [*] Compression protocol supported const COMPRESS = 32; + // Special handling of ODBC behavior. 
+ const ODBC = 64; + // Can use LOAD DATA LOCAL const LOCAL_FILES = 128; - // Ignore spaces before '(' + // [*] Ignore spaces before '(' const IGNORE_SPACE = 256; - // 4.1+ protocol - const CLIENT_PROTOCOL_41 = 1 << 9; + // [*] New 4.1+ protocol + const PROTOCOL_41 = 512; - const CLIENT_INTERACTIVE = 1 << 10; + // This is an interactive client + const INTERACTIVE = 1024; - // Can use SSL - const SSL = 1 << 11; + // Use SSL encryption for this session + const SSL = 2048; - const TRANSACTIONS = 1 << 12; + // Client knows about transactions + const TRANSACTIONS = 8192; // 4.1+ authentication - const SECURE_CONNECTION = 1 << 13; + const SECURE_CONNECTION = (1 << 13); - // Enable/disable multi-stmt support - const MULTI_STATEMENTS = 1 << 16; + // Enable/disable multi-statement support for COM_QUERY *and* COM_STMT_PREPARE + const MULTI_STATEMENTS = (1 << 16); - // Enable/disable multi-results - const MULTI_RESULTS = 1 << 17; + // Enable/disable multi-results for COM_QUERY + const MULTI_RESULTS = (1 << 17); - // Enable/disable multi-results for PrepareStatement - const PS_MULTI_RESULTS = 1 << 18; + // Enable/disable multi-results for COM_STMT_PREPARE + const PS_MULTI_RESULTS = (1 << 18); // Client supports plugin authentication - const PLUGIN_AUTH = 1 << 19; + const PLUGIN_AUTH = (1 << 19); - // Client send connection attributes - const CONNECT_ATTRS = 1 << 20; + // Client supports connection attributes + const CONNECT_ATTRS = (1 << 20); - // Enable authentication response packet to be larger than 255 bytes - const PLUGIN_AUTH_LENENC_CLIENT_DATA = 1 << 21; + // Enable authentication response packet to be larger than 255 bytes. + const PLUGIN_AUTH_LENENC_DATA = (1 << 21); - // Enable/disable session tracking in OK_Packet - const CLIENT_SESSION_TRACK = 1 << 23; + // Don't close the connection for a user account with expired password. + const CAN_HANDLE_EXPIRED_PASSWORDS = (1 << 22); - // EOF_Packet deprecation - const CLIENT_DEPRECATE_EOF = 1 << 24; + // Capable of handling server state change information. + const SESSION_TRACK = (1 << 23); - // Client support progress indicator (since 10.2) - const MARIA_DB_CLIENT_PROGRESS = 1 << 32; + // Client no longer needs EOF_Packet and will use OK_Packet instead. 
+ const DEPRECATE_EOF = (1 << 24); - // Permit COM_MULTI protocol - const MARIA_DB_CLIENT_COM_MULTI = 1 << 33; + // Support ZSTD protocol compression + const ZSTD_COMPRESSION_ALGORITHM = (1 << 26); - // Permit bulk insert - const MARIA_CLIENT_STMT_BULK_OPERATIONS = 1 << 34; + // Verify server certificate + const SSL_VERIFY_SERVER_CERT = (1 << 30); + + // The client can handle optional metadata information in the resultset + const OPTIONAL_RESULTSET_METADATA = (1 << 25); + + // Don't reset the options after an unsuccessful connect + const REMEMBER_OPTIONS = (1 << 31); } } diff --git a/sqlx-core/src/mysql/protocol/column_count.rs b/sqlx-core/src/mysql/protocol/column_count.rs new file mode 100644 index 00000000..85bf0891 --- /dev/null +++ b/sqlx-core/src/mysql/protocol/column_count.rs @@ -0,0 +1,18 @@ +use byteorder::LittleEndian; + +use crate::io::Buf; +use crate::mysql::io::BufExt; +use crate::mysql::protocol::Decode; + +#[derive(Debug)] +pub struct ColumnCount { + pub columns: u64, +} + +impl Decode for ColumnCount { + fn decode(mut buf: &[u8]) -> crate::Result { + let columns = buf.get_uint_lenenc::()?.unwrap_or(0); + + Ok(Self { columns }) + } +} diff --git a/sqlx-core/src/mysql/protocol/column_def.rs b/sqlx-core/src/mysql/protocol/column_def.rs new file mode 100644 index 00000000..35441908 --- /dev/null +++ b/sqlx-core/src/mysql/protocol/column_def.rs @@ -0,0 +1,77 @@ +use byteorder::LittleEndian; + +use crate::io::Buf; +use crate::mysql::io::BufExt; +use crate::mysql::protocol::{Decode, FieldFlags, Type}; + +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_query_response_text_resultset_column_definition.html +// https://mariadb.com/kb/en/resultset/#column-definition-packet +#[derive(Debug)] +pub struct ColumnDefinition { + pub schema: Option>, + + pub table_alias: Option>, + pub table: Option>, + + pub column_alias: Option>, + pub column: Option>, + + pub char_set: u16, + + pub max_size: u32, + + pub r#type: Type, + + pub flags: FieldFlags, + + pub decimals: u8, +} + +impl Decode for ColumnDefinition { + fn decode(mut buf: &[u8]) -> crate::Result { + // catalog : string + let catalog = buf.get_str_lenenc::()?; + + if catalog != Some("def") { + return Err(protocol_err!( + "expected ColumnDefinition (\"def\"); received {:?}", + catalog + ))?; + } + + let schema = buf.get_str_lenenc::()?.map(Into::into); + let table_alias = buf.get_str_lenenc::()?.map(Into::into); + let table = buf.get_str_lenenc::()?.map(Into::into); + let column_alias = buf.get_str_lenenc::()?.map(Into::into); + let column = buf.get_str_lenenc::()?.map(Into::into); + + let len_fixed_fields = buf.get_uint_lenenc::()?.unwrap_or(0); + + if len_fixed_fields != 0x0c { + return Err(protocol_err!( + "expected ColumnDefinition (0x0c); received {:?}", + len_fixed_fields + ))?; + } + + let char_set = buf.get_u16::()?; + let max_size = buf.get_u32::()?; + + let r#type = buf.get_u8()?; + let flags = buf.get_u16::()?; + let decimals = buf.get_u8()?; + + Ok(Self { + schema, + table, + table_alias, + column, + column_alias, + char_set, + max_size, + r#type: Type(r#type), + flags: FieldFlags::from_bits_truncate(flags), + decimals, + }) + } +} diff --git a/sqlx-core/src/mysql/protocol/com_query.rs b/sqlx-core/src/mysql/protocol/com_query.rs new file mode 100644 index 00000000..512caa71 --- /dev/null +++ b/sqlx-core/src/mysql/protocol/com_query.rs @@ -0,0 +1,21 @@ +use byteorder::LittleEndian; + +use crate::io::BufMut; +use crate::mysql::io::BufMutExt; +use crate::mysql::protocol::{Capabilities, Encode}; + +// 
https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_query.html +#[derive(Debug)] +pub struct ComQuery<'a> { + pub query: &'a str, +} + +impl Encode for ComQuery<'_> { + fn encode(&self, buf: &mut Vec, _: Capabilities) { + // COM_QUERY : int<1> + buf.put_u8(0x03); + + // query : string + buf.put_str(self.query); + } +} diff --git a/sqlx-core/src/mysql/protocol/com_set_option.rs b/sqlx-core/src/mysql/protocol/com_set_option.rs new file mode 100644 index 00000000..6ce7545c --- /dev/null +++ b/sqlx-core/src/mysql/protocol/com_set_option.rs @@ -0,0 +1,29 @@ +use byteorder::LittleEndian; + +use crate::io::BufMut; +use crate::mysql::io::BufMutExt; +use crate::mysql::protocol::{Capabilities, Encode}; + +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/mysql__com_8h.html#a53f60000da139fc7d547db96635a2c02 +#[derive(Debug, Copy, Clone)] +#[repr(u16)] +pub enum SetOption { + MultiStatementsOn = 0x00, + MultiStatementsOff = 0x01, +} + +// https://dev.mysql.com/doc/internals/en/com-set-option.html +#[derive(Debug)] +pub struct ComSetOption { + pub option: SetOption, +} + +impl Encode for ComSetOption { + fn encode(&self, buf: &mut Vec, _: Capabilities) { + // COM_SET_OPTION : int<1> + buf.put_u8(0x1a); + + // option : int<2> + buf.put_u16::(self.option as u16); + } +} diff --git a/sqlx-core/src/mysql/protocol/com_stmt_execute.rs b/sqlx-core/src/mysql/protocol/com_stmt_execute.rs new file mode 100644 index 00000000..e3f6f8ad --- /dev/null +++ b/sqlx-core/src/mysql/protocol/com_stmt_execute.rs @@ -0,0 +1,62 @@ +use byteorder::LittleEndian; + +use crate::io::BufMut; +use crate::mysql::io::BufMutExt; +use crate::mysql::protocol::{Capabilities, Encode}; +use crate::mysql::types::MySqlTypeMetadata; + +bitflags::bitflags! { + // https://dev.mysql.com/doc/dev/mysql-server/8.0.12/mysql__com_8h.html#a3e5e9e744ff6f7b989a604fd669977da + // https://mariadb.com/kb/en/library/com_stmt_execute/#flag + pub struct Cursor: u8 { + const NO_CURSOR = 0; + const READ_ONLY = 1; + const FOR_UPDATE = 2; + const SCROLLABLE = 4; + } +} + +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_stmt_execute.html +#[derive(Debug)] +pub struct ComStmtExecute<'a> { + pub statement_id: u32, + pub cursor: Cursor, + pub params: &'a [u8], + pub null_bitmap: &'a [u8], + pub param_types: &'a [MySqlTypeMetadata], +} + +impl Encode for ComStmtExecute<'_> { + fn encode(&self, buf: &mut Vec, capabilities: Capabilities) { + // COM_STMT_EXECUTE : int<1> + buf.put_u8(0x17); + + // statement_id : int<4> + buf.put_u32::(self.statement_id); + + // cursor : int<1> + buf.put_u8(self.cursor.bits()); + + // iterations (always 1) : int<4> + buf.put_u32::(1); + + if self.param_types.len() > 0 { + // null bitmap : byte<(param_count + 7)/8> + buf.put_bytes(self.null_bitmap); + + // send type to server (0 / 1) : byte<1> + buf.put_u8(1); + + for ty in self.param_types { + // field type : byte<1> + buf.put_u8(ty.r#type.0); + + // parameter flag : byte<1> + buf.put_u8(ty.flag); + } + + // byte binary parameter value + buf.put_bytes(self.params); + } + } +} diff --git a/sqlx-core/src/mysql/protocol/com_stmt_prepare.rs b/sqlx-core/src/mysql/protocol/com_stmt_prepare.rs new file mode 100644 index 00000000..4bb95e1d --- /dev/null +++ b/sqlx-core/src/mysql/protocol/com_stmt_prepare.rs @@ -0,0 +1,21 @@ +use byteorder::LittleEndian; + +use crate::io::BufMut; +use crate::mysql::io::BufMutExt; +use crate::mysql::protocol::{Capabilities, Encode}; + +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_stmt_prepare.html 
+#[derive(Debug)] +pub struct ComStmtPrepare<'a> { + pub query: &'a str, +} + +impl Encode for ComStmtPrepare<'_> { + fn encode(&self, buf: &mut Vec, _: Capabilities) { + // COM_STMT_PREPARE : int<1> + buf.put_u8(0x16); + + // query : string + buf.put_str(self.query); + } +} diff --git a/sqlx-core/src/mysql/protocol/com_stmt_prepare_ok.rs b/sqlx-core/src/mysql/protocol/com_stmt_prepare_ok.rs new file mode 100644 index 00000000..9620c161 --- /dev/null +++ b/sqlx-core/src/mysql/protocol/com_stmt_prepare_ok.rs @@ -0,0 +1,49 @@ +use byteorder::LittleEndian; + +use crate::io::Buf; +use crate::mysql::io::BufExt; +use crate::mysql::protocol::Decode; + +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_stmt_prepare.html#sect_protocol_com_stmt_prepare_response_ok +#[derive(Debug)] +pub struct ComStmtPrepareOk { + pub statement_id: u32, + + /// Number of columns in the returned result set (or 0 if statement does not return result set). + pub columns: u16, + + /// Number of prepared statement parameters ('?' placeholders). + pub params: u16, + + /// Number of warnings. + pub warnings: u16, +} + +impl Decode for ComStmtPrepareOk { + fn decode(mut buf: &[u8]) -> crate::Result { + let header = buf.get_u8()?; + + if header != 0x00 { + return Err(protocol_err!( + "expected COM_STMT_PREPARE_OK (0x00); received 0x{:X}", + header + ))?; + } + + let statement_id = buf.get_u32::()?; + let columns = buf.get_u16::()?; + let params = buf.get_u16::()?; + + // -not used- : string<1> + buf.advance(1); + + let warnings = buf.get_u16::()?; + + Ok(Self { + statement_id, + columns, + params, + warnings, + }) + } +} diff --git a/sqlx-core/src/mysql/protocol/connect/auth_switch_request.rs b/sqlx-core/src/mysql/protocol/connect/auth_switch_request.rs deleted file mode 100644 index 7fb9fc1a..00000000 --- a/sqlx-core/src/mysql/protocol/connect/auth_switch_request.rs +++ /dev/null @@ -1,21 +0,0 @@ -use crate::{ - io::BufMut, - mysql::{ - io::BufMutExt, - protocol::{Capabilities, Encode}, - }, -}; - -#[derive(Default, Debug)] -pub struct AuthenticationSwitchRequest<'a> { - pub auth_plugin_name: &'a str, - pub auth_plugin_data: &'a [u8], -} - -impl Encode for AuthenticationSwitchRequest<'_> { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - buf.put_u8(0xFE); - buf.put_str_nul(&self.auth_plugin_name); - buf.put_bytes(&self.auth_plugin_data); - } -} diff --git a/sqlx-core/src/mysql/protocol/connect/initial.rs b/sqlx-core/src/mysql/protocol/connect/initial.rs deleted file mode 100644 index 79278c3b..00000000 --- a/sqlx-core/src/mysql/protocol/connect/initial.rs +++ /dev/null @@ -1,164 +0,0 @@ -use crate::{ - io::Buf, - mysql::{ - io::BufExt, - protocol::{Capabilities, ServerStatusFlag}, - }, -}; -use byteorder::LittleEndian; -use std::io; - -#[derive(Debug)] -pub struct InitialHandshakePacket { - pub protocol_version: u8, - pub server_version: String, - pub server_status: ServerStatusFlag, - pub server_default_collation: u8, - pub connection_id: u32, - pub scramble: Box<[u8]>, - pub capabilities: Capabilities, - pub auth_plugin_name: Option, -} - -impl InitialHandshakePacket { - pub(crate) fn decode(mut buf: &[u8]) -> io::Result { - let protocol_version = buf.get_u8()?; - let server_version = buf.get_str_nul()?.to_owned(); - let connection_id = buf.get_u32::()?; - let mut scramble = Vec::with_capacity(8); - - // scramble 1st part (authentication seed) : string<8> - scramble.extend_from_slice(&buf[..8]); - buf.advance(8); - - // reserved : string<1> - buf.advance(1); - - // server capabilities (1st part) 
: int<2> - let capabilities_1 = buf.get_u16::()?; - let mut capabilities = Capabilities::from_bits_truncate(capabilities_1.into()); - - // server default collation : int<1> - let server_default_collation = buf.get_u8()?; - - // status flags : int<2> - let server_status = buf.get_u16::()?; - - // server capabilities (2nd part) : int<2> - let capabilities_2 = buf.get_u16::()?; - capabilities |= Capabilities::from_bits_truncate(((capabilities_2 as u32) << 16).into()); - - // if (server_capabilities & PLUGIN_AUTH) - let plugin_data_length = if capabilities.contains(Capabilities::PLUGIN_AUTH) { - // plugin data length : int<1> - buf.get_u8()? - } else { - // 0x00 : int<1> - buf.advance(0); - 0 - }; - - // filler : string<6> - buf.advance(6); - - // if (server_capabilities & CLIENT_MYSQL) - if capabilities.contains(Capabilities::CLIENT_MYSQL) { - // filler : string<4> - buf.advance(4); - } else { - // server capabilities 3rd part . Mysql specific flags : int<4> - let capabilities_3 = buf.get_u32::()?; - capabilities |= Capabilities::from_bits_truncate((capabilities_2 as u128) << 32); - } - - // if (server_capabilities & CLIENT_SECURE_CONNECTION) - if capabilities.contains(Capabilities::SECURE_CONNECTION) { - // scramble 2nd part . Length = max(12, plugin data length - 9) : string - let len = ((plugin_data_length as isize) - 9).max(12) as usize; - scramble.extend_from_slice(&buf[..len]); - buf.advance(len); - - // reserved byte : string<1> - buf.advance(1); - } - - // if (server_capabilities & PLUGIN_AUTH) - let auth_plugin_name = if capabilities.contains(Capabilities::PLUGIN_AUTH) { - Some(buf.get_str_nul()?.to_owned()) - } else { - None - }; - - Ok(Self { - protocol_version, - server_version, - server_default_collation, - server_status: ServerStatusFlag::from_bits_truncate(server_status), - connection_id, - scramble: scramble.into_boxed_slice(), - capabilities, - auth_plugin_name, - }) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::__bytes_builder; - - #[test] - fn it_decodes_initial_handshake_packet() -> io::Result<()> { - #[rustfmt::skip] - let buf = __bytes_builder!( - // int<3> length - 1u8, 0u8, 0u8, - // int<1> seq_no - 0u8, - //int<1> protocol version - 10u8, - //string server version (Mysql server version is by default prefixed by "5.5.5-") - b"5.5.5-10.4.6-Mysql-1:10.4.6+maria~bionic\0", - //int<4> connection id - 13u8, 0u8, 0u8, 0u8, - //string<8> scramble 1st part (authentication seed) - b"?~~|vZAu", - //string<1> reserved byte - 0u8, - //int<2> server capabilities (1st part) - 0xFEu8, 0xF7u8, - //int<1> server default collation - 8u8, - //int<2> status flags - 2u8, 0u8, - //int<2> server capabilities (2nd part) - 0xFF_u8, 0x81_u8, - - //if (server_capabilities & PLUGIN_AUTH) - // int<1> plugin data length - 15u8, - //else - // int<1> 0x00 - - //string<6> filler - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, - //if (server_capabilities & CLIENT_MYSQL) - // string<4> filler - //else - // int<4> server capabilities 3rd part . Mysql specific flags /* Mysql 10.2 or later */ - 7u8, 0u8, 0u8, 0u8, - //if (server_capabilities & CLIENT_SECURE_CONNECTION) - // string scramble 2nd part . 
Length = max(12, plugin data length - 9) - b"JQ8cihP4Q}Dx", - // string<1> reserved byte - 0u8, - //if (server_capabilities & PLUGIN_AUTH) - // string authentication plugin name - b"mysql_native_password\0" - ); - - let _message = InitialHandshakePacket::decode(&buf)?; - - Ok(()) - } -} diff --git a/sqlx-core/src/mysql/protocol/connect/mod.rs b/sqlx-core/src/mysql/protocol/connect/mod.rs deleted file mode 100644 index 80bd6e92..00000000 --- a/sqlx-core/src/mysql/protocol/connect/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod auth_switch_request; -mod initial; -mod response; -mod ssl_request; - -pub use auth_switch_request::AuthenticationSwitchRequest; -pub use initial::InitialHandshakePacket; -pub use response::HandshakeResponsePacket; -pub use ssl_request::SslRequest; diff --git a/sqlx-core/src/mysql/protocol/connect/response.rs b/sqlx-core/src/mysql/protocol/connect/response.rs deleted file mode 100644 index 34b5f799..00000000 --- a/sqlx-core/src/mysql/protocol/connect/response.rs +++ /dev/null @@ -1,86 +0,0 @@ -use crate::{ - io::BufMut, - mysql::{ - io::BufMutExt, - protocol::{Capabilities, Encode}, - }, -}; -use byteorder::LittleEndian; - -#[derive(Debug)] -pub struct HandshakeResponsePacket<'a> { - pub capabilities: Capabilities, - pub max_packet_size: u32, - pub client_collation: u8, - pub username: &'a str, - pub database: &'a str, - pub auth_data: Option<&'a [u8]>, - pub auth_plugin_name: Option<&'a str>, - pub connection_attrs: &'a [(&'a str, &'a str)], -} - -impl<'a> Encode for HandshakeResponsePacket<'a> { - fn encode(&self, buf: &mut Vec, capabilities: Capabilities) { - // client capabilities : int<4> - buf.put_u32::(self.capabilities.bits() as u32); - - // max packet size : int<4> - buf.put_u32::(self.max_packet_size); - - // client character collation : int<1> - buf.put_u8(self.client_collation); - - // reserved : string<19> - buf.advance(19); - - // if not (capabilities & CLIENT_MYSQL) - if !capabilities.contains(Capabilities::CLIENT_MYSQL) { - // extended client capabilities : int<4> - buf.put_u32::((self.capabilities.bits() >> 32) as u32); - } else { - // reserved : int<4> - buf.advance(4); - } - - // username : string - buf.put_str_nul(self.username); - - // if (capabilities & PLUGIN_AUTH_LENENC_CLIENT_DATA) - let auth_data = self.auth_data.unwrap_or_default(); - if capabilities.contains(Capabilities::PLUGIN_AUTH_LENENC_CLIENT_DATA) { - // authentication data : string - buf.put_bytes_lenenc::(auth_data); - } else if capabilities.contains(Capabilities::SECURE_CONNECTION) { - // length of authentication response : int<1> - // authentication response (length is indicated by previous field) : string - buf.put_u8(auth_data.len() as u8); - buf.put_bytes(auth_data); - } else { - // 0x00 : int<1> - buf.put_u8(0); - } - - // if (capabilities & CLIENT_CONNECT_WITH_DB) - if capabilities.contains(Capabilities::CONNECT_WITH_DB) { - // default database name : string - buf.put_str_nul(self.database); - } - - // if (capabilities & CLIENT_PLUGIN_AUTH) - if capabilities.contains(Capabilities::PLUGIN_AUTH) { - // authentication plugin name : string - buf.put_str_nul(self.auth_plugin_name.unwrap_or_default()); - } - - // if (capabilities & CLIENT_CONNECT_ATTRS) - if capabilities.contains(Capabilities::CONNECT_ATTRS) { - // size of connection attributes : int - buf.put_uint_lenenc::(self.connection_attrs.len() as u64); - - for (key, value) in self.connection_attrs { - buf.put_str_lenenc::(key); - buf.put_str_lenenc::(value); - } - } - } -} diff --git 
a/sqlx-core/src/mysql/protocol/connect/ssl_request.rs b/sqlx-core/src/mysql/protocol/connect/ssl_request.rs deleted file mode 100644 index a7fff6de..00000000 --- a/sqlx-core/src/mysql/protocol/connect/ssl_request.rs +++ /dev/null @@ -1,40 +0,0 @@ -use crate::{ - io::BufMut, - mysql::{ - io::BufMutExt, - protocol::{Capabilities, Encode}, - }, -}; -use byteorder::LittleEndian; - -#[derive(Debug)] -pub struct SslRequest { - pub capabilities: Capabilities, - pub max_packet_size: u32, - pub client_collation: u8, -} - -impl Encode for SslRequest { - fn encode(&self, buf: &mut Vec, capabilities: Capabilities) { - // client capabilities : int<4> - buf.put_u32::(self.capabilities.bits() as u32); - - // max packet size : int<4> - buf.put_u32::(self.max_packet_size); - - // client character collation : int<1> - buf.put_u8(self.client_collation); - - // reserved : string<19> - buf.advance(19); - - // if not (capabilities & CLIENT_MYSQL) - if !capabilities.contains(Capabilities::CLIENT_MYSQL) { - // extended client capabilities : int<4> - buf.put_u32::((self.capabilities.bits() >> 32) as u32); - } else { - // reserved : int<4> - buf.advance(4); - } - } -} diff --git a/sqlx-core/src/mysql/protocol/decode.rs b/sqlx-core/src/mysql/protocol/decode.rs new file mode 100644 index 00000000..5a8dd601 --- /dev/null +++ b/sqlx-core/src/mysql/protocol/decode.rs @@ -0,0 +1,7 @@ +use std::io; + +pub trait Decode { + fn decode(buf: &[u8]) -> crate::Result + where + Self: Sized; +} diff --git a/sqlx-core/src/mysql/protocol/encode.rs b/sqlx-core/src/mysql/protocol/encode.rs index deb888cf..1781acbe 100644 --- a/sqlx-core/src/mysql/protocol/encode.rs +++ b/sqlx-core/src/mysql/protocol/encode.rs @@ -1,4 +1,4 @@ -use super::Capabilities; +use crate::mysql::protocol::Capabilities; pub trait Encode { fn encode(&self, buf: &mut Vec, capabilities: Capabilities); diff --git a/sqlx-core/src/mysql/protocol/eof.rs b/sqlx-core/src/mysql/protocol/eof.rs new file mode 100644 index 00000000..e01c3743 --- /dev/null +++ b/sqlx-core/src/mysql/protocol/eof.rs @@ -0,0 +1,52 @@ +use byteorder::LittleEndian; + +use crate::io::Buf; +use crate::mysql::io::BufExt; +use crate::mysql::protocol::{Capabilities, Decode, Status}; + +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_basic_eof_packet.html +// https://mariadb.com/kb/en/eof_packet/ +#[derive(Debug)] +pub struct EofPacket { + warnings: u16, + status: Status, +} + +impl Decode for EofPacket { + fn decode(mut buf: &[u8]) -> crate::Result + where + Self: Sized, + { + let header = buf.get_u8()?; + if header != 0xFE { + return Err(protocol_err!( + "expected EOF (0xFE); received 0x{:X}", + header + ))?; + } + + let warnings = buf.get_u16::()?; + let status = buf.get_u16::()?; + + Ok(Self { + warnings, + status: Status::from_bits_truncate(status), + }) + } +} + +//#[cfg(test)] +//mod tests { +// use super::{Capabilities, Decode, ErrPacket, Status}; +// +// const ERR_HANDSHAKE_UNKNOWN_DB: &[u8] = b"\xff\x19\x04#42000Unknown database \'unknown\'"; +// +// #[test] +// fn it_decodes_ok_handshake() { +// let mut p = ErrPacket::decode(ERR_HANDSHAKE_UNKNOWN_DB).unwrap(); +// +// assert_eq!(p.error_code, 1049); +// assert_eq!(&*p.sql_state, "42000"); +// assert_eq!(&*p.error_message, "Unknown database \'unknown\'"); +// } +//} diff --git a/sqlx-core/src/mysql/protocol/err.rs b/sqlx-core/src/mysql/protocol/err.rs new file mode 100644 index 00000000..5aa70a28 --- /dev/null +++ b/sqlx-core/src/mysql/protocol/err.rs @@ -0,0 +1,55 @@ +use byteorder::LittleEndian; + +use crate::io::Buf; +use 
crate::mysql::io::BufExt; +use crate::mysql::protocol::{Capabilities, Decode, Status}; + +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_basic_err_packet.html +// https://mariadb.com/kb/en/err_packet/ +#[derive(Debug)] +pub struct ErrPacket { + pub error_code: u16, + pub sql_state: Box, + pub error_message: Box, +} + +impl Decode for ErrPacket { + fn decode(mut buf: &[u8]) -> crate::Result + where + Self: Sized, + { + let header = buf.get_u8()?; + if header != 0xFF { + return Err(protocol_err!("expected 0xFF; received 0x{:X}", header))?; + } + + let error_code = buf.get_u16::()?; + + let _sql_state_marker: u8 = buf.get_u8()?; + let sql_state = buf.get_str(5)?.into(); + + let error_message = buf.get_str(buf.len())?.into(); + + Ok(Self { + error_code, + sql_state, + error_message, + }) + } +} + +#[cfg(test)] +mod tests { + use super::{Capabilities, Decode, ErrPacket, Status}; + + const ERR_HANDSHAKE_UNKNOWN_DB: &[u8] = b"\xff\x19\x04#42000Unknown database \'unknown\'"; + + #[test] + fn it_decodes_ok_handshake() { + let mut p = ErrPacket::decode(ERR_HANDSHAKE_UNKNOWN_DB).unwrap(); + + assert_eq!(p.error_code, 1049); + assert_eq!(&*p.sql_state, "42000"); + assert_eq!(&*p.error_message, "Unknown database \'unknown\'"); + } +} diff --git a/sqlx-core/src/mysql/protocol/error_code.rs b/sqlx-core/src/mysql/protocol/error_code.rs deleted file mode 100644 index 06557a88..00000000 --- a/sqlx-core/src/mysql/protocol/error_code.rs +++ /dev/null @@ -1,997 +0,0 @@ -use std::fmt; - -#[derive(Default, Debug)] -pub struct ErrorCode(pub(crate) u16); - -use crate::error::DatabaseError; - -macro_rules! error_code_impl { - ($(const $name:ident: ErrorCode = ErrorCode($code:expr));*;) => { - impl ErrorCode { - $(const $name: ErrorCode = ErrorCode($code);)* - - pub fn code_name(&self) -> &'static str { - match self.0 { - $($code => stringify!($name),)* - _ => "" - } - } - } - } -} - -impl fmt::Display for ErrorCode { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{} ({})", self.code_name(), self.0) - } -} - -// Values from https://mariadb.com/kb/en/library/mysql-error-codes/ -error_code_impl! 
{ - const ER_ABORTING_CONNECTION: ErrorCode = ErrorCode(1152); - const ER_ACCESS_DENIED_CHANGE_USER_ERROR: ErrorCode = ErrorCode(1873); - const ER_ACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1045); - const ER_ACCESS_DENIED_NO_PASSWORD_ERROR: ErrorCode = ErrorCode(1698); - const ER_ADD_PARTITION_NO_NEW_PARTITION: ErrorCode = ErrorCode(1514); - const ER_ADD_PARTITION_SUBPART_ERROR: ErrorCode = ErrorCode(1513); - const ER_ADMIN_WRONG_MRG_TABLE: ErrorCode = ErrorCode(1472); - const ER_AES_INVALID_IV: ErrorCode = ErrorCode(1882); - const ER_ALTER_FILEGROUP_FAILED: ErrorCode = ErrorCode(1533); - const ER_ALTER_INF: ErrorCode = ErrorCode(1088); - const ER_ALTER_OPERATION_NOT_SUPPORTED: ErrorCode = ErrorCode(1845); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON: ErrorCode = ErrorCode(1846); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC: ErrorCode = ErrorCode(1854); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS: ErrorCode = ErrorCode(1856); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE: ErrorCode = ErrorCode(1850); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY: ErrorCode = ErrorCode(1847); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK: ErrorCode = ErrorCode(1851); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME: ErrorCode = ErrorCode(1849); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS: ErrorCode = ErrorCode(1857); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS: ErrorCode = ErrorCode(1855); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE: ErrorCode = ErrorCode(1852); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK: ErrorCode = ErrorCode(1853); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL: ErrorCode = ErrorCode(1861); - const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION: ErrorCode = ErrorCode(1848); - const ER_AMBIGUOUS_FIELD_TERM: ErrorCode = ErrorCode(1475); - const ER_AUTOINC_READ_FAILED: ErrorCode = ErrorCode(1467); - const ER_AUTO_CONVERT: ErrorCode = ErrorCode(1246); - const ER_AUTO_INCREMENT_CONFLICT: ErrorCode = ErrorCode(1869); - const ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON: ErrorCode = ErrorCode(1777); - const ER_BAD_BASE64_DATA: ErrorCode = ErrorCode(1958); - const ER_BAD_DATA: ErrorCode = ErrorCode(1918); - const ER_BAD_DB_ERROR: ErrorCode = ErrorCode(1049); - const ER_BAD_FIELD_ERROR: ErrorCode = ErrorCode(1054); - const ER_BAD_FT_COLUMN: ErrorCode = ErrorCode(1283); - const ER_BAD_HOST_ERROR: ErrorCode = ErrorCode(1042); - const ER_BAD_LOG_STATEMENT: ErrorCode = ErrorCode(1580); - const ER_BAD_NULL_ERROR: ErrorCode = ErrorCode(1048); - const ER_BAD_OPTION_VALUE: ErrorCode = ErrorCode(1912); - const ER_BAD_SLAVE: ErrorCode = ErrorCode(1200); - const ER_BAD_SLAVE_AUTO_POSITION: ErrorCode = ErrorCode(1776); - const ER_BAD_SLAVE_UNTIL_COND: ErrorCode = ErrorCode(1277); - const ER_BAD_TABLE_ERROR: ErrorCode = ErrorCode(1051); - const ER_BASE64_DECODE_ERROR: ErrorCode = ErrorCode(1575); - const ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX: ErrorCode = ErrorCode(1738); - const ER_BINLOG_CREATE_ROUTINE_NEED_SUPER: ErrorCode = ErrorCode(1419); - const ER_BINLOG_LOGGING_IMPOSSIBLE: ErrorCode = ErrorCode(1598); - const ER_BINLOG_LOGICAL_CORRUPTION: ErrorCode = ErrorCode(1866); - const ER_BINLOG_MULTIPLE_ENGINES: ErrorCode = ErrorCode(1667); - const ER_BINLOG_MUST_BE_EMPTY: ErrorCode = ErrorCode(1956); - const ER_BINLOG_PURGE_EMFILE: ErrorCode = ErrorCode(1587); - const ER_BINLOG_PURGE_FATAL_ERR: ErrorCode = ErrorCode(1377); - const ER_BINLOG_PURGE_PROHIBITED: ErrorCode = ErrorCode(1375); - 
const ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE: ErrorCode = ErrorCode(1744); - const ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE: ErrorCode = ErrorCode(1661); - const ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE: ErrorCode = ErrorCode(1664); - const ER_BINLOG_ROW_INJECTION_AND_STMT_MODE: ErrorCode = ErrorCode(1666); - const ER_BINLOG_ROW_LOGGING_FAILED: ErrorCode = ErrorCode(1534); - const ER_BINLOG_ROW_MODE_AND_STMT_ENGINE: ErrorCode = ErrorCode(1662); - const ER_BINLOG_ROW_RBR_TO_SBR: ErrorCode = ErrorCode(1536); - const ER_BINLOG_ROW_WRONG_TABLE_DEF: ErrorCode = ErrorCode(1535); - const ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX: ErrorCode = ErrorCode(1745); - const ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES: ErrorCode = ErrorCode(1844); - const ER_BINLOG_STMT_MODE_AND_ROW_ENGINE: ErrorCode = ErrorCode(1665); - const ER_BINLOG_UNSAFE_AND_STMT_ENGINE: ErrorCode = ErrorCode(1663); - const ER_BINLOG_UNSAFE_AUTOINC_COLUMNS: ErrorCode = ErrorCode(1671); - const ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST: ErrorCode = ErrorCode(1727); - const ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT: ErrorCode = ErrorCode(1717); - const ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT: ErrorCode = ErrorCode(1718); - const ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC: ErrorCode = ErrorCode(1723); - const ER_BINLOG_UNSAFE_INSERT_DELAYED: ErrorCode = ErrorCode(1669); - const ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT: ErrorCode = ErrorCode(1714); - const ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE: ErrorCode = ErrorCode(1715); - const ER_BINLOG_UNSAFE_INSERT_TWO_KEYS: ErrorCode = ErrorCode(1724); - const ER_BINLOG_UNSAFE_LIMIT: ErrorCode = ErrorCode(1668); - const ER_BINLOG_UNSAFE_MIXED_STATEMENT: ErrorCode = ErrorCode(1693); - const ER_BINLOG_UNSAFE_MULTIPLE_ENGINES: ErrorCode = ErrorCode(1692); - const ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS: ErrorCode = ErrorCode(1675); - const ER_BINLOG_UNSAFE_REPLACE_SELECT: ErrorCode = ErrorCode(1716); - const ER_BINLOG_UNSAFE_ROUTINE: ErrorCode = ErrorCode(1418); - const ER_BINLOG_UNSAFE_STATEMENT: ErrorCode = ErrorCode(1592); - const ER_BINLOG_UNSAFE_SYSTEM_FUNCTION: ErrorCode = ErrorCode(1674); - const ER_BINLOG_UNSAFE_SYSTEM_TABLE: ErrorCode = ErrorCode(1670); - const ER_BINLOG_UNSAFE_SYSTEM_VARIABLE: ErrorCode = ErrorCode(1673); - const ER_BINLOG_UNSAFE_UDF: ErrorCode = ErrorCode(1672); - const ER_BINLOG_UNSAFE_UPDATE_IGNORE: ErrorCode = ErrorCode(1719); - const ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT: ErrorCode = ErrorCode(1722); - const ER_BLOBS_AND_NO_TERMINATED: ErrorCode = ErrorCode(1084); - const ER_BLOB_CANT_HAVE_DEFAULT: ErrorCode = ErrorCode(1101); - const ER_BLOB_FIELD_IN_PART_FUNC_ERROR: ErrorCode = ErrorCode(1502); - const ER_BLOB_KEY_WITHOUT_LENGTH: ErrorCode = ErrorCode(1170); - const ER_BLOB_USED_AS_KEY: ErrorCode = ErrorCode(1073); - const ER_CANNOT_ADD_FOREIGN: ErrorCode = ErrorCode(1215); - const ER_CANNOT_CONVERT_CHARACTER: ErrorCode = ErrorCode(1977); - const ER_CANNOT_GRANT_ROLE: ErrorCode = ErrorCode(1961); - const ER_CANNOT_LOAD_FROM_TABLE: ErrorCode = ErrorCode(1548); - const ER_CANNOT_LOAD_FROM_TABLE_V2: ErrorCode = ErrorCode(1728); - const ER_CANNOT_LOAD_SLAVE_GTID_STATE: ErrorCode = ErrorCode(1946); - const ER_CANNOT_REVOKE_ROLE: ErrorCode = ErrorCode(1962); - const ER_CANNOT_UPDATE_GTID_STATE: ErrorCode = ErrorCode(1942); - const ER_CANNOT_USER: ErrorCode = ErrorCode(1396); - const ER_CANT_ACTIVATE_LOG: ErrorCode = ErrorCode(1573); - const ER_CANT_AGGREGATE2_COLLATIONS: ErrorCode = ErrorCode(1267); - const ER_CANT_AGGREGATE3_COLLATIONS: ErrorCode = ErrorCode(1270); - const 
ER_CANT_AGGREGATE_NCOLLATIONS: ErrorCode = ErrorCode(1271); - const ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL: ErrorCode = - ErrorCode(1768); - const ER_CANT_CHANGE_TX_ISOLATION: ErrorCode = ErrorCode(1568); - const ER_CANT_CREATE_DB: ErrorCode = ErrorCode(1006); - const ER_CANT_CREATE_FEDERATED_TABLE: ErrorCode = ErrorCode(1434); - const ER_CANT_CREATE_FILE: ErrorCode = ErrorCode(1004); - const ER_CANT_CREATE_GEOMETRY_OBJECT: ErrorCode = ErrorCode(1416); - const ER_CANT_CREATE_HANDLER_FILE: ErrorCode = ErrorCode(1501); - const ER_CANT_CREATE_SROUTINE: ErrorCode = ErrorCode(1607); - const ER_CANT_CREATE_TABLE: ErrorCode = ErrorCode(1005); - const ER_CANT_CREATE_THREAD: ErrorCode = ErrorCode(1135); - const ER_CANT_CREATE_USER_WITH_GRANT: ErrorCode = ErrorCode(1410); - const ER_CANT_DELETE_FILE: ErrorCode = ErrorCode(1011); - const ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET: ErrorCode = ErrorCode(1778); - const ER_CANT_DO_ONLINE: ErrorCode = ErrorCode(1915); - const ER_CANT_DO_THIS_DURING_AN_TRANSACTION: ErrorCode = ErrorCode(1179); - const ER_CANT_DROP_FIELD_OR_KEY: ErrorCode = ErrorCode(1091); - const ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION: ErrorCode = ErrorCode(1792); - const ER_CANT_FIND_DL_ENTRY: ErrorCode = ErrorCode(1127); - const ER_CANT_FIND_SYSTEM_REC: ErrorCode = ErrorCode(1012); - const ER_CANT_FIND_UDF: ErrorCode = ErrorCode(1122); - const ER_CANT_GET_STAT: ErrorCode = ErrorCode(1013); - const ER_CANT_GET_WD: ErrorCode = ErrorCode(1014); - const ER_CANT_INITIALIZE_UDF: ErrorCode = ErrorCode(1123); - const ER_CANT_LOCK: ErrorCode = ErrorCode(1015); - const ER_CANT_LOCK_LOG_TABLE: ErrorCode = ErrorCode(1556); - const ER_CANT_OPEN_FILE: ErrorCode = ErrorCode(1016); - const ER_CANT_OPEN_LIBRARY: ErrorCode = ErrorCode(1126); - const ER_CANT_READ_DIR: ErrorCode = ErrorCode(1018); - const ER_CANT_REMOVE_ALL_FIELDS: ErrorCode = ErrorCode(1090); - const ER_CANT_RENAME_LOG_TABLE: ErrorCode = ErrorCode(1581); - const ER_CANT_REOPEN_TABLE: ErrorCode = ErrorCode(1137); - const ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF: ErrorCode = ErrorCode(1783); - const ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON: ErrorCode = ErrorCode(1782); - const ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF: ErrorCode = ErrorCode(1781); - const ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID: ErrorCode = ErrorCode(1790); - const ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY: ErrorCode = ErrorCode(1840); - const ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF: ErrorCode = ErrorCode(1839); - const ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY: ErrorCode = ErrorCode(1841); - const ER_CANT_SET_WD: ErrorCode = ErrorCode(1019); - const ER_CANT_START_STOP_SLAVE: ErrorCode = ErrorCode(1936); - const ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT: ErrorCode = ErrorCode(1746); - const ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG: ErrorCode = ErrorCode(1442); - const ER_CANT_UPDATE_WITH_READLOCK: ErrorCode = ErrorCode(1223); - const ER_CANT_USE_OPTION_HERE: ErrorCode = ErrorCode(1234); - const ER_CANT_WRITE_LOCK_LOG_TABLE: ErrorCode = ErrorCode(1555); - const ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE: ErrorCode = ErrorCode(1750); - const ER_CHANGE_SLAVE_PARALLEL_THREADS_ACTIVE: ErrorCode = ErrorCode(1963); - const ER_CHECKREAD: ErrorCode = ErrorCode(1020); - const ER_CHECK_NOT_IMPLEMENTED: ErrorCode = ErrorCode(1178); - const ER_CHECK_NO_SUCH_TABLE: ErrorCode = ErrorCode(1177); - const ER_COALESCE_ONLY_ON_HASH_PARTITION: ErrorCode = ErrorCode(1509); - const 
ER_COALESCE_PARTITION_NO_PARTITION: ErrorCode = ErrorCode(1515); - const ER_COLLATION_CHARSET_MISMATCH: ErrorCode = ErrorCode(1253); - const ER_COLUMNACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1143); - const ER_COL_COUNT_DOESNT_MATCH_CORRUPTED: ErrorCode = ErrorCode(1547); - const ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2: ErrorCode = ErrorCode(1805); - const ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE: ErrorCode = ErrorCode(1558); - const ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG: ErrorCode = ErrorCode(1422); - const ER_COND_ITEM_TOO_LONG: ErrorCode = ErrorCode(1648); - const ER_CONFLICTING_DECLARATIONS: ErrorCode = ErrorCode(1302); - const ER_CONFLICT_FN_PARSE_ERROR: ErrorCode = ErrorCode(1626); - const ER_CONNECTION_ALREADY_EXISTS: ErrorCode = ErrorCode(1934); - const ER_CONNECTION_KILLED: ErrorCode = ErrorCode(1927); - const ER_CONNECT_TO_FOREIGN_DATA_SOURCE: ErrorCode = ErrorCode(1429); - const ER_CONNECT_TO_MASTER: ErrorCode = ErrorCode(1218); - const ER_CONSECUTIVE_REORG_PARTITIONS: ErrorCode = ErrorCode(1519); - const ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR: ErrorCode = ErrorCode(1486); - const ER_CONST_EXPR_IN_VCOL: ErrorCode = ErrorCode(1908); - const ER_CON_COUNT_ERROR: ErrorCode = ErrorCode(1040); - const ER_CORRUPT_HELP_DB: ErrorCode = ErrorCode(1244); - const ER_CRASHED_ON_REPAIR: ErrorCode = ErrorCode(1195); - const ER_CRASHED_ON_USAGE: ErrorCode = ErrorCode(1194); - const ER_CREATE_DB_WITH_READ_LOCK: ErrorCode = ErrorCode(1209); - const ER_CREATE_FILEGROUP_FAILED: ErrorCode = ErrorCode(1528); - const ER_CUT_VALUE_GROUP_CONCAT: ErrorCode = ErrorCode(1260); - const ER_CYCLIC_REFERENCE: ErrorCode = ErrorCode(1245); - const ER_DATABASE_NAME: ErrorCode = ErrorCode(1631); - const ER_DATA_CONVERSION_ERROR_FOR_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1902); - const ER_DATA_OUT_OF_RANGE: ErrorCode = ErrorCode(1690); - const ER_DATA_OVERFLOW: ErrorCode = ErrorCode(1916); - const ER_DATA_TOO_LONG: ErrorCode = ErrorCode(1406); - const ER_DATA_TRUNCATED: ErrorCode = ErrorCode(1917); - const ER_DATETIME_FUNCTION_OVERFLOW: ErrorCode = ErrorCode(1441); - const ER_DA_INVALID_CONDITION_NUMBER: ErrorCode = ErrorCode(1758); - const ER_DBACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1044); - const ER_DB_CREATE_EXISTS: ErrorCode = ErrorCode(1007); - const ER_DB_DROP_DELETE: ErrorCode = ErrorCode(1009); - const ER_DB_DROP_EXISTS: ErrorCode = ErrorCode(1008); - const ER_DB_DROP_RMDIR: ErrorCode = ErrorCode(1010); - const ER_DDL_LOG_ERROR: ErrorCode = ErrorCode(1565); - const ER_DEBUG_SYNC_HIT_LIMIT: ErrorCode = ErrorCode(1640); - const ER_DEBUG_SYNC_TIMEOUT: ErrorCode = ErrorCode(1639); - const ER_DELAYED_CANT_CHANGE_LOCK: ErrorCode = ErrorCode(1150); - const ER_DELAYED_INSERT_TABLE_LOCKED: ErrorCode = ErrorCode(1165); - const ER_DELAYED_NOT_SUPPORTED: ErrorCode = ErrorCode(1616); - const ER_DERIVED_MUST_HAVE_ALIAS: ErrorCode = ErrorCode(1248); - const ER_DIFF_GROUPS_PROC: ErrorCode = ErrorCode(1384); - const ER_DISCARD_FK_CHECKS_RUNNING: ErrorCode = ErrorCode(1807); - const ER_DISK_FULL: ErrorCode = ErrorCode(1021); - const ER_DIVISION_BY_ZER: ErrorCode = ErrorCode(1365); - const ER_DROP_DB_WITH_READ_LOCK: ErrorCode = ErrorCode(1208); - const ER_DROP_FILEGROUP_FAILED: ErrorCode = ErrorCode(1529); - const ER_DROP_INDEX_FK: ErrorCode = ErrorCode(1553); - const ER_DROP_LAST_PARTITION: ErrorCode = ErrorCode(1508); - const ER_DROP_PARTITION_NON_EXISTENT: ErrorCode = ErrorCode(1507); - const ER_DROP_USER: ErrorCode = ErrorCode(1268); - const ER_DUMP_NOT_IMPLEMENTED: ErrorCode = ErrorCode(1185); - const 
ER_DUPLICATED_VALUE_IN_TYPE: ErrorCode = ErrorCode(1291); - const ER_DUPLICATE_GTID_DOMAIN: ErrorCode = ErrorCode(1943); - const ER_DUP_ARGUMENT: ErrorCode = ErrorCode(1225); - const ER_DUP_ENTRY: ErrorCode = ErrorCode(1062); - const ER_DUP_ENTRY_AUTOINCREMENT_CASE: ErrorCode = ErrorCode(1569); - const ER_DUP_ENTRY_WITH_KEY_NAME: ErrorCode = ErrorCode(1586); - const ER_DUP_FIELDNAME: ErrorCode = ErrorCode(1060); - const ER_DUP_INDEX: ErrorCode = ErrorCode(1831); - const ER_DUP_KEY: ErrorCode = ErrorCode(1022); - const ER_DUP_KEYNAME: ErrorCode = ErrorCode(1061); - const ER_DUP_SIGNAL_SET: ErrorCode = ErrorCode(1641); - const ER_DUP_UNIQUE: ErrorCode = ErrorCode(1169); - const ER_DUP_UNKNOWN_IN_INDEX: ErrorCode = ErrorCode(1859); - const ER_DYN_COL_DATA: ErrorCode = ErrorCode(1921); - const ER_DYN_COL_IMPLEMENTATION_LIMIT: ErrorCode = ErrorCode(1920); - const ER_DYN_COL_WRONG_CHARSET: ErrorCode = ErrorCode(1922); - const ER_DYN_COL_WRONG_FORMAT: ErrorCode = ErrorCode(1919); - const ER_EMPTY_QUERY: ErrorCode = ErrorCode(1065); - const ER_ERROR_DURING_CHECKPOINT: ErrorCode = ErrorCode(1183); - const ER_ERROR_DURING_COMMIT: ErrorCode = ErrorCode(1180); - const ER_ERROR_DURING_FLUSH_LOGS: ErrorCode = ErrorCode(1182); - const ER_ERROR_DURING_ROLLBACK: ErrorCode = ErrorCode(1181); - const ER_ERROR_IN_TRIGGER_BODY: ErrorCode = ErrorCode(1710); - const ER_ERROR_IN_UNKNOWN_TRIGGER_BODY: ErrorCode = ErrorCode(1711); - const ER_ERROR_ON_CLOSE: ErrorCode = ErrorCode(1023); - const ER_ERROR_ON_READ: ErrorCode = ErrorCode(1024); - const ER_ERROR_ON_RENAME: ErrorCode = ErrorCode(1025); - const ER_ERROR_ON_WRITE: ErrorCode = ErrorCode(1026); - const ER_ERROR_WHEN_EXECUTING_COMMAND: ErrorCode = ErrorCode(1220); - const ER_EVENTS_DB_ERROR: ErrorCode = ErrorCode(1577); - const ER_EVENT_ALREADY_EXISTS: ErrorCode = ErrorCode(1537); - const ER_EVENT_CANNOT_ALTER_IN_THE_PAST: ErrorCode = ErrorCode(1589); - const ER_EVENT_CANNOT_CREATE_IN_THE_PAST: ErrorCode = ErrorCode(1588); - const ER_EVENT_CANNOT_DELETE: ErrorCode = ErrorCode(1549); - const ER_EVENT_CANT_ALTER: ErrorCode = ErrorCode(1540); - const ER_EVENT_COMPILE_ERROR: ErrorCode = ErrorCode(1550); - const ER_EVENT_DATA_TOO_LONG: ErrorCode = ErrorCode(1552); - const ER_EVENT_DOES_NOT_EXIST: ErrorCode = ErrorCode(1539); - const ER_EVENT_DROP_FAILED: ErrorCode = ErrorCode(1541); - const ER_EVENT_ENDS_BEFORE_STARTS: ErrorCode = ErrorCode(1543); - const ER_EVENT_EXEC_TIME_IN_THE_PAST: ErrorCode = ErrorCode(1544); - const ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG: ErrorCode = ErrorCode(1542); - const ER_EVENT_INVALID_CREATION_CTX: ErrorCode = ErrorCode(1605); - const ER_EVENT_MODIFY_QUEUE_ERROR: ErrorCode = ErrorCode(1570); - const ER_EVENT_NEITHER_M_EXPR_NOR_M_AT: ErrorCode = ErrorCode(1546); - const ER_EVENT_OPEN_TABLE_FAILED: ErrorCode = ErrorCode(1545); - const ER_EVENT_RECURSION_FORBIDDEN: ErrorCode = ErrorCode(1576); - const ER_EVENT_SAME_NAME: ErrorCode = ErrorCode(1551); - const ER_EVENT_SET_VAR_ERROR: ErrorCode = ErrorCode(1571); - const ER_EVENT_STORE_FAILED: ErrorCode = ErrorCode(1538); - const ER_EXCEPTIONS_WRITE_ERROR: ErrorCode = ErrorCode(1627); - const ER_EXEC_STMT_WITH_OPEN_CURSOR: ErrorCode = ErrorCode(1420); - const ER_FAILED_GTID_STATE_INIT: ErrorCode = ErrorCode(1940); - const ER_FAILED_READ_FROM_PAR_FILE: ErrorCode = ErrorCode(1696); - const ER_FAILED_ROUTINE_BREAK_BINLOG: ErrorCode = ErrorCode(1417); - const ER_FEATURE_DISABLED: ErrorCode = ErrorCode(1289); - const ER_FIELD_NOT_FOUND_PART_ERROR: ErrorCode = ErrorCode(1488); - const 
ER_FIELD_SPECIFIED_TWICE: ErrorCode = ErrorCode(1110); - const ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD: ErrorCode = ErrorCode(1659); - const ER_FILEGROUP_OPTION_ONLY_ONCE: ErrorCode = ErrorCode(1527); - const ER_FILE_EXISTS_ERROR: ErrorCode = ErrorCode(1086); - const ER_FILE_NOT_FOUND: ErrorCode = ErrorCode(1017); - const ER_FILE_USED: ErrorCode = ErrorCode(1027); - const ER_FILSORT_ABORT: ErrorCode = ErrorCode(1028); - const ER_FK_CANNOT_DELETE_PARENT: ErrorCode = ErrorCode(1834); - const ER_FK_CANNOT_OPEN_PARENT: ErrorCode = ErrorCode(1824); - const ER_FK_COLUMN_CANNOT_CHANGE: ErrorCode = ErrorCode(1832); - const ER_FK_COLUMN_CANNOT_CHANGE_CHILD: ErrorCode = ErrorCode(1833); - const ER_FK_COLUMN_CANNOT_DROP: ErrorCode = ErrorCode(1828); - const ER_FK_COLUMN_CANNOT_DROP_CHILD: ErrorCode = ErrorCode(1829); - const ER_FK_COLUMN_NOT_NULL: ErrorCode = ErrorCode(1830); - const ER_FK_DUP_NAME: ErrorCode = ErrorCode(1826); - const ER_FK_FAIL_ADD_SYSTEM: ErrorCode = ErrorCode(1823); - const ER_FK_INCORRECT_OPTION: ErrorCode = ErrorCode(1825); - const ER_FK_NO_INDEX_CHILD: ErrorCode = ErrorCode(1821); - const ER_FK_NO_INDEX_PARENT: ErrorCode = ErrorCode(1822); - const ER_FLUSH_MASTER_BINLOG_CLOSED: ErrorCode = ErrorCode(1186); - const ER_FORBID_SCHEMA_CHANGE: ErrorCode = ErrorCode(1450); - const ER_FORCING_CLOSE: ErrorCode = ErrorCode(1080); - const ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST: ErrorCode = ErrorCode(1431); - const ER_FOREIGN_DATA_STRING_INVALID: ErrorCode = ErrorCode(1433); - const ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE: ErrorCode = ErrorCode(1432); - const ER_FOREIGN_DUPLICATE_KEY: ErrorCode = ErrorCode(1557); - const ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO: ErrorCode = ErrorCode(1762); - const ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO: ErrorCode = ErrorCode(1761); - const ER_FOREIGN_KEY_ON_PARTITIONED: ErrorCode = ErrorCode(1506); - const ER_FOREIGN_SERVER_DOESNT_EXIST: ErrorCode = ErrorCode(1477); - const ER_FOREIGN_SERVER_EXISTS: ErrorCode = ErrorCode(1476); - const ER_FORM_NOT_FOUND: ErrorCode = ErrorCode(1029); - const ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF: ErrorCode = ErrorCode(1784); - const ER_FPARSER_BAD_HEADER: ErrorCode = ErrorCode(1341); - const ER_FPARSER_EOF_IN_COMMENT: ErrorCode = ErrorCode(1342); - const ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER: ErrorCode = ErrorCode(1344); - const ER_FPARSER_ERROR_IN_PARAMETER: ErrorCode = ErrorCode(1343); - const ER_FPARSER_TOO_BIG_FILE: ErrorCode = ErrorCode(1340); - const ER_FRM_UNKNOWN_TYPE: ErrorCode = ErrorCode(1346); - const ER_FSEEK_FAIL: ErrorCode = ErrorCode(1376); - const ER_FT_MATCHING_KEY_NOT_FOUND: ErrorCode = ErrorCode(1191); - const ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING: ErrorCode = ErrorCode(1757); - const ER_FUNCTION_NOT_DEFINED: ErrorCode = ErrorCode(1128); - const ER_FUNC_INEXISTENT_NAME_COLLISION: ErrorCode = ErrorCode(1630); - const ER_GET_ERRMSG: ErrorCode = ErrorCode(1296); - const ER_GET_ERRN: ErrorCode = ErrorCode(1030); - const ER_GET_TEMPORARY_ERRMSG: ErrorCode = ErrorCode(1297); - const ER_GLOBAL_VARIABLE: ErrorCode = ErrorCode(1229); - const ER_GNO_EXHAUSTED: ErrorCode = ErrorCode(1775); - const ER_GOT_SIGNAL: ErrorCode = ErrorCode(1078); - const ER_GRANT_PLUGIN_USER_EXISTS: ErrorCode = ErrorCode(1700); - const ER_GRANT_WRONG_HOST_OR_USER: ErrorCode = ErrorCode(1145); - const ER_GTID_EXECUTED_WAS_CHANGED: ErrorCode = ErrorCode(1843); - const ER_GTID_MODE2_OR3_REQUIRES_DISABLE_GTID_UNSAFE_STATEMENTS_ON: ErrorCode = ErrorCode(1779); - const ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME: 
ErrorCode = ErrorCode(1788); - const ER_GTID_MODE_REQUIRES_BINLOG: ErrorCode = ErrorCode(1780); - const ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL: ErrorCode = ErrorCode(1770); - const ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST: ErrorCode = ErrorCode(1767); - const ER_GTID_NEXT_TYPE_UNDEFINED_GROUP: ErrorCode = ErrorCode(1837); - const ER_GTID_OPEN_TABLE_FAILED: ErrorCode = ErrorCode(1944); - const ER_GTID_POSITION_NOT_FOUND_IN_BINLOG: ErrorCode = ErrorCode(1945); - const ER_GTID_POSITION_NOT_FOUND_IN_BINLOG2: ErrorCode = ErrorCode(1955); - const ER_GTID_PURGED_WAS_CHANGED: ErrorCode = ErrorCode(1842); - const ER_GTID_START_FROM_BINLOG_HOLE: ErrorCode = ErrorCode(1951); - const ER_GTID_STRICT_OUT_OF_ORDER: ErrorCode = ErrorCode(1950); - const ER_GTID_UNSAFE_BINLOG_SPLITTABLE_STATEMENT_AND_GTID_GROUP: ErrorCode = ErrorCode(1884); - const ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION: ErrorCode = ErrorCode(1787); - const ER_GTID_UNSAFE_CREATE_SELECT: ErrorCode = ErrorCode(1786); - const ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE: ErrorCode = ErrorCode(1785); - const ER_HANDSHAKE_ERROR: ErrorCode = ErrorCode(1043); - const ER_HASHCHK: ErrorCode = ErrorCode(1000); - const ER_HOSTNAME: ErrorCode = ErrorCode(1469); - const ER_HOST_IS_BLOCKED: ErrorCode = ErrorCode(1129); - const ER_HOST_NOT_PRIVILEGED: ErrorCode = ErrorCode(1130); - const ER_IDENT_CAUSES_TOO_LONG_PATH: ErrorCode = ErrorCode(1860); - const ER_ILLEGAL_GRANT_FOR_TABLE: ErrorCode = ErrorCode(1144); - const ER_ILLEGAL_HA: ErrorCode = ErrorCode(1031); - const ER_ILLEGAL_HA_CREATE_OPTION: ErrorCode = ErrorCode(1478); - const ER_ILLEGAL_REFERENCE: ErrorCode = ErrorCode(1247); - const ER_ILLEGAL_SUBQUERY_OPTIMIZER_SWITCHES: ErrorCode = ErrorCode(1923); - const ER_ILLEGAL_VALUE_FOR_TYPE: ErrorCode = ErrorCode(1367); - const ER_INCONSISTENT_PARTITION_INFO_ERROR: ErrorCode = ErrorCode(1490); - const ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR: ErrorCode = ErrorCode(1494); - const ER_INCORRECT_GLOBAL_LOCAL_VAR: ErrorCode = ErrorCode(1238); - const ER_INCORRECT_GTID_STATE: ErrorCode = ErrorCode(1941); - const ER_INDEX_COLUMN_TOO_LONG: ErrorCode = ErrorCode(1709); - const ER_INDEX_CORRUPT: ErrorCode = ErrorCode(1712); - const ER_INDEX_REBUILD: ErrorCode = ErrorCode(1187); - const ER_INNODB_FORCED_RECOVERY: ErrorCode = ErrorCode(1881); - const ER_INNODB_FT_AUX_NOT_HEX_ID: ErrorCode = ErrorCode(1879); - const ER_INNODB_FT_LIMIT: ErrorCode = ErrorCode(1795); - const ER_INNODB_FT_WRONG_DOCID_COLUMN: ErrorCode = ErrorCode(1797); - const ER_INNODB_FT_WRONG_DOCID_INDEX: ErrorCode = ErrorCode(1798); - const ER_INNODB_IMPORT_ERROR: ErrorCode = ErrorCode(1816); - const ER_INNODB_INDEX_CORRUPT: ErrorCode = ErrorCode(1817); - const ER_INNODB_NO_FT_TEMP_TABLE: ErrorCode = ErrorCode(1796); - const ER_INNODB_NO_FT_USES_PARSER: ErrorCode = ErrorCode(1865); - const ER_INNODB_ONLINE_LOG_TOO_BIG: ErrorCode = ErrorCode(1799); - const ER_INNODB_READ_ONLY: ErrorCode = ErrorCode(1874); - const ER_INSECURE_CHANGE_MASTER: ErrorCode = ErrorCode(1760); - const ER_INSECURE_PLAIN_TEXT: ErrorCode = ErrorCode(1759); - const ER_INSERT_INF: ErrorCode = ErrorCode(1092); - const ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT: ErrorCode = ErrorCode(1685); - const ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT: ErrorCode = ErrorCode(1679); - const ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO: ErrorCode = ErrorCode(1953); - const ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION: ErrorCode = ErrorCode(1929); - const 
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN: ErrorCode = ErrorCode(1694); - const ER_INTERNAL_ERROR: ErrorCode = ErrorCode(1815); - const ER_INVALID_CHARACTER_STRING: ErrorCode = ErrorCode(1300); - const ER_INVALID_CURRENT_USER: ErrorCode = ErrorCode(1960); - const ER_INVALID_DEFAULT: ErrorCode = ErrorCode(1067); - const ER_INVALID_DEFAULT_VALUE_FOR_FIELD: ErrorCode = ErrorCode(1978); - const ER_INVALID_GROUP_FUNC_USE: ErrorCode = ErrorCode(1111); - const ER_INVALID_ON_UPDATE: ErrorCode = ErrorCode(1294); - const ER_INVALID_ROLE: ErrorCode = ErrorCode(1959); - const ER_INVALID_USE_OF_NULL: ErrorCode = ErrorCode(1138); - const ER_INVALID_YEAR_COLUMN_LENGTH: ErrorCode = ErrorCode(1818); - const ER_IO_ERR_LOG_INDEX_READ: ErrorCode = ErrorCode(1374); - const ER_IO_READ_ERROR: ErrorCode = ErrorCode(1810); - const ER_IO_WRITE_ERROR: ErrorCode = ErrorCode(1811); - const ER_IPSOCK_ERROR: ErrorCode = ErrorCode(1081); - const ER_IT_IS_A_VIEW: ErrorCode = ErrorCode(1965); - const ER_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1904); - const ER_KEY_COLUMN_DOES_NOT_EXITS: ErrorCode = ErrorCode(1072); - const ER_KEY_DOES_NOT_EXITS: ErrorCode = ErrorCode(1176); - const ER_KEY_NOT_FOUND: ErrorCode = ErrorCode(1032); - const ER_KEY_PART0: ErrorCode = ErrorCode(1391); - const ER_KEY_REF_DO_NOT_MATCH_TABLE_REF: ErrorCode = ErrorCode(1240); - const ER_KILL_DENIED_ERROR: ErrorCode = ErrorCode(1095); - const ER_KILL_QUERY_DENIED_ERROR: ErrorCode = ErrorCode(1979); - const ER_LIMITED_PART_RANGE: ErrorCode = ErrorCode(1523); - const ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR: ErrorCode = ErrorCode(1489); - const ER_LOAD_DATA_INVALID_COLUMN: ErrorCode = ErrorCode(1611); - const ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR: ErrorCode = ErrorCode(1409); - const ER_LOAD_INF: ErrorCode = ErrorCode(1087); - const ER_LOCAL_VARIABLE: ErrorCode = ErrorCode(1228); - const ER_LOCK_ABORTED: ErrorCode = ErrorCode(1689); - const ER_LOCK_DEADLOCK: ErrorCode = ErrorCode(1213); - const ER_LOCK_OR_ACTIVE_TRANSACTION: ErrorCode = ErrorCode(1192); - const ER_LOCK_TABLE_FULL: ErrorCode = ErrorCode(1206); - const ER_LOCK_WAIT_TIMEOUT: ErrorCode = ErrorCode(1205); - const ER_LOGGING_PROHIBIT_CHANGING_OF: ErrorCode = ErrorCode(1387); - const ER_LOG_IN_USE: ErrorCode = ErrorCode(1378); - const ER_LOG_PURGE_NO_FILE: ErrorCode = ErrorCode(1612); - const ER_LOG_PURGE_UNKNOWN_ERR: ErrorCode = ErrorCode(1379); - const ER_MALFORMED_DEFINER: ErrorCode = ErrorCode(1446); - const ER_MALFORMED_GTID_SET_ENCODING: ErrorCode = ErrorCode(1773); - const ER_MALFORMED_GTID_SET_SPECIFICATION: ErrorCode = ErrorCode(1772); - const ER_MALFORMED_GTID_SPECIFICATION: ErrorCode = ErrorCode(1774); - const ER_MALFORMED_PACKET: ErrorCode = ErrorCode(1835); - const ER_MASTER: ErrorCode = ErrorCode(1188); - const ER_MASTER_DELAY_VALUE_OUT_OF_RANGE: ErrorCode = ErrorCode(1729); - const ER_MASTER_FATAL_ERROR_READING_BINLOG: ErrorCode = ErrorCode(1236); - const ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG: ErrorCode = ErrorCode(1947); - const ER_MASTER_GTID_POS_MISSING_DOMAIN: ErrorCode = ErrorCode(1948); - const ER_MASTER_HAS_PURGED_REQUIRED_GTIDS: ErrorCode = ErrorCode(1789); - const ER_MASTER_INF: ErrorCode = ErrorCode(1201); - const ER_MASTER_LOG_PREFIX: ErrorCode = ErrorCode(1935); - const ER_MASTER_NET_READ: ErrorCode = ErrorCode(1189); - const ER_MASTER_NET_WRITE: ErrorCode = ErrorCode(1190); - const ER_MAXVALUE_IN_VALUES_IN: ErrorCode = ErrorCode(1656); - const ER_MAX_PREPARED_STMT_COUNT_REACHED: ErrorCode = ErrorCode(1461); - const ER_MESSAGE_AND_STATEMENT: 
ErrorCode = ErrorCode(1676); - const ER_MISSING_SKIP_SLAVE: ErrorCode = ErrorCode(1278); - const ER_MIXING_NOT_ALLOWED: ErrorCode = ErrorCode(1224); - const ER_MIX_HANDLER_ERROR: ErrorCode = ErrorCode(1497); - const ER_MIX_OF_GROUP_FUNC_AND_FIELDS: ErrorCode = ErrorCode(1140); - const ER_MTS_CANT_PARALLEL: ErrorCode = ErrorCode(1755); - const ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS: ErrorCode = ErrorCode(1802); - const ER_MTS_EVENT_BIGGER_PENDING_JOBS_SIZE_MAX: ErrorCode = ErrorCode(1864); - const ER_MTS_FEATURE_IS_NOT_SUPPORTED: ErrorCode = ErrorCode(1753); - const ER_MTS_INCONSISTENT_DATA: ErrorCode = ErrorCode(1756); - const ER_MTS_RECOVERY_FAILURE: ErrorCode = ErrorCode(1803); - const ER_MTS_RESET_WORKERS: ErrorCode = ErrorCode(1804); - const ER_MTS_UPDATED_DBS_GREATER_MAX: ErrorCode = ErrorCode(1754); - const ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR: ErrorCode = ErrorCode(1495); - const ER_MULTIPLE_PRI_KEY: ErrorCode = ErrorCode(1068); - const ER_MULTI_UPDATE_KEY_CONFLICT: ErrorCode = ErrorCode(1706); - const ER_MUST_CHANGE_PASSWORD: ErrorCode = ErrorCode(1820); - const ER_MUST_CHANGE_PASSWORD_LOGIN: ErrorCode = ErrorCode(1862); - const ER_M_BIGGER_THAN_D: ErrorCode = ErrorCode(1427); - const ER_NAME_BECOMES_EMPTY: ErrorCode = ErrorCode(1474); - const ER_NATIVE_FCT_NAME_COLLISION: ErrorCode = ErrorCode(1585); - const ER_NDB_CANT_SWITCH_BINLOG_FORMAT: ErrorCode = ErrorCode(1561); - const ER_NDB_REPLICATION_SCHEMA_ERROR: ErrorCode = ErrorCode(1625); - const ER_NEED_REPREPARE: ErrorCode = ErrorCode(1615); - const ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE: ErrorCode = ErrorCode(1743); - const ER_NET_ERROR_ON_WRITE: ErrorCode = ErrorCode(1160); - const ER_NET_FCNTL_ERROR: ErrorCode = ErrorCode(1155); - const ER_NET_PACKETS_OUT_OF_ORDER: ErrorCode = ErrorCode(1156); - const ER_NET_PACKET_TOO_LARGE: ErrorCode = ErrorCode(1153); - const ER_NET_READ_ERROR: ErrorCode = ErrorCode(1158); - const ER_NET_READ_ERROR_FROM_PIPE: ErrorCode = ErrorCode(1154); - const ER_NET_READ_INTERRUPTED: ErrorCode = ErrorCode(1159); - const ER_NET_UNCOMPRESS_ERROR: ErrorCode = ErrorCode(1157); - const ER_NET_WRITE_INTERRUPTED: ErrorCode = ErrorCode(1161); - const ER_NEW_ABORTING_CONNECTION: ErrorCode = ErrorCode(1184); - const ER_NISAMCHK: ErrorCode = ErrorCode(1001); - const ER_NO: ErrorCode = ErrorCode(1002); - const ER_NONEXISTING_GRANT: ErrorCode = ErrorCode(1141); - const ER_NONEXISTING_PROC_GRANT: ErrorCode = ErrorCode(1403); - const ER_NONEXISTING_TABLE_GRANT: ErrorCode = ErrorCode(1147); - const ER_NONUNIQ_TABLE: ErrorCode = ErrorCode(1066); - const ER_NONUPDATEABLE_COLUMN: ErrorCode = ErrorCode(1348); - const ER_NON_GROUPING_FIELD_USED: ErrorCode = ErrorCode(1463); - const ER_NON_INSERTABLE_TABLE: ErrorCode = ErrorCode(1471); - const ER_NON_UNIQ_ERROR: ErrorCode = ErrorCode(1052); - const ER_NON_UPDATABLE_TABLE: ErrorCode = ErrorCode(1288); - const ER_NORMAL_SHUTDOWN: ErrorCode = ErrorCode(1077); - const ER_NOT_ALLOWED_COMMAND: ErrorCode = ErrorCode(1148); - const ER_NOT_FORM_FILE: ErrorCode = ErrorCode(1033); - const ER_NOT_KEYFILE: ErrorCode = ErrorCode(1034); - const ER_NOT_SUPPORTED_AUTH_MODE: ErrorCode = ErrorCode(1251); - const ER_NOT_SUPPORTED_YET: ErrorCode = ErrorCode(1235); - const ER_NOT_VALID_PASSWORD: ErrorCode = ErrorCode(1819); - const ER_NO_BINARY_LOGGING: ErrorCode = ErrorCode(1381); - const ER_NO_BINLOG_ERROR: ErrorCode = ErrorCode(1518); - const ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR: ErrorCode = ErrorCode(1487); - const ER_NO_DB_ERROR: ErrorCode = ErrorCode(1046); - const ER_NO_DEFAULT: 
ErrorCode = ErrorCode(1230); - const ER_NO_DEFAULT_FOR_FIELD: ErrorCode = ErrorCode(1364); - const ER_NO_DEFAULT_FOR_VIEW_FIELD: ErrorCode = ErrorCode(1423); - const ER_NO_EIS_FOR_FIELD: ErrorCode = ErrorCode(1980); - const ER_NO_FILE_MAPPING: ErrorCode = ErrorCode(1388); - const ER_NO_FORMAT_DESCRIPTION_EVENT: ErrorCode = ErrorCode(1609); - const ER_NO_GROUP_FOR_PROC: ErrorCode = ErrorCode(1385); - const ER_NO_PARTITION_FOR_GIVEN_VALUE: ErrorCode = ErrorCode(1526); - const ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT: ErrorCode = ErrorCode(1591); - const ER_NO_PARTS_ERROR: ErrorCode = ErrorCode(1504); - const ER_NO_PERMISSION_TO_CREATE_USER: ErrorCode = ErrorCode(1211); - const ER_NO_RAID_COMPILED: ErrorCode = ErrorCode(1174); - const ER_NO_REFERENCED_ROW: ErrorCode = ErrorCode(1216); - const ER_NO_REFERENCED_ROW2: ErrorCode = ErrorCode(1452); - const ER_NO_SUCH_INDEX: ErrorCode = ErrorCode(1082); - const ER_NO_SUCH_KEY_VALUE: ErrorCode = ErrorCode(1741); - const ER_NO_SUCH_PARTITION_UNUSED: ErrorCode = ErrorCode(1749); - const ER_NO_SUCH_QUERY: ErrorCode = ErrorCode(1957); - const ER_NO_SUCH_TABLE: ErrorCode = ErrorCode(1146); - const ER_NO_SUCH_TABLE_IN_ENGINE: ErrorCode = ErrorCode(1932); - const ER_NO_SUCH_THREAD: ErrorCode = ErrorCode(1094); - const ER_NO_SUCH_USER: ErrorCode = ErrorCode(1449); - const ER_NO_TABLES_USED: ErrorCode = ErrorCode(1096); - const ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA: ErrorCode = ErrorCode(1465); - const ER_NO_UNIQUE_LOGFILE: ErrorCode = ErrorCode(1098); - const ER_NULL_COLUMN_IN_INDEX: ErrorCode = ErrorCode(1121); - const ER_NULL_IN_VALUES_LESS_THAN: ErrorCode = ErrorCode(1566); - const ER_OLD_FILE_FORMAT: ErrorCode = ErrorCode(1455); - const ER_OLD_KEYFILE: ErrorCode = ErrorCode(1035); - const ER_OLD_TEMPORALS_UPGRADED: ErrorCode = ErrorCode(1880); - const ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT: ErrorCode = ErrorCode(1730); - const ER_ONLY_INTEGERS_ALLOWED: ErrorCode = ErrorCode(1578); - const ER_ONLY_ON_RANGE_LIST_PARTITION: ErrorCode = ErrorCode(1512); - const ER_OPEN_AS_READONLY: ErrorCode = ErrorCode(1036); - const ER_OPERAND_COLUMNS: ErrorCode = ErrorCode(1241); - const ER_OPTION_PREVENTS_STATEMENT: ErrorCode = ErrorCode(1290); - const ER_ORDER_WITH_PROC: ErrorCode = ErrorCode(1386); - const ER_OUTOFMEMORY: ErrorCode = ErrorCode(1037); - const ER_OUT_OF_RESOURCES: ErrorCode = ErrorCode(1041); - const ER_OUT_OF_SORTMEMORY: ErrorCode = ErrorCode(1038); - const ER_PARSE_ERROR: ErrorCode = ErrorCode(1064); - const ER_PARTITIONS_MUST_BE_DEFINED_ERROR: ErrorCode = ErrorCode(1492); - const ER_PARTITION_CLAUSE_ON_NONPARTITIONED: ErrorCode = ErrorCode(1747); - const ER_PARTITION_COLUMN_LIST_ERROR: ErrorCode = ErrorCode(1653); - const ER_PARTITION_CONST_DOMAIN_ERROR: ErrorCode = ErrorCode(1563); - const ER_PARTITION_ENTRY_ERROR: ErrorCode = ErrorCode(1496); - const ER_PARTITION_EXCHANGE_DIFFERENT_OPTION: ErrorCode = ErrorCode(1731); - const ER_PARTITION_EXCHANGE_FOREIGN_KEY: ErrorCode = ErrorCode(1740); - const ER_PARTITION_EXCHANGE_PART_TABLE: ErrorCode = ErrorCode(1732); - const ER_PARTITION_EXCHANGE_TEMP_TABLE: ErrorCode = ErrorCode(1733); - const ER_PARTITION_FIELDS_TOO_LONG: ErrorCode = ErrorCode(1660); - const ER_PARTITION_FUNCTION_FAILURE: ErrorCode = ErrorCode(1521); - const ER_PARTITION_FUNCTION_IS_NOT_ALLOWED: ErrorCode = ErrorCode(1564); - const ER_PARTITION_FUNC_NOT_ALLOWED_ERROR: ErrorCode = ErrorCode(1491); - const ER_PARTITION_INSTEAD_OF_SUBPARTITION: ErrorCode = ErrorCode(1734); - const ER_PARTITION_MAXVALUE_ERROR: ErrorCode = 
ErrorCode(1481); - const ER_PARTITION_MERGE_ERROR: ErrorCode = ErrorCode(1572); - const ER_PARTITION_MGMT_ON_NONPARTITIONED: ErrorCode = ErrorCode(1505); - const ER_PARTITION_NAME: ErrorCode = ErrorCode(1633); - const ER_PARTITION_NOT_DEFINED_ERROR: ErrorCode = ErrorCode(1498); - const ER_PARTITION_NO_TEMPORARY: ErrorCode = ErrorCode(1562); - const ER_PARTITION_REQUIRES_VALUES_ERROR: ErrorCode = ErrorCode(1479); - const ER_PARTITION_SUBPARTITION_ERROR: ErrorCode = ErrorCode(1482); - const ER_PARTITION_SUBPART_MIX_ERROR: ErrorCode = ErrorCode(1483); - const ER_PARTITION_WRONG_NO_PART_ERROR: ErrorCode = ErrorCode(1484); - const ER_PARTITION_WRONG_NO_SUBPART_ERROR: ErrorCode = ErrorCode(1485); - const ER_PARTITION_WRONG_VALUES_ERROR: ErrorCode = ErrorCode(1480); - const ER_PART_STATE_ERROR: ErrorCode = ErrorCode(1522); - const ER_PASSWD_LENGTH: ErrorCode = ErrorCode(1372); - const ER_PASSWORD_ANONYMOUS_USER: ErrorCode = ErrorCode(1131); - const ER_PASSWORD_FORMAT: ErrorCode = ErrorCode(1827); - const ER_PASSWORD_NOT_ALLOWED: ErrorCode = ErrorCode(1132); - const ER_PASSWORD_NO_MATCH: ErrorCode = ErrorCode(1133); - const ER_PATH_LENGTH: ErrorCode = ErrorCode(1680); - const ER_PLUGIN_CANNOT_BE_UNINSTALLED: ErrorCode = ErrorCode(1883); - const ER_PLUGIN_INSTALLED: ErrorCode = ErrorCode(1968); - const ER_PLUGIN_IS_NOT_LOADED: ErrorCode = ErrorCode(1524); - const ER_PLUGIN_IS_PERMANENT: ErrorCode = ErrorCode(1702); - const ER_PLUGIN_NO_INSTALL: ErrorCode = ErrorCode(1721); - const ER_PLUGIN_NO_UNINSTALL: ErrorCode = ErrorCode(1720); - const ER_PRIMARY_CANT_HAVE_NULL: ErrorCode = ErrorCode(1171); - const ER_PRIMARY_KEY_BASED_ON_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1903); - const ER_PRIOR_COMMIT_FAILED: ErrorCode = ErrorCode(1964); - const ER_PROCACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1370); - const ER_PROC_AUTO_GRANT_FAIL: ErrorCode = ErrorCode(1404); - const ER_PROC_AUTO_REVOKE_FAIL: ErrorCode = ErrorCode(1405); - const ER_PS_MANY_PARAM: ErrorCode = ErrorCode(1390); - const ER_PS_NO_RECURSION: ErrorCode = ErrorCode(1444); - const ER_QUERY_CACHE_DISABLED: ErrorCode = ErrorCode(1651); - const ER_QUERY_CACHE_IS_DISABLED: ErrorCode = ErrorCode(1924); - const ER_QUERY_CACHE_IS_GLOBALY_DISABLED: ErrorCode = ErrorCode(1925); - const ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT: ErrorCode = ErrorCode(1931); - const ER_QUERY_INTERRUPTED: ErrorCode = ErrorCode(1317); - const ER_QUERY_ON_FOREIGN_DATA_SOURCE: ErrorCode = ErrorCode(1430); - const ER_QUERY_ON_MASTER: ErrorCode = ErrorCode(1219); - const ER_RANGE_NOT_INCREASING_ERROR: ErrorCode = ErrorCode(1493); - const ER_RBR_NOT_AVAILABLE: ErrorCode = ErrorCode(1574); - const ER_READY: ErrorCode = ErrorCode(1076); - const ER_READ_ONLY_MODE: ErrorCode = ErrorCode(1836); - const ER_READ_ONLY_TRANSACTION: ErrorCode = ErrorCode(1207); - const ER_RECORD_FILE_FULL: ErrorCode = ErrorCode(1114); - const ER_REGEXP_ERROR: ErrorCode = ErrorCode(1139); - const ER_RELAY_LOG_FAIL: ErrorCode = ErrorCode(1371); - const ER_RELAY_LOG_INIT: ErrorCode = ErrorCode(1380); - const ER_REMOVED_SPACES: ErrorCode = ErrorCode(1466); - const ER_RENAMED_NAME: ErrorCode = ErrorCode(1636); - const ER_REORG_HASH_ONLY_ON_SAME_N: ErrorCode = ErrorCode(1510); - const ER_REORG_NO_PARAM_ERROR: ErrorCode = ErrorCode(1511); - const ER_REORG_OUTSIDE_RANGE: ErrorCode = ErrorCode(1520); - const ER_REORG_PARTITION_NOT_EXIST: ErrorCode = ErrorCode(1516); - const ER_REQUIRES_PRIMARY_KEY: ErrorCode = ErrorCode(1173); - const ER_RESERVED_SYNTAX: ErrorCode = ErrorCode(1382); - const 
ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER: ErrorCode = ErrorCode(1645); - const ER_REVOKE_GRANTS: ErrorCode = ErrorCode(1269); - const ER_ROLE_CREATE_EXISTS: ErrorCode = ErrorCode(1975); - const ER_ROLE_DROP_EXISTS: ErrorCode = ErrorCode(1976); - const ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET: ErrorCode = ErrorCode(1748); - const ER_ROW_DOES_NOT_MATCH_PARTITION: ErrorCode = ErrorCode(1737); - const ER_ROW_EXPR_FOR_VCOL: ErrorCode = ErrorCode(1909); - const ER_ROW_IN_WRONG_PARTITION: ErrorCode = ErrorCode(1863); - const ER_ROW_IS_REFERENCED: ErrorCode = ErrorCode(1217); - const ER_ROW_IS_REFERENCED2: ErrorCode = ErrorCode(1451); - const ER_ROW_SINGLE_PARTITION_FIELD_ERROR: ErrorCode = ErrorCode(1658); - const ER_RPL_INFO_DATA_TOO_LONG: ErrorCode = ErrorCode(1742); - const ER_SAME_NAME_PARTITION: ErrorCode = ErrorCode(1517); - const ER_SAME_NAME_PARTITION_FIELD: ErrorCode = ErrorCode(1652); - const ER_SELECT_REDUCED: ErrorCode = ErrorCode(1249); - const ER_SERVER_IS_IN_SECURE_AUTH_MODE: ErrorCode = ErrorCode(1275); - const ER_SERVER_SHUTDOWN: ErrorCode = ErrorCode(1053); - const ER_SET_CONSTANTS_ONLY: ErrorCode = ErrorCode(1204); - const ER_SET_PASSWORD_AUTH_PLUGIN: ErrorCode = ErrorCode(1699); - const ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION: ErrorCode = ErrorCode(1769); - const ER_SET_STATEMENT_NOT_SUPPORTED: ErrorCode = ErrorCode(1971); - const ER_SHUTDOWN_COMPLETE: ErrorCode = ErrorCode(1079); - const ER_SIGNAL_BAD_CONDITION_TYPE: ErrorCode = ErrorCode(1646); - const ER_SIGNAL_EXCEPTION: ErrorCode = ErrorCode(1644); - const ER_SIGNAL_NOT_FOUND: ErrorCode = ErrorCode(1643); - const ER_SIGNAL_WARN: ErrorCode = ErrorCode(1642); - const ER_SIZE_OVERFLOW_ERROR: ErrorCode = ErrorCode(1532); - const ER_SKIPPING_LOGGED_TRANSACTION: ErrorCode = ErrorCode(1771); - const ER_SLAVE_CANT_CREATE_CONVERSION: ErrorCode = ErrorCode(1678); - const ER_SLAVE_CONFIGURATION: ErrorCode = ErrorCode(1794); - const ER_SLAVE_CONVERSION_FAILED: ErrorCode = ErrorCode(1677); - const ER_SLAVE_CORRUPT_EVENT: ErrorCode = ErrorCode(1610); - const ER_SLAVE_CREATE_EVENT_FAILURE: ErrorCode = ErrorCode(1596); - const ER_SLAVE_FATAL_ERROR: ErrorCode = ErrorCode(1593); - const ER_SLAVE_HAS_MORE_GTIDS_THAN_MASTER: ErrorCode = ErrorCode(1885); - const ER_SLAVE_HEARTBEAT_FAILURE: ErrorCode = ErrorCode(1623); - const ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE: ErrorCode = ErrorCode(1624); - const ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX: ErrorCode = ErrorCode(1704); - const ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN: ErrorCode = ErrorCode(1703); - const ER_SLAVE_IGNORED_SSL_PARAMS: ErrorCode = ErrorCode(1274); - const ER_SLAVE_IGNORED_TABLE: ErrorCode = ErrorCode(1237); - const ER_SLAVE_IGNORE_SERVER_IDS: ErrorCode = ErrorCode(1650); - const ER_SLAVE_INCIDENT: ErrorCode = ErrorCode(1590); - const ER_SLAVE_MASTER_COM_FAILURE: ErrorCode = ErrorCode(1597); - const ER_SLAVE_MI_INIT_REPOSITORY: ErrorCode = ErrorCode(1871); - const ER_SLAVE_MUST_STOP: ErrorCode = ErrorCode(1198); - const ER_SLAVE_NOT_RUNNING: ErrorCode = ErrorCode(1199); - const ER_SLAVE_RELAY_LOG_READ_FAILURE: ErrorCode = ErrorCode(1594); - const ER_SLAVE_RELAY_LOG_WRITE_FAILURE: ErrorCode = ErrorCode(1595); - const ER_SLAVE_RLI_INIT_REPOSITORY: ErrorCode = ErrorCode(1872); - const ER_SLAVE_SILENT_RETRY_TRANSACTION: ErrorCode = ErrorCode(1806); - const ER_SLAVE_SKIP_NOT_IN_GTID: ErrorCode = ErrorCode(1966); - const ER_SLAVE_STARTED: ErrorCode = ErrorCode(1937); - const ER_SLAVE_STOPPED: ErrorCode = ErrorCode(1938); - const ER_SLAVE_THREAD: ErrorCode = ErrorCode(1202); - const 
ER_SLAVE_UNEXPECTED_MASTER_SWITCH: ErrorCode = ErrorCode(1952); - const ER_SLAVE_WAS_NOT_RUNNING: ErrorCode = ErrorCode(1255); - const ER_SLAVE_WAS_RUNNING: ErrorCode = ErrorCode(1254); - const ER_SPATIAL_CANT_HAVE_NULL: ErrorCode = ErrorCode(1252); - const ER_SPATIAL_MUST_HAVE_GEOM_COL: ErrorCode = ErrorCode(1687); - const ER_SPECIFIC_ACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1227); - const ER_SP_ALREADY_EXISTS: ErrorCode = ErrorCode(1304); - const ER_SP_BADRETURN: ErrorCode = ErrorCode(1313); - const ER_SP_BADSELECT: ErrorCode = ErrorCode(1312); - const ER_SP_BADSTATEMENT: ErrorCode = ErrorCode(1314); - const ER_SP_BAD_CURSOR_QUERY: ErrorCode = ErrorCode(1322); - const ER_SP_BAD_CURSOR_SELECT: ErrorCode = ErrorCode(1323); - const ER_SP_BAD_SQLSTATE: ErrorCode = ErrorCode(1407); - const ER_SP_BAD_VAR_SHADOW: ErrorCode = ErrorCode(1453); - const ER_SP_CANT_ALTER: ErrorCode = ErrorCode(1334); - const ER_SP_CANT_SET_AUTOCOMMIT: ErrorCode = ErrorCode(1445); - const ER_SP_CASE_NOT_FOUND: ErrorCode = ErrorCode(1339); - const ER_SP_COND_MISMATCH: ErrorCode = ErrorCode(1319); - const ER_SP_CURSOR_AFTER_HANDLER: ErrorCode = ErrorCode(1338); - const ER_SP_CURSOR_ALREADY_OPEN: ErrorCode = ErrorCode(1325); - const ER_SP_CURSOR_MISMATCH: ErrorCode = ErrorCode(1324); - const ER_SP_CURSOR_NOT_OPEN: ErrorCode = ErrorCode(1326); - const ER_SP_DOES_NOT_EXIST: ErrorCode = ErrorCode(1305); - const ER_SP_DROP_FAILED: ErrorCode = ErrorCode(1306); - const ER_SP_DUP_COND: ErrorCode = ErrorCode(1332); - const ER_SP_DUP_CURS: ErrorCode = ErrorCode(1333); - const ER_SP_DUP_HANDLER: ErrorCode = ErrorCode(1413); - const ER_SP_DUP_PARAM: ErrorCode = ErrorCode(1330); - const ER_SP_DUP_VAR: ErrorCode = ErrorCode(1331); - const ER_SP_FETCH_NO_DATA: ErrorCode = ErrorCode(1329); - const ER_SP_GOTO_IN_HNDLR: ErrorCode = ErrorCode(1358); - const ER_SP_LABEL_MISMATCH: ErrorCode = ErrorCode(1310); - const ER_SP_LABEL_REDEFINE: ErrorCode = ErrorCode(1309); - const ER_SP_LILABEL_MISMATCH: ErrorCode = ErrorCode(1308); - const ER_SP_NORETURN: ErrorCode = ErrorCode(1320); - const ER_SP_NORETURNEND: ErrorCode = ErrorCode(1321); - const ER_SP_NOT_VAR_ARG: ErrorCode = ErrorCode(1414); - const ER_SP_NO_AGGREGATE: ErrorCode = ErrorCode(1460); - const ER_SP_NO_DROP_SP: ErrorCode = ErrorCode(1357); - const ER_SP_NO_RECURSION: ErrorCode = ErrorCode(1424); - const ER_SP_NO_RECURSIVE_CREATE: ErrorCode = ErrorCode(1303); - const ER_SP_NO_RETSET: ErrorCode = ErrorCode(1415); - const ER_SP_PROC_TABLE_CORRUPT: ErrorCode = ErrorCode(1457); - const ER_SP_RECURSION_LIMIT: ErrorCode = ErrorCode(1456); - const ER_SP_STORE_FAILED: ErrorCode = ErrorCode(1307); - const ER_SP_SUBSELECT_NYI: ErrorCode = ErrorCode(1335); - const ER_SP_UNDECLARED_VAR: ErrorCode = ErrorCode(1327); - const ER_SP_UNINIT_VAR: ErrorCode = ErrorCode(1311); - const ER_SP_VARCOND_AFTER_CURSHNDLR: ErrorCode = ErrorCode(1337); - const ER_SP_WRONG_NAME: ErrorCode = ErrorCode(1458); - const ER_SP_WRONG_NO_OF_ARGS: ErrorCode = ErrorCode(1318); - const ER_SP_WRONG_NO_OF_FETCH_ARGS: ErrorCode = ErrorCode(1328); - const ER_SQLTHREAD_WITH_SECURE_SLAVE: ErrorCode = ErrorCode(1763); - const ER_SQL_DISCOVER_ERROR: ErrorCode = ErrorCode(1939); - const ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE: ErrorCode = ErrorCode(1858); - const ER_SR_INVALID_CREATION_CTX: ErrorCode = ErrorCode(1601); - const ER_STACK_OVERRUN: ErrorCode = ErrorCode(1119); - const ER_STACK_OVERRUN_NEED_MORE: ErrorCode = ErrorCode(1436); - const ER_STARTUP: ErrorCode = ErrorCode(1408); - const ER_STATEMENT_TIMEOUT: 
ErrorCode = ErrorCode(1969); - const ER_STMT_CACHE_FULL: ErrorCode = ErrorCode(1705); - const ER_STMT_HAS_NO_OPEN_CURSOR: ErrorCode = ErrorCode(1421); - const ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG: ErrorCode = ErrorCode(1336); - const ER_STOP_SLAVE_IO_THREAD_TIMEOUT: ErrorCode = ErrorCode(1876); - const ER_STOP_SLAVE_SQL_THREAD_TIMEOUT: ErrorCode = ErrorCode(1875); - const ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT: ErrorCode = ErrorCode(1686); - const ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT: ErrorCode = ErrorCode(1560); - const ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO: ErrorCode = ErrorCode(1954); - const ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION: ErrorCode = ErrorCode(1930); - const ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN: ErrorCode = ErrorCode(1695); - const ER_SUBPARTITION_ERROR: ErrorCode = ErrorCode(1500); - const ER_SUBPARTITION_NAME: ErrorCode = ErrorCode(1634); - const ER_SUBQUERIES_NOT_SUPPORTED: ErrorCode = ErrorCode(1970); - const ER_SUBQUERY_NO1_ROW: ErrorCode = ErrorCode(1242); - const ER_SYNTAX_ERROR: ErrorCode = ErrorCode(1149); - const ER_TABLEACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1142); - const ER_TABLENAME_NOT_ALLOWED_HERE: ErrorCode = ErrorCode(1250); - const ER_TABLESPACE_AUTO_EXTEND_ERROR: ErrorCode = ErrorCode(1530); - const ER_TABLESPACE_DISCARDED: ErrorCode = ErrorCode(1814); - const ER_TABLESPACE_EXISTS: ErrorCode = ErrorCode(1813); - const ER_TABLESPACE_MISSING: ErrorCode = ErrorCode(1812); - const ER_TABLES_DIFFERENT_METADATA: ErrorCode = ErrorCode(1736); - const ER_TABLE_CANT_HANDLE_AUTO_INCREMENT: ErrorCode = ErrorCode(1164); - const ER_TABLE_CANT_HANDLE_BLOB: ErrorCode = ErrorCode(1163); - const ER_TABLE_CANT_HANDLE_FT: ErrorCode = ErrorCode(1214); - const ER_TABLE_CANT_HANDLE_SPKEYS: ErrorCode = ErrorCode(1464); - const ER_TABLE_CORRUPT: ErrorCode = ErrorCode(1877); - const ER_TABLE_DEFINITION_TOO_BIG: ErrorCode = ErrorCode(1967); - const ER_TABLE_DEF_CHANGED: ErrorCode = ErrorCode(1412); - const ER_TABLE_EXISTS_ERROR: ErrorCode = ErrorCode(1050); - const ER_TABLE_HAS_NO_FT: ErrorCode = ErrorCode(1764); - const ER_TABLE_IN_FK_CHECK: ErrorCode = ErrorCode(1725); - const ER_TABLE_IN_SYSTEM_TABLESPACE: ErrorCode = ErrorCode(1809); - const ER_TABLE_MUST_HAVE_COLUMNS: ErrorCode = ErrorCode(1113); - const ER_TABLE_NAME: ErrorCode = ErrorCode(1632); - const ER_TABLE_NEEDS_REBUILD: ErrorCode = ErrorCode(1707); - const ER_TABLE_NEEDS_UPGRADE: ErrorCode = ErrorCode(1459); - const ER_TABLE_NOT_LOCKED: ErrorCode = ErrorCode(1100); - const ER_TABLE_NOT_LOCKED_FOR_WRITE: ErrorCode = ErrorCode(1099); - const ER_TABLE_SCHEMA_MISMATCH: ErrorCode = ErrorCode(1808); - const ER_TARGET_NOT_EXPLAINABLE: ErrorCode = ErrorCode(1933); - const ER_TEMPORARY_NAME: ErrorCode = ErrorCode(1635); - const ER_TEMP_FILE_WRITE_FAILURE: ErrorCode = ErrorCode(1878); - const ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR: ErrorCode = ErrorCode(1559); - const ER_TEXTFILE_NOT_READABLE: ErrorCode = ErrorCode(1085); - const ER_TOO_BIG_DISPLAYWIDTH: ErrorCode = ErrorCode(1439); - const ER_TOO_BIG_FIELDLENGTH: ErrorCode = ErrorCode(1074); - const ER_TOO_BIG_FOR_UNCOMPRESS: ErrorCode = ErrorCode(1256); - const ER_TOO_BIG_PRECISION: ErrorCode = ErrorCode(1426); - const ER_TOO_BIG_ROWSIZE: ErrorCode = ErrorCode(1118); - const ER_TOO_BIG_SCALE: ErrorCode = ErrorCode(1425); - const ER_TOO_BIG_SELECT: ErrorCode = ErrorCode(1104); - const ER_TOO_BIG_SET: ErrorCode = ErrorCode(1097); - const ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT: ErrorCode = ErrorCode(1473); - const 
ER_TOO_LONG_BODY: ErrorCode = ErrorCode(1437); - const ER_TOO_LONG_FIELD_COMMENT: ErrorCode = ErrorCode(1629); - const ER_TOO_LONG_IDENT: ErrorCode = ErrorCode(1059); - const ER_TOO_LONG_INDEX_COMMENT: ErrorCode = ErrorCode(1688); - const ER_TOO_LONG_KEY: ErrorCode = ErrorCode(1071); - const ER_TOO_LONG_STRING: ErrorCode = ErrorCode(1162); - const ER_TOO_LONG_TABLE_COMMENT: ErrorCode = ErrorCode(1628); - const ER_TOO_LONG_TABLE_PARTITION_COMMENT: ErrorCode = ErrorCode(1793); - const ER_TOO_MANY_CONCURRENT_TRXS: ErrorCode = ErrorCode(1637); - const ER_TOO_MANY_DELAYED_THREADS: ErrorCode = ErrorCode(1151); - const ER_TOO_MANY_FIELDS: ErrorCode = ErrorCode(1117); - const ER_TOO_MANY_KEYS: ErrorCode = ErrorCode(1069); - const ER_TOO_MANY_KEY_PARTS: ErrorCode = ErrorCode(1070); - const ER_TOO_MANY_PARTITIONS_ERROR: ErrorCode = ErrorCode(1499); - const ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR: ErrorCode = ErrorCode(1655); - const ER_TOO_MANY_ROWS: ErrorCode = ErrorCode(1172); - const ER_TOO_MANY_TABLES: ErrorCode = ErrorCode(1116); - const ER_TOO_MANY_USER_CONNECTIONS: ErrorCode = ErrorCode(1203); - const ER_TOO_MANY_VALUES_ERROR: ErrorCode = ErrorCode(1657); - const ER_TOO_MUCH_AUTO_TIMESTAMP_COLS: ErrorCode = ErrorCode(1293); - const ER_TRANS_CACHE_FULL: ErrorCode = ErrorCode(1197); - const ER_TRG_ALREADY_EXISTS: ErrorCode = ErrorCode(1359); - const ER_TRG_CANT_CHANGE_ROW: ErrorCode = ErrorCode(1362); - const ER_TRG_CANT_OPEN_TABLE: ErrorCode = ErrorCode(1606); - const ER_TRG_CORRUPTED_FILE: ErrorCode = ErrorCode(1602); - const ER_TRG_DOES_NOT_EXIST: ErrorCode = ErrorCode(1360); - const ER_TRG_INVALID_CREATION_CTX: ErrorCode = ErrorCode(1604); - const ER_TRG_IN_WRONG_SCHEMA: ErrorCode = ErrorCode(1435); - const ER_TRG_NO_CREATION_CTX: ErrorCode = ErrorCode(1603); - const ER_TRG_NO_DEFINER: ErrorCode = ErrorCode(1454); - const ER_TRG_NO_SUCH_ROW_IN_TRG: ErrorCode = ErrorCode(1363); - const ER_TRG_ON_VIEW_OR_TEMP_TABLE: ErrorCode = ErrorCode(1361); - const ER_TRUNCATED_WRONG_VALUE: ErrorCode = ErrorCode(1292); - const ER_TRUNCATED_WRONG_VALUE_FOR_FIELD: ErrorCode = ErrorCode(1366); - const ER_TRUNCATE_ILLEGAL_FK: ErrorCode = ErrorCode(1701); - const ER_UDF_EXISTS: ErrorCode = ErrorCode(1125); - const ER_UDF_NO_PATHS: ErrorCode = ErrorCode(1124); - const ER_UNDO_RECORD_TOO_BIG: ErrorCode = ErrorCode(1713); - const ER_UNEXPECTED_EOF: ErrorCode = ErrorCode(1039); - const ER_UNION_TABLES_IN_DIFFERENT_DIR: ErrorCode = ErrorCode(1212); - const ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF: ErrorCode = ErrorCode(1503); - const ER_UNKNOWN_ALTER_ALGORITHM: ErrorCode = ErrorCode(1800); - const ER_UNKNOWN_ALTER_LOCK: ErrorCode = ErrorCode(1801); - const ER_UNKNOWN_CHARACTER_SET: ErrorCode = ErrorCode(1115); - const ER_UNKNOWN_COLLATION: ErrorCode = ErrorCode(1273); - const ER_UNKNOWN_COM_ERROR: ErrorCode = ErrorCode(1047); - const ER_UNKNOWN_ERROR: ErrorCode = ErrorCode(1105); - const ER_UNKNOWN_EXPLAIN_FORMAT: ErrorCode = ErrorCode(1791); - const ER_UNKNOWN_KEY_CACHE: ErrorCode = ErrorCode(1284); - const ER_UNKNOWN_LOCALE: ErrorCode = ErrorCode(1649); - const ER_UNKNOWN_OPTION: ErrorCode = ErrorCode(1911); - const ER_UNKNOWN_PARTITION: ErrorCode = ErrorCode(1735); - const ER_UNKNOWN_PROCEDURE: ErrorCode = ErrorCode(1106); - const ER_UNKNOWN_STMT_HANDLER: ErrorCode = ErrorCode(1243); - const ER_UNKNOWN_STORAGE_ENGINE: ErrorCode = ErrorCode(1286); - const ER_UNKNOWN_SYSTEM_VARIABLE: ErrorCode = ErrorCode(1193); - const ER_UNKNOWN_TABLE: ErrorCode = ErrorCode(1109); - const ER_UNKNOWN_TARGET_BINLOG: ErrorCode = 
ErrorCode(1373); - const ER_UNKNOWN_TIME_ZONE: ErrorCode = ErrorCode(1298); - const ER_UNSUPORTED_LOG_ENGINE: ErrorCode = ErrorCode(1579); - const ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1907); - const ER_UNSUPPORTED_ENGINE: ErrorCode = ErrorCode(1726); - const ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS: ErrorCode = ErrorCode(1910); - const ER_UNSUPPORTED_EXTENSION: ErrorCode = ErrorCode(1112); - const ER_UNSUPPORTED_PS: ErrorCode = ErrorCode(1295); - const ER_UNTIL_COND_IGNORED: ErrorCode = ErrorCode(1279); - const ER_UNTIL_REQUIRES_USING_GTID: ErrorCode = ErrorCode(1949); - const ER_UNUSED11: ErrorCode = ErrorCode(1608); - const ER_UNUSED17: ErrorCode = ErrorCode(1972); - const ER_UPDATE_INF: ErrorCode = ErrorCode(1134); - const ER_UPDATE_LOG_DEPRECATED_IGNORED: ErrorCode = ErrorCode(1315); - const ER_UPDATE_LOG_DEPRECATED_TRANSLATED: ErrorCode = ErrorCode(1316); - const ER_UPDATE_TABLE_USED: ErrorCode = ErrorCode(1093); - const ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE: ErrorCode = ErrorCode(1175); - const ER_USERNAME: ErrorCode = ErrorCode(1468); - const ER_USER_CREATE_EXISTS: ErrorCode = ErrorCode(1973); - const ER_USER_DROP_EXISTS: ErrorCode = ErrorCode(1974); - const ER_USER_LIMIT_REACHED: ErrorCode = ErrorCode(1226); - const ER_VALUES_IS_NOT_INT_TYPE_ERROR: ErrorCode = ErrorCode(1697); - const ER_VARIABLE_IS_NOT_STRUCT: ErrorCode = ErrorCode(1272); - const ER_VARIABLE_IS_READONLY: ErrorCode = ErrorCode(1621); - const ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER: ErrorCode = ErrorCode(1765); - const ER_VARIABLE_NOT_SETTABLE_IN_SP: ErrorCode = ErrorCode(1838); - const ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION: ErrorCode = ErrorCode(1766); - const ER_VAR_CANT_BE_READ: ErrorCode = ErrorCode(1233); - const ER_VCOL_BASED_ON_VCOL: ErrorCode = ErrorCode(1900); - const ER_VIEW_CHECKSUM: ErrorCode = ErrorCode(1392); - const ER_VIEW_CHECK_FAILED: ErrorCode = ErrorCode(1369); - const ER_VIEW_DELETE_MERGE_VIEW: ErrorCode = ErrorCode(1395); - const ER_VIEW_FRM_NO_USER: ErrorCode = ErrorCode(1447); - const ER_VIEW_INVALID: ErrorCode = ErrorCode(1356); - const ER_VIEW_INVALID_CREATION_CTX: ErrorCode = ErrorCode(1600); - const ER_VIEW_MULTIUPDATE: ErrorCode = ErrorCode(1393); - const ER_VIEW_NONUPD_CHECK: ErrorCode = ErrorCode(1368); - const ER_VIEW_NO_CREATION_CTX: ErrorCode = ErrorCode(1599); - const ER_VIEW_NO_EXPLAIN: ErrorCode = ErrorCode(1345); - const ER_VIEW_NO_INSERT_FIELD_LIST: ErrorCode = ErrorCode(1394); - const ER_VIEW_ORDERBY_IGNORED: ErrorCode = ErrorCode(1926); - const ER_VIEW_OTHER_USER: ErrorCode = ErrorCode(1448); - const ER_VIEW_PREVENT_UPDATE: ErrorCode = ErrorCode(1443); - const ER_VIEW_RECURSIVE: ErrorCode = ErrorCode(1462); - const ER_VIEW_SELECT_CLAUSE: ErrorCode = ErrorCode(1350); - const ER_VIEW_SELECT_DERIVED: ErrorCode = ErrorCode(1349); - const ER_VIEW_SELECT_TMPTABLE: ErrorCode = ErrorCode(1352); - const ER_VIEW_SELECT_VARIABLE: ErrorCode = ErrorCode(1351); - const ER_VIEW_WRONG_LIST: ErrorCode = ErrorCode(1353); - const ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED: ErrorCode = ErrorCode(1901); - const ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1906); - const ER_WARNING_NOT_COMPLETE_ROLLBACK: ErrorCode = ErrorCode(1196); - const ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE: ErrorCode = ErrorCode(1751); - const ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE: ErrorCode = ErrorCode(1752); - const ER_WARN_AGGFUNC_DEPENDENCE: ErrorCode = ErrorCode(1981); - const ER_WARN_ALLOWED_PACKET_OVERFLOWED: ErrorCode = 
ErrorCode(1301); - const ER_WARN_CANT_DROP_DEFAULT_KEYCACHE: ErrorCode = ErrorCode(1438); - const ER_WARN_DATA_OUT_OF_RANGE: ErrorCode = ErrorCode(1264); - const ER_WARN_DEPRECATED_SYNTAX: ErrorCode = ErrorCode(1287); - const ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT: ErrorCode = ErrorCode(1681); - const ER_WARN_DEPRECATED_SYNTAX_WITH_VER: ErrorCode = ErrorCode(1554); - const ER_WARN_ENGINE_TRANSACTION_ROLLBACK: ErrorCode = ErrorCode(1622); - const ER_WARN_FIELD_RESOLVED: ErrorCode = ErrorCode(1276); - const ER_WARN_HOSTNAME_WONT_WORK: ErrorCode = ErrorCode(1285); - const ER_WARN_INDEX_NOT_APPLICABLE: ErrorCode = ErrorCode(1739); - const ER_WARN_INVALID_TIMESTAMP: ErrorCode = ErrorCode(1299); - const ER_WARN_IS_SKIPPED_TABLE: ErrorCode = ErrorCode(1684); - const ER_WARN_NULL_TO_NOTNULL: ErrorCode = ErrorCode(1263); - const ER_WARN_PURGE_LOG_IN_USE: ErrorCode = ErrorCode(1867); - const ER_WARN_PURGE_LOG_IS_ACTIVE: ErrorCode = ErrorCode(1868); - const ER_WARN_QC_RESIZE: ErrorCode = ErrorCode(1282); - const ER_WARN_TOO_FEW_RECORDS: ErrorCode = ErrorCode(1261); - const ER_WARN_TOO_MANY_RECORDS: ErrorCode = ErrorCode(1262); - const ER_WARN_USING_OTHER_HANDLER: ErrorCode = ErrorCode(1266); - const ER_WARN_VIEW_MERGE: ErrorCode = ErrorCode(1354); - const ER_WARN_VIEW_WITHOUT_KEY: ErrorCode = ErrorCode(1355); - const ER_WRONG_ARGUMENTS: ErrorCode = ErrorCode(1210); - const ER_WRONG_AUTO_KEY: ErrorCode = ErrorCode(1075); - const ER_WRONG_COLUMN_NAME: ErrorCode = ErrorCode(1166); - const ER_WRONG_DB_NAME: ErrorCode = ErrorCode(1102); - const ER_WRONG_FIELD_SPEC: ErrorCode = ErrorCode(1063); - const ER_WRONG_FIELD_TERMINATORS: ErrorCode = ErrorCode(1083); - const ER_WRONG_FIELD_WITH_GROUP: ErrorCode = ErrorCode(1055); - const ER_WRONG_FK_DEF: ErrorCode = ErrorCode(1239); - const ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1905); - const ER_WRONG_GROUP_FIELD: ErrorCode = ErrorCode(1056); - const ER_WRONG_KEY_COLUMN: ErrorCode = ErrorCode(1167); - const ER_WRONG_LOCK_OF_SYSTEM_TABLE: ErrorCode = ErrorCode(1428); - const ER_WRONG_MAGIC: ErrorCode = ErrorCode(1389); - const ER_WRONG_MRG_TABLE: ErrorCode = ErrorCode(1168); - const ER_WRONG_NAME_FOR_CATALOG: ErrorCode = ErrorCode(1281); - const ER_WRONG_NAME_FOR_INDEX: ErrorCode = ErrorCode(1280); - const ER_WRONG_NATIVE_TABLE_STRUCTURE: ErrorCode = ErrorCode(1682); - const ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT: ErrorCode = ErrorCode(1222); - const ER_WRONG_OBJECT: ErrorCode = ErrorCode(1347); - const ER_WRONG_OUTER_JOIN: ErrorCode = ErrorCode(1120); - const ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT: ErrorCode = ErrorCode(1582); - const ER_WRONG_PARAMCOUNT_TO_PROCEDURE: ErrorCode = ErrorCode(1107); - const ER_WRONG_PARAMETERS_TO_NATIVE_FCT: ErrorCode = ErrorCode(1583); - const ER_WRONG_PARAMETERS_TO_PROCEDURE: ErrorCode = ErrorCode(1108); - const ER_WRONG_PARAMETERS_TO_STORED_FCT: ErrorCode = ErrorCode(1584); - const ER_WRONG_PARTITION_NAME: ErrorCode = ErrorCode(1567); - const ER_WRONG_PERFSCHEMA_USAGE: ErrorCode = ErrorCode(1683); - const ER_WRONG_SIZE_NUMBER: ErrorCode = ErrorCode(1531); - const ER_WRONG_SPVAR_TYPE_IN_LIMIT: ErrorCode = ErrorCode(1691); - const ER_WRONG_STRING_LENGTH: ErrorCode = ErrorCode(1470); - const ER_WRONG_SUB_KEY: ErrorCode = ErrorCode(1089); - const ER_WRONG_SUM_SELECT: ErrorCode = ErrorCode(1057); - const ER_WRONG_TABLE_NAME: ErrorCode = ErrorCode(1103); - const ER_WRONG_TYPE_COLUMN_VALUE_ERROR: ErrorCode = ErrorCode(1654); - const ER_WRONG_TYPE_FOR_VAR: ErrorCode = ErrorCode(1232); - const ER_WRONG_USAGE: ErrorCode = 
ErrorCode(1221); - const ER_WRONG_VALUE: ErrorCode = ErrorCode(1525); - const ER_WRONG_VALUE_COUNT: ErrorCode = ErrorCode(1058); - const ER_WRONG_VALUE_COUNT_ON_ROW: ErrorCode = ErrorCode(1136); - const ER_WRONG_VALUE_FOR_TYPE: ErrorCode = ErrorCode(1411); - const ER_WRONG_VALUE_FOR_VAR: ErrorCode = ErrorCode(1231); - const ER_WSAS_FAILED: ErrorCode = ErrorCode(1383); - const ER_XAER_DUPID: ErrorCode = ErrorCode(1440); - const ER_XAER_INVAL: ErrorCode = ErrorCode(1398); - const ER_XAER_NOTA: ErrorCode = ErrorCode(1397); - const ER_XAER_OUTSIDE: ErrorCode = ErrorCode(1400); - const ER_XAER_RMERR: ErrorCode = ErrorCode(1401); - const ER_XAER_RMFAIL: ErrorCode = ErrorCode(1399); - const ER_XA_RBDEADLOCK: ErrorCode = ErrorCode(1614); - const ER_XA_RBROLLBACK: ErrorCode = ErrorCode(1402); - const ER_XA_RBTIMEOUT: ErrorCode = ErrorCode(1613); - const ER_YES: ErrorCode = ErrorCode(1003); - const ER_ZLIB_Z_BUF_ERROR: ErrorCode = ErrorCode(1258); - const ER_ZLIB_Z_DATA_ERROR: ErrorCode = ErrorCode(1259); - const ER_ZLIB_Z_MEM_ERROR: ErrorCode = ErrorCode(1257); - const WARN_COND_ITEM_TRUNCATED: ErrorCode = ErrorCode(1647); - const WARN_DATA_TRUNCATED: ErrorCode = ErrorCode(1265); - const WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED: ErrorCode = ErrorCode(1638); - const WARN_NO_MASTER_INF: ErrorCode = ErrorCode(1617); - const WARN_ON_BLOCKHOLE_IN_RBR: ErrorCode = ErrorCode(1870); - const WARN_OPTION_BELOW_LIMIT: ErrorCode = ErrorCode(1708); - const WARN_OPTION_IGNORED: ErrorCode = ErrorCode(1618); - const WARN_PLUGIN_BUSY: ErrorCode = ErrorCode(1620); - const WARN_PLUGIN_DELETE_BUILTIN: ErrorCode = ErrorCode(1619); -} diff --git a/sqlx-core/src/mysql/protocol/field.rs b/sqlx-core/src/mysql/protocol/field.rs index f5311e54..2c4b0aee 100644 --- a/sqlx-core/src/mysql/protocol/field.rs +++ b/sqlx-core/src/mysql/protocol/field.rs @@ -1,65 +1,50 @@ -// https://mariadb.com/kb/en/library/resultset/#field-types -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct FieldType(pub u8); - -impl FieldType { - pub const MYSQL_TYPE_BIT: FieldType = FieldType(16); - pub const MYSQL_TYPE_BLOB: FieldType = FieldType(252); - pub const MYSQL_TYPE_DATE: FieldType = FieldType(10); - pub const MYSQL_TYPE_DATETIME: FieldType = FieldType(12); - pub const MYSQL_TYPE_DATETIME2: FieldType = FieldType(18); - pub const MYSQL_TYPE_DECIMAL: FieldType = FieldType(0); - pub const MYSQL_TYPE_DOUBLE: FieldType = FieldType(5); - pub const MYSQL_TYPE_ENUM: FieldType = FieldType(247); - pub const MYSQL_TYPE_FLOAT: FieldType = FieldType(4); - pub const MYSQL_TYPE_GEOMETRY: FieldType = FieldType(255); - pub const MYSQL_TYPE_INT24: FieldType = FieldType(9); - pub const MYSQL_TYPE_JSON: FieldType = FieldType(245); - pub const MYSQL_TYPE_LONG: FieldType = FieldType(3); - pub const MYSQL_TYPE_LONGLONG: FieldType = FieldType(8); - pub const MYSQL_TYPE_LONG_BLOB: FieldType = FieldType(251); - pub const MYSQL_TYPE_MEDIUM_BLOB: FieldType = FieldType(250); - pub const MYSQL_TYPE_NEWDATE: FieldType = FieldType(14); - pub const MYSQL_TYPE_NEWDECIMAL: FieldType = FieldType(246); - pub const MYSQL_TYPE_NULL: FieldType = FieldType(6); - pub const MYSQL_TYPE_SET: FieldType = FieldType(248); - pub const MYSQL_TYPE_SHORT: FieldType = FieldType(2); - pub const MYSQL_TYPE_STRING: FieldType = FieldType(254); - pub const MYSQL_TYPE_TIME: FieldType = FieldType(11); - pub const MYSQL_TYPE_TIME2: FieldType = FieldType(19); - pub const MYSQL_TYPE_TIMESTAMP: FieldType = FieldType(7); - pub const MYSQL_TYPE_TIMESTAMP2: FieldType = FieldType(17); - pub const 
MYSQL_TYPE_TINY: FieldType = FieldType(1); - pub const MYSQL_TYPE_TINY_BLOB: FieldType = FieldType(249); - pub const MYSQL_TYPE_VARCHAR: FieldType = FieldType(15); - pub const MYSQL_TYPE_VAR_STRING: FieldType = FieldType(253); - pub const MYSQL_TYPE_YEAR: FieldType = FieldType(13); -} - -// https://mariadb.com/kb/en/library/com_stmt_execute/#parameter-flag -bitflags::bitflags! { - pub struct ParameterFlag: u8 { - const UNSIGNED = 128; - } -} - // https://mariadb.com/kb/en/library/resultset/#field-detail-flag +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/group__group__cs__column__definition__flags.html bitflags::bitflags! { - pub struct FieldDetailFlag: u16 { + pub struct FieldFlags: u16 { + /// Field cannot be NULL const NOT_NULL = 1; + + /// Field is **part of** a primary key const PRIMARY_KEY = 2; + + /// Field is **part of** a unique key/constraint const UNIQUE_KEY = 4; + + /// Field is **part of** a unique or primary key const MULTIPLE_KEY = 8; + + /// Field is a blob. const BLOB = 16; - const UNSIGNED = 32; - const ZEROFILL_FLAG = 64; - const BINARY_COLLATION = 128; + + /// Field is unsigned + const UNSIGNED = 32; + + /// Field is zero filled. + const ZEROFILL = 64; + + /// Field is binary (set for strings) + const BINARY = 128; + + /// Field is an enumeration const ENUM = 256; + + /// Field is an auto-increment field const AUTO_INCREMENT = 512; + + /// Field is a timestamp const TIMESTAMP = 1024; + + /// Field is a set const SET = 2048; - const NO_DEFAULT_VALUE_FLAG = 4096; - const ON_UPDATE_NOW_FLAG = 8192; - const NUM_FLAG = 32768; + + /// Field does not have a default value + const NO_DEFAULT_VALUE = 4096; + + /// Field is set to NOW on UPDATE + const ON_UPDATE_NOW = 8192; + + /// Field is a number + const NUM = 32768; } } diff --git a/sqlx-core/src/mysql/protocol/handshake.rs b/sqlx-core/src/mysql/protocol/handshake.rs new file mode 100644 index 00000000..910b737e --- /dev/null +++ b/sqlx-core/src/mysql/protocol/handshake.rs @@ -0,0 +1,159 @@ +use byteorder::LittleEndian; + +use crate::io::Buf; +use crate::mysql::protocol::{Capabilities, Decode, Status}; + +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_connection_phase_packets_protocol_handshake_v10.html +// https://mariadb.com/kb/en/connection/#initial-handshake-packet +#[derive(Debug)] +pub struct Handshake { + pub protocol_version: u8, + pub server_version: Box<str>, + pub connection_id: u32, + pub server_capabilities: Capabilities, + pub server_default_collation: u8, + pub status: Status, + pub auth_plugin_name: Option<Box<str>>, + pub auth_plugin_data: Box<[u8]>, +} + +impl Decode for Handshake { + fn decode(mut buf: &[u8]) -> crate::Result<Self> + where + Self: Sized, + { + let protocol_version = buf.get_u8()?; + let server_version = buf.get_str_nul()?.into(); + let connection_id = buf.get_u32::<LittleEndian>()?; + + let mut scramble = Vec::with_capacity(8); + + // scramble first part : string<8> + scramble.extend_from_slice(&buf[..8]); + buf.advance(8); + + // reserved : string<1> + buf.advance(1); + + // capability_flags_1 : int<2> + let capabilities_1 = buf.get_u16::<LittleEndian>()?; + let mut capabilities = Capabilities::from_bits_truncate(capabilities_1.into()); + + // character_set : int<1> + let char_set = buf.get_u8()?; + + // status_flags : int<2> + let status = buf.get_u16::<LittleEndian>()?; + let status = Status::from_bits_truncate(status); + + // capability_flags_2 : int<2> + let capabilities_2 = buf.get_u16::<LittleEndian>()?; + capabilities |= Capabilities::from_bits_truncate(((capabilities_2 as u32) << 16).into()); + + let auth_plugin_data_len = if
capabilities.contains(Capabilities::PLUGIN_AUTH) { + // plugin data length : int<1> + buf.get_u8()? + } else { + // 0x00 : int<1> + buf.advance(1); + 0 + }; + + // reserved: string<6> + buf.advance(6); + + if capabilities.contains(Capabilities::MYSQL) { + // reserved: string<4> + buf.advance(4); + } else { + // capability_flags_3 : int<4> + let capabilities_3 = buf.get_u32::<LittleEndian>()?; + capabilities |= Capabilities::from_bits_truncate((capabilities_3 as u64) << 32); + } + + if capabilities.contains(Capabilities::SECURE_CONNECTION) { + // scramble 2nd part : string ( Length = max(12, plugin data length - 9) ) + let len = ((auth_plugin_data_len as isize) - 9).max(12) as usize; + scramble.extend_from_slice(&buf[..len]); + buf.advance(len); + + // reserved : string<1> + buf.advance(1); + } + + let auth_plugin_name = if capabilities.contains(Capabilities::PLUGIN_AUTH) { + Some(buf.get_str_nul()?.to_owned().into()) + } else { + None + }; + + Ok(Self { + protocol_version, + server_capabilities: capabilities, + server_version, + server_default_collation: char_set, + connection_id, + auth_plugin_data: scramble.into_boxed_slice(), + auth_plugin_name, + status, + }) + } +} + +#[cfg(test)] +mod tests { + use super::{Capabilities, Decode, Handshake, Status}; + + const HANDSHAKE_MARIA_DB_10_4_7: &[u8] = b"\n5.5.5-10.4.7-MariaDB-1:10.4.7+maria~bionic\x00\x0b\x00\x00\x00t6L\\j\"dS\x00\xfe\xf7\x08\x02\x00\xff\x81\x15\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00U14Oph9\" { + pub max_packet_size: u32, + pub client_collation: u8, + pub username: &'a str, + pub database: &'a str, +} + +impl Encode for HandshakeResponse<'_> { + fn encode(&self, buf: &mut Vec<u8>, capabilities: Capabilities) { + // client capabilities : int<4> + buf.put_u32::<LittleEndian>(capabilities.bits() as u32); + + // max packet size : int<4> + buf.put_u32::<LittleEndian>(self.max_packet_size); + + // client character collation : int<1> + buf.put_u8(self.client_collation); + + // reserved : string<19> + buf.advance(19); + + if capabilities.contains(Capabilities::MYSQL) { + // reserved : string<4> + buf.advance(4); + } else { + // extended client capabilities : int<4> + buf.put_u32::<LittleEndian>((capabilities.bits() >> 32) as u32); + } + + // username : string + buf.put_str_nul(self.username); + + if capabilities.contains(Capabilities::PLUGIN_AUTH_LENENC_DATA) { + // auth_response : string + buf.put_str_lenenc::<LittleEndian>(""); + } else { + // auth_response_length : int<1> + buf.put_u8(0); + + // auth_response : string<{auth_response_length}> + } + + if capabilities.contains(Capabilities::CONNECT_WITH_DB) { + // database : string + buf.put_str_nul(self.database); + } + } +} diff --git a/sqlx-core/src/mysql/protocol/mod.rs b/sqlx-core/src/mysql/protocol/mod.rs index 12710cf8..fa5858e3 100644 --- a/sqlx-core/src/mysql/protocol/mod.rs +++ b/sqlx-core/src/mysql/protocol/mod.rs @@ -1,33 +1,49 @@ -// Many protocol types are implemented but unused (currently). The hope is to eventually -// work them all into the (raw) connection type. +// There is much to the protocol that is not yet used. As we mature we'll be trimming +// the size of this module to exactly what is necessary.
#![allow(unused)] -// Reference: https://mariadb.com/kb/en/library/connection -// Packets: https://mariadb.com/kb/en/library/0-packet - -mod binary; -mod capabilities; -mod connect; +mod decode; mod encode; -mod error_code; -mod field; -mod response; -mod server_status; -mod text; -pub use binary::{ - ComStmtClose, ComStmtExecute, ComStmtFetch, ComStmtPrepare, ComStmtPrepareOk, ComStmtReset, - StmtExecFlag, -}; -pub use capabilities::Capabilities; -pub use connect::{ - AuthenticationSwitchRequest, HandshakeResponsePacket, InitialHandshakePacket, SslRequest, -}; +pub use decode::Decode; pub use encode::Encode; -pub use error_code::ErrorCode; -pub use field::{FieldDetailFlag, FieldType, ParameterFlag}; -pub use response::{ - ColumnCountPacket, ColumnDefinitionPacket, EofPacket, ErrPacket, OkPacket, ResultRow, -}; -pub use server_status::ServerStatusFlag; -pub use text::{ComDebug, ComInitDb, ComPing, ComProcessKill, ComQuery, ComQuit, ComSetOption, SetOptionOptions}; + +mod capabilities; +mod field; +mod status; +mod r#type; + +pub use capabilities::Capabilities; +pub use field::FieldFlags; +pub use r#type::Type; +pub use status::Status; + +mod com_query; +mod com_set_option; +mod com_stmt_execute; +mod com_stmt_prepare; +mod handshake; + +pub use com_query::ComQuery; +pub use com_set_option::{ComSetOption, SetOption}; +pub use com_stmt_execute::{ComStmtExecute, Cursor}; +pub use com_stmt_prepare::ComStmtPrepare; +pub use handshake::Handshake; + +mod column_count; +mod column_def; +mod com_stmt_prepare_ok; +mod eof; +mod err; +mod handshake_response; +mod ok; +mod row; + +pub use column_count::ColumnCount; +pub use column_def::ColumnDefinition; +pub use com_stmt_prepare_ok::ComStmtPrepareOk; +pub use eof::EofPacket; +pub use err::ErrPacket; +pub use handshake_response::HandshakeResponse; +pub use ok::OkPacket; +pub use row::Row; diff --git a/sqlx-core/src/mysql/protocol/ok.rs b/sqlx-core/src/mysql/protocol/ok.rs new file mode 100644 index 00000000..cd7986aa --- /dev/null +++ b/sqlx-core/src/mysql/protocol/ok.rs @@ -0,0 +1,64 @@ +use byteorder::LittleEndian; + +use crate::io::Buf; +use crate::mysql::io::BufExt; +use crate::mysql::protocol::{Capabilities, Decode, Status}; + +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_basic_ok_packet.html +// https://mariadb.com/kb/en/ok_packet/ +#[derive(Debug)] +pub struct OkPacket { + pub affected_rows: u64, + pub last_insert_id: u64, + pub status: Status, + pub warnings: u16, + pub info: Box<str>, +} + +impl Decode for OkPacket { + fn decode(mut buf: &[u8]) -> crate::Result<Self> + where + Self: Sized, + { + let header = buf.get_u8()?; + if header != 0 && header != 0xFE { + return Err(protocol_err!( + "expected 0x00 or 0xFE; received 0x{:X}", + header + ))?; + } + + let affected_rows = buf.get_uint_lenenc::<LittleEndian>()?.unwrap_or(0); // 0 + let last_insert_id = buf.get_uint_lenenc::<LittleEndian>()?.unwrap_or(0); // 2 + let status = Status::from_bits_truncate(buf.get_u16::<LittleEndian>()?); // + let warnings = buf.get_u16::<LittleEndian>()?; + let info = buf.get_str(buf.len())?.into(); + + Ok(Self { + affected_rows, + last_insert_id, + status, + warnings, + info, + }) + } +} + +#[cfg(test)] +mod tests { + use super::{Capabilities, Decode, OkPacket, Status}; + + const OK_HANDSHAKE: &[u8] = b"\x00\x00\x00\x02@\x00\x00"; + + #[test] + fn it_decodes_ok_handshake() { + let mut p = OkPacket::decode(OK_HANDSHAKE).unwrap(); + + assert_eq!(p.affected_rows, 0); + assert_eq!(p.last_insert_id, 0); + assert_eq!(p.warnings, 0); + assert!(p.status.contains(Status::SERVER_STATUS_AUTOCOMMIT)); +
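// The status bytes in OK_HANDSHAKE (\x02 and @ = 0x40, little-endian 0x4002) combine SERVER_STATUS_AUTOCOMMIT (0x0002) with SERVER_SESSION_STATE_CHANGED (0x4000); as a cross-check: + assert_eq!(p.status.bits(), 0x4002); +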
assert!(p.status.contains(Status::SERVER_SESSION_STATE_CHANGED)); + assert!(p.info.is_empty()); + } +} diff --git a/sqlx-core/src/mysql/protocol/response/column_count.rs b/sqlx-core/src/mysql/protocol/response/column_count.rs deleted file mode 100644 index 07291a5f..00000000 --- a/sqlx-core/src/mysql/protocol/response/column_count.rs +++ /dev/null @@ -1,43 +0,0 @@ -use crate::mysql::io::BufExt; -use byteorder::LittleEndian; -use std::io; - -// The column packet is the first packet of a result set. -// Inside of it it contains the number of columns in the result set -// encoded as an int. -// https://mariadb.com/kb/en/library/resultset/#column-count-packet -#[derive(Debug)] -pub struct ColumnCountPacket { - pub columns: u64, -} - -impl ColumnCountPacket { - pub(crate) fn decode(mut buf: &[u8]) -> io::Result { - let columns = buf.get_uint_lenenc::()?.unwrap_or(0); - - Ok(Self { columns }) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::__bytes_builder; - - #[test] - fn it_decodes_column_packet_0x_fb() -> io::Result<()> { - #[rustfmt::skip] - let buf = __bytes_builder!( - // int tag code: Some(3 bytes) - 0xFD_u8, - // value: 3 bytes - 0x01_u8, 0x01_u8, 0x01_u8 - ); - - let message = ColumnCountPacket::decode(&buf)?; - - assert_eq!(message.columns, 0x010101); - - Ok(()) - } -} diff --git a/sqlx-core/src/mysql/protocol/response/column_def.rs b/sqlx-core/src/mysql/protocol/response/column_def.rs deleted file mode 100644 index 15cdd7ee..00000000 --- a/sqlx-core/src/mysql/protocol/response/column_def.rs +++ /dev/null @@ -1,123 +0,0 @@ -use crate::{ - io::Buf, - mysql::{ - io::BufExt, - protocol::{FieldDetailFlag, FieldType}, - }, -}; -use byteorder::LittleEndian; -use std::io; - -#[derive(Debug)] -// ColumnDefinitionPacket doesn't have a packet header because -// it's nested inside a result set packet -pub struct ColumnDefinitionPacket { - pub schema: Option, - pub table_alias: Option, - pub table: Option, - pub column_alias: Option, - pub column: Option, - pub char_set: u16, - pub max_columns: i32, - pub field_type: FieldType, - pub field_details: FieldDetailFlag, - pub decimals: u8, -} - -impl ColumnDefinitionPacket { - pub(crate) fn decode(mut buf: &[u8]) -> io::Result { - // string catalog (always 'def') - let _catalog = buf.get_str_lenenc::()?; - // TODO: Assert that this is always DEF - - // string schema - let schema = buf.get_str_lenenc::()?.map(ToOwned::to_owned); - // string table alias - let table_alias = buf.get_str_lenenc::()?.map(ToOwned::to_owned); - // string table - let table = buf.get_str_lenenc::()?.map(ToOwned::to_owned); - // string column alias - let column_alias = buf.get_str_lenenc::()?.map(ToOwned::to_owned); - // string column - let column = buf.get_str_lenenc::()?.map(ToOwned::to_owned); - - // int length of fixed fields (=0xC) - let _length_of_fixed_fields = buf.get_uint_lenenc::()?; - // TODO: Assert that this is always 0xC - - // int<2> character set number - let char_set = buf.get_u16::()?; - // int<4> max. 
column size - let max_columns = buf.get_i32::()?; - // int<1> Field types - let field_type = FieldType(buf.get_u8()?); - // int<2> Field detail flag - let field_details = FieldDetailFlag::from_bits_truncate(buf.get_u16::()?); - // int<1> decimals - let decimals = buf.get_u8()?; - // int<2> - unused - - buf.advance(2); - - Ok(Self { - schema, - table_alias, - table, - column_alias, - column, - char_set, - max_columns, - field_type, - field_details, - decimals, - }) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::__bytes_builder; - - #[test] - fn it_decodes_column_def_packet() -> io::Result<()> { - #[rustfmt::skip] - let buf = __bytes_builder!( - // string catalog (always 'def') - 1u8, b'a', - // string schema - 1u8, b'b', - // string table alias - 1u8, b'c', - // string table - 1u8, b'd', - // string column alias - 1u8, b'e', - // string column - 1u8, b'f', - // int length of fixed fields (=0xC) - 0xFC_u8, 1u8, 1u8, - // int<2> character set number - 1u8, 1u8, - // int<4> max. column size - 1u8, 1u8, 1u8, 1u8, - // int<1> Field types - 1u8, - // int<2> Field detail flag - 1u8, 0u8, - // int<1> decimals - 1u8, - // int<2> - unused - - 0u8, 0u8 - ); - - let message = ColumnDefinitionPacket::decode(&buf)?; - - assert_eq!(message.schema, Some("b".into())); - assert_eq!(message.table_alias, Some("c".into())); - assert_eq!(message.table, Some("d".into())); - assert_eq!(message.column_alias, Some("e".into())); - assert_eq!(message.column, Some("f".into())); - - Ok(()) - } -} diff --git a/sqlx-core/src/mysql/protocol/response/eof.rs b/sqlx-core/src/mysql/protocol/response/eof.rs deleted file mode 100644 index 9fb73798..00000000 --- a/sqlx-core/src/mysql/protocol/response/eof.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::{ - io::Buf, - mysql::{ - io::BufExt, - protocol::{ErrorCode, ServerStatusFlag}, - }, -}; -use byteorder::LittleEndian; -use std::io; - -#[derive(Debug)] -pub struct EofPacket { - pub warning_count: u16, - pub status: ServerStatusFlag, -} - -impl EofPacket { - pub(crate) fn decode(mut buf: &[u8]) -> crate::Result { - let header = buf.get_u8()?; - if header != 0xFE { - return Err(protocol_err!("expected 0xFE; received {}", header))?; - } - - let warning_count = buf.get_u16::()?; - let status = ServerStatusFlag::from_bits_truncate(buf.get_u16::()?); - - Ok(Self { - warning_count, - status, - }) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::__bytes_builder; - use std::io; - - #[test] - fn it_decodes_eof_packet() -> crate::Result<()> { - #[rustfmt::skip] - let buf = __bytes_builder!( - // int<1> 0xfe : EOF header - 0xFE_u8, - // int<2> warning count - 0u8, 0u8, - // int<2> server status - 1u8, 1u8 - ); - - let _message = EofPacket::decode(&buf)?; - - // TODO: Assert fields? 
- - Ok(()) - } -} diff --git a/sqlx-core/src/mysql/protocol/response/err.rs b/sqlx-core/src/mysql/protocol/response/err.rs deleted file mode 100644 index 1638196f..00000000 --- a/sqlx-core/src/mysql/protocol/response/err.rs +++ /dev/null @@ -1,116 +0,0 @@ -use crate::{ - io::Buf, - mysql::{error::Error, io::BufExt, protocol::ErrorCode}, -}; -use byteorder::LittleEndian; -use std::io; - -#[derive(Debug)] -pub enum ErrPacket { - Progress { - stage: u8, - max_stage: u8, - progress: u32, - info: Box, - }, - - Error { - code: ErrorCode, - sql_state: Option>, - message: Box, - }, -} - -impl ErrPacket { - pub fn decode(mut buf: &[u8]) -> io::Result { - let header = buf.get_u8()?; - debug_assert_eq!(header, 0xFF); - - // error code : int<2> - let code = buf.get_u16::()?; - - // if (errorcode == 0xFFFF) /* progress reporting */ - if code == 0xFF_FF { - let stage = buf.get_u8()?; - let max_stage = buf.get_u8()?; - let progress = buf.get_u24::()?; - let info = buf - .get_str_lenenc::()? - .unwrap_or_default() - .into(); - - Ok(Self::Progress { - stage, - max_stage, - progress, - info, - }) - } else { - // if (next byte = '#') - let sql_state = if buf[0] == b'#' { - // '#' : string<1> - buf.advance(1); - - // sql state : string<5> - Some(buf.get_str(5)?.into()) - } else { - None - }; - - let message = buf.get_str_eof()?.into(); - - Ok(Self::Error { - code: ErrorCode(code), - sql_state, - message, - }) - } - } - - pub fn expect_error(self) -> crate::Result { - match self { - ErrPacket::Progress { .. } => { - Err(protocol_err!("expected ErrPacket::Err, got {:?}", self).into()) - } - ErrPacket::Error { code, message, .. } => Err(Error { code, message }.into()), - } - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::__bytes_builder; - - #[test] - fn it_decodes_err_packet() -> io::Result<()> { - #[rustfmt::skip] - let buf = __bytes_builder!( - // int<1> 0xfe : EOF header - 0xFF_u8, - // int<2> error code - 0x84_u8, 0x04_u8, - // if (errorcode == 0xFFFF) /* progress reporting */ { - // int<1> stage - // int<1> max_stage - // int<3> progress - // string progress_info - // } else { - // if (next byte = '#') { - // string<1> sql state marker '#' - b"#", - // string<5>sql state - b"08S01", - // string error message - b"Got packets out of order" - // } else { - // string error message - // } - // } - ); - - let _message = ErrPacket::decode(&buf)?; - - Ok(()) - } -} diff --git a/sqlx-core/src/mysql/protocol/response/mod.rs b/sqlx-core/src/mysql/protocol/response/mod.rs deleted file mode 100644 index fd2be74c..00000000 --- a/sqlx-core/src/mysql/protocol/response/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod column_count; -mod column_def; -mod eof; -mod err; -mod ok; -mod row; - -pub use column_count::ColumnCountPacket; -pub use column_def::ColumnDefinitionPacket; -pub use eof::EofPacket; -pub use err::ErrPacket; -pub use ok::OkPacket; -pub use row::ResultRow; diff --git a/sqlx-core/src/mysql/protocol/response/ok.rs b/sqlx-core/src/mysql/protocol/response/ok.rs deleted file mode 100644 index a8e5377b..00000000 --- a/sqlx-core/src/mysql/protocol/response/ok.rs +++ /dev/null @@ -1,109 +0,0 @@ -use crate::{ - io::Buf, - mysql::{ - io::BufExt, - protocol::{Capabilities, ServerStatusFlag}, - }, -}; -use byteorder::LittleEndian; -use std::io; - -// https://mariadb.com/kb/en/library/ok_packet/ -#[derive(Debug)] -pub struct OkPacket { - pub affected_rows: u64, - pub last_insert_id: u64, - pub server_status: ServerStatusFlag, - pub warning_count: u16, - pub info: Box, - pub session_state_info: Option>, - pub 
value_of_variable: Option>, -} - -impl OkPacket { - pub fn decode(mut buf: &[u8], capabilities: Capabilities) -> crate::Result { - let header = buf.get_u8()?; - if header != 0 && header != 0xFE { - return Err(protocol_err!( - "expected 0x00 or 0xFE; received 0x{:X}", - header - ))?; - } - - let affected_rows = buf.get_uint_lenenc::()?.unwrap_or(0); - let last_insert_id = buf.get_uint_lenenc::()?.unwrap_or(0); - let server_status = ServerStatusFlag::from_bits_truncate(buf.get_u16::()?); - let warning_count = buf.get_u16::()?; - - let info; - let mut session_state_info = None; - let mut value_of_variable = None; - - if capabilities.contains(Capabilities::CLIENT_SESSION_TRACK) { - info = buf - .get_str_lenenc::()? - .unwrap_or_default() - .to_owned() - .into(); - session_state_info = buf.get_bytes_lenenc::()?.map(Into::into); - value_of_variable = buf.get_str_lenenc::()?.map(Into::into); - } else { - info = buf.get_str_eof()?.to_owned().into(); - } - - Ok(Self { - affected_rows, - last_insert_id, - server_status, - warning_count, - info, - session_state_info, - value_of_variable, - }) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::__bytes_builder; - - #[test] - fn it_decodes_ok_packet() -> crate::Result<()> { - #[rustfmt::skip] - let buf = __bytes_builder!( - // 0x00 : OK_Packet header or (0xFE if CLIENT_DEPRECATE_EOF is set) - 0u8, - // int affected rows - 0xFB_u8, - // int last insert id - 0xFB_u8, - // int<2> server status - 1u8, 1u8, - // int<2> warning count - 0u8, 0u8, - // if session_tracking_supported (see CLIENT_SESSION_TRACK) { - // string info - // if (status flags & SERVER_SESSION_STATE_CHANGED) { - // string session state info - // string value of variable - // } - // } else { - // string info - b"info" - // } - ); - - let message = OkPacket::decode(&buf, Capabilities::empty())?; - - assert_eq!(message.affected_rows, 0); - assert_eq!(message.last_insert_id, 0); - assert!(message - .server_status - .contains(ServerStatusFlag::SERVER_STATUS_IN_TRANS)); - assert_eq!(message.warning_count, 0); - assert_eq!(message.info, "info".into()); - - Ok(()) - } -} diff --git a/sqlx-core/src/mysql/protocol/response/row.rs b/sqlx-core/src/mysql/protocol/response/row.rs deleted file mode 100644 index 1731e500..00000000 --- a/sqlx-core/src/mysql/protocol/response/row.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::{ - io::Buf, - mysql::{ - io::BufExt, - protocol::{ColumnDefinitionPacket, FieldType}, - }, -}; -use byteorder::LittleEndian; -use std::{io, pin::Pin, ptr::NonNull}; - -/// A resultset row represents a database resultset unit, which is usually generated by -/// executing a statement that queries the database. 
-#[derive(Debug)] -pub struct ResultRow { - #[used] - buffer: Pin>, - pub values: Box<[Option>]>, -} - -// SAFE: Raw pointers point to pinned memory inside the struct -unsafe impl Send for ResultRow {} -unsafe impl Sync for ResultRow {} - -impl ResultRow { - pub fn decode(mut buf: &[u8], columns: &[ColumnDefinitionPacket]) -> crate::Result { - // 0x00 header : byte<1> - let header = buf.get_u8()?; - - if header != 0 { - return Err(protocol_err!("expected header 0x00, got: {:#04X}", header).into()); - } - - // NULL-Bitmap : byte<(number_of_columns + 9) / 8> - let null_len = (columns.len() + 9) / 8; - let null = &buf[..]; - buf.advance(null_len); - - let buffer: Pin> = Pin::new(buf.into()); - let mut buf = &*buffer; - - let mut values = Vec::with_capacity(columns.len()); - - for column_idx in 0..columns.len() { - if null[column_idx / 8] & (1 << (column_idx % 8) as u8) != 0 { - values.push(None); - } else { - match columns[column_idx].field_type { - FieldType::MYSQL_TYPE_TINY => { - values.push(Some(buf.get_bytes(1)?.into())); - } - - FieldType::MYSQL_TYPE_SHORT => { - values.push(Some(buf.get_bytes(2)?.into())); - } - - FieldType::MYSQL_TYPE_LONG => { - values.push(Some(buf.get_bytes(4)?.into())); - } - - FieldType::MYSQL_TYPE_LONGLONG => { - values.push(Some(buf.get_bytes(8)?.into())); - } - - FieldType::MYSQL_TYPE_TINY_BLOB - | FieldType::MYSQL_TYPE_MEDIUM_BLOB - | FieldType::MYSQL_TYPE_LONG_BLOB - | FieldType::MYSQL_TYPE_BLOB - | FieldType::MYSQL_TYPE_GEOMETRY - | FieldType::MYSQL_TYPE_STRING - | FieldType::MYSQL_TYPE_VARCHAR - | FieldType::MYSQL_TYPE_VAR_STRING => { - values.push(buf.get_bytes_lenenc::()?.map(Into::into)); - } - - type_ => { - unimplemented!("encountered unknown field type: {:?}", type_); - } - } - } - } - - Ok(Self { - buffer, - values: values.into_boxed_slice(), - }) - } -} diff --git a/sqlx-core/src/mysql/protocol/row.rs b/sqlx-core/src/mysql/protocol/row.rs new file mode 100644 index 00000000..4bddc316 --- /dev/null +++ b/sqlx-core/src/mysql/protocol/row.rs @@ -0,0 +1,129 @@ +use std::ops::Range; + +use byteorder::{ByteOrder, LittleEndian}; + +use crate::io::Buf; +use crate::mysql::io::BufExt; +use crate::mysql::protocol::{Decode, Type}; + +pub struct Row { + buffer: Box<[u8]>, + values: Box<[Option>]>, + binary: bool, +} + +impl Row { + pub fn len(&self) -> usize { + self.values.len() + } + + pub fn get(&self, index: usize) -> Option<&[u8]> { + let range = self.values[index].as_ref()?; + + Some(&self.buffer[(range.start as usize)..(range.end as usize)]) + } +} + +fn get_lenenc_size(buf: &[u8]) -> usize { + match buf[0] { + 0xFB => 1, + + 0xFC => { + let len_size = 1 + 2; + let len = LittleEndian::read_u16(&buf[1..]); + + len_size + (len as usize) + } + + 0xFD => { + let len_size = 1 + 3; + let len = LittleEndian::read_u24(&buf[1..]); + + len_size + (len as usize) + } + + 0xFE => { + let len_size = 1 + 8; + let len = LittleEndian::read_u64(&buf[1..]); + + len_size + (len as usize) + } + + value => 1 + (value as usize), + } +} + +impl Row { + pub fn decode(mut buf: &[u8], columns: &[Type], binary: bool) -> crate::Result { + if !binary { + let buffer: Box<[u8]> = buf.into(); + let mut values = Vec::with_capacity(columns.len()); + let mut index = 0; + + for column_idx in 0..columns.len() { + let size = get_lenenc_size(&buf[index..]); + + values.push(Some(index..(index + size))); + + index += size; + buf.advance(size); + } + + return Ok(Self { + buffer, + values: values.into_boxed_slice(), + binary, + }); + } + + // 0x00 header : byte<1> + let header = buf.get_u8()?; + 
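// For binary (COM_STMT_EXECUTE) result rows the first byte is a 0x00 packet header; a NULL bitmap of (column_count + 9) / 8 bytes follows, then the non-NULL values. Text rows, handled above, have no header and start directly with length-encoded values. +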
if header != 0 { + return Err(protocol_err!("expected ROW (0x00), got: {:#04X}", header).into()); + } + + // NULL-Bitmap : byte<(number_of_columns + 9) / 8> + let null_len = (columns.len() + 9) / 8; + let null_bitmap = &buf[..]; + buf.advance(null_len); + + let buffer: Box<[u8]> = buf.into(); + let mut values = Vec::with_capacity(columns.len()); + let mut index = 0; + + for column_idx in 0..columns.len() { + if null_bitmap[column_idx / 8] & (1 << (column_idx % 8) as u8) != 0 { + values.push(None); + } else { + let size = match columns[column_idx] { + Type::TINY => 1, + Type::SHORT => 2, + Type::LONG => 4, + Type::LONGLONG => 8, + + Type::TINY_BLOB + | Type::MEDIUM_BLOB + | Type::LONG_BLOB + | Type::BLOB + | Type::GEOMETRY + | Type::STRING + | Type::VARCHAR + | Type::VAR_STRING => get_lenenc_size(&buffer[index..]), + + r#type => { + unimplemented!("encountered unknown field type: {:?}", r#type); + } + }; + + values.push(Some(index..(index + size))); + index += size; + } + } + + Ok(Self { + buffer, + values: values.into_boxed_slice(), + binary, + }) + } +} diff --git a/sqlx-core/src/mysql/protocol/server_status.rs b/sqlx-core/src/mysql/protocol/server_status.rs deleted file mode 100644 index 5b973928..00000000 --- a/sqlx-core/src/mysql/protocol/server_status.rs +++ /dev/null @@ -1,45 +0,0 @@ -// https://mariadb.com/kb/en/library/mariadb-connectorc-types-and-definitions/#server-status -bitflags::bitflags! { - pub struct ServerStatusFlag: u16 { - // A transaction is currently active - const SERVER_STATUS_IN_TRANS = 1; - - // Autocommit mode is set - const SERVER_STATUS_AUTOCOMMIT = 2; - - // more results exists (more packet follow) - const SERVER_MORE_RESULTS_EXISTS = 8; - - const SERVER_QUERY_NO_GOOD_INDEX_USED = 16; - const SERVER_QUERY_NO_INDEX_USED = 32; - - // when using COM_STMT_FETCH, indicate that current cursor still has result - const SERVER_STATUS_CURSOR_EXISTS = 64; - - // when using COM_STMT_FETCH, indicate that current cursor has finished to send results - const SERVER_STATUS_LAST_ROW_SENT = 128; - - // database has been dropped - const SERVER_STATUS_DB_DROPPED = 1 << 8; - - // current escape mode is "no backslash escape" - const SERVER_STATUS_NO_BACKSLASH_ESAPES = 1 << 9; - - // A DDL change did have an impact on an existing PREPARE (an - // automatic reprepare has been executed) - const SERVER_STATUS_METADATA_CHANGED = 1 << 10; - - // Last statement took more than the time value specified in - // server variable long_query_time. - const SERVER_QUERY_WAS_SLOW = 1 << 11; - - // this resultset contain stored procedure output parameter - const SERVER_PS_OUT_PARAMS = 1 << 12; - - // current transaction is a read-only transaction - const SERVER_STATUS_IN_TRANS_READONLY = 1 << 13; - - // session state change. see Session change type for more information - const SERVER_SESSION_STATE_CHANGED = 1 << 14; - } -} diff --git a/sqlx-core/src/mysql/protocol/status.rs b/sqlx-core/src/mysql/protocol/status.rs new file mode 100644 index 00000000..0338c0df --- /dev/null +++ b/sqlx-core/src/mysql/protocol/status.rs @@ -0,0 +1,49 @@ +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/mysql__com_8h.html#a1d854e841086925be1883e4d7b4e8cad +// https://mariadb.com/kb/en/library/mariadb-connectorc-types-and-definitions/#server-status +bitflags::bitflags! { + pub struct Status: u16 { + // Is raised when a multi-statement transaction has been started, either explicitly, + // by means of BEGIN or COMMIT AND CHAIN, or implicitly, by the first + // transactional statement, when autocommit=off. 
+ const SERVER_STATUS_IN_TRANS = 1; + + // Autocommit mode is set + const SERVER_STATUS_AUTOCOMMIT = 2; + + // Multi query - next query exists. + const SERVER_MORE_RESULTS_EXISTS = 8; + + const SERVER_QUERY_NO_GOOD_INDEX_USED = 16; + const SERVER_QUERY_NO_INDEX_USED = 32; + + // When using COM_STMT_FETCH, indicate that current cursor still has result + const SERVER_STATUS_CURSOR_EXISTS = 64; + + // When using COM_STMT_FETCH, indicate that current cursor has finished to send results + const SERVER_STATUS_LAST_ROW_SENT = 128; + + // Database has been dropped + const SERVER_STATUS_DB_DROPPED = (1 << 8); + + // Current escape mode is "no backslash escape" + const SERVER_STATUS_NO_BACKSLASH_ESCAPES = (1 << 9); + + // A DDL change did have an impact on an existing PREPARE (an automatic + // re-prepare has been executed) + const SERVER_STATUS_METADATA_CHANGED = (1 << 10); + + // Last statement took more than the time value specified + // in server variable long_query_time. + const SERVER_QUERY_WAS_SLOW = (1 << 11); + + // This result-set contain stored procedure output parameter. + const SERVER_PS_OUT_PARAMS = (1 << 12); + + // Current transaction is a read-only transaction. + const SERVER_STATUS_IN_TRANS_READONLY = (1 << 13); + + // This status flag, when on, implies that one of the state information has changed + // on the server because of the execution of the last statement. + const SERVER_SESSION_STATE_CHANGED = (1 << 14); + } +} diff --git a/sqlx-core/src/mysql/protocol/text/com_debug.rs b/sqlx-core/src/mysql/protocol/text/com_debug.rs deleted file mode 100644 index 59a053b5..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_debug.rs +++ /dev/null @@ -1,28 +0,0 @@ -use super::TextProtocol; -use crate::{ - io::BufMut, - mysql::protocol::{Capabilities, Encode}, -}; - -#[derive(Debug)] -pub struct ComDebug; - -impl Encode for ComDebug { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_DEBUG Header (0xOD) : int<1> - buf.put_u8(TextProtocol::ComDebug as u8); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_debug() { - let mut buf = Vec::new(); - ComDebug.encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x0D"); - } -} diff --git a/sqlx-core/src/mysql/protocol/text/com_init_db.rs b/sqlx-core/src/mysql/protocol/text/com_init_db.rs deleted file mode 100644 index 084e7eb8..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_init_db.rs +++ /dev/null @@ -1,36 +0,0 @@ -use super::TextProtocol; -use crate::{ - io::BufMut, - mysql::protocol::{Capabilities, Encode}, -}; - -pub struct ComInitDb<'a> { - pub schema_name: &'a str, -} - -impl Encode for ComInitDb<'_> { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_INIT_DB Header : int<1> - buf.put_u8(TextProtocol::ComInitDb as u8); - - // schema name : string - buf.put_str_nul(self.schema_name); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_init_db() { - let mut buf = Vec::new(); - - ComInitDb { - schema_name: "portal", - } - .encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x02portal\0"); - } -} diff --git a/sqlx-core/src/mysql/protocol/text/com_ping.rs b/sqlx-core/src/mysql/protocol/text/com_ping.rs deleted file mode 100644 index 66e6c0d1..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_ping.rs +++ /dev/null @@ -1,28 +0,0 @@ -use super::TextProtocol; -use crate::{ - io::BufMut, - mysql::protocol::{Capabilities, Encode}, -}; - -#[derive(Debug)] -pub struct ComPing; - -impl Encode for ComPing { - fn encode(&self, 
buf: &mut Vec, _: Capabilities) { - // COM_PING Header : int<1> - buf.put_u8(TextProtocol::ComPing as u8); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_ping() { - let mut buf = Vec::new(); - ComPing.encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x0E"); - } -} diff --git a/sqlx-core/src/mysql/protocol/text/com_process_kill.rs b/sqlx-core/src/mysql/protocol/text/com_process_kill.rs deleted file mode 100644 index 3198636a..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_process_kill.rs +++ /dev/null @@ -1,35 +0,0 @@ -use super::TextProtocol; -use crate::{ - io::BufMut, - mysql::protocol::{Capabilities, Encode}, -}; -use byteorder::LittleEndian; - -/// Forces the server to terminate a specified connection. -pub struct ComProcessKill { - pub process_id: u32, -} - -impl Encode for ComProcessKill { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_PROCESS_KILL : int<1> - buf.put_u8(TextProtocol::ComProcessKill as u8); - - // process id : int<4> - buf.put_u32::(self.process_id); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_process_kill() { - let mut buf = Vec::new(); - - ComProcessKill { process_id: 1 }.encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x0C\x01\0\0\0"); - } -} diff --git a/sqlx-core/src/mysql/protocol/text/com_query.rs b/sqlx-core/src/mysql/protocol/text/com_query.rs deleted file mode 100644 index ba58e7c9..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_query.rs +++ /dev/null @@ -1,36 +0,0 @@ -use crate::{ - io::BufMut, - mysql::{ - io::BufMutExt, - protocol::{Capabilities, Encode}, - }, -}; - -/// Sends the server an SQL statement to be executed immediately. -pub struct ComQuery<'a> { - pub sql_statement: &'a str, -} - -impl<'a> Encode for ComQuery<'a> { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - buf.put_u8(super::TextProtocol::ComQuery as u8); - buf.put_str(&self.sql_statement); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_query() { - let mut buf = Vec::new(); - - ComQuery { - sql_statement: "SELECT * FROM users", - } - .encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf, b"\x03SELECT * FROM users"); - } -} diff --git a/sqlx-core/src/mysql/protocol/text/com_quit.rs b/sqlx-core/src/mysql/protocol/text/com_quit.rs deleted file mode 100644 index 5a231994..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_quit.rs +++ /dev/null @@ -1,29 +0,0 @@ -use super::TextProtocol; -use crate::{ - io::BufMut, - mysql::protocol::{Capabilities, Encode}, -}; - -pub struct ComQuit; - -impl Encode for ComQuit { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - buf.put_u8(TextProtocol::ComQuit as u8); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_quit() -> std::io::Result<()> { - let mut buf = Vec::new(); - - ComQuit.encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x01"); - - Ok(()) - } -} diff --git a/sqlx-core/src/mysql/protocol/text/com_reset_conn.rs b/sqlx-core/src/mysql/protocol/text/com_reset_conn.rs deleted file mode 100644 index a13aa91b..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_reset_conn.rs +++ /dev/null @@ -1,30 +0,0 @@ -use super::TextProtocol; -use crate::{ - io::BufMut, - mysql::protocol::{Capabilities, Encode}, -}; - -/// Resets a connection without re-authentication. 
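All of these COM_* encoders write only the command payload; before it reaches the socket, each payload is wrapped in a MySQL wire packet consisting of a 3-byte little-endian payload length, a 1-byte sequence id, and the payload itself. A rough sketch of that framing, with an illustrative helper name:

fn to_packet(sequence_id: u8, payload: &[u8]) -> Vec<u8> {
    // Payloads of 0xFF_FF_FF bytes or more must be split across continuation packets.
    assert!(payload.len() < 0xFF_FF_FF);

    let mut packet = Vec::with_capacity(4 + payload.len());
    packet.extend_from_slice(&(payload.len() as u32).to_le_bytes()[..3]);
    packet.push(sequence_id);
    packet.extend_from_slice(payload);
    packet
}

// For example, COM_PING (0x0E) with sequence id 0 is framed as [0x01, 0x00, 0x00, 0x00, 0x0E].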
-#[derive(Debug)] -pub struct ComResetConnection; - -impl Encode for ComResetConnection { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_RESET_CONNECTION Header : int<1> - buf.put_u8(TextProtocol::ComResetConnection as u8); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_reset_conn() { - let mut buf = Vec::new(); - - ComResetConnection.encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x1F"); - } -} diff --git a/sqlx-core/src/mysql/protocol/text/com_set_option.rs b/sqlx-core/src/mysql/protocol/text/com_set_option.rs deleted file mode 100644 index 2c0ed2ec..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_set_option.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::{ - io::BufMut, - mysql::protocol::{text::TextProtocol, Capabilities, Encode}, -}; -use byteorder::LittleEndian; - -#[derive(Debug, Copy, Clone)] -#[repr(u16)] -pub enum SetOptionOptions { - MySqlOptionMultiStatementsOn = 0x00, - MySqlOptionMultiStatementsOff = 0x01, -} - -/// Enables or disables server option. -#[derive(Debug)] -pub struct ComSetOption { - pub option: SetOptionOptions, -} - -impl Encode for ComSetOption { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_SET_OPTION : int<1> - buf.put_u8(TextProtocol::ComSetOption as u8); - - // option : int<2> - buf.put_u16::(self.option as u16); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_set_option() { - let mut buf = Vec::new(); - - ComSetOption { - option: SetOptionOptions::MySqlOptionMultiStatementsOff, - } - .encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x1B\x01\0"); - } -} diff --git a/sqlx-core/src/mysql/protocol/text/com_sleep.rs b/sqlx-core/src/mysql/protocol/text/com_sleep.rs deleted file mode 100644 index 79fe2297..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_sleep.rs +++ /dev/null @@ -1,27 +0,0 @@ -use crate::{ - io::BufMut, - mysql::protocol::{text::TextProtocol, Capabilities, Encode}, -}; - -pub struct ComSleep; - -impl Encode for ComSleep { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_SLEEP : int<1> - buf.put_u8(TextProtocol::ComSleep as u8); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_sleep() { - let mut buf = Vec::new(); - - ComSleep.encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x00"); - } -} diff --git a/sqlx-core/src/mysql/protocol/text/com_statistics.rs b/sqlx-core/src/mysql/protocol/text/com_statistics.rs deleted file mode 100644 index a664da11..00000000 --- a/sqlx-core/src/mysql/protocol/text/com_statistics.rs +++ /dev/null @@ -1,28 +0,0 @@ -use crate::{ - io::BufMut, - mysql::protocol::{text::TextProtocol, Capabilities, Encode}, -}; - -#[derive(Debug)] -pub struct ComStatistics; - -impl Encode for ComStatistics { - fn encode(&self, buf: &mut Vec, _: Capabilities) { - // COM_STATISTICS : int<1> - buf.put_u8(TextProtocol::ComStatistics as u8); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_com_statistics() { - let mut buf = Vec::new(); - - ComStatistics.encode(&mut buf, Capabilities::empty()); - - assert_eq!(&buf[..], b"\x09"); - } -} diff --git a/sqlx-core/src/mysql/protocol/text/mod.rs b/sqlx-core/src/mysql/protocol/text/mod.rs deleted file mode 100644 index c850ab6c..00000000 --- a/sqlx-core/src/mysql/protocol/text/mod.rs +++ /dev/null @@ -1,41 +0,0 @@ -mod com_debug; -mod com_init_db; -mod com_ping; -mod com_process_kill; -mod com_query; -mod com_quit; -mod com_reset_conn; -mod com_set_option; 
-mod com_sleep; -mod com_statistics; - -pub use com_debug::ComDebug; -pub use com_init_db::ComInitDb; -pub use com_ping::ComPing; -pub use com_process_kill::ComProcessKill; -pub use com_query::ComQuery; -pub use com_quit::ComQuit; -pub use com_reset_conn::ComResetConnection; -pub use com_set_option::{ComSetOption, SetOptionOptions}; -pub use com_sleep::ComSleep; -pub use com_statistics::ComStatistics; - -// This is an enum of text protocol packet tags. -// Tags are the 5th byte of the packet (1st byte of packet body) -// and are used to determine which type of query was sent. -// The name of the enum variant represents the type of query, and -// the value is the byte value required by the server. -enum TextProtocol { - ComChangeUser = 0x11, - ComDebug = 0x0D, - ComInitDb = 0x02, - ComPing = 0x0e, - ComProcessKill = 0x0C, - ComQuery = 0x03, - ComQuit = 0x01, - ComResetConnection = 0x1F, - ComSetOption = 0x1B, - ComShutdown = 0x0A, - ComSleep = 0x00, - ComStatistics = 0x09, -} diff --git a/sqlx-core/src/mysql/protocol/type.rs b/sqlx-core/src/mysql/protocol/type.rs new file mode 100644 index 00000000..7ccfe446 --- /dev/null +++ b/sqlx-core/src/mysql/protocol/type.rs @@ -0,0 +1,39 @@ +// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/binary__log__types_8h.html +// https://mariadb.com/kb/en/library/resultset/#field-types +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct Type(pub u8); + +impl Type { + pub const BIT: Type = Type(16); + pub const BLOB: Type = Type(252); + pub const DATE: Type = Type(10); + pub const DATETIME: Type = Type(12); + pub const DECIMAL: Type = Type(0); + pub const DOUBLE: Type = Type(5); + pub const ENUM: Type = Type(247); + pub const FLOAT: Type = Type(4); + pub const GEOMETRY: Type = Type(255); + pub const INT24: Type = Type(9); + pub const JSON: Type = Type(245); // MySQL Only + pub const LONG: Type = Type(3); + pub const LONGLONG: Type = Type(8); + pub const LONG_BLOB: Type = Type(251); + pub const MEDIUM_BLOB: Type = Type(250); + pub const NULL: Type = Type(6); + pub const SET: Type = Type(248); + pub const SHORT: Type = Type(2); + pub const STRING: Type = Type(254); + pub const TIME: Type = Type(11); + pub const TIMESTAMP: Type = Type(7); + pub const TINY: Type = Type(1); + pub const TINY_BLOB: Type = Type(249); + pub const VARCHAR: Type = Type(15); + pub const VAR_STRING: Type = Type(253); + pub const YEAR: Type = Type(13); +} + +impl Default for Type { + fn default() -> Type { + Type::NULL + } +} diff --git a/sqlx-core/src/mysql/protocol/types.rs b/sqlx-core/src/mysql/protocol/types.rs deleted file mode 100644 index 73e616b2..00000000 --- a/sqlx-core/src/mysql/protocol/types.rs +++ /dev/null @@ -1,8 +0,0 @@ -pub enum SessionChangeType { - SessionTrackSystemVariables = 0, - SessionTrackSchema = 1, - SessionTrackStateChange = 2, - SessionTrackGTIDS = 3, - SessionTrackTransactionCharacteristics = 4, - SessionTrackTransactionState = 5, -} diff --git a/sqlx-core/src/mysql/query.rs b/sqlx-core/src/mysql/query.rs deleted file mode 100644 index 021fc253..00000000 --- a/sqlx-core/src/mysql/query.rs +++ /dev/null @@ -1,41 +0,0 @@ -use super::Connection; -use crate::{encode::{Encode, IsNull}, mysql::types::MySqlTypeMetadata, params::QueryParameters, types::HasSqlType, MySql}; - -#[derive(Default)] -pub struct MySqlDbParameters { - pub(crate) param_types: Vec, - pub(crate) params: Vec, - pub(crate) null_bitmap: Vec, -} - -impl QueryParameters for MySqlDbParameters { - type Backend = MySql; - - fn reserve(&mut self, binds: usize, bytes: usize) { - 
self.param_types.reserve(binds); - self.params.reserve(bytes); - - // ensure we have enough bytes in the bitmap to hold at least `binds` extra bits - // the second `& 7` gives us 0 spare bits when param_types.len() is a multiple of 8 - let spare_bits = (8 - (self.param_types.len()) & 7) & 7; - // ensure that if there are no spare bits left, `binds = 1` reserves another byte - self.null_bitmap.reserve( (binds + 7 - spare_bits) / 8); - } - - fn bind(&mut self, value: T) - where - Self: Sized, - Self::Backend: HasSqlType, - T: Encode, - { - let metadata = >::metadata(); - let index = self.param_types.len(); - - self.param_types.push(metadata); - self.null_bitmap.resize((index / 8) + 1, 0); - - if let IsNull::Yes = value.encode(&mut self.params) { - self.null_bitmap[index / 8] &= (1 << index % 8) as u8; - } - } -} diff --git a/sqlx-core/src/mysql/row.rs b/sqlx-core/src/mysql/row.rs index a2781fab..6d24326e 100644 --- a/sqlx-core/src/mysql/row.rs +++ b/sqlx-core/src/mysql/row.rs @@ -1,17 +1,58 @@ -use crate::{mysql::{protocol::ResultRow, Connection}, row::Row, MySql}; +use std::collections::HashMap; +use std::sync::Arc; -impl Row for ResultRow { - type Backend = MySql; +use crate::decode::Decode; +use crate::mysql::protocol; +use crate::mysql::MySql; +use crate::row::{Row, RowIndex}; +use crate::types::HasSqlType; + +pub struct MySqlRow { + pub(super) row: protocol::Row, + pub(super) columns: Arc, usize>>, +} + +impl Row for MySqlRow { + type Database = MySql; - #[inline] fn len(&self) -> usize { - self.values.len() + self.row.len() } - #[inline] - fn get_raw(&self, index: usize) -> Option<&[u8]> { - self.values[index] - .as_ref() - .map(|value| unsafe { value.as_ref() }) + fn get(&self, index: I) -> T + where + Self::Database: HasSqlType, + I: RowIndex, + T: Decode, + { + index.try_get(self).unwrap() } } + +impl RowIndex for usize { + fn try_get(&self, row: &MySqlRow) -> crate::Result + where + ::Database: HasSqlType, + T: Decode<::Database>, + { + Ok(Decode::decode_nullable(row.row.get(*self))?) 
+ } +} + +impl RowIndex for &'_ str { + fn try_get(&self, row: &MySqlRow) -> crate::Result + where + ::Database: HasSqlType, + T: Decode<::Database>, + { + let index = row + .columns + .get(*self) + .ok_or_else(|| crate::Error::ColumnNotFound((*self).into()))?; + let value = Decode::decode_nullable(row.row.get(*index))?; + + Ok(value) + } +} + +impl_from_row_for_row!(MySqlRow); diff --git a/sqlx-core/src/mysql/types/binary.rs b/sqlx-core/src/mysql/types/binary.rs deleted file mode 100644 index b3c40771..00000000 --- a/sqlx-core/src/mysql/types/binary.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::{ - encode::IsNull, - mysql::{ - protocol::{FieldType, ParameterFlag}, - types::MySqlTypeMetadata, - }, - Decode, Encode, HasSqlType, MySql, -}; - -impl HasSqlType<[u8]> for MySql { - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - field_type: FieldType::MYSQL_TYPE_BLOB, - param_flag: ParameterFlag::empty(), - } - } -} - -impl HasSqlType> for MySql { - fn metadata() -> MySqlTypeMetadata { - >::metadata() - } -} - -impl Encode for [u8] { - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(self); - IsNull::No - } -} - -impl Encode for Vec { - fn encode(&self, buf: &mut Vec) -> IsNull { - <[u8] as Encode>::encode(self, buf) - } -} - -impl Decode for Vec { - fn decode(raw: Option<&[u8]>) -> Self { - raw.unwrap().into() - } -} diff --git a/sqlx-core/src/mysql/types/bool.rs b/sqlx-core/src/mysql/types/bool.rs new file mode 100644 index 00000000..4507b8c2 --- /dev/null +++ b/sqlx-core/src/mysql/types/bool.rs @@ -0,0 +1,25 @@ +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::mysql::protocol::Type; +use crate::mysql::types::MySqlTypeMetadata; +use crate::mysql::MySql; +use crate::types::HasSqlType; + +impl HasSqlType for MySql { + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::new(Type::TINY) + } +} + +impl Encode for bool { + fn encode(&self, buf: &mut Vec) { + buf.push(*self as u8); + } +} + +impl Decode for bool { + fn decode(buf: &[u8]) -> Result { + // FIXME: Return an error if the buffer size is not (at least) 1 + Ok(buf[0] != 0) + } +} diff --git a/sqlx-core/src/mysql/types/boolean.rs b/sqlx-core/src/mysql/types/boolean.rs deleted file mode 100644 index 6edbe08c..00000000 --- a/sqlx-core/src/mysql/types/boolean.rs +++ /dev/null @@ -1,34 +0,0 @@ -use super::{MySql, MySqlTypeMetadata}; -use crate::{ - decode::Decode, - encode::{Encode, IsNull}, - mysql::protocol::{FieldType, ParameterFlag}, - types::HasSqlType, -}; - -impl HasSqlType for MySql { - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - // MYSQL_TYPE_TINY - field_type: FieldType::MYSQL_TYPE_TINY, - param_flag: ParameterFlag::empty(), - } - } -} - -impl Encode for bool { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.push(*self as u8); - - IsNull::No - } -} - -impl Decode for bool { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - // TODO: Handle optionals - buf.unwrap()[0] != 0 - } -} diff --git a/sqlx-core/src/mysql/types/character.rs b/sqlx-core/src/mysql/types/character.rs deleted file mode 100644 index fd82ec32..00000000 --- a/sqlx-core/src/mysql/types/character.rs +++ /dev/null @@ -1,60 +0,0 @@ -use super::{MySql, MySqlTypeMetadata}; -use crate::{ - decode::Decode, - encode::{Encode, IsNull}, - mysql::protocol::{FieldType, ParameterFlag}, - types::HasSqlType, -}; -use std::str; -use crate::mysql::io::BufMutExt; -use byteorder::LittleEndian; - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - 
MySqlTypeMetadata { - // MYSQL_TYPE_VAR_STRING - field_type: FieldType::MYSQL_TYPE_VAR_STRING, - param_flag: ParameterFlag::empty(), - } - } -} - -impl Encode for str { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.put_str_lenenc::(self); - - IsNull::No - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - >::metadata() - } -} - -impl Encode for String { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - >::encode(self.as_str(), buf) - } -} - -impl Decode for String { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - // TODO: Handle nulls - - let s = if cfg!(debug_assertions) { - str::from_utf8(buf.unwrap()).expect("mysql returned non UTF-8 data for VAR_STRING") - } else { - // TODO: Determine how to treat string if different collation - unsafe { str::from_utf8_unchecked(buf.unwrap()) } - }; - - s.to_owned() - } -} diff --git a/sqlx-core/src/mysql/types/chrono.rs b/sqlx-core/src/mysql/types/chrono.rs index 096235bb..9de8da48 100644 --- a/sqlx-core/src/mysql/types/chrono.rs +++ b/sqlx-core/src/mysql/types/chrono.rs @@ -1,48 +1,21 @@ -use crate::{HasSqlType, MySql, HasTypeMetadata, Encode, Decode}; -use chrono::{NaiveDateTime, Datelike, Timelike, NaiveTime, NaiveDate}; -use crate::mysql::types::MySqlTypeMetadata; -use crate::mysql::protocol::{FieldType, ParameterFlag}; -use crate::encode::IsNull; +use chrono::{NaiveDateTime, Timelike}; -use crate::io::Buf; - -use std::convert::{TryFrom, TryInto}; -use byteorder::{LittleEndian, ByteOrder}; -use chrono::format::Item::Literal; - -impl HasSqlType for MySql { - fn metadata() -> Self::TypeMetadata { - MySqlTypeMetadata { - field_type: FieldType::MYSQL_TYPE_DATETIME, - param_flag: ParameterFlag::empty() - } - } -} +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::mysql::MySql; impl Encode for NaiveDateTime { - fn encode(&self, buf: &mut Vec) -> IsNull { - // subtract the length byte - let length = Encode::::size_hint(self) - 1; - - buf.push(length as u8); - - encode_date(self.date(), buf); - - if length >= 7 { - buf.push(self.hour() as u8); - buf.push(self.minute() as u8); - buf.push(self.second() as u8); - } - - if length == 11 { - buf.extend_from_slice(&self.timestamp_subsec_micros().to_le_bytes()); - } - - IsNull::No + fn encode(&self, buf: &mut Vec) { + unimplemented!() } fn size_hint(&self) -> usize { - match (self.hour(), self.minute(), self.second(), self.timestamp_subsec_micros()) { + match ( + self.hour(), + self.minute(), + self.second(), + self.timestamp_subsec_micros(), + ) { // include the length byte (0, 0, 0, 0) => 5, (_, _, _, 0) => 8, @@ -52,126 +25,7 @@ impl Encode for NaiveDateTime { } impl Decode for NaiveDateTime { - fn decode(raw: Option<&[u8]>) -> Self { - let raw = raw.unwrap(); - let len = raw[0]; - assert_ne!(len, 0, "MySQL zero-dates are not supported"); - - let date = decode_date(&raw[1..]); - - if len >= 7 { - date.and_hms_micro( - raw[5] as u32, - raw[6] as u32, - raw[7] as u32, - if len == 11 { - LittleEndian::read_u32(&raw[8..]) - } else { - 0 - } - ) - } else { - date.and_hms(0, 0, 0) - } + fn decode(raw: &[u8]) -> Result { + unimplemented!() } } - -impl HasSqlType for MySql { - fn metadata() -> Self::TypeMetadata { - MySqlTypeMetadata { - field_type: FieldType::MYSQL_TYPE_DATE, - param_flag: ParameterFlag::empty() - } - } -} - -impl Encode for NaiveDate { - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.push(4); - encode_date(*self, buf); - IsNull::No - } - - fn size_hint(&self) -> usize { - 5 - } -} - -impl 
Decode for NaiveDate { - fn decode(raw: Option<&[u8]>) -> Self { - let raw = raw.unwrap(); - assert_eq!(raw[0], 4, "expected only 4 bytes"); - decode_date(&raw[1..]) - } -} - -fn encode_date(date: NaiveDate, buf: &mut Vec) { - // MySQL supports years from 1000 - 9999 - let year = u16::try_from(date.year()) - .unwrap_or_else(|_| panic!("NaiveDateTime out of range for Mysql: {}", date)); - - buf.extend_from_slice(&year.to_le_bytes()); - buf.push(date.month() as u8); - buf.push(date.day() as u8); -} - -fn decode_date(raw: &[u8]) -> NaiveDate { - NaiveDate::from_ymd( - LittleEndian::read_u16(raw) as i32, - raw[2] as u32, - raw[3] as u32 - ) -} - -#[test] -fn test_encode_date_time() { - let mut buf = Vec::new(); - - // test values from https://dev.mysql.com/doc/internals/en/binary-protocol-value.html - let date1: NaiveDateTime = "2010-10-17T19:27:30.000001".parse().unwrap(); - Encode::::encode(&date1, &mut buf); - assert_eq!(*buf, [11, 218, 7, 10, 17, 19, 27, 30, 1, 0, 0, 0]); - - buf.clear(); - - let date2: NaiveDateTime = "2010-10-17T19:27:30".parse().unwrap(); - Encode::::encode(&date2, &mut buf); - assert_eq!(*buf, [7, 218, 7, 10, 17, 19, 27, 30]); - - buf.clear(); - - let date3: NaiveDateTime = "2010-10-17T00:00:00".parse().unwrap(); - Encode::::encode(&date3, &mut buf); - assert_eq!(*buf, [4, 218, 7, 10, 17]); -} - -#[test] -fn test_decode_date_time() { - // test values from https://dev.mysql.com/doc/internals/en/binary-protocol-value.html - let buf = [11, 218, 7, 10, 17, 19, 27, 30, 1, 0, 0, 0]; - let date1 = >::decode(Some(&buf)); - assert_eq!(date1.to_string(), "2010-10-17 19:27:30.000001"); - - let buf = [7, 218, 7, 10, 17, 19, 27, 30]; - let date2 = >::decode(Some(&buf)); - assert_eq!(date2.to_string(), "2010-10-17 19:27:30"); - - let buf = [4, 218, 7, 10, 17]; - let date3 = >::decode(Some(&buf)); - assert_eq!(date3.to_string(), "2010-10-17 00:00:00"); -} - -#[test] -fn test_encode_date() { - let mut buf = Vec::new(); - let date: NaiveDate = "2010-10-17".parse().unwrap(); - Encode::::encode(&date, &mut buf); - assert_eq!(*buf, [4, 218, 7, 10, 17]); -} - -#[test] -fn test_decode_date() { - let buf = [4, 218, 7, 10, 17]; - let date = >::decode(Some(&buf)); - assert_eq!(date.to_string(), "2010-10-17"); -} diff --git a/sqlx-core/src/mysql/types/float.rs b/sqlx-core/src/mysql/types/float.rs new file mode 100644 index 00000000..e13dab92 --- /dev/null +++ b/sqlx-core/src/mysql/types/float.rs @@ -0,0 +1,47 @@ +use byteorder::LittleEndian; + +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::io::{Buf, BufMut}; +use crate::mysql::protocol::Type; +use crate::mysql::types::MySqlTypeMetadata; +use crate::mysql::MySql; +use crate::types::HasSqlType; + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::new(Type::FLOAT) + } +} + +impl Encode for f32 { + fn encode(&self, buf: &mut Vec) { + >::encode(&(self.to_bits() as i32), buf); + } +} + +impl Decode for f32 { + fn decode(mut buf: &[u8]) -> Result { + Ok(f32::from_bits(>::decode(buf)? as u32)) + } +} + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::new(Type::DOUBLE) + } +} + +impl Encode for f64 { + fn encode(&self, buf: &mut Vec) { + >::encode(&(self.to_bits() as i64), buf); + } +} + +impl Decode for f64 { + fn decode(mut buf: &[u8]) -> Result { + Ok(f64::from_bits(>::decode(buf)? 
as u64)) + } +} diff --git a/sqlx-core/src/mysql/types/int.rs b/sqlx-core/src/mysql/types/int.rs new file mode 100644 index 00000000..b8563bef --- /dev/null +++ b/sqlx-core/src/mysql/types/int.rs @@ -0,0 +1,87 @@ +use byteorder::LittleEndian; + +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::io::{Buf, BufMut}; +use crate::mysql::protocol::Type; +use crate::mysql::types::MySqlTypeMetadata; +use crate::mysql::MySql; +use crate::types::HasSqlType; + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::new(Type::TINY) + } +} + +impl Encode for i8 { + fn encode(&self, buf: &mut Vec) { + buf.push(*self as u8); + } +} + +impl Decode for i8 { + fn decode(mut buf: &[u8]) -> Result { + Ok(buf[0] as i8) + } +} + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::new(Type::SHORT) + } +} + +impl Encode for i16 { + fn encode(&self, buf: &mut Vec) { + buf.put_i16::(*self); + } +} + +impl Decode for i16 { + fn decode(mut buf: &[u8]) -> Result { + buf.get_i16::().map_err(Into::into) + } +} + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::new(Type::LONG) + } +} + +impl Encode for i32 { + fn encode(&self, buf: &mut Vec) { + buf.put_i32::(*self); + } +} + +impl Decode for i32 { + fn decode(mut buf: &[u8]) -> Result { + buf.get_i32::().map_err(Into::into) + } +} + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::new(Type::LONGLONG) + } +} + +impl Encode for i64 { + fn encode(&self, buf: &mut Vec) { + buf.put_u64::(*self as u64); + } +} + +impl Decode for i64 { + fn decode(mut buf: &[u8]) -> Result { + buf.get_u64::() + .map_err(Into::into) + .map(|val| val as i64) + } +} diff --git a/sqlx-core/src/mysql/types/mod.rs b/sqlx-core/src/mysql/types/mod.rs index f7c669f0..fd2a4ad5 100644 --- a/sqlx-core/src/mysql/types/mod.rs +++ b/sqlx-core/src/mysql/types/mod.rs @@ -1,30 +1,42 @@ -use super::protocol::{FieldType, ParameterFlag}; -use crate::{ - mysql::MySql, - types::{HasTypeMetadata, TypeMetadata}, -}; +use crate::mysql::protocol::Type; +use crate::mysql::MySql; +use crate::types::HasTypeMetadata; -pub mod binary; -pub mod boolean; -pub mod character; -pub mod numeric; +mod bool; +mod float; +mod int; +mod str; +mod uint; #[cfg(feature = "chrono")] -pub mod chrono; +mod chrono; -#[derive(Debug)] +#[derive(Default, Debug)] pub struct MySqlTypeMetadata { - pub field_type: FieldType, - pub param_flag: ParameterFlag, + pub(crate) r#type: Type, + pub(crate) flag: u8, // 0 or 0x80 for unsigned +} + +impl MySqlTypeMetadata { + pub(crate) fn new(r#type: Type) -> Self { + Self { r#type, flag: 0 } + } + + pub(crate) fn unsigned(r#type: Type) -> Self { + Self { r#type, flag: 0x80 } + } } impl HasTypeMetadata for MySql { type TypeMetadata = MySqlTypeMetadata; + + type TableId = Box; + type TypeId = u8; } -impl TypeMetadata for MySqlTypeMetadata { - fn type_id_eq(&self, other: &u8) -> bool { - &self.field_type.0 == other +impl PartialEq for MySqlTypeMetadata { + fn eq(&self, other: &u8) -> bool { + &self.r#type.0 == other } } diff --git a/sqlx-core/src/mysql/types/numeric.rs b/sqlx-core/src/mysql/types/numeric.rs deleted file mode 100644 index 24934360..00000000 --- a/sqlx-core/src/mysql/types/numeric.rs +++ /dev/null @@ -1,272 +0,0 @@ -use super::{MySql, MySqlTypeMetadata}; -use crate::{ - decode::Decode, - encode::{Encode, IsNull}, - mysql::protocol::{FieldType, ParameterFlag}, - 
types::HasSqlType, -}; -use byteorder::{ByteOrder, LittleEndian}; - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - field_type: FieldType::MYSQL_TYPE_TINY, - param_flag: ParameterFlag::empty(), - } - } -} - -impl Encode for i8 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.push(*self as u8); - - IsNull::No - } -} - -impl Decode for i8 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - buf.unwrap()[0] as i8 - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - field_type: FieldType(1), - param_flag: ParameterFlag::UNSIGNED, - } - } -} - -impl Encode for u8 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.push(*self); - - IsNull::No - } -} - -impl Decode for u8 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - buf.unwrap()[0] - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - // MYSQL_TYPE_LONG - field_type: FieldType(2), - param_flag: ParameterFlag::empty(), - } - } -} - -impl Encode for i16 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(&self.to_le_bytes()); - - IsNull::No - } -} - -impl Decode for i16 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - LittleEndian::read_i16(buf.unwrap()) - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - // MYSQL_TYPE_LONG - field_type: FieldType(2), - param_flag: ParameterFlag::UNSIGNED, - } - } -} - -impl Encode for u16 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(&self.to_le_bytes()); - - IsNull::No - } -} - -impl Decode for u16 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - LittleEndian::read_u16(buf.unwrap()) - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - // MYSQL_TYPE_LONG - field_type: FieldType(3), - param_flag: ParameterFlag::empty(), - } - } -} - -impl Encode for i32 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(&self.to_le_bytes()); - - IsNull::No - } -} - -impl Decode for i32 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - LittleEndian::read_i32(buf.unwrap()) - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - // MYSQL_TYPE_LONG - field_type: FieldType(3), - param_flag: ParameterFlag::UNSIGNED, - } - } -} - -impl Encode for u32 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(&self.to_le_bytes()); - - IsNull::No - } -} - -impl Decode for u32 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - LittleEndian::read_u32(buf.unwrap()) - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - // MYSQL_TYPE_LONGLONG - field_type: FieldType(8), - param_flag: ParameterFlag::empty(), - } - } -} - -impl Encode for i64 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(&self.to_le_bytes()); - - IsNull::No - } -} - -impl Decode for i64 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - LittleEndian::read_i64(buf.unwrap()) - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - // MYSQL_TYPE_LONGLONG - field_type: FieldType(8), - param_flag: ParameterFlag::UNSIGNED, - } - } -} - -impl Encode for u64 { - #[inline] - fn 
encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(&self.to_le_bytes()); - - IsNull::No - } -} - -impl Decode for u64 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - LittleEndian::read_u64(buf.unwrap()) - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - // MYSQL_TYPE_FLOAT - field_type: FieldType(4), - param_flag: ParameterFlag::empty(), - } - } -} - -impl Encode for f32 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - >::encode(&(self.to_bits() as i32), buf) - } -} - -impl Decode for f32 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - f32::from_bits(>::decode(buf) as u32) - } -} - -impl HasSqlType for MySql { - #[inline] - fn metadata() -> MySqlTypeMetadata { - MySqlTypeMetadata { - // MYSQL_TYPE_DOUBLE - field_type: FieldType(4), - param_flag: ParameterFlag::empty(), - } - } -} - -impl Encode for f64 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - >::encode(&(self.to_bits() as i64), buf) - } -} - -impl Decode for f64 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - f64::from_bits(>::decode(buf) as u64) - } -} diff --git a/sqlx-core/src/mysql/types/str.rs b/sqlx-core/src/mysql/types/str.rs new file mode 100644 index 00000000..dda70bc4 --- /dev/null +++ b/sqlx-core/src/mysql/types/str.rs @@ -0,0 +1,41 @@ +use std::str; + +use byteorder::LittleEndian; + +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::mysql::io::{BufExt, BufMutExt}; +use crate::mysql::protocol::Type; +use crate::mysql::types::MySqlTypeMetadata; +use crate::mysql::MySql; +use crate::types::HasSqlType; + +impl HasSqlType for MySql { + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::new(Type::VAR_STRING) + } +} + +impl Encode for str { + fn encode(&self, buf: &mut Vec) { + buf.put_str_lenenc::(self); + } +} + +impl HasSqlType for MySql { + fn metadata() -> MySqlTypeMetadata { + >::metadata() + } +} + +impl Encode for String { + fn encode(&self, buf: &mut Vec) { + >::encode(self.as_str(), buf) + } +} + +impl Decode for String { + fn decode(mut buf: &[u8]) -> Result { + Ok(buf.get_str_lenenc::()?.unwrap_or_default().to_owned()) + } +} diff --git a/sqlx-core/src/mysql/types/uint.rs b/sqlx-core/src/mysql/types/uint.rs new file mode 100644 index 00000000..8dddf926 --- /dev/null +++ b/sqlx-core/src/mysql/types/uint.rs @@ -0,0 +1,85 @@ +use byteorder::LittleEndian; + +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::io::{Buf, BufMut}; +use crate::mysql::protocol::Type; +use crate::mysql::types::MySqlTypeMetadata; +use crate::mysql::MySql; +use crate::types::HasSqlType; + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::unsigned(Type::TINY) + } +} + +impl Encode for u8 { + fn encode(&self, buf: &mut Vec) { + buf.push(*self); + } +} + +impl Decode for u8 { + fn decode(mut buf: &[u8]) -> Result { + Ok(buf[0]) + } +} + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::unsigned(Type::SHORT) + } +} + +impl Encode for u16 { + fn encode(&self, buf: &mut Vec) { + buf.put_u16::(*self); + } +} + +impl Decode for u16 { + fn decode(mut buf: &[u8]) -> Result { + buf.get_u16::().map_err(Into::into) + } +} + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::unsigned(Type::LONG) + } +} + +impl Encode for u32 { + fn encode(&self, buf: &mut Vec) { + buf.put_u32::(*self); + } +} + +impl Decode for 
u32 { + fn decode(mut buf: &[u8]) -> Result { + buf.get_u32::().map_err(Into::into) + } +} + +impl HasSqlType for MySql { + #[inline] + fn metadata() -> MySqlTypeMetadata { + MySqlTypeMetadata::unsigned(Type::LONGLONG) + } +} + +impl Encode for u64 { + fn encode(&self, buf: &mut Vec) { + buf.put_u64::(*self); + } +} + +impl Decode for u64 { + fn decode(mut buf: &[u8]) -> Result { + buf.get_u64::().map_err(Into::into) + } +} diff --git a/sqlx-core/src/params.rs b/sqlx-core/src/params.rs deleted file mode 100644 index 0a61c5eb..00000000 --- a/sqlx-core/src/params.rs +++ /dev/null @@ -1,140 +0,0 @@ -use crate::{backend::Backend, encode::Encode, types::HasSqlType}; - -pub trait QueryParameters: Default + Send { - type Backend: Backend; - - fn reserve(&mut self, binds: usize, bytes: usize); - - fn bind(&mut self, value: T) - where - Self::Backend: HasSqlType, - T: Encode; -} - -pub trait IntoQueryParameters -where - DB: Backend, -{ - fn into_params(self) -> DB::QueryParameters; -} - -#[allow(unused)] -macro_rules! impl_into_query_parameters { - ($B:ident: $( ($idx:tt) -> $T:ident );+;) => { - impl<$($T,)+> crate::params::IntoQueryParameters<$B> for ($($T,)+) - where - $($B: crate::types::HasSqlType<$T>,)+ - $($T: crate::encode::Encode<$B>,)+ - { - fn into_params(self) -> <$B as crate::backend::Backend>::QueryParameters { - use crate::params::QueryParameters; - - let mut params = <$B as crate::backend::Backend>::QueryParameters::default(); - - let binds = 0 $(+ { $idx; 1 } )+; - let bytes = 0 $(+ crate::encode::Encode::size_hint(&self.$idx))+; - - params.reserve(binds, bytes); - - $(crate::params::QueryParameters::bind(&mut params, self.$idx);)+ - - params - } - } - }; -} - -impl IntoQueryParameters for DB::QueryParameters -where - DB: Backend, -{ - #[inline] - fn into_params(self) -> DB::QueryParameters { - self - } -} - -#[allow(unused)] -macro_rules! 
impl_into_query_parameters_for_backend { - ($B:ident) => { - impl crate::params::IntoQueryParameters<$B> for () - { - #[inline] - fn into_params(self) -> <$B as crate::backend::Backend>::QueryParameters { - Default::default() - } - } - - impl_into_query_parameters!($B: - (0) -> T1; - ); - - impl_into_query_parameters!($B: - (0) -> T1; - (1) -> T2; - ); - - impl_into_query_parameters!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - ); - - impl_into_query_parameters!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - ); - - impl_into_query_parameters!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - ); - - impl_into_query_parameters!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - (5) -> T6; - ); - - impl_into_query_parameters!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - (5) -> T6; - (6) -> T7; - ); - - impl_into_query_parameters!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - (5) -> T6; - (6) -> T7; - (7) -> T8; - ); - - impl_into_query_parameters!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - (5) -> T6; - (6) -> T7; - (7) -> T8; - (8) -> T9; - ); - } -} diff --git a/sqlx-core/src/pool/executor.rs b/sqlx-core/src/pool/executor.rs index ed5d0fec..34ad034d 100644 --- a/sqlx-core/src/pool/executor.rs +++ b/sqlx-core/src/pool/executor.rs @@ -1,33 +1,34 @@ -use crate::{backend::Backend, describe::Describe, executor::Executor, params::IntoQueryParameters, pool::Pool, row::FromRow, Error}; -use futures_core::{future::BoxFuture, stream::BoxStream, Future}; +use futures_core::{future::BoxFuture, stream::BoxStream}; use futures_util::StreamExt; -use std::pin::Pin; + +use crate::{describe::Describe, executor::Executor, pool::Pool, Database}; impl Executor for Pool where - DB: Backend, + DB: Database, { - type Backend = DB; + type Database = DB; + + fn send<'e, 'q: 'e>(&'e mut self, commands: &'q str) -> BoxFuture<'e, crate::Result<()>> { + Box::pin(async move { <&Pool as Executor>::send(&mut &*self, commands).await }) + } fn execute<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: DB::QueryParameters, + args: DB::Arguments, ) -> BoxFuture<'e, crate::Result> { - Box::pin(async move { <&Pool as Executor>::execute(&mut &*self, query, params).await }) + Box::pin(async move { <&Pool as Executor>::execute(&mut &*self, query, args).await }) } - fn fetch<'e, 'q: 'e, T: 'e>( + fn fetch<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: DB::QueryParameters, - ) -> BoxStream<'e, crate::Result> - where - T: FromRow + Send + Unpin, - { + args: DB::Arguments, + ) -> BoxStream<'e, crate::Result> { Box::pin(async_stream::try_stream! { let mut self_ = &*self; - let mut s = <&Pool as Executor>::fetch(&mut self_, query, params); + let mut s = <&Pool as Executor>::fetch(&mut self_, query, args); while let Some(row) = s.next().await.transpose()? 
{ yield row; @@ -35,56 +36,50 @@ where }) } - fn fetch_optional<'e, 'q: 'e, T: 'e>( + fn fetch_optional<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: DB::QueryParameters, - ) -> BoxFuture<'e, crate::Result>> - where - T: FromRow + Send, - { - Box::pin(async move { - <&Pool as Executor>::fetch_optional(&mut &*self, query, params).await - }) + args: DB::Arguments, + ) -> BoxFuture<'e, crate::Result>> { + Box::pin( + async move { <&Pool as Executor>::fetch_optional(&mut &*self, query, args).await }, + ) } fn describe<'e, 'q: 'e>( &'e mut self, query: &'q str, - ) -> BoxFuture<'e, crate::Result>> { + ) -> BoxFuture<'e, crate::Result>> { Box::pin(async move { <&Pool as Executor>::describe(&mut &*self, query).await }) } - - fn send<'e, 'q: 'e>(&'e mut self, commands: &'q str) -> BoxFuture<'e, crate::Result<()>> { - Box::pin(async move { <&Pool as Executor>::send(&mut &*self, commands).await }) - } } impl Executor for &'_ Pool where - DB: Backend, + DB: Database, { - type Backend = DB; + type Database = DB; + + fn send<'e, 'q: 'e>(&'e mut self, commands: &'q str) -> BoxFuture<'e, crate::Result<()>> { + Box::pin(async move { self.acquire().await?.send(commands).await }) + } fn execute<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: DB::QueryParameters, + args: DB::Arguments, ) -> BoxFuture<'e, crate::Result> { - Box::pin(async move { self.acquire().await?.execute(query, params).await }) + Box::pin(async move { self.acquire().await?.execute(query, args).await }) } - fn fetch<'e, 'q: 'e, T: 'e>( + fn fetch<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: DB::QueryParameters, - ) -> BoxStream<'e, crate::Result> - where - T: FromRow + Send + Unpin, - { + args: DB::Arguments, + ) -> BoxStream<'e, crate::Result> { Box::pin(async_stream::try_stream! { let mut live = self.acquire().await?; - let mut s = live.fetch(query, params); + let mut s = live.fetch(query, args); while let Some(row) = s.next().await.transpose()? 
{ yield row; @@ -92,25 +87,18 @@ where }) } - fn fetch_optional<'e, 'q: 'e, T: 'e>( + fn fetch_optional<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: DB::QueryParameters, - ) -> BoxFuture<'e, crate::Result>> - where - T: FromRow + Send, - { - Box::pin(async move { self.acquire().await?.fetch_optional(query, params).await }) + args: DB::Arguments, + ) -> BoxFuture<'e, crate::Result>> { + Box::pin(async move { self.acquire().await?.fetch_optional(query, args).await }) } fn describe<'e, 'q: 'e>( &'e mut self, query: &'q str, - ) -> BoxFuture<'e, crate::Result>> { + ) -> BoxFuture<'e, crate::Result>> { Box::pin(async move { self.acquire().await?.describe(query).await }) } - - fn send<'e, 'q: 'e>(&'e mut self, commands: &'q str) -> BoxFuture<'e, crate::Result<()>> { - Box::pin(async move { self.acquire().await?.send(commands).await }) - } } diff --git a/sqlx-core/src/pool/inner.rs b/sqlx-core/src/pool/inner.rs index 425d39a2..ab7f2fbd 100644 --- a/sqlx-core/src/pool/inner.rs +++ b/sqlx-core/src/pool/inner.rs @@ -1,13 +1,10 @@ use std::{ cmp, - future::Future, - marker::PhantomData, - ops::{Deref, DerefMut}, sync::{ + atomic::{AtomicBool, AtomicU32, Ordering}, Arc, - atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering}, }, - time::{Duration, Instant}, + time::Instant, }; use async_std::{ @@ -15,20 +12,15 @@ use async_std::{ sync::{channel, Receiver, Sender}, task, }; -use futures_channel::oneshot; -use futures_core::{future::BoxFuture, stream::BoxStream}; -use futures_util::{ - future::{AbortHandle, AbortRegistration, FutureExt, TryFutureExt}, - stream::StreamExt, -}; +use futures_util::future::FutureExt; -use crate::{backend::Backend, Connection, error::Error, executor::Executor, params::IntoQueryParameters, row::{FromRow, Row}}; +use crate::{error::Error, Connection, Database}; -use super::{Raw, Idle, Options}; +use super::{Idle, Options, Raw}; pub(super) struct SharedPool where - DB: Backend, + DB: Database, { url: String, pool_rx: Receiver>, @@ -39,9 +31,13 @@ where impl SharedPool where - DB: Backend, DB::Connection: Connection + DB: Database, + DB::Connection: Connection, { - pub(super) async fn new_arc(url: &str, options: Options) -> crate::Result<(Arc, Sender>)> { + pub(super) async fn new_arc( + url: &str, + options: Options, + ) -> crate::Result<(Arc, Sender>)> { // TODO: Establish [min_idle] connections let (pool_tx, pool_rx) = channel(options.max_size as usize); @@ -86,7 +82,7 @@ where Some(None) => { log::warn!("was not able to close all connections"); break; - }, + } None => task::yield_now().await, } } @@ -119,7 +115,7 @@ where // get the time between the deadline and now and use that as our timeout let max_wait = deadline .checked_duration_since(Instant::now()) - .ok_or(Error::TimedOut)?; + .ok_or(Error::PoolTimedOut)?; // don't sleep forever let mut idle = match timeout(max_wait, self.pool_rx.recv()).await { @@ -172,8 +168,13 @@ where } // result here is `Result, TimeoutError>` - match timeout(deadline - Instant::now(), DB::connect(&self.url)).await { - Ok(Ok(inner)) => return Ok(Raw { inner, created: Instant::now() }), + match timeout(deadline - Instant::now(), DB::Connection::open(&self.url)).await { + Ok(Ok(inner)) => { + return Ok(Raw { + inner, + created: Instant::now(), + }) + } // error while connecting, this should definitely be logged Ok(Err(e)) => log::warn!("error establishing a connection: {}", e), // timed out @@ -182,17 +183,20 @@ where } self.size.fetch_sub(1, Ordering::AcqRel); - Err(Error::TimedOut) + Err(Error::PoolTimedOut) } } -impl Idle where 
DB::Connection: Connection { +impl Idle +where + DB::Connection: Connection, +{ async fn close(self) { let _ = self.raw.inner.close().await; } } -fn should_reap(idle: &Idle, options: &Options) -> bool { +fn should_reap(idle: &Idle, options: &Options) -> bool { // check if idle connection was within max lifetime (or not set) options.max_lifetime.map_or(true, |max| idle.raw.created.elapsed() < max) // and if connection wasn't idle too long (or not set) @@ -200,8 +204,9 @@ fn should_reap(idle: &Idle, options: &Options) -> bool { } /// if `max_lifetime` or `idle_timeout` is set, spawn a task that reaps senescent connections -fn conn_reaper(pool: &Arc>, pool_tx: &Sender>) - where DB::Connection: Connection +fn conn_reaper(pool: &Arc>, pool_tx: &Sender>) +where + DB::Connection: Connection, { if pool.options.max_lifetime.is_some() || pool.options.idle_timeout.is_some() { let pool = pool.clone(); diff --git a/sqlx-core/src/pool/mod.rs b/sqlx-core/src/pool/mod.rs index fc5806a1..dd69cf25 100644 --- a/sqlx-core/src/pool/mod.rs +++ b/sqlx-core/src/pool/mod.rs @@ -1,33 +1,19 @@ -use crate::{backend::Backend, error::Error, executor::Executor, params::IntoQueryParameters, row::FromRow, Row}; -use futures_channel::oneshot; -use futures_core::{future::BoxFuture, stream::BoxStream}; -use futures_util::{ - future::{AbortHandle, AbortRegistration, FutureExt, TryFutureExt}, - stream::StreamExt, -}; +//! **Pool** for SQLx database connections. + use std::{ - cmp, - future::Future, - marker::PhantomData, ops::{Deref, DerefMut}, - sync::{ - atomic::{AtomicBool, AtomicU32, AtomicUsize, Ordering}, - Arc, - }, + sync::Arc, time::{Duration, Instant}, }; -use async_std::{ - future::timeout, - sync::{channel, Receiver, Sender}, - task, -}; +use async_std::sync::Sender; +use futures_util::future::FutureExt; + +use crate::Database; use self::inner::SharedPool; - -use self::options::Options; - pub use self::options::Builder; +use self::options::Options; mod executor; mod inner; @@ -35,32 +21,32 @@ mod options; /// A pool of database connections. pub struct Pool - where - DB: Backend +where + DB: Database, { inner: Arc>, - pool_tx: Sender> + pool_tx: Sender>, } -/// A connection tied to a pool. When dropped it is released back to the pool. -pub struct Connection { +struct Connection { raw: Option>, pool_tx: Sender>, } -struct Raw { +struct Raw { inner: DB::Connection, created: Instant, } -struct Idle { +struct Idle { raw: Raw, since: Instant, } impl Pool where - DB: Backend, DB::Connection: crate::Connection + DB: Database, + DB::Connection: crate::Connection, { /// Creates a connection pool with the default configuration. pub async fn new(url: &str) -> crate::Result { @@ -81,16 +67,22 @@ where /// Retrieves a connection from the pool. /// /// Waits for at most the configured connection timeout before returning an error. - pub async fn acquire(&self) -> crate::Result> { - self.inner.acquire().await.map(|conn| Connection { raw: Some(conn), pool_tx: self.pool_tx.clone() }) + pub async fn acquire(&self) -> crate::Result> { + self.inner.acquire().await.map(|conn| Connection { + raw: Some(conn), + pool_tx: self.pool_tx.clone(), + }) } /// Attempts to retrieve a connection from the pool if there is one available. /// /// Returns `None` if there are no idle connections available in the pool. /// This method will not block waiting to establish a new connection. 
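As a usage sketch of this pool API, under the assumption that the crate re-exports Pool and the MySql database type, that Executor is in scope, and that a server is reachable at the given URL:

async fn example() -> crate::Result<()> {
    // Build a pool with the default configuration.
    let pool = Pool::<MySql>::new("mysql://root@localhost/test").await?;

    // Check out a connection; dropping `conn` returns it to the pool.
    let mut conn = pool.acquire().await?;
    conn.send("SET time_zone = '+00:00'").await?;

    // try_acquire never waits; it yields None when no idle connection is available.
    if let Some(conn2) = pool.try_acquire() {
        drop(conn2);
    }

    Ok(())
}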
- pub fn try_acquire(&self) -> Option> { - self.inner.try_acquire().map(|conn| Connection { raw: Some(conn), pool_tx: self.pool_tx.clone() }) + pub fn try_acquire(&self) -> Option> { + self.inner.try_acquire().map(|conn| Connection { + raw: Some(conn), + pool_tx: self.pool_tx.clone(), + }) } /// Ends the use of a connection pool. Prevents any new connections @@ -140,7 +132,7 @@ where /// Returns a new [Pool] tied to the same shared connection pool. impl Clone for Pool where - DB: Backend, + DB: Database, { fn clone(&self) -> Self { Self { @@ -152,7 +144,7 @@ where const DEREF_ERR: &str = "(bug) connection already released to pool"; -impl Deref for Connection { +impl Deref for Connection { type Target = DB::Connection; fn deref(&self) -> &Self::Target { @@ -160,13 +152,13 @@ impl Deref for Connection { } } -impl DerefMut for Connection { +impl DerefMut for Connection { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.raw.as_mut().expect(DEREF_ERR).inner } } -impl Drop for Connection { +impl Drop for Connection { fn drop(&mut self) { if let Some(conn) = self.raw.take() { self.pool_tx diff --git a/sqlx-core/src/pool/options.rs b/sqlx-core/src/pool/options.rs index c63fb294..d562475f 100644 --- a/sqlx-core/src/pool/options.rs +++ b/sqlx-core/src/pool/options.rs @@ -1,13 +1,13 @@ use std::{marker::PhantomData, time::Duration}; -use crate::Backend; +use crate::Database; use super::Pool; #[derive(Default)] pub struct Builder where - DB: Backend, + DB: Database, { phantom: PhantomData, options: Options, @@ -15,7 +15,7 @@ where impl Builder where - DB: Backend, + DB: Database, { pub fn new() -> Self { Self { diff --git a/sqlx-core/src/postgres/arguments.rs b/sqlx-core/src/postgres/arguments.rs new file mode 100644 index 00000000..b952d8dd --- /dev/null +++ b/sqlx-core/src/postgres/arguments.rs @@ -0,0 +1,60 @@ +use byteorder::{ByteOrder, NetworkEndian}; + +use crate::arguments::Arguments; +use crate::encode::{Encode, IsNull}; +use crate::io::BufMut; +use crate::types::HasSqlType; +use crate::Postgres; + +#[derive(Default)] +pub struct PgArguments { + // OIDs of the bind parameters + pub(super) types: Vec, + + // Write buffer for serializing bind values + pub(super) values: Vec, +} + +impl Arguments for PgArguments { + type Database = super::Postgres; + + fn len(&self) -> usize { + self.types.len() + } + + fn size(&self) -> usize { + self.values.len() + } + + fn reserve(&mut self, len: usize, size: usize) { + self.types.reserve(len); + self.values.reserve(size); + } + + fn add(&mut self, value: T) + where + Self::Database: HasSqlType, + T: Encode, + { + // TODO: When/if we receive types that do _not_ support BINARY, we need to check here + // TODO: There is no need to be explicit unless we are expecting mixed BINARY / TEXT + + self.types.push(>::metadata().oid); + + let pos = self.values.len(); + + self.values.put_i32::(0); + + let len = if let IsNull::No = value.encode_nullable(&mut self.values) { + (self.values.len() - pos - 4) as i32 + } else { + // Write a -1 for the len to indicate NULL + // TODO: It is illegal for [encode] to write any data + // if IsSql::No; fail a debug assertion + -1 + }; + + // Write-back the len to the beginning of this frame (not including the len of len) + NetworkEndian::write_i32(&mut self.values[pos..], len as i32); + } +} diff --git a/sqlx-core/src/postgres/backend.rs b/sqlx-core/src/postgres/backend.rs deleted file mode 100644 index be889551..00000000 --- a/sqlx-core/src/postgres/backend.rs +++ /dev/null @@ -1,48 +0,0 @@ -use 
futures_core::{future::BoxFuture, stream::BoxStream}; - -use crate::{ - backend::Backend, - describe::{Describe, ResultField}, - params::QueryParameters, - postgres::{protocol::DataRow, query::PostgresQueryParameters}, - url::Url, -}; -use crate::cache::StatementCache; - -use super::{Connection, RawConnection, Postgres}; - -impl Backend for Postgres { - type Connection = Connection; - - type QueryParameters = PostgresQueryParameters; - - type Row = DataRow; - - type TableIdent = u32; - - fn connect(url: &str) -> BoxFuture<'static, crate::Result> { - let url = Url::parse(url); - - Box::pin(async move { - let url = url?; - let address = url.resolve(5432); - let mut conn = RawConnection::new(address).await?; - - conn.startup( - url.username(), - url.password().unwrap_or_default(), - url.database(), - ) - .await?; - - Ok(Connection { - conn, - statements: StatementCache::new(), - next_id: 0 - }) - }) - } -} - -impl_from_row_for_backend!(Postgres, DataRow); -impl_into_query_parameters_for_backend!(Postgres); diff --git a/sqlx-core/src/postgres/connection.rs b/sqlx-core/src/postgres/connection.rs index 701f1187..392e1714 100644 --- a/sqlx-core/src/postgres/connection.rs +++ b/sqlx-core/src/postgres/connection.rs @@ -1,57 +1,42 @@ -use crate::{ - io::{Buf, BufStream}, - postgres::{ - error::PostgresDatabaseError, - protocol::{self, Decode, Encode, Message}, - query::PostgresQueryParameters, - }, -}; -use async_std::net::TcpStream; -use byteorder::NetworkEndian; -use std::{ - io, - net::{Shutdown, SocketAddr}, -}; +use std::convert::TryInto; -pub struct Connection { - stream: BufStream, +use async_std::net::{Shutdown, TcpStream}; +use byteorder::NetworkEndian; +use futures_core::future::BoxFuture; + +use crate::cache::StatementCache; +use crate::connection::Connection; +use crate::io::{Buf, BufStream}; +use crate::postgres::protocol::{self, Decode, Encode, Message, StatementId}; +use crate::postgres::PgError; +use crate::url::Url; + +pub struct PgConnection { + pub(super) stream: BufStream, + + // Map of query to statement id + pub(super) statement_cache: StatementCache, + + // Next statement id + pub(super) next_statement_id: u32, // Process ID of the Backend process_id: u32, // Backend-unique key to use to send a cancel query message to the server secret_key: u32, + + // Is there a query in progress; are we ready to continue + pub(super) ready: bool, } -// [x] 52.2.1. Start-up -// [ ] 52.2.2. Simple Query -// [ ] 52.2.3. Extended Query -// [ ] 52.2.4. Function Call -// [ ] 52.2.5. COPY Operations -// [ ] 52.2.6. Asynchronous Operations -// [ ] 52.2.7. Canceling Requests in Progress -// [x] 52.2.8. Termination -// [ ] 52.2.9. SSL Session Encryption -// [ ] 52.2.10. 
GSSAPI Session Encryption +impl PgConnection { + // https://www.postgresql.org/docs/12/protocol-flow.html#id-1.10.5.7.3 + async fn startup(&mut self, url: Url) -> crate::Result<()> { + // Defaults to postgres@.../postgres + let username = url.username().unwrap_or("postgres"); + let database = url.database().unwrap_or("postgres"); -impl Connection { - pub(super) async fn new(address: SocketAddr) -> crate::Result { - let stream = TcpStream::connect(&address).await?; - - Ok(Self { - stream: BufStream::new(stream), - process_id: 0, - secret_key: 0, - }) - } - - // https://www.postgresql.org/docs/devel/protocol-flow.html#id-1.10.5.7.3 - pub(super) async fn startup( - &mut self, - username: &str, - password: &str, - database: &str, - ) -> crate::Result<()> { // See this doc for more runtime parameters // https://www.postgresql.org/docs/12/runtime-config-client.html let params = &[ @@ -82,16 +67,18 @@ impl Connection { // Do nothing. No password is needed to continue. } - protocol::Authentication::CleartextPassword => { - protocol::PasswordMessage::Cleartext(password) - .encode(self.stream.buffer_mut()); + protocol::Authentication::ClearTextPassword => { + protocol::PasswordMessage::ClearText( + url.password().unwrap_or_default(), + ) + .encode(self.stream.buffer_mut()); self.stream.flush().await?; } protocol::Authentication::Md5Password { salt } => { protocol::PasswordMessage::Md5 { - password, + password: url.password().unwrap_or_default(), user: username, salt, } @@ -111,8 +98,8 @@ impl Connection { } Message::BackendKeyData(body) => { - self.process_id = body.process_id(); - self.secret_key = body.secret_key(); + self.process_id = body.process_id; + self.secret_key = body.secret_key; } Message::ReadyForQuery(_) => { @@ -130,7 +117,7 @@ impl Connection { } // https://www.postgresql.org/docs/devel/protocol-flow.html#id-1.10.5.7.10 - pub(super) async fn terminate(mut self) -> crate::Result<()> { + async fn terminate(mut self) -> crate::Result<()> { protocol::Terminate.encode(self.stream.buffer_mut()); self.stream.flush().await?; @@ -139,98 +126,6 @@ impl Connection { Ok(()) } - pub(super) fn buffer_parse(&mut self, statement: &str, query: &str, params: &PostgresQueryParameters) { - protocol::Parse { - statement, - query, - param_types: &*params.types, - } - .encode(self.stream.buffer_mut()); - } - - pub(super) async fn try_parse(&mut self, statement: &str, query: &str, params: &PostgresQueryParameters) -> crate::Result<()> { - self.buffer_parse(statement, query, params); - self.sync().await?; - while let Some(_) = self.step().await? 
{} - Ok(()) - } - - pub(super) fn describe(&mut self, statement: &str) { - protocol::Describe { - kind: protocol::DescribeKind::PreparedStatement, - name: statement, - } - .encode(self.stream.buffer_mut()) - } - - pub(super) fn bind(&mut self, portal: &str, statement: &str, params: &PostgresQueryParameters) { - protocol::Bind { - portal, - statement, - formats: &[1], // [BINARY] - // TODO: Early error if there is more than i16 - values_len: params.types.len() as i16, - values: &*params.buf, - result_formats: &[1], // [BINARY] - } - .encode(self.stream.buffer_mut()); - } - - pub(super) fn execute(&mut self, portal: &str, limit: i32) { - protocol::Execute { portal, limit }.encode(self.stream.buffer_mut()); - } - - pub(super) async fn send(&mut self, commands: &str) -> crate::Result<()> { - protocol::Query(commands).encode(self.stream.buffer_mut()); - self.sync().await - } - - pub(super) async fn sync(&mut self) -> crate::Result<()> { - protocol::Sync.encode(self.stream.buffer_mut()); - - self.stream.flush().await?; - - Ok(()) - } - - pub(super) async fn step(&mut self) -> crate::Result> { - while let Some(message) = self.receive().await? { - match message { - Message::BindComplete - | Message::ParseComplete - | Message::PortalSuspended - | Message::CloseComplete => {} - - Message::CommandComplete(body) => { - return Ok(Some(Step::Command(body.affected_rows()))); - } - - Message::DataRow(body) => { - return Ok(Some(Step::Row(body))); - } - - Message::ReadyForQuery(_) => { - return Ok(None); - } - - Message::ParameterDescription(desc) => { - return Ok(Some(Step::ParamDesc(desc))); - } - - Message::RowDescription(desc) => { - return Ok(Some(Step::RowDesc(desc))); - } - - message => { - return Err(protocol_err!("received unexpected message: {:?}", message).into()); - } - } - } - - // Connection was (unexpectedly) closed - Err(io::Error::from(io::ErrorKind::UnexpectedEof).into()) - } - // Wait and return the next message to be received from Postgres. 
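For context on what `receive` below has to parse: after startup, every backend message is framed as a one-byte type code followed by a big-endian `i32` length that counts itself but not the type byte. A minimal, standalone sketch of that framing; `split_frame` and its buffer handling are illustrative, not the `BufStream`-based code in this patch, and it assumes the same `byteorder` crate already used above:

```rust
use byteorder::{ByteOrder, NetworkEndian};

/// Split one backend message off the front of `buf`, if a full frame has arrived.
/// Returns the type byte and the message body (everything after the length field).
fn split_frame(buf: &[u8]) -> Option<(u8, &[u8])> {
    // A 1-byte type and a 4-byte length must be present before anything can be decoded
    if buf.len() < 5 {
        return None;
    }

    let type_ = buf[0];

    // The length counts itself (4 bytes) but not the type byte
    let len = NetworkEndian::read_i32(&buf[1..5]) as usize;

    if buf.len() < 1 + len {
        return None; // frame not fully buffered yet
    }

    Some((type_, &buf[5..1 + len]))
}

fn main() {
    // ReadyForQuery ('Z') with status 'I' (idle): length 5 covers the length field plus one status byte
    let raw = b"Z\x00\x00\x00\x05I";
    assert_eq!(split_frame(raw), Some((b'Z', &b"I"[..])));
}
```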
pub(super) async fn receive(&mut self) -> crate::Result> { loop { @@ -280,12 +175,12 @@ impl Connection { } Message::Response(body) => { - if body.severity().is_error() { + if body.severity.is_error() { // This is an error, stop the world and bubble as an error - return Err(PostgresDatabaseError(body).into()); + return Err(PgError(body).into()); } else { // This is a _warning_ - // TODO: Do we *want* to do anything with these + // TODO: Log the warning } } @@ -297,10 +192,36 @@ impl Connection { } } -#[derive(Debug)] -pub(super) enum Step { - Command(u64), - Row(protocol::DataRow), - ParamDesc(Box), - RowDesc(Box), +impl PgConnection { + async fn open(url: crate::Result) -> crate::Result { + let url = url?; + let stream = TcpStream::connect((url.host(), url.port(5432))).await?; + let mut self_ = Self { + stream: BufStream::new(stream), + process_id: 0, + secret_key: 0, + // Important to start at 1 as 0 means "unnamed" in our protocol + next_statement_id: 1, + statement_cache: StatementCache::new(), + ready: true, + }; + + self_.startup(url).await?; + + Ok(self_) + } +} + +impl Connection for PgConnection { + fn open(url: T) -> BoxFuture<'static, crate::Result> + where + T: TryInto, + Self: Sized, + { + Box::pin(PgConnection::open(url.try_into())) + } + + fn close(self) -> BoxFuture<'static, crate::Result<()>> { + Box::pin(self.terminate()) + } } diff --git a/sqlx-core/src/postgres/database.rs b/sqlx-core/src/postgres/database.rs new file mode 100644 index 00000000..f78d6097 --- /dev/null +++ b/sqlx-core/src/postgres/database.rs @@ -0,0 +1,12 @@ +use crate::database::Database; + +/// **Postgres** database driver. +pub struct Postgres; + +impl Database for Postgres { + type Connection = super::PgConnection; + + type Arguments = super::PgArguments; + + type Row = super::PgRow; +} diff --git a/sqlx-core/src/postgres/error.rs b/sqlx-core/src/postgres/error.rs index ef65c80d..e2147991 100644 --- a/sqlx-core/src/postgres/error.rs +++ b/sqlx-core/src/postgres/error.rs @@ -1,18 +1,53 @@ -use super::protocol::Response; -use crate::error::DatabaseError; +use crate::postgres::protocol::Response; use std::fmt::{self, Debug, Display}; -#[derive(Debug)] -pub struct PostgresDatabaseError(pub(super) Box); +pub struct PgError(pub(super) Box); -impl DatabaseError for PostgresDatabaseError { +impl crate::error::DatabaseError for PgError { fn message(&self) -> &str { - self.0.message() + &self.0.message + } + + fn details(&self) -> Option<&str> { + self.0.detail.as_ref().map(|s| &**s) + } + + fn hint(&self) -> Option<&str> { + self.0.hint.as_ref().map(|s| &**s) + } + + fn table_name(&self) -> Option<&str> { + self.0.table.as_ref().map(|s| &**s) + } + + fn column_name(&self) -> Option<&str> { + self.0.column.as_ref().map(|s| &**s) + } + + fn constraint_name(&self) -> Option<&str> { + self.0.constraint.as_ref().map(|s| &**s) } } -impl Display for PostgresDatabaseError { +impl Debug for PgError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use crate::error::DatabaseError; + + f.debug_struct("DatabaseError") + .field("message", &self.message()) + .field("details", &self.details()) + .field("hint", &self.hint()) + .field("table_name", &self.table_name()) + .field("column_name", &self.column_name()) + .field("constraint_name", &self.constraint_name()) + .finish() + } +} + +impl Display for PgError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use crate::error::DatabaseError; + f.pad(self.message()) } } diff --git a/sqlx-core/src/postgres/executor.rs b/sqlx-core/src/postgres/executor.rs 
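Before the executor diff that follows: its `write_prepare` revolves around a cache-or-allocate step for prepared statements. A standalone sketch of just that step, using a simplified `PreparedQueries` type as an assumption rather than the crate's `StatementCache` (which also tracks column metadata):

```rust
use std::collections::HashMap;

/// Queries seen before reuse their statement id; new queries get the next id,
/// and the caller is told it must send a Parse message for them.
struct PreparedQueries {
    by_query: HashMap<String, u32>,
    next_id: u32,
}

impl PreparedQueries {
    fn new() -> Self {
        // Statement id 0 means "the unnamed statement", so generated ids start at 1
        PreparedQueries { by_query: HashMap::new(), next_id: 1 }
    }

    /// Returns (statement id, whether a Parse must be written for it).
    fn get_or_allocate(&mut self, query: &str) -> (u32, bool) {
        if let Some(&id) = self.by_query.get(query) {
            return (id, false);
        }

        let id = self.next_id;
        self.next_id += 1;
        self.by_query.insert(query.to_owned(), id);

        (id, true)
    }
}

fn main() {
    let mut cache = PreparedQueries::new();
    assert_eq!(cache.get_or_allocate("SELECT 1"), (1, true));
    assert_eq!(cache.get_or_allocate("SELECT 1"), (1, false));
    assert_eq!(cache.get_or_allocate("SELECT 2"), (2, true));
}
```

The `write_prepare` added below follows the same shape, except the allocation path also encodes a `Parse` message into the send buffer before caching the new id.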
index bb05b7d7..f1dcc80b 100644 --- a/sqlx-core/src/postgres/executor.rs +++ b/sqlx-core/src/postgres/executor.rs @@ -1,160 +1,334 @@ -use super::{connection::Step, Connection, Postgres}; -use crate::{backend::Backend, describe::{Describe, ResultField}, executor::Executor, params::{IntoQueryParameters, QueryParameters}, row::FromRow, url::Url, Error}; -use futures_core::{future::BoxFuture, stream::BoxStream, Future}; -use crate::postgres::query::PostgresQueryParameters; -use std::pin::Pin; +use std::collections::HashMap; +use std::io; +use std::sync::Arc; -impl Connection { - async fn prepare_cached(&mut self, query: &str, params: &PostgresQueryParameters) -> crate::Result { - fn get_stmt_name(id: u64) -> String { - format!("sqlx_postgres_stmt_{}", id) +use futures_core::future::BoxFuture; +use futures_core::stream::BoxStream; + +use crate::describe::{Column, Describe}; +use crate::postgres::protocol::{self, Encode, Message, StatementId}; +use crate::postgres::types::TypeFormat; +use crate::postgres::{PgArguments, PgRow, Postgres}; + +#[derive(Debug)] +enum Step { + Command(u64), + NoData, + Row(protocol::DataRow), + ParamDesc(Box), + RowDesc(Box), +} + +impl super::PgConnection { + fn write_prepare(&mut self, query: &str, args: &PgArguments) -> StatementId { + if let Some(&id) = self.statement_cache.get(query) { + id + } else { + let id = StatementId(self.next_statement_id); + self.next_statement_id += 1; + + protocol::Parse { + statement: id, + query, + param_types: &*args.types, + } + .encode(self.stream.buffer_mut()); + + self.statement_cache.put(query.to_owned(), id); + + id + } + } + + fn write_describe(&mut self, d: protocol::Describe) { + d.encode(self.stream.buffer_mut()) + } + + fn write_bind(&mut self, portal: &str, statement: StatementId, args: &PgArguments) { + protocol::Bind { + portal, + statement, + formats: &[TypeFormat::Binary], + // TODO: Early error if there is more than i16 + values_len: args.types.len() as i16, + values: &*args.values, + result_formats: &[TypeFormat::Binary], + } + .encode(self.stream.buffer_mut()); + } + + fn write_execute(&mut self, portal: &str, limit: i32) { + protocol::Execute { portal, limit }.encode(self.stream.buffer_mut()); + } + + fn write_sync(&mut self) { + protocol::Sync.encode(self.stream.buffer_mut()); + } + + async fn wait_until_ready(&mut self) -> crate::Result<()> { + if !self.ready { + while let Some(message) = self.receive().await? { + match message { + Message::ReadyForQuery(_) => { + self.ready = true; + break; + } + + _ => { + // Drain the stream + } + } + } } - let conn = &mut self.conn; - let next_id = &mut self.next_id; + Ok(()) + } - self.statements.map_or_compute( - query, - |&id| get_stmt_name(id), - || async { - let stmt_id = *next_id; - let stmt_name = get_stmt_name(stmt_id); - conn.try_parse(&stmt_name, query, params).await?; - *next_id += 1; - Ok((stmt_id, stmt_name)) - }).await + async fn step(&mut self) -> crate::Result> { + while let Some(message) = self.receive().await? 
{ + match message { + Message::BindComplete + | Message::ParseComplete + | Message::PortalSuspended + | Message::CloseComplete => {} + + Message::CommandComplete(body) => { + return Ok(Some(Step::Command(body.affected_rows))); + } + + Message::NoData => { + return Ok(Some(Step::NoData)); + } + + Message::DataRow(body) => { + return Ok(Some(Step::Row(body))); + } + + Message::ReadyForQuery(_) => { + self.ready = true; + + return Ok(None); + } + + Message::ParameterDescription(desc) => { + return Ok(Some(Step::ParamDesc(desc))); + } + + Message::RowDescription(desc) => { + return Ok(Some(Step::RowDesc(desc))); + } + + message => { + return Err(protocol_err!("received unexpected message: {:?}", message).into()); + } + } + } + + // Connection was (unexpectedly) closed + Err(io::Error::from(io::ErrorKind::UnexpectedEof).into()) } } -impl Executor for Connection { - type Backend = Postgres; +impl super::PgConnection { + async fn send<'e, 'q: 'e>(&'e mut self, command: &'q str) -> crate::Result<()> { + protocol::Query(command).encode(self.stream.buffer_mut()); + + self.wait_until_ready().await?; + + self.stream.flush().await?; + self.ready = false; + + while let Some(_step) = self.step().await? { + // Drain the stream until ReadyForQuery + } + + Ok(()) + } + + async fn execute<'e, 'q: 'e>( + &'e mut self, + query: &'q str, + args: PgArguments, + ) -> crate::Result { + let statement = self.write_prepare(query, &args); + + self.write_bind("", statement, &args); + self.write_execute("", 1); + self.write_sync(); + + self.wait_until_ready().await?; + + self.stream.flush().await?; + self.ready = false; + + let mut affected = 0; + + while let Some(step) = self.step().await? { + if let Step::Command(cnt) = step { + affected = cnt; + } + } + + Ok(affected) + } + + // Initial part of [fetch]; write message to stream + fn write_fetch(&mut self, query: &str, args: &PgArguments) -> StatementId { + let statement = self.write_prepare(query, &args); + + self.write_bind("", statement, &args); + + if !self.statement_cache.has_columns(statement) { + self.write_describe(protocol::Describe::Portal("")); + } + + self.write_execute("", 0); + self.write_sync(); + + statement + } + + async fn get_columns( + &mut self, + statement: StatementId, + ) -> crate::Result, usize>>> { + if !self.statement_cache.has_columns(statement) { + let desc: Option<_> = 'outer: loop { + while let Some(step) = self.step().await? { + match step { + Step::RowDesc(desc) => break 'outer Some(desc), + + Step::NoData => break 'outer None, + + _ => {} + } + } + + unreachable!(); + }; + + let mut columns = HashMap::new(); + + if let Some(desc) = desc { + columns.reserve(desc.fields.len()); + + for (index, field) in desc.fields.iter().enumerate() { + if let Some(name) = &field.name { + columns.insert(name.clone(), index); + } + } + } + + self.statement_cache.put_columns(statement, columns); + } + + Ok(self.statement_cache.get_columns(statement)) + } + + fn fetch<'e, 'q: 'e>( + &'e mut self, + query: &'q str, + args: PgArguments, + ) -> BoxStream<'e, crate::Result> { + Box::pin(async_stream::try_stream! { + let statement = self.write_fetch(query, &args); + + self.wait_until_ready().await?; + + self.stream.flush().await?; + self.ready = false; + + let columns = self.get_columns(statement).await?; + + while let Some(step) = self.step().await? 
{ + if let Step::Row(data) = step { + yield PgRow { data, columns: Arc::clone(&columns) }; + } + } + + // No more rows in the result set + }) + } + + async fn describe<'e, 'q: 'e>( + &'e mut self, + query: &'q str, + ) -> crate::Result> { + let statement = self.write_prepare(query, &Default::default()); + + self.write_describe(protocol::Describe::Statement(statement)); + self.write_sync(); + + self.stream.flush().await?; + self.wait_until_ready().await?; + + let params = match self.step().await? { + Some(Step::ParamDesc(desc)) => desc, + + step => { + return Err( + protocol_err!("expected ParameterDescription; received {:?}", step).into(), + ); + } + }; + + let result = match self.step().await? { + Some(Step::RowDesc(desc)) => desc, + + step => { + return Err(protocol_err!("expected RowDescription; received {:?}", step).into()); + } + }; + + Ok(Describe { + param_types: params.ids, + result_columns: result + .fields + .into_vec() + .into_iter() + // TODO: Should [Column] just wrap [protocol::Field] ? + .map(|field| Column { + name: field.name, + table_id: field.table_id, + type_id: field.type_id, + + _non_exhaustive: (), + }) + .collect::>() + .into_boxed_slice(), + + _non_exhaustive: (), + }) + } +} + +impl crate::Executor for super::PgConnection { + type Database = super::Postgres; + + fn send<'e, 'q: 'e>(&'e mut self, query: &'q str) -> BoxFuture<'e, crate::Result<()>> { + Box::pin(self.send(query)) + } fn execute<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: PostgresQueryParameters, + args: PgArguments, ) -> BoxFuture<'e, crate::Result> { - Box::pin(async move { - let stmt = self.prepare_cached(query, ¶ms).await?; - - self.conn.bind("", &stmt, ¶ms); - self.conn.execute("", 1); - self.conn.sync().await?; - - let mut affected = 0; - - while let Some(step) = self.conn.step().await? { - if let Step::Command(cnt) = step { - affected = cnt; - } - } - - Ok(affected) - }) + Box::pin(self.execute(query, args)) } - fn fetch<'e, 'q: 'e, T: 'e>( + fn fetch<'e, 'q: 'e>( &'e mut self, query: &'q str, - params: PostgresQueryParameters, - ) -> BoxStream<'e, crate::Result> - where - T: FromRow + Send + Unpin, - { - Box::pin(async_stream::try_stream! { - let stmt = self.prepare_cached(query, ¶ms).await?; - self.conn.bind("", &stmt, ¶ms); - self.conn.execute("", 0); - self.conn.sync().await?; - - while let Some(step) = self.conn.step().await? { - if let Step::Row(row) = step { - yield FromRow::from_row(row); - } - } - }) - } - - fn fetch_optional<'e, 'q: 'e, T: 'e>( - &'e mut self, - query: &'q str, - params: PostgresQueryParameters, - ) -> BoxFuture<'e, crate::Result>> - where - T: FromRow + Send, - { - Box::pin(async move { - let stmt = self.prepare_cached(query, ¶ms).await?; - self.conn.bind("", &stmt, ¶ms); - self.conn.execute("", 2); - self.conn.sync().await?; - - let mut row: Option<_> = None; - - while let Some(step) = self.conn.step().await? { - if let Step::Row(r) = step { - if row.is_some() { - return Err(crate::Error::FoundMoreThanOne); - } - - row = Some(FromRow::from_row(r)); - } - } - - Ok(row) - }) + args: PgArguments, + ) -> BoxStream<'e, crate::Result> { + self.fetch(query, args) } fn describe<'e, 'q: 'e>( &'e mut self, query: &'q str, - ) -> BoxFuture<'e, crate::Result>> { - Box::pin(async move { - let stmt = self.prepare_cached(query, &PostgresQueryParameters::default()).await?; - self.conn.describe(&stmt); - self.conn.sync().await?; - - let param_desc = loop { - let step = self - .conn.step() - .await? 
- .ok_or(protocol_err!("did not receive ParameterDescription")); - - if let Step::ParamDesc(desc) = step? { - break desc; - } - }; - - let row_desc = loop { - let step = self - .conn.step() - .await? - .ok_or(protocol_err!("did not receive RowDescription")); - - if let Step::RowDesc(desc) = step? { - break desc; - } - }; - - Ok(Describe { - param_types: param_desc.ids.into_vec(), - result_fields: row_desc - .fields - .into_vec() - .into_iter() - .map(|field| ResultField { - name: if field.name == "?column?" { None } else { Some(field.name) }, - table_id: if field.table_id > 0 { Some(field.table_id) } else { None }, - type_id: field.type_id, - _backcompat: (), - }) - .collect(), - _backcompat: (), - }) - }) - } - - fn send<'e, 'q: 'e>(&'e mut self, commands: &'q str) -> BoxFuture<'e, crate::Result<()>> { - Box::pin(self.conn.send(commands)) + ) -> BoxFuture<'e, crate::Result>> { + Box::pin(self.describe(query)) } } diff --git a/sqlx-core/src/postgres/mod.rs b/sqlx-core/src/postgres/mod.rs index c5d96139..2ff1cee7 100644 --- a/sqlx-core/src/postgres/mod.rs +++ b/sqlx-core/src/postgres/mod.rs @@ -1,45 +1,23 @@ -use crate::postgres::connection::Connection as RawConnection; -use crate::cache::StatementCache; -use crate::{Error, Backend}; -use futures_core::Future; -use futures_core::future::BoxFuture; -use std::net::SocketAddr; -use std::pin::Pin; +//! **Postgres** database and connection types. -mod backend; +mod arguments; mod connection; +mod database; mod error; mod executor; -mod query; -mod row; - -#[cfg(not(feature = "unstable"))] mod protocol; +mod row; +mod types; -#[cfg(feature = "unstable")] -pub mod protocol; +pub use database::Postgres; -pub mod types; +pub use arguments::PgArguments; -/// The Postgres backend implementation. -pub enum Postgres {} +pub use connection::PgConnection; -impl Postgres { - /// Alias for [Backend::connect()](../trait.Backend.html#method.connect). - pub async fn connect(url: &str) -> crate::Result { - ::connect(url).await - } -} +pub use error::PgError; -/// A connection to a Postgres database. -pub struct Connection { - conn: RawConnection, - statements: StatementCache, - next_id: u64, -} +pub use row::PgRow; -impl crate::Connection for Connection { - fn close(self) -> BoxFuture<'static, crate::Result<()>> { - Box::pin(self.conn.terminate()) - } -} +/// An alias for [`Pool`], specialized for **Postgres**. +pub type PgPool = super::Pool; diff --git a/sqlx-core/src/postgres/protocol/authentication.rs b/sqlx-core/src/postgres/protocol/authentication.rs index faf3a3f8..d6eec80c 100644 --- a/sqlx-core/src/postgres/protocol/authentication.rs +++ b/sqlx-core/src/postgres/protocol/authentication.rs @@ -1,5 +1,5 @@ -use super::Decode; use crate::io::Buf; +use crate::postgres::protocol::Decode; use byteorder::NetworkEndian; use std::io; @@ -12,7 +12,7 @@ pub enum Authentication { KerberosV5, /// A clear-text password is required. - CleartextPassword, + ClearTextPassword, /// An MD5-encrypted password is required. 
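The `Md5Password` variant of `PasswordMessage` answers this request. Per the Postgres documentation, the response string is `"md5" + md5(md5(password + username) + salt)`, with each digest rendered as lowercase hex. A minimal sketch of that derivation only; `md5_hex` and `md5_password_response` are hypothetical names used here instead of committing to a specific digest-crate API:

```rust
// Hypothetical helper, assumed here: lowercase hex MD5 digest of `data`.
fn md5_hex(data: &[u8]) -> String {
    unimplemented!("plug in a digest implementation of your choice")
}

/// Build the value carried by an MD5 PasswordMessage.
fn md5_password_response(password: &str, user: &str, salt: [u8; 4]) -> String {
    // Inner digest: md5(password + username), as hex
    let inner = md5_hex(format!("{}{}", password, user).as_bytes());

    // Outer digest: md5(inner_hex + salt), as hex, prefixed with the literal "md5"
    let mut outer_input = inner.into_bytes();
    outer_input.extend_from_slice(&salt);

    format!("md5{}", md5_hex(&outer_input))
}
```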
Md5Password { salt: [u8; 4] }, @@ -49,7 +49,7 @@ impl Decode for Authentication { 2 => Authentication::KerberosV5, - 3 => Authentication::CleartextPassword, + 3 => Authentication::ClearTextPassword, 5 => { let mut salt = [0_u8; 4]; diff --git a/sqlx-core/src/postgres/protocol/backend_key_data.rs b/sqlx-core/src/postgres/protocol/backend_key_data.rs index 67ed8be6..6faa6886 100644 --- a/sqlx-core/src/postgres/protocol/backend_key_data.rs +++ b/sqlx-core/src/postgres/protocol/backend_key_data.rs @@ -5,23 +5,11 @@ use std::io; #[derive(Debug)] pub struct BackendKeyData { - /// The process ID of this backend. - process_id: u32, + /// The process ID of this database. + pub process_id: u32, - /// The secret key of this backend. - secret_key: u32, -} - -impl BackendKeyData { - #[inline] - pub fn process_id(&self) -> u32 { - self.process_id - } - - #[inline] - pub fn secret_key(&self) -> u32 { - self.secret_key - } + /// The secret key of this database. + pub secret_key: u32, } impl Decode for BackendKeyData { @@ -46,7 +34,7 @@ mod tests { fn it_decodes_backend_key_data() { let message = BackendKeyData::decode(BACKEND_KEY_DATA).unwrap(); - assert_eq!(message.process_id(), 10182); - assert_eq!(message.secret_key(), 2303903019); + assert_eq!(message.process_id, 10182); + assert_eq!(message.secret_key, 2303903019); } } diff --git a/sqlx-core/src/postgres/protocol/bind.rs b/sqlx-core/src/postgres/protocol/bind.rs index b48ad9c8..b57e129e 100644 --- a/sqlx-core/src/postgres/protocol/bind.rs +++ b/sqlx-core/src/postgres/protocol/bind.rs @@ -1,20 +1,23 @@ use super::Encode; use crate::io::BufMut; +use crate::postgres::protocol::StatementId; +use crate::postgres::types::TypeFormat; use byteorder::{ByteOrder, NetworkEndian}; +use std::num::NonZeroU32; pub struct Bind<'a> { /// The name of the destination portal (an empty string selects the unnamed portal). pub portal: &'a str, - /// The name of the source prepared statement (an empty string selects the unnamed prepared statement). - pub statement: &'a str, + /// The id of the source prepared statement (0 selects the unnamed statement). + pub statement: StatementId, /// The parameter format codes. Each must presently be zero (text) or one (binary). /// /// There can be zero to indicate that there are no parameters or that the parameters all use the /// default format (text); or one, in which case the specified format code is applied to all /// parameters; or it can equal the actual number of parameters. - pub formats: &'a [i16], + pub formats: &'a [TypeFormat], pub values_len: i16, pub values: &'a [u8], @@ -25,7 +28,7 @@ pub struct Bind<'a> { /// result columns should all use the default format (text); or one, in which /// case the specified format code is applied to all result columns (if any); /// or it can equal the actual number of result columns of the query. 
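The format-code convention these comments describe (and which the executor uses via `TypeFormat::Binary`) is: 0 means text, 1 means binary, and the array may be empty, hold a single code that applies to every parameter, or hold one code per parameter. A standalone sketch of how such an array resolves per parameter; `Format` and `format_for` are illustrative names, not the crate's `TypeFormat`:

```rust
/// Postgres format codes: 0 = text, 1 = binary.
#[derive(Copy, Clone, Debug, PartialEq)]
enum Format {
    Text = 0,
    Binary = 1,
}

/// Resolve the format of parameter `i` from a Bind-style format array.
fn format_for(formats: &[Format], i: usize) -> Format {
    match formats.len() {
        0 => Format::Text, // no codes: everything defaults to text
        1 => formats[0],   // one code: applies to all parameters
        _ => formats[i],   // otherwise: one code per parameter
    }
}

fn main() {
    assert_eq!(format_for(&[], 2), Format::Text);
    assert_eq!(format_for(&[Format::Binary], 2), Format::Binary);
    assert_eq!(
        format_for(&[Format::Text, Format::Binary, Format::Text], 2),
        Format::Text
    );
}
```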
- pub result_formats: &'a [i16], + pub result_formats: &'a [TypeFormat], } impl Encode for Bind<'_> { @@ -36,12 +39,13 @@ impl Encode for Bind<'_> { buf.put_i32::(0); // skip over len buf.put_str_nul(self.portal); - buf.put_str_nul(self.statement); + + self.statement.encode(buf); buf.put_i16::(self.formats.len() as i16); for &format in self.formats { - buf.put_i16::(format); + buf.put_i16::(format as i16); } buf.put_i16::(self.values_len); @@ -51,7 +55,7 @@ impl Encode for Bind<'_> { buf.put_i16::(self.result_formats.len() as i16); for &format in self.result_formats { - buf.put_i16::(format); + buf.put_i16::(format as i16); } // Write-back the len to the beginning of this frame diff --git a/sqlx-core/src/postgres/protocol/cancel_request.rs b/sqlx-core/src/postgres/protocol/cancel_request.rs index 6bcaf02d..47d85c8f 100644 --- a/sqlx-core/src/postgres/protocol/cancel_request.rs +++ b/sqlx-core/src/postgres/protocol/cancel_request.rs @@ -7,10 +7,10 @@ use byteorder::NetworkEndian; /// /// https://www.postgresql.org/docs/devel/protocol-flow.html#id-1.10.5.7.9 pub struct CancelRequest { - /// The process ID of the target backend. + /// The process ID of the target database. pub process_id: i32, - /// The secret key for the target backend. + /// The secret key for the target database. pub secret_key: i32, } diff --git a/sqlx-core/src/postgres/protocol/close.rs b/sqlx-core/src/postgres/protocol/close.rs index 6fc93b3c..2fea9c84 100644 --- a/sqlx-core/src/postgres/protocol/close.rs +++ b/sqlx-core/src/postgres/protocol/close.rs @@ -1,48 +1,37 @@ -use super::Encode; use crate::io::BufMut; +use crate::postgres::protocol::Encode; use byteorder::NetworkEndian; -#[repr(u8)] -pub enum CloseKind { - PreparedStatement, - Portal, -} - -pub struct Close<'a> { - kind: CloseKind, - - /// The name of the prepared statement or portal to close (an empty string selects the - /// unnamed prepared statement or portal). 
- name: &'a str, +pub enum Close<'a> { + Statement(&'a str), + Portal(&'a str), } impl Encode for Close<'_> { fn encode(&self, buf: &mut Vec) { buf.push(b'C'); + let (kind, name) = match self { + Close::Statement(name) => (b'S', name), + Close::Portal(name) => (b'P', name), + }; + // len + kind + nul + len(string) - buf.put_i32::((4 + 1 + 1 + self.name.len()) as i32); + buf.put_i32::((4 + 1 + 1 + name.len()) as i32); - buf.push(match self.kind { - CloseKind::PreparedStatement => b'S', - CloseKind::Portal => b'P', - }); - - buf.put_str_nul(self.name); + buf.push(kind); + buf.put_str_nul(name); } } #[cfg(test)] mod test { - use super::{Close, CloseKind, Encode}; + use super::{Close, Encode}; #[test] fn it_encodes_close_portal() { let mut buf = Vec::new(); - let m = Close { - kind: CloseKind::Portal, - name: "__sqlx_p_1", - }; + let m = Close::Portal("__sqlx_p_1"); m.encode(&mut buf); @@ -52,10 +41,7 @@ mod test { #[test] fn it_encodes_close_statement() { let mut buf = Vec::new(); - let m = Close { - kind: CloseKind::PreparedStatement, - name: "__sqlx_s_1", - }; + let m = Close::Statement("__sqlx_s_1"); m.encode(&mut buf); diff --git a/sqlx-core/src/postgres/protocol/command_complete.rs b/sqlx-core/src/postgres/protocol/command_complete.rs index 39ac2802..236818c9 100644 --- a/sqlx-core/src/postgres/protocol/command_complete.rs +++ b/sqlx-core/src/postgres/protocol/command_complete.rs @@ -1,26 +1,16 @@ -use super::Decode; use crate::io::Buf; +use crate::postgres::protocol::Decode; use std::io; #[derive(Debug)] pub struct CommandComplete { - affected_rows: u64, -} - -impl CommandComplete { - #[inline] - pub fn affected_rows(&self) -> u64 { - self.affected_rows - } + pub affected_rows: u64, } impl Decode for CommandComplete { fn decode(mut buf: &[u8]) -> crate::Result { - // TODO: Mysql/MySQL return 0 for affected rows in a SELECT .. statement. - // PostgreSQL returns a row count. Should we force return 0 for compatibilities sake? 
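For the tag parsing that `CommandComplete::decode` performs below: the command tag is text such as "INSERT 0 1", "UPDATE 512", or "CREATE TABLE", and the affected-row count, when present, is its last space-separated word. A standalone sketch of that rule, not the exact implementation in this patch; `affected_rows` here is an illustrative free function:

```rust
/// Affected rows from a CommandComplete tag: the last word when it parses as an
/// integer, otherwise 0 (tags like "CREATE TABLE" or "BEGIN" carry no count).
fn affected_rows(tag: &str) -> u64 {
    tag.rsplit(' ')
        .next()
        .and_then(|word| word.parse().ok())
        .unwrap_or(0)
}

fn main() {
    assert_eq!(affected_rows("INSERT 0 1"), 1);
    assert_eq!(affected_rows("UPDATE 512"), 512);
    assert_eq!(affected_rows("CREATE TABLE"), 0);
    assert_eq!(affected_rows("BEGIN"), 0);
}
```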
- // Attempt to parse the last word in the command tag as an integer - // If it can't be parased, the tag is probably "CREATE TABLE" or something + // If it can't be parsed, the tag is probably "CREATE TABLE" or something // and we should return 0 rows let rows = buf @@ -49,27 +39,27 @@ mod tests { fn it_decodes_command_complete_for_insert() { let message = CommandComplete::decode(COMMAND_COMPLETE_INSERT).unwrap(); - assert_eq!(message.affected_rows(), 1); + assert_eq!(message.affected_rows, 1); } #[test] fn it_decodes_command_complete_for_update() { let message = CommandComplete::decode(COMMAND_COMPLETE_UPDATE).unwrap(); - assert_eq!(message.affected_rows(), 512); + assert_eq!(message.affected_rows, 512); } #[test] fn it_decodes_command_complete_for_begin() { let message = CommandComplete::decode(COMMAND_COMPLETE_BEGIN).unwrap(); - assert_eq!(message.affected_rows(), 0); + assert_eq!(message.affected_rows, 0); } #[test] fn it_decodes_command_complete_for_create_table() { let message = CommandComplete::decode(COMMAND_COMPLETE_CREATE_TABLE).unwrap(); - assert_eq!(message.affected_rows(), 0); + assert_eq!(message.affected_rows, 0); } } diff --git a/sqlx-core/src/postgres/protocol/copy_data.rs b/sqlx-core/src/postgres/protocol/copy_data.rs deleted file mode 100644 index 4300d409..00000000 --- a/sqlx-core/src/postgres/protocol/copy_data.rs +++ /dev/null @@ -1,29 +0,0 @@ -use super::Encode; -use crate::io::BufMut; -use byteorder::NetworkEndian; - -// TODO: Implement Decode and think on an optimal representation - -/* -# Optimal for Encode -pub struct CopyData<'a> { data: &'a [u8] } - -# Optimal for Decode -pub struct CopyData { data: Bytes } - -# 1) Two structs (names?) -# 2) "Either" inner abstraction; removes ease of construction for Encode -*/ - -pub struct CopyData<'a> { - pub data: &'a [u8], -} - -impl Encode for CopyData<'_> { - fn encode(&self, buf: &mut Vec) { - buf.push(b'd'); - // len + nul + len(string) - buf.put_i32::((4 + 1 + self.data.len()) as i32); - buf.extend_from_slice(&self.data); - } -} diff --git a/sqlx-core/src/postgres/protocol/copy_done.rs b/sqlx-core/src/postgres/protocol/copy_done.rs deleted file mode 100644 index f95b13fd..00000000 --- a/sqlx-core/src/postgres/protocol/copy_done.rs +++ /dev/null @@ -1,15 +0,0 @@ -use super::Encode; -use crate::io::BufMut; -use byteorder::NetworkEndian; - -// TODO: Implement Decode - -pub struct CopyDone; - -impl Encode for CopyDone { - #[inline] - fn encode(&self, buf: &mut Vec) { - buf.push(b'c'); - buf.put_i32::(4); - } -} diff --git a/sqlx-core/src/postgres/protocol/copy_fail.rs b/sqlx-core/src/postgres/protocol/copy_fail.rs deleted file mode 100644 index 64bf0004..00000000 --- a/sqlx-core/src/postgres/protocol/copy_fail.rs +++ /dev/null @@ -1,16 +0,0 @@ -use super::Encode; -use crate::io::BufMut; -use byteorder::NetworkEndian; - -pub struct CopyFail<'a> { - pub error: &'a str, -} - -impl Encode for CopyFail<'_> { - fn encode(&self, buf: &mut Vec) { - buf.push(b'f'); - // len + nul + len(string) - buf.put_i32::((4 + 1 + self.error.len()) as i32); - buf.put_str_nul(&self.error); - } -} diff --git a/sqlx-core/src/postgres/protocol/data_row.rs b/sqlx-core/src/postgres/protocol/data_row.rs index 78c45ed5..c434e2f8 100644 --- a/sqlx-core/src/postgres/protocol/data_row.rs +++ b/sqlx-core/src/postgres/protocol/data_row.rs @@ -1,6 +1,7 @@ -use super::Decode; use crate::io::{Buf, ByteStr}; +use crate::postgres::protocol::Decode; use byteorder::NetworkEndian; +use std::ops::Range; use std::{ fmt::{self, Debug}, io, @@ -9,33 +10,42 @@ use 
std::{ }; pub struct DataRow { - #[used] - buffer: Pin>, - pub(crate) values: Box<[Option>]>, + buffer: Box<[u8]>, + values: Box<[Option>]>, } -// SAFE: Raw pointers point to pinned memory inside the struct -unsafe impl Send for DataRow {} -unsafe impl Sync for DataRow {} +impl DataRow { + pub fn len(&self) -> usize { + self.values.len() + } + + pub fn get(&self, index: usize) -> Option<&[u8]> { + let range = self.values[index].as_ref()?; + + Some(&self.buffer[(range.start as usize)..(range.end as usize)]) + } +} impl Decode for DataRow { fn decode(mut buf: &[u8]) -> crate::Result { - let cnt = buf.get_u16::()? as usize; - let buffer: Pin> = Pin::new(buf.into()); - let mut buf = &*buffer; - let mut values = Vec::with_capacity(cnt); + let len = buf.get_u16::()? as usize; + let buffer: Box<[u8]> = buf.into(); + let mut values = Vec::with_capacity(len); + let mut index = 4; - while values.len() < cnt { + while values.len() < len { // The length of the column value, in bytes (this count does not include itself). // Can be zero. As a special case, -1 indicates a NULL column value. // No value bytes follow in the NULL case. - let value_len = buf.get_i32::()?; + let size = buf.get_i32::()?; - if value_len == -1 { + if size == -1 { values.push(None); } else { - values.push(Some(buf[..(value_len as usize)].into())); - buf.advance(value_len as usize); + values.push(Some((index)..(index + (size as u32)))); + + index += (size as u32) + 4; + buf.advance(size as usize); } } @@ -52,8 +62,10 @@ impl Debug for DataRow { write!(f, "DataRow(")?; + let len = self.values.len(); + f.debug_list() - .entries((0..self.len()).map(|i| self.get_raw(i).map(ByteStr))) + .entries((0..len).map(|i| self.get(i).map(ByteStr))) .finish()?; write!(f, ")")?; @@ -73,11 +85,11 @@ mod tests { fn it_decodes_data_row() { let m = DataRow::decode(DATA_ROW).unwrap(); - assert_eq!(m.len(), 3); + assert_eq!(m.values.len(), 3); - assert_eq!(m.get_raw(0), Some(&b"1"[..])); - assert_eq!(m.get_raw(1), Some(&b"2"[..])); - assert_eq!(m.get_raw(2), Some(&b"3"[..])); + assert_eq!(m.get(0), Some(&b"1"[..])); + assert_eq!(m.get(1), Some(&b"2"[..])); + assert_eq!(m.get(2), Some(&b"3"[..])); assert_eq!( format!("{:?}", m), diff --git a/sqlx-core/src/postgres/protocol/decode.rs b/sqlx-core/src/postgres/protocol/decode.rs index 31c5bf4b..5a8dd601 100644 --- a/sqlx-core/src/postgres/protocol/decode.rs +++ b/sqlx-core/src/postgres/protocol/decode.rs @@ -1,7 +1,7 @@ use std::io; pub trait Decode { - fn decode(src: &[u8]) -> crate::Result + fn decode(buf: &[u8]) -> crate::Result where Self: Sized; } diff --git a/sqlx-core/src/postgres/protocol/describe.rs b/sqlx-core/src/postgres/protocol/describe.rs index d042c312..10e06327 100644 --- a/sqlx-core/src/postgres/protocol/describe.rs +++ b/sqlx-core/src/postgres/protocol/describe.rs @@ -1,45 +1,47 @@ -use super::Encode; use crate::io::BufMut; -use byteorder::NetworkEndian; +use crate::postgres::protocol::{Encode, StatementId}; +use byteorder::{ByteOrder, NetworkEndian}; -#[repr(u8)] -pub enum DescribeKind { - PreparedStatement, - Portal, -} - -pub struct Describe<'a> { - pub kind: DescribeKind, - - /// The name of the prepared statement or portal to describe (an empty string selects the - /// unnamed prepared statement or portal). 
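The new `Describe` encoding just below uses the same length-prefix pattern as `Bind`, `Parse`, and `PgArguments::add`: reserve four bytes, write the body, then back-patch a big-endian length that counts itself but not the tag byte. A minimal sketch of the pattern, assuming the same `byteorder` crate; `with_len_prefix` is an illustrative helper, not part of this patch:

```rust
use byteorder::{ByteOrder, NetworkEndian};

/// Reserve space for the i32 length, let `write_body` fill in the frame,
/// then write the final length (which includes the 4 length bytes) back in.
fn with_len_prefix(buf: &mut Vec<u8>, write_body: impl FnOnce(&mut Vec<u8>)) {
    let pos = buf.len();
    buf.extend_from_slice(&[0; 4]); // placeholder

    write_body(buf);

    let len = (buf.len() - pos) as i32;
    NetworkEndian::write_i32(&mut buf[pos..], len);
}

fn main() {
    // Describe ('D') for the portal "__sqlx_p_1":
    // 4 (length) + 1 (kind byte 'P') + 10 (name) + 1 (NUL) = 0x10
    let mut buf = vec![b'D'];
    with_len_prefix(&mut buf, |buf| {
        buf.push(b'P');
        buf.extend_from_slice(b"__sqlx_p_1\0");
    });

    assert_eq!(&buf[..], &b"D\x00\x00\x00\x10P__sqlx_p_1\x00"[..]);
}
```

The same arithmetic produces the 0x18 length asserted in the `it_encodes_describe_statement` test in this file: 4 + 1 + 18 + 1 bytes, with "__sqlx_statement_1" being 18 bytes long.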
- pub name: &'a str, +pub enum Describe<'a> { + Statement(StatementId), + Portal(&'a str), } impl Encode for Describe<'_> { fn encode(&self, buf: &mut Vec) { buf.push(b'D'); - // len + kind + nul + len(string) - buf.put_i32::((4 + 1 + 1 + self.name.len()) as i32); - buf.push(match self.kind { - DescribeKind::PreparedStatement => b'S', - DescribeKind::Portal => b'P', - }); - buf.put_str_nul(self.name); + + let pos = buf.len(); + buf.put_i32::(0); // skip over len + + match self { + Describe::Statement(id) => { + buf.push(b'S'); + id.encode(buf); + } + + Describe::Portal(name) => { + buf.push(b'P'); + buf.put_str_nul(name); + } + }; + + // Write-back the len to the beginning of this frame + let len = buf.len() - pos; + NetworkEndian::write_i32(&mut buf[pos..], len as i32); } } #[cfg(test)] mod test { - use super::{Describe, DescribeKind, Encode}; + use super::{Describe, Encode}; + use crate::io::ByteStr; + use crate::postgres::protocol::StatementId; #[test] fn it_encodes_describe_portal() { let mut buf = Vec::new(); - let m = Describe { - kind: DescribeKind::Portal, - name: "__sqlx_p_1", - }; + let m = Describe::Portal("__sqlx_p_1"); m.encode(&mut buf); @@ -49,13 +51,10 @@ mod test { #[test] fn it_encodes_describe_statement() { let mut buf = Vec::new(); - let m = Describe { - kind: DescribeKind::PreparedStatement, - name: "__sqlx_s_1", - }; + let m = Describe::Statement(StatementId(1)); m.encode(&mut buf); - assert_eq!(buf, b"D\0\0\0\x10S__sqlx_s_1\0"); + assert_eq!(buf, b"D\x00\x00\x00\x18S__sqlx_statement_1\x00"); } } diff --git a/sqlx-core/src/postgres/protocol/execute.rs b/sqlx-core/src/postgres/protocol/execute.rs index 9851af09..481fdaa0 100644 --- a/sqlx-core/src/postgres/protocol/execute.rs +++ b/sqlx-core/src/postgres/protocol/execute.rs @@ -1,5 +1,5 @@ -use super::Encode; use crate::io::BufMut; +use crate::postgres::protocol::Encode; use byteorder::NetworkEndian; pub struct Execute<'a> { @@ -14,8 +14,10 @@ pub struct Execute<'a> { impl Encode for Execute<'_> { fn encode(&self, buf: &mut Vec) { buf.push(b'E'); + // len + nul + len(string) + limit buf.put_i32::((4 + 1 + self.portal.len() + 4) as i32); + buf.put_str_nul(&self.portal); buf.put_i32::(self.limit); } diff --git a/sqlx-core/src/postgres/protocol/flush.rs b/sqlx-core/src/postgres/protocol/flush.rs index dcb8eaa8..c47d6bd7 100644 --- a/sqlx-core/src/postgres/protocol/flush.rs +++ b/sqlx-core/src/postgres/protocol/flush.rs @@ -1,11 +1,10 @@ -use super::Encode; use crate::io::BufMut; +use crate::postgres::protocol::Encode; use byteorder::NetworkEndian; pub struct Flush; impl Encode for Flush { - #[inline] fn encode(&self, buf: &mut Vec) { buf.push(b'H'); buf.put_i32::(4); diff --git a/sqlx-core/src/postgres/protocol/message.rs b/sqlx-core/src/postgres/protocol/message.rs index 4f84164d..a90b7021 100644 --- a/sqlx-core/src/postgres/protocol/message.rs +++ b/sqlx-core/src/postgres/protocol/message.rs @@ -1,8 +1,7 @@ -use super::{ +use crate::postgres::protocol::{ Authentication, BackendKeyData, CommandComplete, DataRow, NotificationResponse, - ParameterDescription, ParameterStatus, ReadyForQuery, Response, + ParameterDescription, ParameterStatus, ReadyForQuery, Response, RowDescription, }; -use crate::postgres::protocol::row_description::RowDescription; #[derive(Debug)] #[repr(u8)] diff --git a/sqlx-core/src/postgres/protocol/mod.rs b/sqlx-core/src/postgres/protocol/mod.rs index d9be891c..2e52dd73 100644 --- a/sqlx-core/src/postgres/protocol/mod.rs +++ b/sqlx-core/src/postgres/protocol/mod.rs @@ -1,18 +1,13 @@ -//! 
Low level PostgreSQL protocol. Defines the encoding and decoding of the messages communicated +//! Low level Postgres protocol. Defines the encoding and decoding of the messages communicated //! to and from the database server. -// Many protocol types are implemented but unused (currently). The hope is to eventually -// work them all into the (raw) connection type. +// There is much to the Postgres protocol that is not yet used. As we mature we'll be trimming +// the size of this module to exactly what is necessary. #![allow(unused)] -use std::{io, str}; - mod bind; mod cancel_request; mod close; -mod copy_data; -mod copy_done; -mod copy_fail; mod describe; mod encode; mod execute; @@ -21,33 +16,24 @@ mod parse; mod password_message; mod query; mod startup_message; +mod statement; mod sync; mod terminate; -// TODO: mod gss_enc_request; -// TODO: mod gss_response; -// TODO: mod sasl_initial_response; -// TODO: mod sasl_response; -// TODO: mod ssl_request; - -pub use self::{ - bind::Bind, - cancel_request::CancelRequest, - close::Close, - copy_data::CopyData, - copy_done::CopyDone, - copy_fail::CopyFail, - describe::{Describe, DescribeKind}, - encode::Encode, - execute::Execute, - flush::Flush, - parse::Parse, - password_message::PasswordMessage, - query::Query, - startup_message::StartupMessage, - sync::Sync, - terminate::Terminate, -}; +pub use bind::Bind; +pub use cancel_request::CancelRequest; +pub use close::Close; +pub use describe::Describe; +pub use encode::Encode; +pub use execute::Execute; +pub use flush::Flush; +pub use parse::Parse; +pub use password_message::PasswordMessage; +pub use query::Query; +pub use startup_message::StartupMessage; +pub use statement::StatementId; +pub use sync::Sync; +pub use terminate::Terminate; mod authentication; mod backend_key_data; @@ -61,33 +47,17 @@ mod ready_for_query; mod response; mod row_description; -// TODO: Audit backend protocol - mod message; -pub use self::{ - authentication::Authentication, - backend_key_data::BackendKeyData, - command_complete::CommandComplete, - data_row::DataRow, - decode::Decode, - message::Message, - notification_response::NotificationResponse, - parameter_description::ParameterDescription, - parameter_status::ParameterStatus, - ready_for_query::ReadyForQuery, - response::Response, - row_description::{RowDescription, RowField}, -}; - -fn read_string(buf: &mut &[u8]) -> io::Result { - let str_len = memchr::memchr(0u8, buf) - .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "unterminated string"))?; - - let string = str::from_utf8(&buf[..str_len]) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; - - *buf = &buf[str_len + 1..]; - - Ok(string.to_owned()) -} +pub use authentication::Authentication; +pub use backend_key_data::BackendKeyData; +pub use command_complete::CommandComplete; +pub use data_row::DataRow; +pub use decode::Decode; +pub use message::Message; +pub use notification_response::NotificationResponse; +pub use parameter_description::ParameterDescription; +pub use parameter_status::ParameterStatus; +pub use ready_for_query::ReadyForQuery; +pub use response::Response; +pub use row_description::{Field, RowDescription}; diff --git a/sqlx-core/src/postgres/protocol/notification_response.rs b/sqlx-core/src/postgres/protocol/notification_response.rs index 22f2e9dc..6ca247a3 100644 --- a/sqlx-core/src/postgres/protocol/notification_response.rs +++ b/sqlx-core/src/postgres/protocol/notification_response.rs @@ -1,61 +1,22 @@ -use super::Decode; use crate::io::Buf; +use 
crate::postgres::protocol::Decode; use byteorder::NetworkEndian; use std::{fmt, io, pin::Pin, ptr::NonNull}; +#[derive(Debug)] pub struct NotificationResponse { - #[used] - buffer: Pin>, - pid: u32, - channel_name: NonNull, - message: NonNull, -} - -impl NotificationResponse { - #[inline] - pub fn pid(&self) -> u32 { - self.pid - } - - #[inline] - pub fn channel_name(&self) -> &str { - // SAFE: Memory is pinned - unsafe { self.channel_name.as_ref() } - } - - #[inline] - pub fn message(&self) -> &str { - // SAFE: Memory is pinned - unsafe { self.message.as_ref() } - } -} - -// SAFE: Raw pointers point to pinned memory inside the struct -unsafe impl Send for NotificationResponse {} -unsafe impl Sync for NotificationResponse {} - -impl fmt::Debug for NotificationResponse { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("NotificationResponse") - .field("pid", &self.pid()) - .field("channel_name", &self.channel_name()) - .field("message", &self.message()) - .finish() - } + pub pid: u32, + pub channel_name: String, + pub message: String, } impl Decode for NotificationResponse { fn decode(mut buf: &[u8]) -> crate::Result { let pid = buf.get_u32::()?; - - let buffer = Pin::new(buf.into()); - let mut buf: &[u8] = &*buffer; - - let channel_name = buf.get_str_nul()?.into(); - let message = buf.get_str_nul()?.into(); + let channel_name = buf.get_str_nul()?.to_owned(); + let message = buf.get_str_nul()?.to_owned(); Ok(Self { - buffer, pid, channel_name, message, @@ -73,14 +34,8 @@ mod tests { fn it_decodes_notification_response() { let message = NotificationResponse::decode(NOTIFICATION_RESPONSE).unwrap(); - assert_eq!(message.pid(), 0x34201002); - assert_eq!(message.channel_name(), "TEST-CHANNEL"); - assert_eq!(message.message(), "THIS IS A TEST"); - - assert_eq!( - format!("{:?}", message), - "NotificationResponse { pid: 874516482, channel_name: \"TEST-CHANNEL\", message: \ - \"THIS IS A TEST\" }" - ); + assert_eq!(message.pid, 0x34201002); + assert_eq!(message.channel_name, "TEST-CHANNEL"); + assert_eq!(message.message, "THIS IS A TEST"); } } diff --git a/sqlx-core/src/postgres/protocol/parameter_description.rs b/sqlx-core/src/postgres/protocol/parameter_description.rs index c5b31f91..c97f1351 100644 --- a/sqlx-core/src/postgres/protocol/parameter_description.rs +++ b/sqlx-core/src/postgres/protocol/parameter_description.rs @@ -1,5 +1,5 @@ -use super::Decode; use crate::io::Buf; +use crate::postgres::protocol::Decode; use byteorder::NetworkEndian; use std::io; diff --git a/sqlx-core/src/postgres/protocol/parameter_status.rs b/sqlx-core/src/postgres/protocol/parameter_status.rs index b8b03e86..23f714b3 100644 --- a/sqlx-core/src/postgres/protocol/parameter_status.rs +++ b/sqlx-core/src/postgres/protocol/parameter_status.rs @@ -1,60 +1,18 @@ -use super::decode::Decode; use crate::io::Buf; -use std::{ - fmt::{self, Debug}, - io, - pin::Pin, - ptr::NonNull, - str, -}; +use crate::postgres::protocol::Decode; +#[derive(Debug)] pub struct ParameterStatus { - #[used] - buffer: Pin>, - name: NonNull, - value: NonNull, -} - -// SAFE: Raw pointers point to pinned memory inside the struct -unsafe impl Send for ParameterStatus {} -unsafe impl Sync for ParameterStatus {} - -impl ParameterStatus { - #[inline] - pub fn name(&self) -> &str { - // SAFE: Memory is pinned - unsafe { self.name.as_ref() } - } - - #[inline] - pub fn value(&self) -> &str { - // SAFE: Memory is pinned - unsafe { self.value.as_ref() } - } + pub name: Box, + pub value: Box, } impl Decode for ParameterStatus { - fn 
decode(buf: &[u8]) -> crate::Result { - let buffer = Pin::new(buf.into()); - let mut buf: &[u8] = &*buffer; - + fn decode(mut buf: &[u8]) -> crate::Result { let name = buf.get_str_nul()?.into(); let value = buf.get_str_nul()?.into(); - Ok(Self { - buffer, - name, - value, - }) - } -} - -impl Debug for ParameterStatus { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("ParameterStatus") - .field("name", &self.name()) - .field("value", &self.value()) - .finish() + Ok(Self { name, value }) } } @@ -68,12 +26,7 @@ mod tests { fn it_decodes_param_status() { let message = ParameterStatus::decode(PARAM_STATUS).unwrap(); - assert_eq!(message.name(), "session_authorization"); - assert_eq!(message.value(), "postgres"); - - assert_eq!( - format!("{:?}", message), - "ParameterStatus { name: \"session_authorization\", value: \"postgres\" }" - ); + assert_eq!(&*message.name, "session_authorization"); + assert_eq!(&*message.value, "postgres"); } } diff --git a/sqlx-core/src/postgres/protocol/parse.rs b/sqlx-core/src/postgres/protocol/parse.rs index 680ae0fe..e7beb772 100644 --- a/sqlx-core/src/postgres/protocol/parse.rs +++ b/sqlx-core/src/postgres/protocol/parse.rs @@ -1,9 +1,9 @@ -use super::Encode; use crate::io::BufMut; -use byteorder::NetworkEndian; +use crate::postgres::protocol::{Encode, StatementId}; +use byteorder::{ByteOrder, NetworkEndian}; pub struct Parse<'a> { - pub statement: &'a str, + pub statement: StatementId, pub query: &'a str, pub param_types: &'a [u32], } @@ -12,13 +12,11 @@ impl Encode for Parse<'_> { fn encode(&self, buf: &mut Vec) { buf.push(b'P'); - // len + statement + nul + query + null + len(param_types) + param_types - let len = - 4 + self.statement.len() + 1 + self.query.len() + 1 + 2 + self.param_types.len() * 4; + let pos = buf.len(); + buf.put_i32::(0); // skip over len - buf.put_i32::(len as i32); + self.statement.encode(buf); - buf.put_str_nul(self.statement); buf.put_str_nul(self.query); buf.put_i16::(self.param_types.len() as i16); @@ -26,5 +24,9 @@ impl Encode for Parse<'_> { for &type_ in self.param_types { buf.put_u32::(type_); } + + // Write-back the len to the beginning of this frame + let len = buf.len() - pos; + NetworkEndian::write_i32(&mut buf[pos..], len as i32); } } diff --git a/sqlx-core/src/postgres/protocol/password_message.rs b/sqlx-core/src/postgres/protocol/password_message.rs index 8ace172b..4830fcc0 100644 --- a/sqlx-core/src/postgres/protocol/password_message.rs +++ b/sqlx-core/src/postgres/protocol/password_message.rs @@ -1,11 +1,11 @@ -use super::Encode; use crate::io::BufMut; +use crate::postgres::protocol::Encode; use byteorder::NetworkEndian; use md5::{Digest, Md5}; -#[derive(Debug)] pub enum PasswordMessage<'a> { - Cleartext(&'a str), + ClearText(&'a str), + Md5 { password: &'a str, user: &'a str, @@ -18,7 +18,7 @@ impl Encode for PasswordMessage<'_> { buf.push(b'p'); match self { - PasswordMessage::Cleartext(s) => { + PasswordMessage::ClearText(s) => { // len + password + nul buf.put_u32::((4 + s.len() + 1) as u32); buf.put_str_nul(s); @@ -62,7 +62,7 @@ mod tests { #[test] fn it_encodes_password_clear() { let mut buf = Vec::new(); - let m = PasswordMessage::Cleartext("password"); + let m = PasswordMessage::ClearText("password"); m.encode(&mut buf); diff --git a/sqlx-core/src/postgres/protocol/query.rs b/sqlx-core/src/postgres/protocol/query.rs index 9938f570..596db5de 100644 --- a/sqlx-core/src/postgres/protocol/query.rs +++ b/sqlx-core/src/postgres/protocol/query.rs @@ -1,5 +1,5 @@ -use super::Encode; use 
crate::io::BufMut; +use crate::postgres::protocol::Encode; use byteorder::NetworkEndian; pub struct Query<'a>(pub &'a str); diff --git a/sqlx-core/src/postgres/protocol/ready_for_query.rs b/sqlx-core/src/postgres/protocol/ready_for_query.rs index c2cd45db..44f2f1ce 100644 --- a/sqlx-core/src/postgres/protocol/ready_for_query.rs +++ b/sqlx-core/src/postgres/protocol/ready_for_query.rs @@ -1,7 +1,7 @@ -use super::Decode; +use crate::postgres::protocol::Decode; use std::io; -#[derive(Debug, PartialEq, Clone, Copy)] +#[derive(Debug)] #[repr(u8)] pub enum TransactionStatus { /// Not in a transaction block. @@ -14,19 +14,12 @@ pub enum TransactionStatus { Error = b'E', } -/// `ReadyForQuery` is sent whenever the backend is ready for a new query cycle. +/// `ReadyForQuery` is sent whenever the database is ready for a new query cycle. #[derive(Debug)] pub struct ReadyForQuery { status: TransactionStatus, } -impl ReadyForQuery { - #[inline] - pub fn status(&self) -> TransactionStatus { - self.status - } -} - impl Decode for ReadyForQuery { fn decode(buf: &[u8]) -> crate::Result { Ok(Self { @@ -50,6 +43,7 @@ impl Decode for ReadyForQuery { #[cfg(test)] mod tests { use super::{Decode, ReadyForQuery, TransactionStatus}; + use matches::assert_matches; const READY_FOR_QUERY: &[u8] = b"E"; @@ -57,6 +51,6 @@ mod tests { fn it_decodes_ready_for_query() { let message = ReadyForQuery::decode(READY_FOR_QUERY).unwrap(); - assert_eq!(message.status, TransactionStatus::Error); + assert_matches!(message.status, TransactionStatus::Error); } } diff --git a/sqlx-core/src/postgres/protocol/response.rs b/sqlx-core/src/postgres/protocol/response.rs index 8eedb6e5..420b937f 100644 --- a/sqlx-core/src/postgres/protocol/response.rs +++ b/sqlx-core/src/postgres/protocol/response.rs @@ -1,5 +1,5 @@ -use super::Decode; use crate::io::Buf; +use crate::postgres::protocol::Decode; use std::{ fmt, io, pin::Pin, @@ -7,7 +7,7 @@ use std::{ str::{self, FromStr}, }; -#[derive(Debug, PartialEq, PartialOrd, Copy, Clone)] +#[derive(Debug, Copy, Clone)] pub enum Severity { Panic, Fatal, @@ -74,179 +74,47 @@ impl FromStr for Severity { } } +#[derive(Debug)] pub struct Response { - #[used] - buffer: Pin>, - severity: Severity, - code: NonNull, - message: NonNull, - detail: Option>, - hint: Option>, - position: Option, - internal_position: Option, - internal_query: Option>, - where_: Option>, - schema: Option>, - table: Option>, - column: Option>, - data_type: Option>, - constraint: Option>, - file: Option>, - line: Option, - routine: Option>, -} - -// SAFE: Raw pointers point to pinned memory inside the struct -unsafe impl Send for Response {} -unsafe impl Sync for Response {} - -impl Response { - #[inline] - pub fn severity(&self) -> Severity { - self.severity - } - - #[inline] - pub fn code(&self) -> &str { - // SAFE: Memory is pinned - unsafe { self.code.as_ref() } - } - - #[inline] - pub fn message(&self) -> &str { - // SAFE: Memory is pinned - unsafe { self.message.as_ref() } - } - - #[inline] - pub fn detail(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.detail.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn hint(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.hint.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn position(&self) -> Option { - self.position - } - - #[inline] - pub fn internal_position(&self) -> Option { - self.internal_position - } - - #[inline] - pub fn internal_query(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { 
self.internal_query.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn where_(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.where_.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn schema(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.schema.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn table(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.table.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn column(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.column.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn data_type(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.data_type.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn constraint(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.constraint.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn file(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.file.as_ref().map(|s| s.as_ref()) } - } - - #[inline] - pub fn line(&self) -> Option { - self.line - } - - #[inline] - pub fn routine(&self) -> Option<&str> { - // SAFE: Memory is pinned - unsafe { self.routine.as_ref().map(|s| s.as_ref()) } - } -} - -impl fmt::Debug for Response { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Response") - .field("severity", &self.severity) - .field("code", &self.code()) - .field("message", &self.message()) - .field("detail", &self.detail()) - .field("hint", &self.hint()) - .field("position", &self.position()) - .field("internal_position", &self.internal_position()) - .field("internal_query", &self.internal_query()) - .field("where_", &self.where_()) - .field("schema", &self.schema()) - .field("table", &self.table()) - .field("column", &self.column()) - .field("data_type", &self.data_type()) - .field("constraint", &self.constraint()) - .field("file", &self.file()) - .field("line", &self.line()) - .field("routine", &self.routine()) - .finish() - } + pub severity: Severity, + pub code: Box, + pub message: Box, + pub detail: Option>, + pub hint: Option>, + pub position: Option, + pub internal_position: Option, + pub internal_query: Option>, + pub where_: Option>, + pub schema: Option>, + pub table: Option>, + pub column: Option>, + pub data_type: Option>, + pub constraint: Option>, + pub file: Option>, + pub line: Option, + pub routine: Option>, } impl Decode for Response { - fn decode(buf: &[u8]) -> crate::Result { - let buffer: Pin> = Pin::new(buf.into()); - let mut buf: &[u8] = &*buffer; - - let mut code = None::>; - let mut message = None::>; - let mut severity = None::>; + fn decode(mut buf: &[u8]) -> crate::Result { + let mut code = None::>; + let mut message = None::>; + let mut severity = None::>; let mut severity_non_local = None::; - let mut detail = None::>; - let mut hint = None::>; + let mut detail = None::>; + let mut hint = None::>; let mut position = None::; let mut internal_position = None::; - let mut internal_query = None::>; - let mut where_ = None::>; - let mut schema = None::>; - let mut table = None::>; - let mut column = None::>; - let mut data_type = None::>; - let mut constraint = None::>; - let mut file = None::>; + let mut internal_query = None::>; + let mut where_ = None::>; + let mut schema = None::>; + let mut table = None::>; + let mut column = None::>; + let mut data_type = None::>; + let mut constraint = None::>; + let mut file = None::>; let mut line = None::; - let mut routine = None::>; + let mut routine 
= None::>; loop { let field_type = buf.get_u8()?; @@ -354,7 +222,7 @@ impl Decode for Response { } let severity = severity_non_local - .or_else(move || unsafe { severity?.as_ref() }.parse().ok()) + .or_else(move || severity?.as_ref().parse().ok()) .ok_or(protocol_err!( "did not receieve field `severity` for Response" ))?; @@ -365,7 +233,6 @@ impl Decode for Response { ))?; Ok(Self { - buffer, severity, code, message, @@ -390,6 +257,7 @@ impl Decode for Response { #[cfg(test)] mod tests { use super::{Decode, Response, Severity}; + use matches::assert_matches; const RESPONSE: &[u8] = b"SNOTICE\0VNOTICE\0C42710\0Mextension \"uuid-ossp\" already exists, \ skipping\0Fextension.c\0L1656\0RCreateExtension\0\0"; @@ -398,13 +266,13 @@ mod tests { fn it_decodes_response() { let message = Response::decode(RESPONSE).unwrap(); - assert_eq!(message.severity(), Severity::Notice); - assert_eq!(message.code(), "42710"); - assert_eq!(message.file(), Some("extension.c")); - assert_eq!(message.line(), Some(1656)); - assert_eq!(message.routine(), Some("CreateExtension")); + assert_matches!(message.severity, Severity::Notice); + assert_eq!(&*message.code, "42710"); + assert_eq!(&*message.file.unwrap(), "extension.c"); + assert_eq!(message.line, Some(1656)); + assert_eq!(&*message.routine.unwrap(), "CreateExtension"); assert_eq!( - message.message(), + &*message.message, "extension \"uuid-ossp\" already exists, skipping" ); } diff --git a/sqlx-core/src/postgres/protocol/row_description.rs b/sqlx-core/src/postgres/protocol/row_description.rs index 276f22c2..8b3c0c67 100644 --- a/sqlx-core/src/postgres/protocol/row_description.rs +++ b/sqlx-core/src/postgres/protocol/row_description.rs @@ -1,22 +1,23 @@ -use super::Decode; use crate::io::Buf; +use crate::postgres::protocol::Decode; +use crate::postgres::types::TypeFormat; use byteorder::NetworkEndian; use std::{io, io::BufRead}; #[derive(Debug)] pub struct RowDescription { - pub fields: Box<[RowField]>, + pub fields: Box<[Field]>, } #[derive(Debug)] -pub struct RowField { - pub name: String, - pub table_id: u32, - pub attr_num: i16, +pub struct Field { + pub name: Option>, + pub table_id: Option, + pub column_id: i16, pub type_id: u32, pub type_size: i16, pub type_mod: i32, - pub format_code: i16, + pub type_format: TypeFormat, } impl Decode for RowDescription { @@ -25,14 +26,25 @@ impl Decode for RowDescription { let mut fields = Vec::with_capacity(cnt); for _ in 0..cnt { - fields.push(RowField { - name: super::read_string(&mut buf)?, - table_id: buf.get_u32::()?, - attr_num: buf.get_i16::()?, + let name = buf.get_str_nul()?; + let name = if name == "?column?" { + None + } else { + Some(name.to_owned().into_boxed_str()) + }; + + let table_id = buf.get_u32::()?; + + fields.push(Field { + name, + + table_id: if table_id > 0 { Some(table_id) } else { None }, + + column_id: buf.get_i16::()?, type_id: buf.get_u32::()?, type_size: buf.get_i16::()?, type_mod: buf.get_i32::()?, - format_code: buf.get_i16::()?, + type_format: buf.get_i16::()?.into(), }); } @@ -49,7 +61,7 @@ mod test { #[test] fn it_decodes_row_description() { #[rustfmt::skip] - let buf = __bytes_builder! { + let buf = bytes! 
{ // Number of Parameters 0_u8, 2_u8, diff --git a/sqlx-core/src/postgres/protocol/startup_message.rs b/sqlx-core/src/postgres/protocol/startup_message.rs index 2c29c6e1..4299efb1 100644 --- a/sqlx-core/src/postgres/protocol/startup_message.rs +++ b/sqlx-core/src/postgres/protocol/startup_message.rs @@ -1,5 +1,5 @@ -use super::Encode; use crate::io::BufMut; +use crate::postgres::protocol::Encode; use byteorder::{BigEndian, ByteOrder, NetworkEndian}; pub struct StartupMessage<'a> { diff --git a/sqlx-core/src/postgres/protocol/statement.rs b/sqlx-core/src/postgres/protocol/statement.rs new file mode 100644 index 00000000..821b5e4a --- /dev/null +++ b/sqlx-core/src/postgres/protocol/statement.rs @@ -0,0 +1,18 @@ +use crate::io::BufMut; +use crate::postgres::protocol::Encode; + +#[derive(Copy, Clone, PartialOrd, PartialEq, Eq, Hash)] +pub struct StatementId(pub u32); + +impl Encode for StatementId { + fn encode(&self, buf: &mut Vec) { + if self.0 != 0 { + buf.put_str("__sqlx_statement_"); + + // TODO: Use [itoa] + buf.put_str_nul(&self.0.to_string()); + } else { + buf.put_str_nul(""); + } + } +} diff --git a/sqlx-core/src/postgres/protocol/sync.rs b/sqlx-core/src/postgres/protocol/sync.rs index 5f30e6ca..ffb4e04c 100644 --- a/sqlx-core/src/postgres/protocol/sync.rs +++ b/sqlx-core/src/postgres/protocol/sync.rs @@ -1,5 +1,5 @@ -use super::Encode; use crate::io::BufMut; +use crate::postgres::protocol::Encode; use byteorder::NetworkEndian; pub struct Sync; diff --git a/sqlx-core/src/postgres/protocol/terminate.rs b/sqlx-core/src/postgres/protocol/terminate.rs index 0d2ab595..6c5b8e2c 100644 --- a/sqlx-core/src/postgres/protocol/terminate.rs +++ b/sqlx-core/src/postgres/protocol/terminate.rs @@ -1,5 +1,5 @@ -use super::Encode; use crate::io::BufMut; +use crate::postgres::protocol::Encode; use byteorder::NetworkEndian; pub struct Terminate; diff --git a/sqlx-core/src/postgres/query.rs b/sqlx-core/src/postgres/query.rs deleted file mode 100644 index 4ab83c36..00000000 --- a/sqlx-core/src/postgres/query.rs +++ /dev/null @@ -1,51 +0,0 @@ -use super::Postgres; -use crate::{ - encode::{Encode, IsNull}, - io::BufMut, - params::QueryParameters, - types::HasSqlType, -}; -use byteorder::{BigEndian, ByteOrder, NetworkEndian}; - -#[derive(Default)] -pub struct PostgresQueryParameters { - // OIDs of the bind parameters - pub(super) types: Vec, - // Write buffer for serializing bind values - pub(super) buf: Vec, -} - -impl QueryParameters for PostgresQueryParameters { - type Backend = Postgres; - - fn reserve(&mut self, binds: usize, bytes: usize) { - self.types.reserve(binds); - self.buf.reserve(bytes); - } - - fn bind(&mut self, value: T) - where - Self: Sized, - Self::Backend: HasSqlType, - T: Encode, - { - // TODO: When/if we receive types that do _not_ support BINARY, we need to check here - // TODO: There is no need to be explicit unless we are expecting mixed BINARY / TEXT - - self.types.push(>::metadata().oid); - - let pos = self.buf.len(); - self.buf.put_i32::(0); - - let len = if let IsNull::No = value.encode(&mut self.buf) { - (self.buf.len() - pos - 4) as i32 - } else { - // Write a -1 for the len to indicate NULL - // TODO: It is illegal for [encode] to write any data if IsSql::No; fail a debug assertion - -1 - }; - - // Write-back the len to the beginning of this frame (not including the len of len) - BigEndian::write_i32(&mut self.buf[pos..], len as i32); - } -} diff --git a/sqlx-core/src/postgres/raw.rs b/sqlx-core/src/postgres/raw.rs deleted file mode 100644 index e69de29b..00000000 diff --git 
a/sqlx-core/src/postgres/row.rs b/sqlx-core/src/postgres/row.rs index d182a052..53b92ae1 100644 --- a/sqlx-core/src/postgres/row.rs +++ b/sqlx-core/src/postgres/row.rs @@ -1,16 +1,58 @@ -use super::{protocol::DataRow, Postgres}; -use crate::row::Row; +use std::collections::HashMap; +use std::sync::Arc; -impl Row for DataRow { - type Backend = Postgres; +use crate::decode::Decode; +use crate::postgres::protocol::DataRow; +use crate::postgres::Postgres; +use crate::row::{Row, RowIndex}; +use crate::types::HasSqlType; + +pub struct PgRow { + pub(super) data: DataRow, + pub(super) columns: Arc, usize>>, +} + +impl Row for PgRow { + type Database = Postgres; fn len(&self) -> usize { - self.values.len() + self.data.len() } - fn get_raw(&self, index: usize) -> Option<&[u8]> { - self.values[index] - .as_ref() - .map(|value| unsafe { value.as_ref() }) + fn get(&self, index: I) -> T + where + Self::Database: HasSqlType, + I: RowIndex, + T: Decode, + { + index.try_get(self).unwrap() } } + +impl RowIndex for usize { + fn try_get(&self, row: &PgRow) -> crate::Result + where + ::Database: HasSqlType, + T: Decode<::Database>, + { + Ok(Decode::decode_nullable(row.data.get(*self))?) + } +} + +impl RowIndex for &'_ str { + fn try_get(&self, row: &PgRow) -> crate::Result + where + ::Database: HasSqlType, + T: Decode<::Database>, + { + let index = row + .columns + .get(*self) + .ok_or_else(|| crate::Error::ColumnNotFound((*self).into()))?; + let value = Decode::decode_nullable(row.data.get(*index))?; + + Ok(value) + } +} + +impl_from_row_for_row!(PgRow); diff --git a/sqlx-core/src/postgres/types/binary.rs b/sqlx-core/src/postgres/types/binary.rs deleted file mode 100644 index 72d329bf..00000000 --- a/sqlx-core/src/postgres/types/binary.rs +++ /dev/null @@ -1,44 +0,0 @@ -use crate::{ - encode::IsNull, - postgres::types::{PostgresTypeFormat, PostgresTypeMetadata}, - Decode, Encode, HasSqlType, Postgres, -}; - -impl HasSqlType<[u8]> for Postgres { - fn metadata() -> Self::TypeMetadata { - PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 17, - array_oid: 1001, - } - } -} - -impl HasSqlType> for Postgres { - fn metadata() -> Self::TypeMetadata { - >::metadata() - } -} - -impl Encode for [u8] { - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(self); - IsNull::No - } -} - -impl Encode for Vec { - fn encode(&self, buf: &mut Vec) -> IsNull { - <[u8] as Encode>::encode(self, buf) - } - - fn size_hint(&self) -> usize { - self.len() - } -} - -impl Decode for Vec { - fn decode(raw: Option<&[u8]>) -> Self { - raw.unwrap().into() - } -} diff --git a/sqlx-core/src/postgres/types/bool.rs b/sqlx-core/src/postgres/types/bool.rs new file mode 100644 index 00000000..4693c911 --- /dev/null +++ b/sqlx-core/src/postgres/types/bool.rs @@ -0,0 +1,24 @@ +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::postgres::types::PgTypeMetadata; +use crate::postgres::Postgres; +use crate::types::HasSqlType; + +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(16, 100) + } +} + +impl Encode for bool { + fn encode(&self, buf: &mut Vec) { + buf.push(*self as u8); + } +} + +impl Decode for bool { + fn decode(buf: &[u8]) -> Result { + // FIXME: Return an error if the buffer size is not (at least) 1 + Ok(buf[0] != 0) + } +} diff --git a/sqlx-core/src/postgres/types/boolean.rs b/sqlx-core/src/postgres/types/boolean.rs deleted file mode 100644 index e024ee44..00000000 --- a/sqlx-core/src/postgres/types/boolean.rs +++ /dev/null @@ -1,36 +0,0 
@@ -use super::{Postgres, PostgresTypeFormat, PostgresTypeMetadata}; -use crate::{ - decode::Decode, - encode::{Encode, IsNull}, - types::HasSqlType, -}; - -impl HasSqlType for Postgres { - fn metadata() -> PostgresTypeMetadata { - PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 16, - array_oid: 1000, - } - } -} - -impl Encode for bool { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.push(*self as u8); - - IsNull::No - } -} - -impl Decode for bool { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - // TODO: Handle optionals - buf.unwrap()[0] != 0 - } -} - -// TODO: #[derive(SqlType)] -// pub struct Bool(pub bool); diff --git a/sqlx-core/src/postgres/types/character.rs b/sqlx-core/src/postgres/types/character.rs deleted file mode 100644 index d01bef7f..00000000 --- a/sqlx-core/src/postgres/types/character.rs +++ /dev/null @@ -1,61 +0,0 @@ -use super::{Postgres, PostgresTypeFormat, PostgresTypeMetadata}; -use crate::{ - decode::Decode, - encode::{Encode, IsNull}, - types::HasSqlType, -}; -use std::str; - -impl HasSqlType for Postgres { - #[inline] - fn metadata() -> PostgresTypeMetadata { - PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 25, - array_oid: 1009, - } - } -} - -impl HasSqlType for Postgres { - #[inline] - fn metadata() -> PostgresTypeMetadata { - >::metadata() - } -} - -impl Encode for str { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(self.as_bytes()); - - IsNull::No - } -} - -impl Encode for String { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - >::encode(self.as_str(), buf) - } - - fn size_hint(&self) -> usize { - self.len() - } -} - -impl Decode for String { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - // TODO: Handle nulls - - let s = if cfg!(debug_assertions) { - str::from_utf8(buf.unwrap()).expect("postgres returned non UTF-8 data for TEXT") - } else { - // SAFE: Postgres is required to return UTF-8 data - unsafe { str::from_utf8_unchecked(buf.unwrap()) } - }; - - s.to_owned() - } -} diff --git a/sqlx-core/src/postgres/types/chrono.rs b/sqlx-core/src/postgres/types/chrono.rs index b8cb0d97..4441c6e5 100644 --- a/sqlx-core/src/postgres/types/chrono.rs +++ b/sqlx-core/src/postgres/types/chrono.rs @@ -1,131 +1,141 @@ -use crate::{Decode, Postgres, Encode, HasSqlType, HasTypeMetadata}; -use chrono::{NaiveTime, Timelike, NaiveDate, TimeZone, DateTime, NaiveDateTime, Utc, Local, Duration, Date}; -use crate::postgres::types::{PostgresTypeMetadata, PostgresTypeFormat}; -use crate::encode::IsNull; - +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::postgres::types::PgTypeMetadata; +use crate::postgres::Postgres; +use crate::types::HasSqlType; +use chrono::{DateTime, Duration, Local, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc}; use std::convert::TryInto; +use std::mem; -use std::mem::size_of; +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(1083, 1183) + } +} -postgres_metadata!( - // time - NaiveTime: PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 1083, - array_oid: 1183 - }, - // date - NaiveDate: PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 1082, - array_oid: 1182 - }, - // timestamp - NaiveDateTime: PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 1114, - array_oid: 1115 - }, - // timestamptz - { Tz: TimeZone } DateTime: PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 1184, - array_oid: 
1185 - }, - // Date is not covered as Postgres does not have a "date with timezone" type -); +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(1082, 1182) + } +} -fn decode>(raw: Option<&[u8]>) -> T { - Decode::::decode(raw) +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(1114, 1115) + } +} + +impl HasSqlType> for Postgres +where + Tz: TimeZone, +{ + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(1184, 1185) + } } impl Decode for NaiveTime { - fn decode(raw: Option<&[u8]>) -> Self { - let micros: i64 = decode(raw); - NaiveTime::from_hms(0, 0, 0) + Duration::microseconds(micros) + fn decode(raw: &[u8]) -> Result { + let micros: i64 = Decode::::decode(raw)?; + + Ok(NaiveTime::from_hms(0, 0, 0) + Duration::microseconds(micros)) } } impl Encode for NaiveTime { - fn encode(&self, buf: &mut Vec) -> IsNull { + fn encode(&self, buf: &mut Vec) { let micros = (*self - NaiveTime::from_hms(0, 0, 0)) .num_microseconds() .expect("shouldn't overflow"); - Encode::::encode(µs, buf) + Encode::::encode(µs, buf); } fn size_hint(&self) -> usize { - size_of::() + mem::size_of::() } } impl Decode for NaiveDate { - fn decode(raw: Option<&[u8]>) -> Self { - let days: i32 = decode(raw); - NaiveDate::from_ymd(2000, 1, 1) + Duration::days(days as i64) + fn decode(raw: &[u8]) -> Result { + let days: i32 = Decode::::decode(raw)?; + + Ok(NaiveDate::from_ymd(2000, 1, 1) + Duration::days(days as i64)) } } impl Encode for NaiveDate { - fn encode(&self, buf: &mut Vec) -> IsNull { - let days: i32 = self.signed_duration_since(NaiveDate::from_ymd(2000, 1, 1)) + fn encode(&self, buf: &mut Vec) { + let days: i32 = self + .signed_duration_since(NaiveDate::from_ymd(2000, 1, 1)) .num_days() .try_into() + // TODO: How does Diesel handle this? 
.unwrap_or_else(|_| panic!("NaiveDate out of range for Postgres: {:?}", self)); Encode::::encode(&days, buf) } fn size_hint(&self) -> usize { - size_of::() + mem::size_of::() } } impl Decode for NaiveDateTime { - fn decode(raw: Option<&[u8]>) -> Self { - let micros: i64 = decode(raw); - postgres_epoch().naive_utc() + fn decode(raw: &[u8]) -> Result { + let micros: i64 = Decode::::decode(raw)?; + + postgres_epoch() + .naive_utc() .checked_add_signed(Duration::microseconds(micros)) - .unwrap_or_else(|| panic!("Postgres timestamp out of range for NaiveDateTime: {:?}", micros)) + .ok_or_else(|| { + DecodeError::Message(Box::new(format!( + "Postgres timestamp out of range for NaiveDateTime: {:?}", + micros + ))) + }) } } impl Encode for NaiveDateTime { - fn encode(&self, buf: &mut Vec) -> IsNull { - let micros = self.signed_duration_since(postgres_epoch().naive_utc()) + fn encode(&self, buf: &mut Vec) { + let micros = self + .signed_duration_since(postgres_epoch().naive_utc()) .num_microseconds() .unwrap_or_else(|| panic!("NaiveDateTime out of range for Postgres: {:?}", self)); - Encode::::encode(µs, buf) + Encode::::encode(µs, buf); } fn size_hint(&self) -> usize { - size_of::() + mem::size_of::() } } impl Decode for DateTime { - fn decode(raw: Option<&[u8]>) -> Self { - let date_time = >::decode(raw); - DateTime::from_utc(date_time, Utc) + fn decode(raw: &[u8]) -> Result { + let date_time = Decode::::decode(raw)?; + Ok(DateTime::from_utc(date_time, Utc)) } } impl Decode for DateTime { - fn decode(raw: Option<&[u8]>) -> Self { - let date_time = >::decode(raw); - Local.from_utc_datetime(&date_time) + fn decode(raw: &[u8]) -> Result { + let date_time = Decode::::decode(raw)?; + Ok(Local.from_utc_datetime(&date_time)) } } -impl Encode for DateTime where Tz::Offset: Copy { - fn encode(&self, buf: &mut Vec) -> IsNull { - Encode::::encode(&self.naive_utc(), buf) +impl Encode for DateTime +where + Tz::Offset: Copy, +{ + fn encode(&self, buf: &mut Vec) { + Encode::::encode(&self.naive_utc(), buf); } fn size_hint(&self) -> usize { - size_of::() + mem::size_of::() } } @@ -150,7 +160,9 @@ fn test_encode_datetime() { // some random date let date3: NaiveDateTime = "2019-12-11T11:01:05".parse().unwrap(); - let expected = dbg!((date3 - postgres_epoch().naive_utc()).num_microseconds().unwrap()); + let expected = dbg!((date3 - postgres_epoch().naive_utc()) + .num_microseconds() + .unwrap()); Encode::::encode(&date3, &mut buf); assert_eq!(buf, expected.to_be_bytes()); buf.clear(); @@ -159,15 +171,15 @@ fn test_encode_datetime() { #[test] fn test_decode_datetime() { let buf = [0u8; 8]; - let date: NaiveDateTime = Decode::::decode(Some(&buf)); + let date: NaiveDateTime = Decode::::decode(&buf).unwrap(); assert_eq!(date.to_string(), "2000-01-01 00:00:00"); let buf = 3_600_000_000i64.to_be_bytes(); - let date: NaiveDateTime = Decode::::decode(Some(&buf)); + let date: NaiveDateTime = Decode::::decode(&buf).unwrap(); assert_eq!(date.to_string(), "2000-01-01 01:00:00"); let buf = 629_377_265_000_000i64.to_be_bytes(); - let date: NaiveDateTime = Decode::::decode(Some(&buf)); + let date: NaiveDateTime = Decode::::decode(&buf).unwrap(); assert_eq!(date.to_string(), "2019-12-11 11:01:05"); } @@ -195,14 +207,14 @@ fn test_encode_date() { #[test] fn test_decode_date() { let buf = [0; 4]; - let date: NaiveDate = Decode::::decode(Some(&buf)); + let date: NaiveDate = Decode::::decode(&buf).unwrap(); assert_eq!(date.to_string(), "2000-01-01"); let buf = 366i32.to_be_bytes(); - let date: NaiveDate = Decode::::decode(Some(&buf)); 
+ let date: NaiveDate = Decode::::decode(&buf).unwrap(); assert_eq!(date.to_string(), "2001-01-01"); let buf = 7284i32.to_be_bytes(); - let date: NaiveDate = Decode::::decode(Some(&buf)); + let date: NaiveDate = Decode::::decode(&buf).unwrap(); assert_eq!(date.to_string(), "2019-12-11"); } diff --git a/sqlx-core/src/postgres/types/float.rs b/sqlx-core/src/postgres/types/float.rs new file mode 100644 index 00000000..4360be85 --- /dev/null +++ b/sqlx-core/src/postgres/types/float.rs @@ -0,0 +1,45 @@ +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::postgres::types::PgTypeMetadata; +use crate::postgres::Postgres; +use crate::types::HasSqlType; + +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(700, 1021) + } +} + +impl Encode for f32 { + fn encode(&self, buf: &mut Vec) { + >::encode(&(self.to_bits() as i32), buf) + } +} + +impl Decode for f32 { + fn decode(buf: &[u8]) -> Result { + Ok(f32::from_bits( + >::decode(buf)? as u32 + )) + } +} + +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(701, 1022) + } +} + +impl Encode for f64 { + fn encode(&self, buf: &mut Vec) { + >::encode(&(self.to_bits() as i64), buf) + } +} + +impl Decode for f64 { + fn decode(buf: &[u8]) -> Result { + Ok(f64::from_bits( + >::decode(buf)? as u64 + )) + } +} diff --git a/sqlx-core/src/postgres/types/int.rs b/sqlx-core/src/postgres/types/int.rs new file mode 100644 index 00000000..3ebd29b8 --- /dev/null +++ b/sqlx-core/src/postgres/types/int.rs @@ -0,0 +1,60 @@ +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::postgres::types::PgTypeMetadata; +use crate::postgres::Postgres; +use crate::types::HasSqlType; +use byteorder::{ByteOrder, NetworkEndian}; + +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(21, 1005) + } +} + +impl Encode for i16 { + fn encode(&self, buf: &mut Vec) { + buf.extend_from_slice(&self.to_be_bytes()); + } +} + +impl Decode for i16 { + fn decode(buf: &[u8]) -> Result { + Ok(NetworkEndian::read_i16(buf)) + } +} + +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(23, 1007) + } +} + +impl Encode for i32 { + fn encode(&self, buf: &mut Vec) { + buf.extend_from_slice(&self.to_be_bytes()); + } +} + +impl Decode for i32 { + fn decode(buf: &[u8]) -> Result { + Ok(NetworkEndian::read_i32(buf)) + } +} + +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(20, 1016) + } +} + +impl Encode for i64 { + fn encode(&self, buf: &mut Vec) { + buf.extend_from_slice(&self.to_be_bytes()); + } +} + +impl Decode for i64 { + fn decode(buf: &[u8]) -> Result { + Ok(NetworkEndian::read_i64(buf)) + } +} diff --git a/sqlx-core/src/postgres/types/mod.rs b/sqlx-core/src/postgres/types/mod.rs index 1e57396d..9b005d25 100644 --- a/sqlx-core/src/postgres/types/mod.rs +++ b/sqlx-core/src/postgres/types/mod.rs @@ -1,81 +1,64 @@ -//! PostgreSQL types. -//! -//! The following types are supported by this crate, -//! along with the corresponding Postgres types: -//! -//! ### Standard -//! -//! | Rust type | Postgres type(s) | -//! |-----------------------------------|--------------------------------------| -//! | `i16` | SMALLINT, SMALLSERIAL | -//! | `i32` | INT, SERIAL | -//! | `i64` | BIGINT, BIGSERIAL | -//! | `f32` | REAL | -//! | `f64` | DOUBLE PRECISION | -//! | `&str`/`String` | VARCHAR, CHAR(n), TEXT, CITEXT, NAME | -//! | `&[u8]`/`Vec` | BYTEA | -//! -//! 
### PostgreSQL specific -//! -//! | Rust type | Postgres type(s) | -//! |-----------------------------------|--------------------------------------| -//! | `bool` | BOOL | -//! | `i8` | "char" | -//! | `u32` | OID | -//! | `&[u8]`/`Vec` | BYTEA | -//! | `HashMap>` | HSTORE | -//! | `IpAddr` | INET | -//! | `Uuid` (`uuid` feature) | UUID | - -use super::Postgres; -use crate::types::{HasTypeMetadata, TypeMetadata}; - -macro_rules! postgres_metadata { - ($($({ $($typarams:tt)* })? $type:path: $meta:expr),*$(,)?) => { - $( - impl$(<$($typarams)*>)? crate::types::HasSqlType<$type> for Postgres { - fn metadata() -> PostgresTypeMetadata { - $meta - } - } - )* - }; -} - -mod binary; -mod boolean; -mod character; -mod numeric; - -#[cfg(feature = "uuid")] -mod uuid; +mod bool; +mod float; +mod int; +mod str; #[cfg(feature = "chrono")] mod chrono; -pub enum PostgresTypeFormat { +#[cfg(feature = "uuid")] +mod uuid; + +#[derive(Debug, Copy, Clone)] +#[repr(i16)] +pub enum TypeFormat { Text = 0, Binary = 1, } +impl From for TypeFormat { + fn from(code: i16) -> TypeFormat { + match code { + 0 => TypeFormat::Text, + 1 => TypeFormat::Binary, + + _ => unreachable!(), + } + } +} + +impl crate::types::HasTypeMetadata for super::Postgres { + type TypeMetadata = PgTypeMetadata; + + type TableId = u32; + + type TypeId = u32; +} + /// Provides the OIDs for a SQL type and the expected format to be used for -/// transmission between Rust and PostgreSQL. +/// transmission between Rust and Postgres. /// /// While the BINARY format is preferred in most cases, there are scenarios /// where only the TEXT format may be available for a type. -pub struct PostgresTypeMetadata { - pub format: PostgresTypeFormat, - pub oid: u32, - pub array_oid: u32, +pub struct PgTypeMetadata { + #[allow(unused)] + pub(crate) format: TypeFormat, + pub(crate) oid: u32, + pub(crate) array_oid: u32, } -impl HasTypeMetadata for Postgres { - type TypeId = u32; - type TypeMetadata = PostgresTypeMetadata; -} - -impl TypeMetadata for PostgresTypeMetadata { - fn type_id_eq(&self, other: &u32) -> bool { - &self.oid == other || &self.array_oid == other +impl PgTypeMetadata { + const fn binary(oid: u32, array_oid: u32) -> Self { + Self { + format: TypeFormat::Binary, + oid, + array_oid, + } + } +} + +impl PartialEq for PgTypeMetadata { + fn eq(&self, other: &u32) -> bool { + self.oid == *other || self.array_oid == *other } } diff --git a/sqlx-core/src/postgres/types/numeric.rs b/sqlx-core/src/postgres/types/numeric.rs deleted file mode 100644 index f727c34d..00000000 --- a/sqlx-core/src/postgres/types/numeric.rs +++ /dev/null @@ -1,138 +0,0 @@ -use super::{Postgres, PostgresTypeFormat, PostgresTypeMetadata}; -use crate::{ - decode::Decode, - encode::{Encode, IsNull}, - types::HasSqlType, -}; -use byteorder::{BigEndian, ByteOrder}; - -impl HasSqlType for Postgres { - #[inline] - fn metadata() -> PostgresTypeMetadata { - PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 21, - array_oid: 1005, - } - } -} - -impl Encode for i16 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(&self.to_be_bytes()); - - IsNull::No - } -} - -impl Decode for i16 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - BigEndian::read_i16(buf.unwrap()) - } -} - -impl HasSqlType for Postgres { - #[inline] - fn metadata() -> PostgresTypeMetadata { - PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 23, - array_oid: 1007, - } - } -} - -impl Encode for i32 { - #[inline] - fn encode(&self, buf: &mut Vec) -> 
IsNull { - buf.extend_from_slice(&self.to_be_bytes()); - - IsNull::No - } -} - -impl Decode for i32 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - BigEndian::read_i32(buf.unwrap()) - } -} - -impl HasSqlType for Postgres { - #[inline] - fn metadata() -> PostgresTypeMetadata { - PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 20, - array_oid: 1016, - } - } -} - -impl Encode for i64 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - buf.extend_from_slice(&self.to_be_bytes()); - - IsNull::No - } -} - -impl Decode for i64 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - BigEndian::read_i64(buf.unwrap()) - } -} - -impl HasSqlType for Postgres { - #[inline] - fn metadata() -> PostgresTypeMetadata { - PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 700, - array_oid: 1021, - } - } -} - -impl Encode for f32 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - >::encode(&(self.to_bits() as i32), buf) - } -} - -impl Decode for f32 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - f32::from_bits(>::decode(buf) as u32) - } -} - -impl HasSqlType for Postgres { - #[inline] - fn metadata() -> PostgresTypeMetadata { - PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 701, - array_oid: 1022, - } - } -} - -impl Encode for f64 { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { - >::encode(&(self.to_bits() as i64), buf) - } -} - -impl Decode for f64 { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - f64::from_bits(>::decode(buf) as u64) - } -} diff --git a/sqlx-core/src/postgres/types/str.rs b/sqlx-core/src/postgres/types/str.rs new file mode 100644 index 00000000..9bc3d947 --- /dev/null +++ b/sqlx-core/src/postgres/types/str.rs @@ -0,0 +1,44 @@ +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::postgres::types::PgTypeMetadata; +use crate::types::HasSqlType; +use crate::Postgres; +use std::str; + +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(25, 1009) + } +} + +impl HasSqlType for Postgres { + fn metadata() -> PgTypeMetadata { + >::metadata() + } +} + +impl Encode for str { + fn encode(&self, buf: &mut Vec) { + buf.extend_from_slice(self.as_bytes()); + } + + fn size_hint(&self) -> usize { + self.len() + } +} + +impl Encode for String { + fn encode(&self, buf: &mut Vec) { + >::encode(self.as_str(), buf) + } + + fn size_hint(&self) -> usize { + self.len() + } +} + +impl Decode for String { + fn decode(buf: &[u8]) -> Result { + Ok(str::from_utf8(buf)?.to_owned()) + } +} diff --git a/sqlx-core/src/postgres/types/uuid.rs b/sqlx-core/src/postgres/types/uuid.rs index fb6d784f..94f81d16 100644 --- a/sqlx-core/src/postgres/types/uuid.rs +++ b/sqlx-core/src/postgres/types/uuid.rs @@ -1,35 +1,24 @@ +use crate::decode::{Decode, DecodeError}; +use crate::encode::Encode; +use crate::postgres::types::PgTypeMetadata; +use crate::postgres::Postgres; +use crate::types::HasSqlType; use uuid::Uuid; -use super::{Postgres, PostgresTypeFormat, PostgresTypeMetadata}; -use crate::{ - decode::Decode, - encode::{Encode, IsNull}, - types::HasSqlType, -}; - impl HasSqlType for Postgres { - fn metadata() -> PostgresTypeMetadata { - PostgresTypeMetadata { - format: PostgresTypeFormat::Binary, - oid: 2950, - array_oid: 2951, - } + fn metadata() -> PgTypeMetadata { + PgTypeMetadata::binary(2950, 2951) } } impl Encode for Uuid { - #[inline] - fn encode(&self, buf: &mut Vec) -> IsNull { + fn encode(&self, buf: &mut Vec) { 
buf.extend_from_slice(self.as_bytes()); - - IsNull::No } } impl Decode for Uuid { - #[inline] - fn decode(buf: Option<&[u8]>) -> Self { - // TODO: Handle optionals, error - Uuid::from_slice(buf.unwrap()).unwrap() + fn decode(buf: &[u8]) -> Result { + Uuid::from_slice(buf).map_err(|err| DecodeError::Message(Box::new(err))) } } diff --git a/sqlx-core/src/query.rs b/sqlx-core/src/query.rs index 4cd4598b..ee42359c 100644 --- a/sqlx-core/src/query.rs +++ b/sqlx-core/src/query.rs @@ -1,64 +1,99 @@ -use crate::{backend::Backend, encode::Encode, error::Error, executor::Executor, params::{IntoQueryParameters, QueryParameters}, row::FromRow, types::HasSqlType, Row, Decode}; +use crate::arguments::Arguments; +use crate::arguments::IntoArguments; +use crate::database::Database; +use crate::encode::Encode; +use crate::executor::Executor; +use crate::row::FromRow; +use crate::types::HasSqlType; +use futures_core::future::BoxFuture; +use futures_core::stream::BoxStream; +use futures_util::TryFutureExt; +use futures_util::TryStreamExt; use std::marker::PhantomData; -use futures_core::{future::BoxFuture, stream::BoxStream}; -pub struct Query<'q, DB, P = ::QueryParameters, R = ::Row> +/// A SQL query with bind parameters and output type. +/// +/// Optionally type-safe if constructed through [query!]. +pub struct Query<'q, DB, T = ::Arguments, R = ::Row> where - DB: Backend, + DB: Database, { query: &'q str, - params: P, + arguments: T, record: PhantomData, - backend: PhantomData, + database: PhantomData, } impl<'q, DB, P: 'q, R: 'q> Query<'q, DB, P, R> where - DB: Backend, - DB::QueryParameters: 'q, - P: IntoQueryParameters + Send, - R: FromRow + Send + Unpin, + DB: Database, + DB::Arguments: 'q, + P: IntoArguments + Send, + R: FromRow + Send + Unpin, { - #[inline] - pub fn execute(self, executor: &'q mut E) -> BoxFuture<'q, crate::Result> + pub fn execute<'e, E>(self, executor: &'e mut E) -> BoxFuture<'e, crate::Result> where - E: Executor, + E: Executor, + 'q: 'e, { - executor.execute(self.query, self.params.into_params()) + executor.execute(self.query, self.arguments.into_arguments()) } - pub fn fetch(self, executor: &'q mut E) -> BoxStream<'q, crate::Result> + pub fn fetch<'e, E>(self, executor: &'e mut E) -> BoxStream<'e, crate::Result> where - E: Executor, + E: Executor, + DB::Row: 'e, + 'q: 'e, { - executor.fetch(self.query, self.params.into_params()) + Box::pin( + executor + .fetch(self.query, self.arguments.into_arguments()) + .map_ok(FromRow::from_row), + ) } - pub fn fetch_all(self, executor: &'q mut E) -> BoxFuture<'q, crate::Result>> + pub fn fetch_all<'e: 'q, E>(self, executor: &'e mut E) -> BoxFuture<'e, crate::Result>> where - E: Executor, + E: Executor, + DB::Row: 'e, + 'q: 'e, { - executor.fetch_all(self.query, self.params.into_params()) + Box::pin(self.fetch(executor).try_collect()) } - pub fn fetch_optional(self, executor: &'q mut E) -> BoxFuture<'q, crate::Result>> + pub fn fetch_optional<'e: 'q, E>( + self, + executor: &'e mut E, + ) -> BoxFuture<'e, crate::Result>> where - E: Executor, + E: Executor, + DB::Row: 'e, + 'q: 'e, { - executor.fetch_optional(self.query, self.params.into_params()) + Box::pin( + executor + .fetch_optional(self.query, self.arguments.into_arguments()) + .map_ok(|row| row.map(FromRow::from_row)), + ) } - pub fn fetch_one(self, executor: &'q mut E) -> BoxFuture<'q, crate::Result> + pub fn fetch_one<'e: 'q, E>(self, executor: &'e mut E) -> BoxFuture<'e, crate::Result> where - E: Executor, + E: Executor, + DB::Row: 'e, + 'q: 'e, { - executor.fetch_one(self.query, 
self.params.into_params()) + Box::pin( + executor + .fetch_one(self.query, self.arguments.into_arguments()) + .map_ok(FromRow::from_row), + ) } } impl<'q, DB> Query<'q, DB> where - DB: Backend, + DB: Database, { /// Bind a value for use with this SQL query. /// @@ -72,62 +107,30 @@ where DB: HasSqlType, T: Encode, { - self.params.bind(value); + self.arguments.add(value); self } - - /// Bind all query parameters at once. - /// - /// If any parameters were previously bound with `.bind()` they are discarded. - /// - /// # Logic Safety - /// - /// This function should be used with care, as SQLx cannot validate - /// that the value is of the right type nor can it validate that you have - /// passed the correct number of parameters. - pub fn bind_all(self, values: I) -> Query<'q, DB, I> where I: IntoQueryParameters { - Query { - query: self.query, - params: values, - record: PhantomData, - backend: PhantomData - } - } -} -//noinspection RsSelfConvention -impl<'q, DB, I, R> Query<'q, DB, I, R> where DB: Backend { - - /// Change the expected output type of the query to a single scalar value. - pub fn as_scalar(self) -> Query<'q, DB, I, R_> where R_: Decode { - Query { - query: self.query, - params: self.params, - record: PhantomData, - backend: PhantomData, - } - } - - /// Change the expected output of the query to a new type implementing `FromRow`. - pub fn as_record(self) -> Query<'q, DB, I, R_> where R_: FromRow { - Query { - query: self.query, - params: self.params, - record: PhantomData, - backend: PhantomData, - } - } } -/// Construct a full SQL query using raw SQL. -#[inline] -pub fn query(query: &str) -> Query<'_, DB> +/// Construct a full SQL query that can be chained to bind parameters and executed. +/// +/// # Examples +/// +/// ```ignore +/// let names: Vec = sqlx::query("SELECT name FROM users WHERE active = ?") +/// .bind(false) // [active = ?] +/// .fetch(&mut connection) // -> Stream +/// .map_ok(|row| row.name("name")) // -> Stream +/// .try_collect().await?; // -> Vec +/// ``` +pub fn query<'q, DB>(sql: &'q str) -> Query<'q, DB> where - DB: Backend, + DB: Database, { Query { - query, - params: Default::default(), + database: PhantomData, record: PhantomData, - backend: PhantomData, + arguments: Default::default(), + query: sql.as_ref(), } } diff --git a/sqlx-core/src/row.rs b/sqlx-core/src/row.rs index f3fc5765..adaefeb1 100644 --- a/sqlx-core/src/row.rs +++ b/sqlx-core/src/row.rs @@ -1,131 +1,55 @@ -use crate::{backend::Backend, decode::Decode, types::HasSqlType}; +//! Contains the Row and FromRow traits. -pub trait Row: Send { - type Backend: Backend; +use crate::database::Database; +use crate::decode::Decode; +use crate::types::HasSqlType; +pub trait RowIndex +where + R: Row, +{ + fn try_get(&self, row: &R) -> crate::Result + where + R::Database: HasSqlType, + T: Decode; +} + +/// Represents a single row of the result set. +pub trait Row: Unpin + Send + 'static { + type Database: Database + ?Sized; + + /// Returns `true` if the row contains no values. + fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of values in the row. fn len(&self) -> usize; - fn get_raw(&self, index: usize) -> Option<&[u8]>; - - fn get(&self, index: usize) -> T + /// Returns the value at the `index`; can either be an integer ordinal or a column name. 
+ fn get(&self, index: I) -> T where - Self::Backend: HasSqlType, - T: Decode, - { - T::decode(self.get_raw(index)) - } + Self::Database: HasSqlType, + I: RowIndex, + T: Decode; } -pub trait FromRow { - fn from_row(row: ::Row) -> Self; +/// A **record** that can be built from a row returned from by the database. +pub trait FromRow +where + R: Row, +{ + fn from_row(row: R) -> Self; } -#[allow(unused)] -macro_rules! impl_from_row { - ($B:ident: $( ($idx:tt) -> $T:ident );+;) => { - // Row -> (T1, T2, ...) - impl<$($T,)+> crate::row::FromRow<$B> for ($($T,)+) - where - $($B: crate::types::HasSqlType<$T>,)+ - $($T: crate::decode::Decode<$B>,)+ - { +#[allow(unused_macros)] +macro_rules! impl_from_row_for_row { + ($R:ty) => { + impl crate::row::FromRow<$R> for $R { #[inline] - fn from_row(row: <$B as crate::backend::Backend>::Row) -> Self { - use crate::row::Row; - - ($(row.get($idx),)+) + fn from_row(row: $R) -> Self { + row } } }; } - -/// Scalar conversions for rows -impl FromRow for T where DB: Backend + HasSqlType, T: Decode { - fn from_row(row: ::Row) -> Self { - row.get(0) - } -} - -#[allow(unused)] -macro_rules! impl_from_row_for_backend { - ($B:ident, $row:ident) => { - impl crate::row::FromRow<$B> for $row where $B: crate::Backend { - #[inline] - fn from_row(row: <$B as crate::backend::Backend>::Row) -> Self { - row - } - } - - impl_from_row!($B: - (0) -> T1; - ); - - impl_from_row!($B: - (0) -> T1; - (1) -> T2; - ); - - impl_from_row!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - ); - - impl_from_row!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - ); - - impl_from_row!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - ); - - impl_from_row!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - (5) -> T6; - ); - - impl_from_row!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - (5) -> T6; - (6) -> T7; - ); - - impl_from_row!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - (5) -> T6; - (6) -> T7; - (7) -> T8; - ); - - impl_from_row!($B: - (0) -> T1; - (1) -> T2; - (2) -> T3; - (3) -> T4; - (4) -> T5; - (5) -> T6; - (6) -> T7; - (7) -> T8; - (8) -> T9; - ); - } -} diff --git a/sqlx-core/src/types.rs b/sqlx-core/src/types.rs index 8fcceb8a..d5d28f81 100644 --- a/sqlx-core/src/types.rs +++ b/sqlx-core/src/types.rs @@ -1,53 +1,47 @@ +//! Traits linking Rust types to SQL types. + +use std::fmt::Display; + #[cfg(feature = "uuid")] pub use uuid::Uuid; #[cfg(feature = "chrono")] pub mod chrono { - pub use chrono::{NaiveDate, NaiveTime, NaiveDateTime, DateTime, Utc}; + pub use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc}; } -use std::fmt::Display; - -/// Information about how a backend stores metadata about -/// given SQL types. +/// Information about how a database stores metadata about given SQL types. pub trait HasTypeMetadata { /// The actual type used to represent metadata. - type TypeMetadata: TypeMetadata; + type TypeMetadata: PartialEq; - /// The Rust type of type identifiers in `DESCRIBE` responses for the SQL backend. - type TypeId: Eq + Display; + /// The Rust type of table identifiers. + type TableId: Display; + + /// The Rust type of type identifiers. + type TypeId: Display; } -pub trait TypeMetadata { - /// Return `true` if the given type ID is contained in this metadata. - /// - /// What this means depends on the backend: - /// - /// * For Postgres, this should return true if the type ID or array type ID matches. 
- /// * For MySQL (and likely all other backends) this should just compare the type IDs. - fn type_id_eq(&self, other: &TypeId) -> bool; -} - -/// Indicates that a SQL type exists for a backend and defines -/// useful metadata for the backend. -pub trait HasSqlType: HasTypeMetadata { +/// Indicates that a SQL type is supported for a database. +pub trait HasSqlType: HasTypeMetadata { + /// Fetch the metadata for the given type. fn metadata() -> Self::TypeMetadata; } -impl HasSqlType<&'_ A> for DB +impl HasSqlType<&'_ T> for DB where - DB: HasSqlType, + DB: HasSqlType, { fn metadata() -> Self::TypeMetadata { - >::metadata() + >::metadata() } } -impl HasSqlType> for DB +impl HasSqlType> for DB where - DB: HasSqlType, + DB: HasSqlType, { fn metadata() -> Self::TypeMetadata { - >::metadata() + >::metadata() } } diff --git a/sqlx-core/src/url.rs b/sqlx-core/src/url.rs index 348ffaff..d3d310c9 100644 --- a/sqlx-core/src/url.rs +++ b/sqlx-core/src/url.rs @@ -1,38 +1,67 @@ -use std::net::{IpAddr, SocketAddr}; +use std::convert::{TryFrom, TryInto}; pub struct Url(url::Url); -impl Url { - pub fn parse(url: &str) -> crate::Result { - // TODO: Handle parse errors - Ok(Url(url::Url::parse(url).unwrap())) - } +impl TryFrom for Url { + type Error = crate::Error; + fn try_from(value: String) -> Result { + (&value).try_into() + } +} + +impl<'s> TryFrom<&'s str> for Url { + type Error = crate::Error; + + fn try_from(value: &'s str) -> Result { + Ok(Url(value.parse()?)) + } +} + +impl<'s> TryFrom<&'s String> for Url { + type Error = crate::Error; + + fn try_from(value: &'s String) -> Result { + (value.as_str()).try_into() + } +} + +impl Url { pub fn host(&self) -> &str { - self.0.host_str().unwrap_or("localhost") + let host = self.0.host_str(); + + match host { + Some(host) if !host.is_empty() => host, + + _ => "localhost", + } } pub fn port(&self, default: u16) -> u16 { self.0.port().unwrap_or(default) } - pub fn resolve(&self, default_port: u16) -> SocketAddr { - // TODO: DNS - let host: IpAddr = self.host().parse().unwrap(); - let port = self.port(default_port); + pub fn username(&self) -> Option<&str> { + let username = self.0.username(); - (host, port).into() - } - - pub fn username(&self) -> &str { - self.0.username() + if username.is_empty() { + None + } else { + Some(username) + } } pub fn password(&self) -> Option<&str> { self.0.password() } - pub fn database(&self) -> &str { - self.0.path().trim_start_matches('/') + pub fn database(&self) -> Option<&str> { + let database = self.0.path().trim_start_matches('/'); + + if database.is_empty() { + None + } else { + Some(database) + } } } diff --git a/sqlx-macros/Cargo.toml b/sqlx-macros/Cargo.toml index 29a54190..3a21d229 100644 --- a/sqlx-macros/Cargo.toml +++ b/sqlx-macros/Cargo.toml @@ -1,25 +1,33 @@ [package] name = "sqlx-macros" version = "0.1.0-pre" -authors = ["Austin Bonander "] edition = "2018" +authors = [ + "Ryan Leckey ", + "Austin Bonander " +] [lib] proc-macro = true -[dependencies] -async-std = "1.2.0" -dotenv = "0.15.0" -futures = "0.3.1" -proc-macro-hack = "0.5.11" -proc-macro2 = "1.0.6" -sqlx = { version = "0.1.0-pre", path = "../sqlx-core", package = "sqlx-core" } -syn = { version = "1.0.11", default-features = false } -quote = "1.0.2" -url = "2.1.0" - [features] -chrono = ["sqlx/chrono"] -mysql = ["sqlx/mysql"] -postgres = ["sqlx/postgres"] -uuid = ["sqlx/uuid"] +default = [] + +# database +mysql = [ "sqlx/mysql" ] +postgres = [ "sqlx/postgres" ] + +# type +chrono = [ "sqlx/chrono" ] +uuid = [ "sqlx/uuid" ] + +[dependencies] +async-std 
= { version = "1.2.0", default-features = false } +dotenv = { version = "0.15.0", default-features = false } +futures = { version = "0.3.1", default-features = false } +proc-macro-hack = { version = "0.5.11", default-features = false } +proc-macro2 = { version = "1.0.6", default-features = false } +sqlx = { version = "0.1.0-pre", default-features = false, path = "../sqlx-core", package = "sqlx-core" } +syn = { version = "1.0.11", default-features = false, features = [ "full" ] } +quote = { version = "1.0.2", default-features = false } +url = { version = "2.1.0", default-features = false } diff --git a/sqlx-macros/src/backend/mod.rs b/sqlx-macros/src/database/mod.rs similarity index 55% rename from sqlx-macros/src/backend/mod.rs rename to sqlx-macros/src/database/mod.rs index 2519600c..91941fa2 100644 --- a/sqlx-macros/src/backend/mod.rs +++ b/sqlx-macros/src/database/mod.rs @@ -1,18 +1,18 @@ -use sqlx::Backend; +use sqlx::Database; #[derive(PartialEq, Eq)] pub enum ParamChecking { Strong, - Weak + Weak, } -pub trait BackendExt: Backend { - const BACKEND_PATH: &'static str; +pub trait DatabaseExt: Database { + const DATABASE_PATH: &'static str; const PARAM_CHECKING: ParamChecking; fn quotable_path() -> syn::Path { - syn::parse_str(Self::BACKEND_PATH).unwrap() + syn::parse_str(Self::DATABASE_PATH).unwrap() } fn param_type_for_id(id: &Self::TypeId) -> Option<&'static str>; @@ -20,32 +20,28 @@ pub trait BackendExt: Backend { fn return_type_for_id(id: &Self::TypeId) -> Option<&'static str>; } -macro_rules! impl_backend_ext { - ($backend:path { $($(#[$meta:meta])? $ty:ty $(| $input:ty)?),*$(,)? }, ParamChecking::$param_checking:ident) => { - impl $crate::backend::BackendExt for $backend { - const BACKEND_PATH: &'static str = stringify!($backend); - const PARAM_CHECKING: $crate::backend::ParamChecking = $crate::backend::ParamChecking::$param_checking; +macro_rules! impl_database_ext { + ($database:path { $($(#[$meta:meta])? $ty:ty $(| $input:ty)?),*$(,)? }, ParamChecking::$param_checking:ident) => { + impl $crate::database::DatabaseExt for $database { + const DATABASE_PATH: &'static str = stringify!($database); + const PARAM_CHECKING: $crate::database::ParamChecking = $crate::database::ParamChecking::$param_checking; fn param_type_for_id(id: &Self::TypeId) -> Option<&'static str> { - use sqlx::types::TypeMetadata; - match () { $( // `if` statements cannot have attributes but these can $(#[$meta])? - _ if <$backend as sqlx::types::HasSqlType<$ty>>::metadata().type_id_eq(id) => Some(input_ty!($ty $(, $input)?)), + _ if <$database as sqlx::types::HasSqlType<$ty>>::metadata().eq(id) => Some(input_ty!($ty $(, $input)?)), )* _ => None } } fn return_type_for_id(id: &Self::TypeId) -> Option<&'static str> { - use sqlx::types::TypeMetadata; - match () { $( $(#[$meta])? - _ if <$backend as sqlx::types::HasSqlType<$ty>>::metadata().type_id_eq(id) => return Some(stringify!($ty)), + _ if <$database as sqlx::types::HasSqlType<$ty>>::metadata().eq(id) => return Some(stringify!($ty)), )* _ => None } diff --git a/sqlx-macros/src/backend/mysql.rs b/sqlx-macros/src/database/mysql.rs similarity index 87% rename from sqlx-macros/src/backend/mysql.rs rename to sqlx-macros/src/database/mysql.rs index 6e00f4dc..0f653d9f 100644 --- a/sqlx-macros/src/backend/mysql.rs +++ b/sqlx-macros/src/database/mysql.rs @@ -1,4 +1,4 @@ -impl_backend_ext! { +impl_database_ext! 
{ sqlx::MySql { bool, String | &str, diff --git a/sqlx-macros/src/backend/postgres.rs b/sqlx-macros/src/database/postgres.rs similarity index 96% rename from sqlx-macros/src/backend/postgres.rs rename to sqlx-macros/src/database/postgres.rs index ad178211..106f14f4 100644 --- a/sqlx-macros/src/backend/postgres.rs +++ b/sqlx-macros/src/database/postgres.rs @@ -1,4 +1,4 @@ -impl_backend_ext! { +impl_database_ext! { sqlx::Postgres { bool, String | &str, diff --git a/sqlx-macros/src/lib.rs b/sqlx-macros/src/lib.rs index 07899c09..a8657855 100644 --- a/sqlx-macros/src/lib.rs +++ b/sqlx-macros/src/lib.rs @@ -1,16 +1,16 @@ -#![cfg_attr(not(any(feature = "postgres", feature = "mysql")), allow(dead_code, unused_macros, unused_imports))] +#![cfg_attr( + not(any(feature = "postgres", feature = "mysql")), + allow(dead_code, unused_macros, unused_imports) +)] extern crate proc_macro; use proc_macro::TokenStream; use proc_macro_hack::proc_macro_hack; -use quote::{quote}; +use quote::quote; -use syn::{ - parse, - parse_macro_input, -}; +use syn::{parse, parse_macro_input}; use async_std::task; @@ -19,19 +19,21 @@ use url::Url; type Error = Box; type Result = std::result::Result; -mod backend; +mod database; mod query; macro_rules! with_database( ($db:ident => $expr:expr) => { async { + use sqlx::Connection; + let db_url = Url::parse(&dotenv::var("DATABASE_URL").map_err(|_| "DATABASE_URL not set")?)?; match db_url.scheme() { #[cfg(feature = "postgres")] "postgresql" | "postgres" => { - let $db = sqlx::Postgres::connect(db_url.as_str()) + let $db = sqlx::postgres::PgConnection::open(db_url.as_str()) .await .map_err(|e| format!("failed to connect to database: {}", e))?; @@ -45,7 +47,7 @@ macro_rules! with_database( ).into()), #[cfg(feature = "mysql")] "mysql" | "mariadb" => { - let $db = sqlx::MySql::connect(db_url.as_str()) + let $db = sqlx::mysql::MySqlConnection::open(db_url.as_str()) .await .map_err(|e| format!("failed to connect to database: {}", e))?; @@ -65,6 +67,7 @@ macro_rules! with_database( #[proc_macro_hack] pub fn query(input: TokenStream) -> TokenStream { + #[allow(unused_variables)] let input = parse_macro_input!(input as query::MacroInput); match task::block_on(with_database!(db => query::process_sql(input, db))) { diff --git a/sqlx-macros/src/query.rs b/sqlx-macros/src/query.rs index 6135a8a9..2a3650b5 100644 --- a/sqlx-macros/src/query.rs +++ b/sqlx-macros/src/query.rs @@ -3,16 +3,16 @@ use std::fmt::Display; use proc_macro2::Span; use proc_macro2::TokenStream; use syn::{ - Expr, - ExprLit, - Ident, - Lit, parse::{self, Parse, ParseStream}, punctuated::Punctuated, spanned::Spanned, Token, + parse::{self, Parse, ParseStream}, + punctuated::Punctuated, + spanned::Spanned, + Expr, ExprLit, Ident, Lit, Token, }; use quote::{format_ident, quote, quote_spanned, ToTokens}; -use sqlx::{Connection, HasTypeMetadata}; +use sqlx::{describe::Describe, types::HasTypeMetadata, Connection}; -use crate::backend::{BackendExt, ParamChecking}; +use crate::database::{DatabaseExt, ParamChecking}; pub struct MacroInput { sql: String, @@ -26,8 +26,8 @@ impl Parse for MacroInput { let sql = match args.next() { Some(Expr::Lit(ExprLit { - lit: Lit::Str(sql), .. - })) => sql, + lit: Lit::Str(sql), .. 
+ })) => sql, Some(other_expr) => { return Err(parse::Error::new_spanned( other_expr, @@ -50,9 +50,9 @@ pub async fn process_sql( input: MacroInput, mut conn: C, ) -> crate::Result - where - C::Backend: BackendExt, - ::TypeId: Display +where + C::Database: DatabaseExt + Sized, + ::TypeId: Display, { let describe = conn .describe(&input.sql) @@ -68,7 +68,7 @@ pub async fn process_sql( input.args.len() ), ) - .into()); + .into()); } let param_types = describe @@ -79,7 +79,7 @@ pub async fn process_sql( get_type_override(expr) .or_else(|| { Some( - ::param_type_for_id(type_)? + ::param_type_for_id(type_)? .parse::() .unwrap(), ) @@ -95,19 +95,24 @@ pub async fn process_sql( }); let query = &input.sql; - let backend_path = C::Backend::quotable_path(); + let database_path = C::Database::quotable_path(); // record_type will be wrapped in parens which the compiler ignores without a trailing comma // e.g. (Foo) == Foo but (Foo,) = one-element tuple // and giving an empty stream for record_type makes it unit `()` - let (record_type, record) = if describe.result_fields.is_empty() { + let (record_type, record) = if describe.result_columns.is_empty() { (TokenStream::new(), TokenStream::new()) } else { let record_type = Ident::new("Record", Span::call_site()); - (record_type.to_token_stream(), generate_record_def(&describe, &record_type)?) + ( + record_type.to_token_stream(), + generate_record_def(&describe, &record_type)?, + ) }; - let params = if ::PARAM_CHECKING == ParamChecking::Weak || input.args.is_empty() { + let params = if ::PARAM_CHECKING == ParamChecking::Weak + || input.args.is_empty() + { quote! { let params = (); } @@ -130,22 +135,31 @@ pub async fn process_sql( #params - sqlx::query::<#backend_path>(#query) + sqlx::query::<#database_path>(#query) .bind_all(params) .as_record::<#record_type>() }}) } -fn generate_record_def(describe: &sqlx::Describe, type_name: &Ident) -> crate::Result { - let fields = describe.result_fields.iter().enumerate() +fn generate_record_def( + describe: &Describe, + type_name: &Ident, +) -> crate::Result { + let fields = describe + .result_columns + .iter() + .enumerate() .map(|(i, column)| { - let name = column.name.as_deref() + let name = column + .name + .as_ref() + .map(|col| &**col) .ok_or_else(|| format!("column at position {} must have a name", i))?; let name = syn::parse_str::(name) .map_err(|_| format!("{:?} is not a valid Rust identifier", name))?; - let type_ = ::return_type_for_id(&column.type_id) + let type_ = ::return_type_for_id(&column.type_id) .ok_or_else(|| format!("unknown field type ID: {}", &column.type_id))? .parse::() .unwrap(); @@ -153,20 +167,27 @@ fn generate_record_def(describe: &sqlx::Describe, type_name: Ok((name, type_)) }) .collect::, String>>() - .map_err(|e| format!("all SQL result columns must be named with valid Rust identifiers: {}", e))?; + .map_err(|e| { + format!( + "all SQL result columns must be named with valid Rust identifiers: {}", + e + ) + })?; let row_param = format_ident!("row"); - let record_fields = fields.iter() + let record_fields = fields + .iter() .map(|(name, type_)| quote!(#name: #type_,)) .collect::(); - let instantiations = fields.iter() + let instantiations = fields + .iter() .enumerate() .map(|(i, (name, _))| quote!(#name: #row_param.get(#i),)) .collect::(); - let backend = DB::quotable_path(); + let database = DB::quotable_path(); Ok(quote! 
{ #[derive(Debug)] @@ -174,8 +195,8 @@ fn generate_record_def(describe: &sqlx::Describe, type_name: #record_fields } - impl sqlx::FromRow<#backend> for #type_name { - fn from_row(#row_param: <#backend as sqlx::Backend>::Row) -> Self { + impl sqlx::FromRow<#database> for #type_name { + fn from_row(#row_param: <#database as sqlx::Database>::Row) -> Self { use sqlx::Row as _; #type_name { diff --git a/src/lib.rs b/src/lib.rs index 8f74e81c..1c5c8983 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,21 @@ -#[doc(inline)] -pub use sqlx_core::*; +// Modules +pub use sqlx_core::{arguments, decode, describe, encode, error, pool, row, types}; + +// Types +pub use sqlx_core::{Connection, Database, Error, Executor, FromRow, Pool, Query, Result, Row}; + +// Functions +pub use sqlx_core::query; + +#[cfg(feature = "mysql")] +pub use sqlx_core::mysql::{self, MySql}; + +#[cfg(feature = "postgres")] +pub use sqlx_core::postgres::{self, Postgres}; + +#[cfg(feature = "macros")] +#[doc(hidden)] +pub use sqlx_core::{TyCons, TyConsExt}; #[cfg(feature = "macros")] #[proc_macro_hack::proc_macro_hack(fake_call_site)] diff --git a/test.sh b/test.sh new file mode 100755 index 00000000..76535275 --- /dev/null +++ b/test.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +set -e + +# Core +cargo test -p sqlx-core --all-features + +# Postgres +env DATABASE_URL="postgres://" cargo test -p sqlx --no-default-features --features 'postgres uuid chrono' + +# MySQL +env DATABASE_URL="mysql:///sqlx" cargo test -p sqlx --no-default-features --features 'mysql' diff --git a/tests/mysql-types.rs b/tests/mysql-types.rs index cb06f6f3..f9ae3c03 100644 --- a/tests/mysql-types.rs +++ b/tests/mysql-types.rs @@ -1,23 +1,25 @@ -use sqlx::{MySql, Row}; +use sqlx::{mysql::MySqlConnection, Connection as _, Row}; macro_rules! test { ($name:ident: $ty:ty: $($text:literal == $value:expr),+) => { #[async_std::test] async fn $name () -> sqlx::Result<()> { let mut conn = - MySql::connect( + MySqlConnection::open( &dotenv::var("DATABASE_URL").expect("DATABASE_URL must be set") ).await?; $( - let row = sqlx::query(&format!("SELECT {} = ?, ?", $text)) + let row = sqlx::query(&format!("SELECT {} = ?, ? 
as _1", $text)) .bind($value) .bind($value) .fetch_one(&mut conn) .await?; - assert_eq!(row.get::(0), 1); - let value = row.get::<$ty>(1); + assert_eq!(row.get::(0), 1); + + let value = row.get::<$ty, _>("_1"); + assert!($value == value); )+ diff --git a/tests/mysql.rs b/tests/mysql.rs new file mode 100644 index 00000000..a82865c5 --- /dev/null +++ b/tests/mysql.rs @@ -0,0 +1,50 @@ +use futures::TryStreamExt; +use sqlx::{mysql::MySqlConnection, Connection as _, Executor as _, Row as _}; + +#[async_std::test] +async fn it_connects() -> anyhow::Result<()> { + let mut conn = connect().await?; + + let row = sqlx::query("select 1 + 1").fetch_one(&mut conn).await?; + + assert_eq!(2, row.get(0)); + + conn.close().await?; + + Ok(()) +} + +#[async_std::test] +async fn it_executes() -> anyhow::Result<()> { + let mut conn = connect().await?; + + let _ = conn + .send( + r#" +CREATE TEMPORARY TABLE users (id INTEGER PRIMARY KEY) + "#, + ) + .await?; + + for index in 1..=10_i32 { + let cnt = sqlx::query("INSERT INTO users (id) VALUES (?)") + .bind(index) + .execute(&mut conn) + .await?; + + assert_eq!(cnt, 1); + } + + let sum: i32 = sqlx::query("SELECT id FROM users") + .fetch(&mut conn) + .try_fold(0_i32, |acc, x| async move { Ok(acc + x.get::("id")) }) + .await?; + + assert_eq!(sum, 55); + + Ok(()) +} + +async fn connect() -> anyhow::Result { + Ok(MySqlConnection::open(dotenv::var("DATABASE_URL")?).await?) +} diff --git a/tests/postgres-types.rs b/tests/postgres-types.rs index 6793ea3f..2f198a68 100644 --- a/tests/postgres-types.rs +++ b/tests/postgres-types.rs @@ -1,23 +1,23 @@ -use sqlx::{Postgres, Row}; +use sqlx::{postgres::PgConnection, Connection as _, Row}; + +async fn connect() -> anyhow::Result { + Ok(PgConnection::open(dotenv::var("DATABASE_URL")?).await?) +} macro_rules! 
test { ($name:ident: $ty:ty: $($text:literal == $value:expr),+) => { #[async_std::test] - async fn $name () -> Result<(), String> { - let mut conn = - Postgres::connect( - &dotenv::var("DATABASE_URL").expect("DATABASE_URL must be set") - ).await.map_err(|e| format!("failed to connect to Postgres: {}", e))?; + async fn $name () -> anyhow::Result<()> { + let mut conn = connect().await?; $( - let row = sqlx::query(&format!("SELECT {} = $1, $1", $text)) + let row = sqlx::query(&format!("SELECT {} = $1, $1 as _1", $text)) .bind($value) .fetch_one(&mut conn) - .await - .map_err(|e| format!("failed to run query: {}", e))?; + .await?; - assert!(row.get::(0)); - assert!($value == row.get::<$ty>(1)); + assert!(row.get::(0)); + assert!($value == row.get::<$ty, _>("_1")); )+ Ok(()) diff --git a/tests/postgres.rs b/tests/postgres.rs new file mode 100644 index 00000000..56cad8e9 --- /dev/null +++ b/tests/postgres.rs @@ -0,0 +1,74 @@ +use futures::TryStreamExt; +use sqlx::{postgres::PgConnection, Connection as _, Executor as _, Row as _}; + +#[async_std::test] +async fn it_connects() -> anyhow::Result<()> { + let mut conn = connect().await?; + + let row = sqlx::query("select 1 + 1").fetch_one(&mut conn).await?; + + assert_eq!(2, row.get(0)); + + conn.close().await?; + + Ok(()) +} + +#[async_std::test] +async fn it_connects_to_database_user() -> anyhow::Result<()> { + let mut conn = connect().await?; + + let row = sqlx::query("select current_database()") + .fetch_one(&mut conn) + .await?; + + let current_db: String = row.get(0); + + let row = sqlx::query("select current_user") + .fetch_one(&mut conn) + .await?; + + let current_user: String = row.get(0); + + assert_eq!(current_db, "postgres"); + assert_eq!(current_user, "postgres"); + + conn.close().await?; + + Ok(()) +} + +#[async_std::test] +async fn it_executes() -> anyhow::Result<()> { + let mut conn = connect().await?; + + let _ = conn + .send( + r#" +CREATE TEMPORARY TABLE users (id INTEGER PRIMARY KEY); + "#, + ) + .await?; + + for index in 1..=10_i32 { + let cnt = sqlx::query("INSERT INTO users (id) VALUES ($1)") + .bind(index) + .execute(&mut conn) + .await?; + + assert_eq!(cnt, 1); + } + + let sum: i32 = sqlx::query("SELECT id FROM users") + .fetch(&mut conn) + .try_fold(0_i32, |acc, x| async move { Ok(acc + x.get::("id")) }) + .await?; + + assert_eq!(sum, 55); + + Ok(()) +} + +async fn connect() -> anyhow::Result { + Ok(PgConnection::open(dotenv::var("DATABASE_URL")?).await?) +}
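Reviewer note: below is a minimal usage sketch of the reworked query API this patch introduces (sqlx::query + .bind + .fetch_one, and Row::get accepting either an ordinal or a column name), written in the same style as tests/postgres.rs above. It is not part of the patch; the SQL text and the "answer" column alias are made up for illustration, and it assumes DATABASE_URL points at a reachable Postgres instance, exactly as the test suite does.

use sqlx::{postgres::PgConnection, Connection as _, Row as _};

#[async_std::test]
async fn it_binds_and_reads_by_name_or_ordinal() -> anyhow::Result<()> {
    // Open a single connection, mirroring the connect() helpers in the tests.
    let mut conn = PgConnection::open(dotenv::var("DATABASE_URL")?).await?;

    // Bind a parameter positionally ($1) and fetch exactly one row.
    let row = sqlx::query("SELECT $1::int + 1 AS answer")
        .bind(41_i32)
        .fetch_one(&mut conn)
        .await?;

    // Row::get is generic over RowIndex: an integer ordinal or a column name both work.
    let by_name: i32 = row.get("answer");
    let by_ordinal: i32 = row.get(0);
    assert_eq!(by_name, by_ordinal);
    assert_eq!(by_name, 42);

    conn.close().await?;

    Ok(())
}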