Audit MySql and Postgres protocols

Ryan Leckey 2019-12-27 17:31:01 -08:00
parent 5c7661985e
commit d76b1357da
176 changed files with 4629 additions and 6407 deletions

3
.gitignore vendored
View File

@ -12,6 +12,3 @@ Cargo.lock
# Environment
.env
# rustfmt backup files
**/*.rs.bk

View File

@ -3,13 +3,16 @@ members = [
".",
"sqlx-core",
"sqlx-macros",
"examples/realworld"
"examples/realworld-postgres"
]
[package]
name = "sqlx"
version = "0.1.1-pre"
license = "MIT OR Apache-2.0"
readme = "README.md"
repository = "https://github.com/launchbadge/sqlx"
documentation = "https://docs.rs/sqlx"
description = "The Rust SQL Toolkit."
edition = "2018"
authors = [
@ -19,11 +22,14 @@ authors = [
[features]
default = [ "macros" ]
unstable = [ "sqlx-core/unstable" ]
macros = [ "sqlx-macros", "proc-macro-hack" ]
# database
postgres = [ "sqlx-core/postgres", "sqlx-macros/postgres" ]
mysql = [ "sqlx-core/mysql", "sqlx-macros/mysql" ]
macros = [ "sqlx-macros", "proc-macro-hack" ]
chrono = ["sqlx-core/chrono", "sqlx-macros/chrono"]
# types
chrono = [ "sqlx-core/chrono", "sqlx-macros/chrono" ]
uuid = [ "sqlx-core/uuid", "sqlx-macros/uuid" ]
[dependencies]
@ -32,15 +38,23 @@ sqlx-macros = { version = "0.1.0-pre", path = "sqlx-macros", optional = true }
proc-macro-hack = { version = "0.5.11", optional = true }
[dev-dependencies]
anyhow = "1.0.25"
futures = "0.3.1"
async-std = { version = "1.2.0", features = [ "attributes" ] }
dotenv = "0.15.0"
matches = "0.1.8"
criterion = "0.3.0"
[[test]]
name = "macros"
required-features = [ "postgres", "uuid", "macros" ]
[[test]]
name = "mysql"
required-features = [ "mysql" ]
[[test]]
name = "postgres"
required-features = [ "postgres" ]
[[test]]
name = "postgres-types"
required-features = [ "postgres" ]
@ -48,8 +62,3 @@ required-features = [ "postgres" ]
[[test]]
name = "mysql-types"
required-features = [ "mysql" ]
[[bench]]
name = "postgres-protocol"
required-features = [ "postgres", "unstable" ]
harness = false

201
LICENSE-APACHE Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 LaunchBadge, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
LICENSE-MIT Normal file
View File

@ -0,0 +1,25 @@
Copyright (c) 2019 LaunchBadge, LLC
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@ -1,16 +1,47 @@
# SQLx
<h1 align="center">SQLx</h1>
<div align="center">
<strong>
🧰 The Rust SQL Toolkit
</strong>
</div>
The Rust SQL Toolkit.
* **Asynchronous**. Handle thousands of database connections from a single thread.
* **Fast**. _TO BE WRITTEN_
* **Native**. SQLx is a pure Rust<sub>†</sub> toolkit for SQL. Where possible, drivers are written from scratch, in Rust, utilizing the modern ecosystem for asynchronous network services development.
* **Agnostic**. SQLx is agnostic over the database engine and can operate against a variety of database backends with the backend chosen **at compile-time** through generic constraints **or at runtime** with a slight performance loss (due to dynamic dispatch).
<br />
<sub><sup>† The SQLite driver (which does not yet exist) will use the libsqlite3 C library as SQLite is an embedded database (the only way we could be pure Rust for SQLite is by porting _all_ of SQLite to Rust).</sup></sub>
<div align="center">
<!-- Crates version -->
<a href="https://crates.io/crates/sqlx">
<img src="https://img.shields.io/crates/v/sqlx.svg?style=flat-square"
alt="Crates.io version" />
</a>
<!-- Downloads -->
<a href="https://crates.io/crates/sqlx">
<img src="https://img.shields.io/crates/d/sqlx.svg?style=flat-square"
alt="Download" />
</a>
<!-- docs.rs docs -->
<a href="https://docs.rs/sqlx">
<img src="https://img.shields.io/badge/docs-latest-blue.svg?style=flat-square"
alt="docs.rs docs" />
</a>
</div>
<div align="center">
<sub>Built with ❤️ by <a href="https://github.com/http-rs">The LaunchBadge team</a>
</div>
<br />
SQLx is a modern SQL client built from the ground up for Rust, in Rust.
* **Asynchronous**.
* **Native**. SQLx is a pure Rust toolkit for SQL. Where possible, drivers are written from scratch, in Rust, utilizing the modern ecosystem for asynchronous network services development.
* **Type-safe**. SQLx is built upon the novel idea of preparing SQL statements before or during compilation to provide strong type safety while not getting in your way with a custom DSL (see the sketch below).
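As a rough sketch of the compile-time-generic style described above (hypothetical usage: the `sessions` table is invented, and the paths and signatures follow the `Executor` trait as reworked in this commit, so they may differ in a published release):

```rust
use sqlx::Executor;

// The driver (MySQL, Postgres, ...) is fixed at compile time by whichever
// connection or pool the caller passes in; the function body stays the same.
async fn delete_stale<E: Executor>(exec: &mut E) -> sqlx::Result<u64> {
    // `execute` takes the SQL text plus that database's `Arguments` value;
    // an empty argument set is just `Default::default()`.
    exec.execute("DELETE FROM sessions WHERE expired", Default::default())
        .await
}
```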
## Safety
This crate uses `#![deny(unsafe_code)]` to ensure everything is implemented in 100% Safe Rust.
## License

View File

@ -1,37 +0,0 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use sqlx::postgres::protocol::{Bind, DataRow, Decode, Encode, RowDescription};
fn bench(c: &mut Criterion) {
c.bench_function("decode_data_row", |b| {
b.iter(|| {
let _ = DataRow::decode(&black_box(b"\0\x03\0\0\0\x011\0\0\0\x012\0\0\0\x013")[..]);
});
});
c.bench_function( "decode_row_description",|b| {
b.iter(|| {
let _ = RowDescription::decode(&black_box(b"\0\x02user_id\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0number_of_pages\0\0\0\0\0\0\0\0\0\x05\0\0\0\0\0\0\0\0\0")[..]);
});
});
c.bench_function("encode_bind", |b| {
let mut buf = Vec::new();
b.iter(|| {
black_box(Bind {
portal: "__sqlx_portal_5121",
statement: "__sqlx_statement_5121",
formats: &[1],
values_len: 2,
values: &[(-1_i8) as _, 0, 0, 0, 1, 0, 0, 0, 25],
result_formats: &[1],
})
.encode(&mut buf);
buf.clear();
});
});
}
criterion_group!(benches, bench);
criterion_main!(benches);

View File

@ -7,8 +7,8 @@ workspace = "../.."
[dependencies]
anyhow = "1.0.25"
dotenv = "0.15.0"
async-std = "1.2.0"
async-std = { version = "1.2.0", features = [ "attributes" ] }
tide = "0.4.0"
sqlx = { path = "../..", features = [ "postgres" ] }
serde = { version = "1.0.103", features = [ "derive"] }
serde = { version = "1.0.103", features = [ "derive" ] }
futures = "0.3.1"

View File

@ -3,5 +3,5 @@
# Get current directory (of this script)
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Run SQL files in schema/ directory
psql -d "$DATABASE_URL" -f $DIR/schema/*.sql
# Run schema file
psql -d "$DATABASE_URL" -f schema.sql

View File

@ -52,4 +52,4 @@ async fn register(mut req: Request<Pool<Postgres>>) -> Response {
Response::new(200)
.body_json(&RegisterResponseBody { id: user_id })
.unwrap()
}
}

View File

@ -1,2 +0,0 @@
unstable_features = true
merge_imports = true

View File

@ -2,7 +2,6 @@
name = "sqlx-core"
version = "0.1.0-pre"
license = "MIT OR Apache-2.0"
description = "The Rust SQL Toolkit."
edition = "2018"
authors = [
"Ryan Leckey <leckey.ryan@gmail.com>",
@ -16,21 +15,18 @@ postgres = []
mysql = []
[dependencies]
async-stream = { version = "0.2.0", default-features = false }
async-std = { version = "1.2.0", default-features = false, features = [ "unstable" ] }
async-stream = "0.2.0"
bitflags = "1.2.1"
byteorder = { version = "1.3.2", default-features = false }
chrono = { version = "0.4", optional = true }
futures-channel = "0.3.1"
futures-core = "0.3.1"
futures-util = "0.3.1"
log = "0.4.8"
md-5 = "0.8.0"
memchr = "2.2.1"
url = "2.1.0"
uuid = { version = "0.8.1", optional = true }
bitflags = { version = "1.2.1", default-features = false }
futures-core = { version = "0.3.1", default-features = false }
futures-util = { version = "0.3.1", default-features = false }
log = { version = "0.4", default-features = false }
url = { version = "2.1.0", default-features = false }
byteorder = { version = "1.3.2", default-features = false }
memchr = { version = "2.2.1", default-features = false }
md-5 = { version = "0.8.0", default-features = false }
uuid = { version = "0.8.1", default-features = false, optional = true }
chrono = { version = "0.4.10", default-features = false, features = [ "clock" ], optional = true }
[dev-dependencies]
matches = "0.1.8"
bytes = "0.5.2"
async-std = { version = "1.2.0", default-features = false, features = [ "attributes" ] }

160
sqlx-core/src/arguments.rs Normal file
View File

@ -0,0 +1,160 @@
//! Traits for passing arguments to SQL queries.
use crate::database::Database;
use crate::encode::Encode;
use crate::types::HasSqlType;
/// A tuple of arguments to be sent to the database.
pub trait Arguments: Send + Sized + Default + 'static {
type Database: Database + ?Sized;
/// Returns `true` if there are no values.
#[inline]
fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the number of values.
fn len(&self) -> usize;
/// Returns the size of the arguments, in bytes.
fn size(&self) -> usize;
/// Reserves the capacity for at least `len` more values (totaling an additional
/// `size` bytes) to be added to the arguments without a reallocation.
fn reserve(&mut self, len: usize, size: usize);
/// Add the value to the end of the arguments.
fn add<T>(&mut self, value: T)
where
Self::Database: HasSqlType<T>,
T: Encode<Self::Database>;
}
pub trait IntoArguments<DB>
where
DB: Database,
{
fn into_arguments(self) -> DB::Arguments;
}
impl<DB> IntoArguments<DB> for DB::Arguments
where
DB: Database,
{
#[inline]
fn into_arguments(self) -> DB::Arguments {
self
}
}
#[allow(unused)]
macro_rules! impl_into_arguments {
($B:ident: $( ($idx:tt) -> $T:ident );+;) => {
impl<$($T,)+> crate::arguments::IntoArguments<$B> for ($($T,)+)
where
$($B: crate::types::HasSqlType<$T>,)+
$($T: crate::encode::Encode<$B>,)+
{
fn into_arguments(self) -> <$B as crate::database::Database>::Arguments {
use crate::arguments::Arguments;
let mut arguments = <$B as crate::database::Database>::Arguments::default();
let binds = 0 $(+ { $idx; 1 } )+;
let bytes = 0 $(+ crate::encode::Encode::size_hint(&self.$idx))+;
arguments.reserve(binds, bytes);
$(crate::arguments::Arguments::add(&mut arguments, self.$idx);)+
arguments
}
}
};
}
#[allow(unused)]
macro_rules! impl_into_arguments_for_database {
($B:ident) => {
impl crate::arguments::IntoArguments<$B> for ()
{
#[inline]
fn into_arguments(self) -> <$B as crate::database::Database>::Arguments {
Default::default()
}
}
impl_into_arguments!($B:
(0) -> T1;
);
impl_into_arguments!($B:
(0) -> T1;
(1) -> T2;
);
impl_into_arguments!($B:
(0) -> T1;
(1) -> T2;
(2) -> T3;
);
impl_into_arguments!($B:
(0) -> T1;
(1) -> T2;
(2) -> T3;
(3) -> T4;
);
impl_into_arguments!($B:
(0) -> T1;
(1) -> T2;
(2) -> T3;
(3) -> T4;
(4) -> T5;
);
impl_into_arguments!($B:
(0) -> T1;
(1) -> T2;
(2) -> T3;
(3) -> T4;
(4) -> T5;
(5) -> T6;
);
impl_into_arguments!($B:
(0) -> T1;
(1) -> T2;
(2) -> T3;
(3) -> T4;
(4) -> T5;
(5) -> T6;
(6) -> T7;
);
impl_into_arguments!($B:
(0) -> T1;
(1) -> T2;
(2) -> T3;
(3) -> T4;
(4) -> T5;
(5) -> T6;
(6) -> T7;
(7) -> T8;
);
impl_into_arguments!($B:
(0) -> T1;
(1) -> T2;
(2) -> T3;
(3) -> T4;
(4) -> T5;
(5) -> T6;
(6) -> T7;
(7) -> T8;
(8) -> T9;
);
}
}
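For reference, a hand-written sketch of what `impl_into_arguments!` expands to for a two-element tuple, using `MySql` as the concrete database purely for illustration (the macro generates the analogous impls for arities 1 through 9; this is written as if inside `sqlx-core` with the `mysql` feature enabled, so exact paths are approximate):

```rust
use crate::arguments::{Arguments, IntoArguments};
use crate::database::Database;
use crate::encode::Encode;
use crate::mysql::MySql;
use crate::types::HasSqlType;

impl<T1, T2> IntoArguments<MySql> for (T1, T2)
where
    MySql: HasSqlType<T1> + HasSqlType<T2>,
    T1: Encode<MySql>,
    T2: Encode<MySql>,
{
    fn into_arguments(self) -> <MySql as Database>::Arguments {
        let mut arguments = <MySql as Database>::Arguments::default();

        // Two values will be bound; reserve buffer space up front using the
        // encoders' size hints, then add each tuple element in order.
        arguments.reserve(2, self.0.size_hint() + self.1.size_hint());
        arguments.add(self.0);
        arguments.add(self.1);

        arguments
    }
}
```

Writing these impls per concrete database, rather than as one blanket impl generic over `DB`, presumably keeps them from overlapping with the `IntoArguments<DB> for DB::Arguments` impl above.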

View File

@ -1,28 +0,0 @@
use crate::{
describe::Describe, executor::Executor, params::QueryParameters, row::Row,
types::HasTypeMetadata,
};
use futures_core::future::BoxFuture;
/// A database backend.
///
/// Represents a connection to the database and further provides auxiliary but
/// important related traits as associated types.
///
/// This trait is not intended to be used directly.
pub trait Backend: HasTypeMetadata + Send + Sync + Sized + 'static {
type Connection: crate::Connection<Backend = Self>;
/// The concrete `QueryParameters` implementation for this backend.
type QueryParameters: QueryParameters<Backend = Self>;
/// The concrete `Row` implementation for this backend.
type Row: Row<Backend = Self>;
/// The identifier for tables; in Postgres this is an `oid` while
/// in MySQL/MariaDB this is the qualified name of the table.
type TableIdent;
/// Establish a new connection to the database server.
fn connect(url: &str) -> BoxFuture<'static, crate::Result<Self::Connection>>;
}

View File

@ -1,6 +1,6 @@
use std::collections::hash_map::{HashMap, Entry};
use std::cmp::Ordering;
use futures_core::Future;
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::Arc;
// TODO: figure out a cache eviction strategy
// we currently naively cache all prepared statements which could live-leak memory
@ -11,44 +11,39 @@ use futures_core::Future;
/// Per-connection prepared statement cache.
pub struct StatementCache<Id> {
statements: HashMap<String, Id>
statements: HashMap<String, Id>,
columns: HashMap<Id, Arc<HashMap<Box<str>, usize>>>,
}
impl<Id> StatementCache<Id> {
impl<Id> StatementCache<Id>
where
Id: Eq + Hash,
{
pub fn new() -> Self {
StatementCache {
statements: HashMap::with_capacity(10),
columns: HashMap::with_capacity(10),
}
}
#[cfg(feature = "mysql")]
pub async fn get_or_compute<'a, E, Fut>(&'a mut self, query: &str, compute: impl FnOnce() -> Fut)
-> Result<&'a Id, E>
where
Fut: Future<Output = Result<Id, E>>
{
match self.statements.entry(query.to_string()) {
Entry::Occupied(occupied) => Ok(occupied.into_mut()),
Entry::Vacant(vacant) => {
Ok(vacant.insert(compute().await?))
}
}
pub fn has_columns(&self, id: Id) -> bool {
self.columns.contains_key(&id)
}
// for Postgres so it can return the synthetic statement name instead of formatting twice
#[cfg(feature = "postgres")]
pub async fn map_or_compute<R, E, Fut>(&mut self, query: &str, map: impl FnOnce(&Id) -> R, compute: impl FnOnce() -> Fut)
-> Result<R, E>
where
Fut: Future<Output = Result<(Id, R), E>> {
pub fn get(&self, query: &str) -> Option<&Id> {
self.statements.get(query)
}
match self.statements.entry(query.to_string()) {
Entry::Occupied(occupied) => Ok(map(occupied.get())),
Entry::Vacant(vacant) => {
let (id, ret) = compute().await?;
vacant.insert(id);
Ok(ret)
}
}
// It is a logical error to call this without first calling [put_columns]
pub fn get_columns(&self, id: Id) -> Arc<HashMap<Box<str>, usize>> {
Arc::clone(&self.columns[&id])
}
pub fn put(&mut self, query: String, id: Id) {
self.statements.insert(query, id);
}
pub fn put_columns(&mut self, id: Id, columns: HashMap<Box<str>, usize>) {
self.columns.insert(id, Arc::new(columns));
}
}
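A hypothetical sketch of how a driver is expected to use the reworked cache: look up the prepared-statement id by SQL text, `put` it after preparing on a miss, and record the column-name map once so later rows can resolve names by ordinal. The id `42` and the column names are placeholders; the real id comes from the driver's prepare round-trip.

```rust
use std::collections::HashMap;

use crate::cache::StatementCache;

fn remember_statement(cache: &mut StatementCache<u32>) {
    let query = "SELECT id, name FROM users";

    if cache.get(query).is_none() {
        // Placeholder for the id returned by the PREPARE round-trip.
        let statement_id = 42_u32;
        cache.put(query.to_string(), statement_id);

        // Column name -> ordinal map, recorded once per statement.
        let mut columns = HashMap::new();
        columns.insert(Box::from("id"), 0);
        columns.insert(Box::from("name"), 1);
        cache.put_columns(statement_id, columns);
    }

    // Only valid because `put_columns` has been called for this id.
    assert!(cache.has_columns(42));
    assert_eq!(cache.get_columns(42)["name"], 1);
}
```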

View File

@ -1,7 +1,25 @@
use crate::executor::Executor;
use crate::url::Url;
use futures_core::future::BoxFuture;
use crate::Executor;
use futures_util::TryFutureExt;
use std::convert::TryInto;
pub trait Connection: Executor + Sized {
/// Gracefully close the connection.
/// Represents a single database connection rather than a pool of database connections.
///
/// Prefer running queries from [Pool] unless there is a specific need for a single, continuous
/// connection.
pub trait Connection: Executor + Send + 'static {
/// Establish a new database connection.
fn open<T>(url: T) -> BoxFuture<'static, crate::Result<Self>>
where
T: TryInto<Url, Error = crate::Error>,
Self: Sized;
/// Close this database connection.
fn close(self) -> BoxFuture<'static, crate::Result<()>>;
/// Verifies a connection to the database is still alive.
fn ping(&mut self) -> BoxFuture<crate::Result<()>> {
Box::pin(self.execute("SELECT 1", Default::default()).map_ok(|_| ()))
}
}
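A caller's-eye sketch of this trait against the MySQL driver from this commit. The re-export path for `MySqlConnection` and the `&str` → `Url` conversion used by `open` are assumptions here; only the three trait methods come from the code above.

```rust
use sqlx_core::mysql::MySqlConnection; // assumed re-export path
use sqlx_core::Connection;

async fn check(url: &str) -> sqlx_core::Result<()> {
    // `open` accepts anything that converts into the crate's internal `Url`.
    let mut conn = MySqlConnection::open(url).await?;

    // Default implementation: runs `SELECT 1` and discards the result.
    conn.ping().await?;

    // Gracefully close the connection.
    conn.close().await
}
```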

19
sqlx-core/src/database.rs Normal file
View File

@ -0,0 +1,19 @@
use crate::arguments::Arguments;
use crate::connection::Connection;
use crate::row::Row;
use crate::types::HasTypeMetadata;
/// A database driver.
///
/// This trait encapsulates a complete driver implementation to a specific
/// database (e.g., MySQL, Postgres).
pub trait Database: HasTypeMetadata + 'static {
/// The concrete `Connection` implementation for this database.
type Connection: Connection<Database = Self>;
/// The concrete `Arguments` implementation for this database.
type Arguments: Arguments<Database = Self>;
/// The concrete `Row` implementation for this database.
type Row: Row<Database = Self>;
}
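For orientation, the shape of a driver's `Database` impl under this trait; the connection and arguments types below exist elsewhere in this commit, while the row type name is an assumption made for the sketch.

```rust
// Written as if inside sqlx-core with the `mysql` feature enabled; the row
// type name is assumed, since the MySQL row module is not shown in this excerpt.
impl crate::Database for crate::mysql::MySql {
    type Connection = crate::mysql::MySqlConnection;
    type Arguments = crate::mysql::MySqlArguments;
    type Row = crate::mysql::MySqlRow;
}
```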

View File

@ -1,18 +1,86 @@
//! Types and traits related to deserializing values from the database.
use crate::{backend::Backend, types::HasSqlType};
//! Types and traits for decoding values from the database.
// TODO: Allow decode to return an error (that can be unified)
use std::error::Error as StdError;
use std::fmt::{self, Display};
pub trait Decode<DB: Backend> {
fn decode(raw: Option<&[u8]>) -> Self;
use crate::database::Database;
use crate::types::HasSqlType;
pub enum DecodeError {
/// An unexpected `NULL` was encountered while decoding.
UnexpectedNull,
Message(Box<dyn Display + Send + Sync>),
Other(Box<dyn StdError + Send + Sync>),
}
/// Decode a single value from the database.
pub trait Decode<DB>: Sized
where
DB: Database + ?Sized,
{
fn decode(raw: &[u8]) -> Result<Self, DecodeError>;
/// Creates a new value of this type from a `NULL` SQL value.
///
/// The default implementation returns [DecodeError::UnexpectedNull].
fn decode_null() -> Result<Self, DecodeError> {
return Err(DecodeError::UnexpectedNull);
}
fn decode_nullable(raw: Option<&[u8]>) -> Result<Self, DecodeError> {
if let Some(raw) = raw {
Self::decode(raw)
} else {
Self::decode_null()
}
}
}
impl<T, DB> Decode<DB> for Option<T>
where
DB: Backend + HasSqlType<T>,
DB: Database + HasSqlType<T>,
T: Decode<DB>,
{
fn decode(raw: Option<&[u8]>) -> Self {
Some(T::decode(Some(raw?)))
fn decode(buf: &[u8]) -> Result<Self, DecodeError> {
T::decode(buf).map(Some)
}
fn decode_null() -> Result<Self, DecodeError> {
Ok(None)
}
}
impl fmt::Debug for DecodeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("DecodeError(")?;
match self {
DecodeError::UnexpectedNull => write!(f, "unexpected null for non-null column")?,
DecodeError::Message(err) => write!(f, "{}", err)?,
DecodeError::Other(err) => write!(f, "{:?}", err)?,
}
f.write_str(")")
}
}
impl fmt::Display for DecodeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DecodeError::UnexpectedNull => f.write_str("unexpected null for non-null column"),
DecodeError::Message(err) => write!(f, "{}", err),
DecodeError::Other(err) => write!(f, "{}", err),
}
}
}
impl<E> From<E> for DecodeError
where
E: StdError + Send + Sync + 'static,
{
fn from(err: E) -> DecodeError {
DecodeError::Other(Box::new(err))
}
}
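As a usage sketch, a wrapper type can implement `Decode` by delegating to an existing impl. The `Inches` type is invented for the example and the `sqlx_core` paths are assumptions; fully qualified syntax avoids ambiguity over which database the inner `i32` decode targets.

```rust
use sqlx_core::decode::{Decode, DecodeError};
use sqlx_core::Database;

struct Inches(i32);

impl<DB> Decode<DB> for Inches
where
    DB: Database + ?Sized,
    i32: Decode<DB>,
{
    fn decode(raw: &[u8]) -> Result<Self, DecodeError> {
        // Reuse the driver's i32 decoding and wrap the value.
        <i32 as Decode<DB>>::decode(raw).map(Inches)
    }

    // `decode_null` keeps its default and reports `DecodeError::UnexpectedNull`.
}
```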

View File

@ -1,46 +1,59 @@
use crate::Backend;
//! Types for returning SQL type information about queries.
use crate::types::HasTypeMetadata;
use crate::Database;
use std::fmt::{self, Debug};
use std::fmt;
/// The return type of [Executor::describe].
pub struct Describe<DB>
where
DB: Database + ?Sized,
{
/// The expected types for the parameters of the query.
pub param_types: Box<[<DB as HasTypeMetadata>::TypeId]>,
/// The result of running prepare + describe for the given backend.
pub struct Describe<DB: Backend> {
/// The expected type IDs of bind parameters.
pub param_types: Vec<<DB as HasTypeMetadata>::TypeId>,
///
pub result_fields: Vec<ResultField<DB>>,
pub(crate) _backcompat: (),
/// The type and table information, if any, for the results of the query.
pub result_columns: Box<[Column<DB>]>,
// TODO: Remove and use #[non_exhaustive] when we can
pub(crate) _non_exhaustive: (),
}
impl<DB: Backend> fmt::Debug for Describe<DB>
impl<DB> Debug for Describe<DB>
where
<DB as HasTypeMetadata>::TypeId: fmt::Debug,
ResultField<DB>: fmt::Debug,
DB: Database,
<DB as HasTypeMetadata>::TypeId: Debug,
Column<DB>: Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Describe")
.field("param_types", &self.param_types)
.field("result_fields", &self.result_fields)
.field("result_columns", &self.result_columns)
.finish()
}
}
pub struct ResultField<DB: Backend> {
pub name: Option<String>,
pub table_id: Option<<DB as Backend>::TableIdent>,
/// The type ID of this result column.
/// A single column of a result set.
pub struct Column<DB>
where
DB: Database + ?Sized,
{
pub name: Option<Box<str>>,
pub table_id: Option<<DB as HasTypeMetadata>::TableId>,
pub type_id: <DB as HasTypeMetadata>::TypeId,
pub(crate) _backcompat: (),
// TODO: Remove and use #[non_exhaustive] when we can
pub(crate) _non_exhaustive: (),
}
impl<DB: Backend> fmt::Debug for ResultField<DB>
impl<DB> Debug for Column<DB>
where
<DB as Backend>::TableIdent: fmt::Debug,
<DB as HasTypeMetadata>::TypeId: fmt::Debug,
DB: Database + ?Sized,
<DB as HasTypeMetadata>::TableId: Debug,
<DB as HasTypeMetadata>::TypeId: Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ResultField")
f.debug_struct("Column")
.field("name", &self.name)
.field("table_id", &self.table_id)
.field("type_id", &self.type_id)

View File

@ -1,72 +1,82 @@
//! Types and traits related to serializing values for the database.
use crate::{backend::Backend, types::HasSqlType};
//! Types and traits for encoding values to the database.
use crate::database::Database;
use crate::types::HasSqlType;
use std::mem;
/// Annotates the result of [Encode] to differentiate between an empty value and a null value.
/// The return type of [Encode::encode].
pub enum IsNull {
/// The value was null (and no data was written to the buffer).
/// The value is null; no data was written.
Yes,
/// The value was not null.
/// The value is not null.
///
/// This does not necessarily mean that any data was written to the buffer.
/// This does not mean that data was written.
No,
}
/// Serializes a single value to be sent to the database.
///
/// The data must be written to the buffer in the expected format
/// for the given backend.
///
/// When possible, implementations of this trait should prefer using an
/// existing implementation, rather than writing to `buf` directly.
pub trait Encode<DB: Backend> {
/// Writes the value of `self` into `buf` as the expected format
/// for the given backend.
///
/// The return value indicates if this value should be represented as `NULL`.
/// If this is the case, implementations **must not** write anything to `out`.
fn encode(&self, buf: &mut Vec<u8>) -> IsNull;
/// Encode a single value to be sent to the database.
pub trait Encode<DB>
where
DB: Database + ?Sized,
{
/// Writes the value of `self` into `buf` in the expected format for the database.
fn encode(&self, buf: &mut Vec<u8>);
fn encode_nullable(&self, buf: &mut Vec<u8>) -> IsNull {
self.encode(buf);
IsNull::No
}
/// Calculate the number of bytes this type will use when encoded.
fn size_hint(&self) -> usize {
mem::size_of_val(self)
}
}
/// [Encode] is implemented for `Option<T>` where `T` implements `Encode`. An `Option<T>`
/// represents a nullable SQL value.
impl<T, DB> Encode<DB> for Option<T>
where
DB: Backend + HasSqlType<T>,
T: Encode<DB>,
{
#[inline]
fn encode(&self, buf: &mut Vec<u8>) -> IsNull {
if let Some(self_) = self {
self_.encode(buf)
} else {
IsNull::Yes
}
}
fn size_hint(&self) -> usize {
if self.is_some() { mem::size_of::<T>() } else { 0 }
}
}
impl<T: ?Sized, DB> Encode<DB> for &'_ T
where
DB: Backend + HasSqlType<T>,
DB: Database + HasSqlType<T>,
T: Encode<DB>,
{
#[inline]
fn encode(&self, buf: &mut Vec<u8>) -> IsNull {
fn encode(&self, buf: &mut Vec<u8>) {
(*self).encode(buf)
}
fn encode_nullable(&self, buf: &mut Vec<u8>) -> IsNull {
(*self).encode_nullable(buf)
}
fn size_hint(&self) -> usize {
(*self).size_hint()
}
}
impl<T, DB> Encode<DB> for Option<T>
where
DB: Database + HasSqlType<T>,
T: Encode<DB>,
{
fn encode(&self, buf: &mut Vec<u8>) {
// Forward to [encode_nullable] and ignore the result
let _ = self.encode_nullable(buf);
}
fn encode_nullable(&self, buf: &mut Vec<u8>) -> IsNull {
if let Some(self_) = self {
self_.encode(buf);
IsNull::No
} else {
IsNull::Yes
}
}
fn size_hint(&self) -> usize {
if self.is_some() {
mem::size_of::<T>()
} else {
0
}
}
}
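Mirroring the blanket impls above, a wrapper type can delegate its encoding; `Meters` is invented for the example and the `sqlx_core` paths are assumptions.

```rust
use sqlx_core::encode::Encode;
use sqlx_core::Database;

struct Meters(i64);

impl<DB> Encode<DB> for Meters
where
    DB: Database + ?Sized,
    i64: Encode<DB>,
{
    fn encode(&self, buf: &mut Vec<u8>) {
        // Write the inner value in whatever wire format the driver uses for i64.
        <i64 as Encode<DB>>::encode(&self.0, buf)
    }

    fn size_hint(&self) -> usize {
        <i64 as Encode<DB>>::size_hint(&self.0)
    }
}
```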

View File

@ -1,49 +1,51 @@
use std::{
error::Error as StdError,
fmt::{self, Debug, Display},
io,
};
//! Error and Result types.
use async_std::future::TimeoutError;
use crate::decode::DecodeError;
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display};
use std::io;
/// A convenient Result instantiation appropriate for SQLx.
pub type Result<T> = std::result::Result<T, Error>;
/// A specialized `Result` type for SQLx.
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// A generic error that represents all the ways a method can fail inside of SQLx.
#[derive(Debug)]
pub enum Error {
/// Error communicating with the database backend.
///
/// Some reasons for this to be caused:
///
/// - [io::ErrorKind::ConnectionRefused] - Database backend is most likely behind a firewall.
///
/// - [io::ErrorKind::ConnectionReset] - Database backend dropped the client connection (perhaps from an administrator action).
/// Error communicating with the database.
Io(io::Error),
/// An error was returned by the database backend.
Database(Box<dyn DatabaseError + Send + Sync>),
/// Connection URL was malformed.
UrlParse(url::ParseError),
/// No rows were returned by a query expected to return at least one row.
/// An error was returned by the database.
Database(Box<dyn DatabaseError>),
/// No rows were returned by a query that expected to return at least one row.
NotFound,
/// More than one row was returned by a query expected to return exactly one row.
/// More than one row was returned by a query that expected to return exactly one row.
FoundMoreThanOne,
/// Unexpected or invalid data was encountered. This would indicate that we received data that we were not
/// expecting or it was in a format we did not understand. This generally means either there is a programming error in a SQLx driver or
/// something with the connection or the database backend itself is corrupted.
/// Column was not found in Row during [Row::try_get].
ColumnNotFound(Box<str>),
/// Unexpected or invalid data was encountered. This would indicate that we received
/// data that we were not expecting or it was in a format we did not understand. This
/// generally means either there is a programming error in a SQLx driver or
/// something with the connection or the database itself is corrupted.
///
/// Context is provided by the included error message.
Protocol(Box<str>),
/// A `Pool::acquire()` timed out due to connections not becoming available or
/// A [Pool::acquire] timed out due to connections not becoming available or
/// because another task encountered too many errors while trying to open a new connection.
TimedOut,
PoolTimedOut,
/// `Pool::close()` was called while we were waiting in `Pool::acquire()`.
/// [Pool::close] was called while we were waiting in [Pool::acquire].
PoolClosed,
Decode(DecodeError),
// TODO: Remove and replace with `#[non_exhaustive]` when possible
#[doc(hidden)]
__Nonexhaustive,
@ -54,6 +56,10 @@ impl StdError for Error {
match self {
Error::Io(error) => Some(error),
Error::UrlParse(error) => Some(error),
Error::Decode(DecodeError::Other(error)) => Some(&**error),
_ => None,
}
}
@ -64,17 +70,25 @@ impl Display for Error {
match self {
Error::Io(error) => write!(f, "{}", error),
Error::UrlParse(error) => write!(f, "{}", error),
Error::Decode(error) => write!(f, "{}", error),
Error::Database(error) => Display::fmt(error, f),
Error::NotFound => f.write_str("found no rows when we expected at least one"),
Error::ColumnNotFound(ref name) => {
write!(f, "no column found with the name {:?}", name)
}
Error::FoundMoreThanOne => {
f.write_str("found more than one row when we expected exactly one")
}
Error::Protocol(ref err) => f.write_str(err),
Error::TimedOut => f.write_str("timed out while waiting for an open connection"),
Error::PoolTimedOut => f.write_str("timed out while waiting for an open connection"),
Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"),
@ -90,9 +104,24 @@ impl From<io::Error> for Error {
}
}
impl From<TimeoutError> for Error {
fn from(_: TimeoutError) -> Self {
Error::TimedOut
impl From<io::ErrorKind> for Error {
#[inline]
fn from(err: io::ErrorKind) -> Self {
Error::Io(err.into())
}
}
impl From<DecodeError> for Error {
#[inline]
fn from(err: DecodeError) -> Self {
Error::Decode(err)
}
}
impl From<url::ParseError> for Error {
#[inline]
fn from(err: url::ParseError) -> Self {
Error::UrlParse(err)
}
}
@ -113,9 +142,20 @@ where
}
}
/// An error that was returned by the database backend.
/// An error that was returned by the database.
pub trait DatabaseError: Display + Debug + Send + Sync {
/// The primary, human-readable error message.
fn message(&self) -> &str;
fn details(&self) -> Option<&str>;
fn hint(&self) -> Option<&str>;
fn table_name(&self) -> Option<&str>;
fn column_name(&self) -> Option<&str>;
fn constraint_name(&self) -> Option<&str>;
}
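A hypothetical triage function against the reworked error type, using only variants and `DatabaseError` methods shown above (a catch-all arm is required since the enum is effectively non-exhaustive):

```rust
use sqlx_core::error::{DatabaseError, Error};

fn describe_failure(err: &Error) -> String {
    match err {
        Error::Database(db) => format!("database said: {}", db.message()),
        Error::ColumnNotFound(name) => format!("no column named {:?}", name),
        Error::PoolTimedOut => "timed out waiting for a pool connection".into(),
        other => other.to_string(),
    }
}
```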
/// Used by the `protocol_error!()` macro for a lazily evaluated conversion to
@ -124,6 +164,7 @@ pub(crate) struct ProtocolError<'a> {
pub args: fmt::Arguments<'a>,
}
#[cfg(any(feature = "mysql", feature = "postgres"))]
macro_rules! protocol_err (
($($args:tt)*) => {
$crate::error::ProtocolError { args: format_args!($($args)*) }

View File

@ -1,81 +1,60 @@
use crate::{
backend::Backend,
describe::Describe,
error::Error,
params::{IntoQueryParameters, QueryParameters},
row::FromRow,
};
use futures_core::{future::BoxFuture, stream::BoxStream};
use futures_util::{TryFutureExt, TryStreamExt};
use crate::database::Database;
use crate::describe::Describe;
use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;
use futures_util::TryStreamExt;
pub trait Executor: Send {
type Backend: Backend;
/// Encapsulates query execution on the database.
///
/// Implemented by [Pool], [Connection], and [Transaction].
pub trait Executor {
type Database: Database + ?Sized;
/// Verifies a connection to the database is still alive.
fn ping<'e>(&'e mut self) -> BoxFuture<'e, crate::Result<()>> {
Box::pin(
self.execute(
"SELECT 1",
Default::default(),
)
.map_ok(|_| ()),
)
}
/// Send a raw SQL command to the database.
///
/// This is intended for queries that cannot or should not be prepared (ex. `BEGIN`).
///
/// Does not support fetching results.
fn send<'e, 'q: 'e>(&'e mut self, command: &'q str) -> BoxFuture<'e, crate::Result<()>>;
/// Execute the query, returning the number of rows affected.
fn execute<'e, 'q: 'e>(
&'e mut self,
query: &'q str,
params: <Self::Backend as Backend>::QueryParameters,
args: <Self::Database as Database>::Arguments,
) -> BoxFuture<'e, crate::Result<u64>>;
fn fetch<'e, 'q: 'e, T: 'e>(
/// Executes the query and returns a [Stream] of [Row].
fn fetch<'e, 'q: 'e>(
&'e mut self,
query: &'q str,
params: <Self::Backend as Backend>::QueryParameters,
) -> BoxStream<'e, crate::Result<T>>
where
T: FromRow<Self::Backend> + Send + Unpin;
args: <Self::Database as Database>::Arguments,
) -> BoxStream<'e, crate::Result<<Self::Database as Database>::Row>>;
fn fetch_all<'e, 'q: 'e, T: 'e>(
/// Executes the query and returns up to one resulting record.
/// * `Error::FoundMoreThanOne` will be returned if the query produced more than 1 row.
fn fetch_optional<'e, 'q: 'e>(
&'e mut self,
query: &'q str,
params: <Self::Backend as Backend>::QueryParameters,
) -> BoxFuture<'e, crate::Result<Vec<T>>>
where
T: FromRow<Self::Backend> + Send + Unpin,
{
Box::pin(self.fetch(query, params).try_collect())
args: <Self::Database as Database>::Arguments,
) -> BoxFuture<'e, crate::Result<Option<<Self::Database as Database>::Row>>> {
let mut s = self.fetch(query, args);
Box::pin(async move { s.try_next().await })
}
fn fetch_optional<'e, 'q: 'e, T: 'e>(
/// Execute the query and return exactly one resulting record.
fn fetch_one<'e, 'q: 'e>(
&'e mut self,
query: &'q str,
params: <Self::Backend as Backend>::QueryParameters,
) -> BoxFuture<'e, crate::Result<Option<T>>>
where
T: FromRow<Self::Backend> + Send;
fn fetch_one<'e, 'q: 'e, T: 'e>(
&'e mut self,
query: &'q str,
params: <Self::Backend as Backend>::QueryParameters,
) -> BoxFuture<'e, crate::Result<T>>
where
T: FromRow<Self::Backend> + Send,
{
let fut = self.fetch_optional(query, params);
Box::pin(async move { fut.await?.ok_or(Error::NotFound) })
args: <Self::Database as Database>::Arguments,
) -> BoxFuture<'e, crate::Result<<Self::Database as Database>::Row>> {
let mut s = self.fetch(query, args);
Box::pin(async move { s.try_next().await?.ok_or(crate::Error::NotFound) })
}
/// Analyze the SQL statement and report the inferred bind parameter types and returned
/// columns.
/// Analyze the SQL query and report the inferred bind parameter types and returned columns.
fn describe<'e, 'q: 'e>(
&'e mut self,
query: &'q str,
) -> BoxFuture<'e, crate::Result<Describe<Self::Backend>>>;
/// Send a semicolon-delimited series of arbitrary SQL commands to the server.
///
/// Does not support fetching results.
fn send<'e, 'q: 'e>(&'e mut self, commands: &'q str) -> BoxFuture<'e, crate::Result<()>>;
) -> BoxFuture<'e, crate::Result<Describe<Self::Database>>>;
}
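A sketch of driving the stream-based API generically (hypothetical helper; it works for any driver because `fetch` and the `Arguments` default come from the traits above):

```rust
use futures_util::TryStreamExt;
use sqlx_core::Executor;

async fn count_rows<E: Executor>(exec: &mut E, sql: &str) -> sqlx_core::Result<usize> {
    // An empty argument set: `Arguments` is `Default`, so this just works.
    let mut rows = exec.fetch(sql, Default::default());
    let mut n = 0;

    while let Some(_row) = rows.try_next().await? {
        n += 1;
    }

    Ok(n)
}
```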

View File

@ -5,6 +5,8 @@ use std::{io, slice, str};
pub trait Buf {
fn advance(&mut self, cnt: usize);
fn get_uint<T: ByteOrder>(&mut self, n: usize) -> io::Result<u64>;
fn get_u8(&mut self) -> io::Result<u8>;
fn get_u16<T: ByteOrder>(&mut self) -> io::Result<u16>;
@ -22,6 +24,8 @@ pub trait Buf {
fn get_str(&mut self, len: usize) -> io::Result<&str>;
fn get_str_nul(&mut self) -> io::Result<&str>;
fn get_bytes(&mut self, len: usize) -> io::Result<&[u8]>;
}
impl<'a> Buf for &'a [u8] {
@ -29,9 +33,15 @@ impl<'a> Buf for &'a [u8] {
*self = &self[cnt..];
}
fn get_uint<T: ByteOrder>(&mut self, n: usize) -> io::Result<u64> {
let val = T::read_uint(*self, n);
self.advance(n);
Ok(val)
}
fn get_u8(&mut self) -> io::Result<u8> {
let val = self[0];
self.advance(1);
Ok(val)
@ -51,16 +61,16 @@ impl<'a> Buf for &'a [u8] {
Ok(val)
}
fn get_i32<T: ByteOrder>(&mut self) -> io::Result<i32> {
let val = T::read_i32(*self);
self.advance(4);
fn get_u24<T: ByteOrder>(&mut self) -> io::Result<u32> {
let val = T::read_u24(*self);
self.advance(3);
Ok(val)
}
fn get_u24<T: ByteOrder>(&mut self) -> io::Result<u32> {
let val = T::read_u24(*self);
self.advance(3);
fn get_i32<T: ByteOrder>(&mut self) -> io::Result<i32> {
let val = T::read_i32(*self);
self.advance(4);
Ok(val)
}
@ -80,15 +90,8 @@ impl<'a> Buf for &'a [u8] {
}
fn get_str(&mut self, len: usize) -> io::Result<&str> {
let buf = &self[..len];
self.advance(len);
if cfg!(debug_asserts) {
str::from_utf8(buf).map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
} else {
Ok(unsafe { str::from_utf8_unchecked(buf) })
}
str::from_utf8(self.get_bytes(len)?)
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
}
fn get_str_nul(&mut self) -> io::Result<&str> {
@ -97,6 +100,13 @@ impl<'a> Buf for &'a [u8] {
Ok(s)
}
fn get_bytes(&mut self, len: usize) -> io::Result<&[u8]> {
let buf = &self[..len];
self.advance(len);
Ok(buf)
}
}
pub trait ToBuf {
@ -104,9 +114,13 @@ pub trait ToBuf {
}
impl ToBuf for [u8] {
fn to_buf(&self) -> &[u8] { self }
fn to_buf(&self) -> &[u8] {
self
}
}
impl ToBuf for u8 {
fn to_buf(&self) -> &[u8] { slice::from_ref(self) }
fn to_buf(&self) -> &[u8] {
slice::from_ref(self)
}
}
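An in-crate illustration (hypothetical helper) of the new `get_uint` and `get_u8` methods: reading a MySQL-style packet header, 3 bytes of little-endian payload length followed by a 1-byte sequence id, as the connection code later in this commit does.

```rust
use byteorder::LittleEndian;

use crate::io::Buf;

fn read_packet_header(mut header: &[u8]) -> std::io::Result<(usize, u8)> {
    let payload_len = header.get_uint::<LittleEndian>(3)? as usize;
    let sequence_id = header.get_u8()?;

    Ok((payload_len, sequence_id))
}
```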

View File

@ -18,6 +18,10 @@ pub trait BufMut {
fn put_u64<T: ByteOrder>(&mut self, val: u64);
fn put_bytes(&mut self, val: &[u8]);
fn put_str(&mut self, val: &str);
fn put_str_nul(&mut self, val: &str);
}
@ -30,18 +34,18 @@ impl BufMut for Vec<u8> {
self.push(val);
}
fn put_i16<T: ByteOrder>(&mut self, val: i16) {
let mut buf = [0; 2];
T::write_i16(&mut buf, val);
self.extend_from_slice(&buf);
}
fn put_u16<T: ByteOrder>(&mut self, val: u16) {
let mut buf = [0; 2];
T::write_u16(&mut buf, val);
self.extend_from_slice(&buf);
}
fn put_i16<T: ByteOrder>(&mut self, val: i16) {
let mut buf = [0; 2];
T::write_i16(&mut buf, val);
self.extend_from_slice(&buf);
}
fn put_u24<T: ByteOrder>(&mut self, val: u32) {
let mut buf = [0; 3];
T::write_u24(&mut buf, val);
@ -66,8 +70,16 @@ impl BufMut for Vec<u8> {
self.extend_from_slice(&buf);
}
fn put_str_nul(&mut self, val: &str) {
fn put_bytes(&mut self, val: &[u8]) {
self.extend_from_slice(val);
}
fn put_str(&mut self, val: &str) {
self.extend_from_slice(val.as_bytes());
}
fn put_str_nul(&mut self, val: &str) {
self.put_str(val);
self.push(0);
}
}

View File

@ -2,7 +2,6 @@ use async_std::io::{
prelude::{ReadExt, WriteExt},
Read, Write,
};
use std::mem::MaybeUninit;
use std::io;
pub struct BufStream<S> {
@ -66,7 +65,9 @@ where
// If we have enough bytes in our read buffer,
// return immediately
if self.rbuf_windex >= (self.rbuf_rindex + cnt) {
return Ok(Some(&self.rbuf[self.rbuf_rindex..(self.rbuf_rindex + cnt)]));
let buf = &self.rbuf[self.rbuf_rindex..(self.rbuf_rindex + cnt)];
return Ok(Some(buf));
}
// If we are out of space to write to in the read buffer,

View File

@ -5,4 +5,24 @@ mod buf;
mod buf_mut;
mod byte_str;
pub use self::{buf::{Buf, ToBuf}, buf_mut::BufMut, buf_stream::BufStream, byte_str::ByteStr};
pub use self::{
buf::{Buf, ToBuf},
buf_mut::BufMut,
buf_stream::BufStream,
byte_str::ByteStr,
};
#[cfg(test)]
#[doc(hidden)]
macro_rules! bytes (
($($b: expr), *) => {{
use $crate::io::ToBuf;
let mut buf = Vec::new();
$(
buf.extend_from_slice($b.to_buf());
)*
buf
}}
);

View File

@ -1,73 +1,60 @@
#![recursion_limit = "256"]
#![allow(unused_imports)]
#[macro_use]
mod macros;
#![deny(unsafe_code)]
#[macro_use]
pub mod error;
#[cfg(any(feature = "postgres", feature = "mysql"))]
#[cfg(any(feature = "mysql", feature = "postgres"))]
#[macro_use]
mod io;
mod backend;
pub mod decode;
#[cfg(any(feature = "postgres", feature = "mysql"))]
mod url;
#[macro_use]
mod row;
mod connection;
mod executor;
mod pool;
#[macro_use]
pub mod params;
pub mod encode;
mod query;
pub mod types;
mod describe;
#[cfg(any(feature = "mysql", feature = "postgres"))]
mod cache;
#[doc(inline)]
pub use self::{
backend::Backend,
connection::Connection,
decode::Decode,
encode::Encode,
error::{Error, Result},
executor::Executor,
pool::Pool,
query::{query, Query},
row::{FromRow, Row},
types::HasSqlType,
};
mod connection;
mod database;
mod executor;
mod query;
mod url;
#[doc(hidden)]
pub use types::HasTypeMetadata;
pub mod arguments;
pub mod decode;
pub mod describe;
pub mod encode;
pub mod pool;
pub mod types;
#[doc(hidden)]
pub use describe::{Describe, ResultField};
#[macro_use]
pub mod row;
#[cfg(feature = "mysql")]
pub mod mysql;
#[cfg(feature = "postgres")]
pub mod postgres;
pub use database::Database;
#[doc(inline)]
pub use error::{Error, Result};
pub use connection::Connection;
pub use executor::Executor;
pub use query::{query, Query};
#[doc(inline)]
pub use pool::Pool;
#[doc(inline)]
pub use row::{FromRow, Row};
#[cfg(feature = "mysql")]
#[doc(inline)]
pub use mysql::MySql;
#[cfg(feature = "postgres")]
pub mod postgres;
#[cfg(feature = "postgres")]
#[doc(inline)]
pub use self::postgres::Postgres;
pub use postgres::Postgres;
use std::marker::PhantomData;

View File

@ -1,14 +0,0 @@
#[cfg(test)]
#[doc(hidden)]
#[macro_export]
macro_rules! __bytes_builder (
($($b: expr), *) => {{
use $crate::io::ToBuf;
let mut buf = Vec::new();
$(
buf.extend_from_slice($b.to_buf());
)*
buf
}}
);

View File

@ -0,0 +1,51 @@
use crate::arguments::Arguments;
use crate::encode::{Encode, IsNull};
use crate::mysql::types::MySqlTypeMetadata;
use crate::mysql::MySql;
use crate::types::HasSqlType;
#[derive(Default)]
pub struct MySqlArguments {
pub(crate) param_types: Vec<MySqlTypeMetadata>,
pub(crate) params: Vec<u8>,
pub(crate) null_bitmap: Vec<u8>,
}
impl Arguments for MySqlArguments {
type Database = MySql;
fn len(&self) -> usize {
self.param_types.len()
}
fn size(&self) -> usize {
self.params.len()
}
fn reserve(&mut self, len: usize, size: usize) {
self.param_types.reserve(len);
self.params.reserve(size);
// ensure we have enough size in the bitmap to hold at least `len` extra bits
// the second `& 7` gives us 0 spare bits when param_types.len() is a multiple of 8
let spare_bits = (8 - (self.param_types.len() & 7)) & 7;
// ensure that if there are no spare bits left, `len = 1` reserves another byte
self.null_bitmap.reserve((len + 7 - spare_bits) / 8);
}
fn add<T>(&mut self, value: T)
where
Self::Database: HasSqlType<T>,
T: Encode<Self::Database>,
{
let metadata = <MySql as HasSqlType<T>>::metadata();
let index = self.param_types.len();
self.param_types.push(metadata);
self.null_bitmap.resize((index / 8) + 1, 0);
if let IsNull::Yes = value.encode_nullable(&mut self.params) {
self.null_bitmap[index / 8] |= (1 << (index % 8)) as u8;
}
}
}
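A small worked illustration of the NULL-bitmap bookkeeping in `add` above (numbers chosen for the example): parameter index 9 lands in byte `9 / 8 = 1` at bit `9 % 8 = 1`, so marking it NULL sets `0b0000_0010` in that byte.

```rust
// Byte offset and bit mask for a parameter's slot in the NULL bitmap.
fn null_bit(index: usize) -> (usize, u8) {
    (index / 8, 1 << (index % 8))
}

fn main() {
    assert_eq!(null_bit(0), (0, 0b0000_0001));
    assert_eq!(null_bit(9), (1, 0b0000_0010));
}
```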

View File

@ -1,34 +0,0 @@
use futures_core::{future::BoxFuture, stream::BoxStream};
use crate::{
backend::Backend,
describe::{Describe, ResultField},
mysql::{protocol::ResultRow, query::MySqlDbParameters},
url::Url,
};
use super::{Connection, RawConnection};
use super::MySql;
use crate::cache::StatementCache;
impl Backend for MySql {
type Connection = Connection;
type QueryParameters = MySqlDbParameters;
type Row = ResultRow;
type TableIdent = String;
fn connect(url: &str) -> BoxFuture<'static, crate::Result<Connection>> {
let url = Url::parse(url);
Box::pin(async move {
let url = url?;
Ok(Connection {
conn: RawConnection::open(url).await?,
cache: StatementCache::new(),
})
})
}
}
impl_from_row_for_backend!(MySql, ResultRow);
impl_into_query_parameters_for_backend!(MySql);

View File

@ -1,121 +1,41 @@
use std::{
io,
net::{IpAddr, SocketAddr},
};
use std::net::Shutdown;
use std::convert::TryInto;
use std::io;
use async_std::net::TcpStream;
use async_std::net::{Shutdown, TcpStream};
use byteorder::{ByteOrder, LittleEndian};
use futures_util::AsyncWriteExt;
use futures_core::future::BoxFuture;
use crate::{Describe, Error, io::{Buf, BufMut, BufStream}, mysql::{
protocol::{
Capabilities, ColumnCountPacket, ColumnDefinitionPacket, ComPing, ComQuit,
ComSetOption, ComStmtExecute,
ComStmtPrepare, ComStmtPrepareOk, Encode, EofPacket, ErrPacket, OkPacket,
ResultRow, SetOptionOptions, StmtExecFlag,
},
query::MySqlDbParameters,
}, Result, ResultField, url::Url};
use crate::mysql::MySql;
use crate::mysql::protocol::ComQuery;
use crate::cache::StatementCache;
use crate::connection::Connection;
use crate::io::{Buf, BufMut, BufStream, ByteStr};
use crate::mysql::error::MySqlError;
use crate::mysql::protocol::{
Capabilities, Decode, Encode, EofPacket, ErrPacket, Handshake, HandshakeResponse, OkPacket,
};
use crate::url::Url;
use super::establish;
pub struct MySqlConnection {
pub(super) stream: BufStream<TcpStream>,
pub type StatementId = u32;
pub(super) capabilities: Capabilities,
pub(super) statement_cache: StatementCache<u32>,
rbuf: Vec<u8>,
pub struct Connection {
pub(crate) stream: BufStream<TcpStream>,
pub(crate) rbuf: Vec<u8>,
pub(crate) capabilities: Capabilities,
next_seq_no: u8,
pub(super) ready: bool,
}
impl Connection {
pub async fn open(url: Url) -> Result<Self> {
// TODO: Handle errors
let host = url.host();
let port = url.port(3306);
// TODO: handle errors
let host: IpAddr = host.parse().unwrap();
let addr: SocketAddr = (host, port).into();
let stream = TcpStream::connect(&addr).await?;
let mut conn = Self {
stream: BufStream::new(stream),
rbuf: Vec::with_capacity(8 * 1024),
capabilities: Capabilities::empty(),
next_seq_no: 0,
};
establish::establish(&mut conn, &url).await?;
Ok(conn)
}
pub async fn close(mut self) -> Result<()> {
// Send the quit command
self.start_sequence();
self.write(ComQuit);
self.stream.flush().await?;
self.stream.stream.shutdown(Shutdown::Both)?;
Ok(())
}
pub async fn ping(&mut self) -> Result<()> {
// Send the ping command and wait for (and drop) an OK packet
self.start_sequence();
self.write(ComPing);
self.stream.flush().await?;
let _ = self.receive_ok_or_err().await?;
Ok(())
}
pub(crate) async fn receive(&mut self) -> Result<&[u8]> {
Ok(self
.try_receive()
.await?
.ok_or(Error::Io(io::ErrorKind::UnexpectedEof.into()))?)
}
async fn try_receive(&mut self) -> Result<Option<&[u8]>> {
// Read the packet header which contains the length and the sequence number
// https://mariadb.com/kb/en/library/0-packet/#standard-packet
let mut header = ret_if_none!(self.stream.peek(4).await?);
let len = header.get_u24::<LittleEndian>()? as usize;
self.next_seq_no = header.get_u8()? + 1;
self.stream.consume(4);
// Read the packet body and copy it into our internal buf
// We must have a separate buffer around the stream as we can't operate directly
// on bytes returned from the stream. We have compression, split, etc. to
// unpack.
let body = ret_if_none!(self.stream.peek(len).await?);
self.rbuf.clear();
self.rbuf.extend_from_slice(body);
self.stream.consume(len);
Ok(Some(&self.rbuf[..len]))
}
pub(super) fn start_sequence(&mut self) {
// At the start of a command sequence we reset our understanding
// of [next_seq_no]. In a sequence our initial command must be 0, followed
// by the server response that is 1, then our response to that response (if any),
// would be 2
impl MySqlConnection {
pub(super) fn begin_command_phase(&mut self) {
// At the start of the *command phase*, the sequence ID sent from the client
// must be 0
self.next_seq_no = 0;
}
pub(crate) fn write<T: Encode>(&mut self, packet: T) {
pub(super) fn write(&mut self, packet: impl Encode + std::fmt::Debug) {
let buf = self.stream.buffer_mut();
// Allocate room for the header that we write after the packet;
@ -137,19 +57,16 @@ impl Connection {
// Take the last sequence number received, if any, and increment by 1
// If there was no sequence number, we only increment if we split packets
header[3] = self.next_seq_no;
self.next_seq_no += 1;
self.next_seq_no = self.next_seq_no.wrapping_add(1);
}
// Decode an OK packet or bubble an ERR packet as an error
// to terminate immediately
pub(crate) async fn receive_ok_or_err(&mut self) -> Result<OkPacket> {
let capabilities = self.capabilities;
let buf = self.receive().await?;
Ok(match buf[0] {
0xfe | 0x00 => OkPacket::decode(buf, capabilities)?,
async fn receive_ok(&mut self) -> crate::Result<OkPacket> {
let packet = self.receive().await?;
Ok(match packet[0] {
0xfe | 0x00 => OkPacket::decode(packet)?,
0xff => {
return ErrPacket::decode(buf)?.expect_error();
return Err(MySqlError(ErrPacket::decode(packet)?).into());
}
id => {
@ -163,185 +80,124 @@ impl Connection {
})
}
async fn check_eof(&mut self) -> Result<()> {
pub(super) async fn receive_eof(&mut self) -> crate::Result<()> {
// When (legacy) EOFs are enabled, the fixed number of column definitions is
// additionally terminated by an EOF packet
if !self
.capabilities
.contains(Capabilities::CLIENT_DEPRECATE_EOF)
{
if !self.capabilities.contains(Capabilities::DEPRECATE_EOF) {
let _eof = EofPacket::decode(self.receive().await?)?;
}
Ok(())
}
async fn send_prepare<'c>(
&'c mut self,
statement: &'c str,
) -> Result<ComStmtPrepareOk> {
self.stream.flush().await?;
self.start_sequence();
self.write(ComStmtPrepare { statement });
self.stream.flush().await?;
// COM_STMT_PREPARE returns COM_STMT_PREPARE_OK (0x00) or ERR (0xFF)
let packet = self.receive().await?;
if packet[0] == 0xFF {
return ErrPacket::decode(packet)?.expect_error();
}
let ok = ComStmtPrepareOk::decode(packet)?;
Ok(ok)
pub(super) async fn receive(&mut self) -> crate::Result<&[u8]> {
Ok(self
.try_receive()
.await?
.ok_or(io::ErrorKind::UnexpectedEof)?)
}
// MySQL/MariaDB responds with statement metadata for every PREPARE command
// sometimes we care, sometimes we don't
pub(super) async fn prepare_ignore_describe(&mut self, statement: &str) -> Result<StatementId> {
let ok = self.send_prepare(statement).await?;
pub(super) async fn try_receive(&mut self) -> crate::Result<Option<&[u8]>> {
self.rbuf.clear();
if ok.params > 0 {
// Input parameters
for _ in 0..ok.params {
// TODO: Maybe do something with this data ?
let _column = ColumnDefinitionPacket::decode(self.receive().await?)?;
}
// Read the packet header which contains the length and the sequence number
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_basic_packets.html
// https://mariadb.com/kb/en/library/0-packet/#standard-packet
let mut header = ret_if_none!(self.stream.peek(4).await?);
let payload_len = header.get_uint::<LittleEndian>(3)? as usize;
self.next_seq_no = header.get_u8()?.wrapping_add(1);
self.stream.consume(4);
self.check_eof().await?;
}
// Read the packet body and copy it into our internal buf
// We must have a separate buffer around the stream as we can't operate directly
// on bytes returned from the stream. We have various kinds of payload manipulation
// that must be handled before decoding.
let payload = ret_if_none!(self.stream.peek(payload_len).await?);
self.rbuf.extend_from_slice(payload);
self.stream.consume(payload_len);
if ok.columns > 0 {
// Output parameters
for _ in 0..ok.columns {
// TODO: Maybe do something with this data ?
let _column = ColumnDefinitionPacket::decode(self.receive().await?)?;
}
// TODO: Implement packet compression
// TODO: Implement packet joining
self.check_eof().await?;
}
Ok(ok.statement_id)
Ok(Some(&self.rbuf[..payload_len]))
}
}
pub(super) async fn prepare_describe(&mut self, statement: &str) -> Result<Describe<MySql>> {
let ok = self.send_prepare(statement).await?;
impl MySqlConnection {
// TODO: Authentication ?!
async fn open(url: crate::Result<Url>) -> crate::Result<Self> {
let url = url?;
let stream = TcpStream::connect((url.host(), url.port(3306))).await?;
let mut param_types = Vec::with_capacity(ok.params as usize);
let mut result_fields = Vec::with_capacity(ok.columns as usize);
let mut self_ = Self {
stream: BufStream::new(stream),
capabilities: Capabilities::empty(),
rbuf: Vec::with_capacity(8192),
next_seq_no: 0,
statement_cache: StatementCache::new(),
ready: true,
};
// Input parameters
for _ in 0..ok.params {
let param = ColumnDefinitionPacket::decode(self.receive().await?)?;
param_types.push(param.field_type.0);
}
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_connection_phase.html
// https://mariadb.com/kb/en/connection/
self.check_eof().await?;
// First, we receive the Handshake
// Output parameters
for _ in 0..ok.columns {
let column = ColumnDefinitionPacket::decode(self.receive().await?)?;
result_fields.push(ResultField {
name: column.column_alias.or(column.column),
table_id: column.table_alias.or(column.table),
type_id: column.field_type.0,
_backcompat: ()
});
}
let handshake_packet = self_.receive().await?;
let handshake = Handshake::decode(handshake_packet)?;
self.check_eof().await?;
// TODO: Capabilities::SECURE_CONNECTION
// TODO: Capabilities::CONNECT_ATTRS
// TODO: Capabilities::PLUGIN_AUTH
// TODO: Capabilities::PLUGIN_AUTH_LENENC_CLIENT_DATA
// TODO: Capabilities::TRANSACTIONS
// TODO: Capabilities::CLIENT_DEPRECATE_EOF
// TODO: Capabilities::COMPRESS
// TODO: Capabilities::ZSTD_COMPRESSION_ALGORITHM
let client_capabilities = Capabilities::PROTOCOL_41
| Capabilities::IGNORE_SPACE
| Capabilities::FOUND_ROWS
| Capabilities::CONNECT_WITH_DB;
Ok(Describe {
param_types,
result_fields,
_backcompat: (),
})
}
// Fails if [Capabilities::PROTOCOL_41] is not in [server_capabilities]
self_.capabilities =
(client_capabilities & handshake.server_capabilities) | Capabilities::PROTOCOL_41;
pub(super) async fn result_column_defs(&mut self) -> Result<Vec<ColumnDefinitionPacket>> {
let packet = self.receive().await?;
// Next we send the response
// A Resultset starts with a [ColumnCountPacket] which is a single field that encodes
// how many columns we can expect when fetching rows from this statement
if packet[0] == 255 {
ErrPacket::decode(packet)?.expect_error()?;
}
let column_count: u64 = ColumnCountPacket::decode(packet)?.columns;
// Next we have a [ColumnDefinitionPacket] which verbosely explains each minute
// detail about the column in question including table, aliasing, and type
// TODO: This information was *already* returned by PREPARE .., is there a way to suppress generation
let mut columns = vec![];
for _ in 0..column_count {
let column = ColumnDefinitionPacket::decode(self.receive().await?)?;
columns.push(column);
}
self.check_eof().await?;
Ok(columns)
}
pub(super) async fn send_execute(
&mut self,
statement_id: u32,
params: MySqlDbParameters,
) -> Result<()> {
// TODO: EXECUTE(READ_ONLY) => FETCH instead of EXECUTE(NO)
// SEND ================
self.start_sequence();
self.write(ComStmtExecute {
statement_id,
params: &params.params,
null: &params.null_bitmap,
flags: StmtExecFlag::NO_CURSOR,
param_types: &params.param_types,
self_.write(HandshakeResponse {
client_collation: 192, // utf8_unicode_ci
max_packet_size: 1024,
username: url.username().unwrap_or("root"),
// TODO: Remove the panic!
database: url.database().expect("required database"),
});
self.stream.flush().await?;
// =====================
Ok(())
self_.stream.flush().await?;
let _ok = self_.receive_ok().await?;
Ok(self_)
}
async fn expect_eof_or_err(&mut self) -> crate::Result<()> {
let packet = self.receive().await?;
match packet[0] {
0xFE => { EofPacket::decode(packet)?; },
0xFF => { ErrPacket::decode(packet)?.expect_error()?; },
_ => return Err(protocol_err!("expected EOF or ERR, got {:02X}", packet[0]).into()),
}
Ok(())
}
pub(super) async fn send_raw(
&mut self,
commands: &str
) -> Result<()> {
async fn close(mut self) -> crate::Result<()> {
self.stream.flush().await?;
self.start_sequence();
// enable multi-statement only for this query
self.write(ComSetOption { option: SetOptionOptions::MySqlOptionMultiStatementsOn });
self.write(ComQuery { sql_statement: commands });
self.write(ComSetOption { option: SetOptionOptions::MySqlOptionMultiStatementsOff });
self.stream.flush().await?;
self.expect_eof_or_err().await?;
let packet = self.receive().await?;
if packet[0] == 0xFF { return ErrPacket::decode(packet)?.expect_error() }
// otherwise ignore packet
self.expect_eof_or_err().await?;
self.stream.stream.shutdown(Shutdown::Both)?;
Ok(())
}
}
impl Connection for MySqlConnection {
fn open<T>(url: T) -> BoxFuture<'static, crate::Result<Self>>
where
T: TryInto<Url, Error = crate::Error>,
Self: Sized,
{
Box::pin(MySqlConnection::open(url.try_into()))
}
fn close(self) -> BoxFuture<'static, crate::Result<()>> {
Box::pin(self.close())
}
}

View File

@ -0,0 +1,12 @@
use crate::Database;
/// **MySQL** database driver.
pub struct MySql;
impl Database for MySql {
type Connection = super::MySqlConnection;
type Arguments = super::MySqlArguments;
type Row = super::MySqlRow;
}

View File

@ -1,25 +1,53 @@
use crate::{error::DatabaseError, mysql::protocol::ErrorCode};
use std::fmt::{self, Debug, Display};
use std::fmt;
use crate::error::DatabaseError;
use crate::mysql::protocol::ErrPacket;
#[derive(Debug)]
pub struct Error {
pub code: ErrorCode,
pub message: Box<str>,
}
pub struct MySqlError(pub(super) ErrPacket);
impl DatabaseError for Error {
impl DatabaseError for MySqlError {
fn message(&self) -> &str {
&self.message
&*self.0.error_message
}
fn details(&self) -> Option<&str> {
None
}
fn hint(&self) -> Option<&str> {
None
}
fn table_name(&self) -> Option<&str> {
None
}
fn column_name(&self) -> Option<&str> {
None
}
fn constraint_name(&self) -> Option<&str> {
None
}
}
impl fmt::Display for Error {
// TODO: De-duplicate these two impls with Postgres (macro?)
impl Debug for MySqlError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Mysql returned an error: {}; {}",
self.code, self.message
)
f.debug_struct("DatabaseError")
.field("message", &self.message())
.field("details", &self.details())
.field("hint", &self.hint())
.field("table_name", &self.table_name())
.field("column_name", &self.column_name())
.field("constraint_name", &self.constraint_name())
.finish()
}
}
impl Display for MySqlError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad(self.message())
}
}

View File

@ -1,46 +0,0 @@
use crate::{
mysql::{
connection::Connection,
protocol::{Capabilities, HandshakeResponsePacket, InitialHandshakePacket},
},
url::Url,
Result,
};
pub(crate) async fn establish(conn: &mut Connection, url: &Url) -> Result<()> {
let initial = InitialHandshakePacket::decode(conn.receive().await?)?;
// TODO: Capabilities::SECURE_CONNECTION
// TODO: Capabilities::CONNECT_ATTRS
// TODO: Capabilities::PLUGIN_AUTH
// TODO: Capabilities::PLUGIN_AUTH_LENENC_CLIENT_DATA
// TODO: Capabilities::TRANSACTIONS
// TODO: Capabilities::CLIENT_DEPRECATE_EOF
// TODO?: Capabilities::CLIENT_SESSION_TRACK
let capabilities = Capabilities::CLIENT_PROTOCOL_41 | Capabilities::CONNECT_WITH_DB;
let response = HandshakeResponsePacket {
// TODO: Find a good value for [max_packet_size]
capabilities,
max_packet_size: 1024,
client_collation: 192, // utf8_unicode_ci
username: url.username(),
database: &url.database(),
auth_data: None,
auth_plugin_name: None,
connection_attrs: &[],
};
// The AND between our supported capabilities and the servers' is
// what we can use so remember it on the connection
conn.capabilities = capabilities & initial.capabilities;
conn.write(response);
conn.stream.flush().await?;
let _ = conn.receive_ok_or_err().await?;
// TODO: If CONNECT_WITH_DB is not supported we need to send an InitDb command just after establish
Ok(())
}

View File

@ -1,159 +1,375 @@
use super::{MySql, Connection};
use crate::{backend::Backend, describe::{Describe, ResultField}, executor::Executor, mysql::{
protocol::{
Capabilities, ColumnCountPacket, ColumnDefinitionPacket, ComStmtExecute, EofPacket,
ErrPacket, OkPacket, ResultRow, StmtExecFlag,
},
query::MySqlDbParameters,
}, params::{IntoQueryParameters, QueryParameters}, row::FromRow, url::Url, Error};
use futures_core::{future::BoxFuture, stream::BoxStream, Future};
use std::pin::Pin;
use std::collections::HashMap;
use std::sync::Arc;
impl Connection {
async fn prepare_cached(&mut self, query: &str) -> crate::Result<u32> {
let conn = &mut self.conn;
Ok(*(self.cache.get_or_compute(query, || conn.prepare_ignore_describe(query)).await?))
use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;
use crate::describe::{Column, Describe};
use crate::executor::Executor;
use crate::mysql::error::MySqlError;
use crate::mysql::protocol::{
Capabilities, ColumnCount, ColumnDefinition, ComQuery, ComSetOption, ComStmtExecute,
ComStmtPrepare, ComStmtPrepareOk, Cursor, Decode, EofPacket, ErrPacket, OkPacket, Row,
SetOption, Type,
};
use crate::mysql::{MySql, MySqlArguments, MySqlConnection, MySqlRow};
enum Step {
Command(u64),
Row(Row),
}
enum OkOrResultSet {
Ok(OkPacket),
ResultSet(ColumnCount),
}
impl MySqlConnection {
async fn ignore_columns(&mut self, count: usize) -> crate::Result<()> {
for _ in 0..count {
let _column = ColumnDefinition::decode(self.receive().await?)?;
}
if count > 0 {
self.receive_eof().await?;
}
Ok(())
}
async fn receive_ok_or_column_count(&mut self) -> crate::Result<OkOrResultSet> {
let packet = self.receive().await?;
match packet[0] {
0xfe if packet.len() < 0xffffff => {
let ok = OkPacket::decode(packet)?;
self.ready = true;
Ok(OkOrResultSet::Ok(ok))
}
0x00 => {
let ok = OkPacket::decode(packet)?;
self.ready = true;
Ok(OkOrResultSet::Ok(ok))
}
0xff => {
let err = ErrPacket::decode(packet)?;
self.ready = true;
Err(MySqlError(err).into())
}
_ => {
let cc = ColumnCount::decode(packet)?;
Ok(OkOrResultSet::ResultSet(cc))
}
}
}
async fn receive_column_types(&mut self, count: usize) -> crate::Result<Box<[Type]>> {
let mut columns: Vec<Type> = Vec::with_capacity(count);
for _ in 0..count {
let packet = self.receive().await?;
let column: ColumnDefinition = ColumnDefinition::decode(packet)?;
columns.push(column.r#type);
}
if count > 0 {
self.receive_eof().await?;
}
Ok(columns.into_boxed_slice())
}
async fn wait_for_ready(&mut self) -> crate::Result<()> {
if !self.ready {
while let Some(_step) = self.step(&[], true).await? {
// Drain steps until we hit the end
}
}
Ok(())
}
async fn prepare(&mut self, query: &str) -> crate::Result<ComStmtPrepareOk> {
// Start by sending a COM_STMT_PREPARE
self.begin_command_phase();
self.write(ComStmtPrepare { query });
self.stream.flush().await?;
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_stmt_prepare.html
// First we should receive a COM_STMT_PREPARE_OK
let packet = self.receive().await?;
if packet[0] == 0xff {
// Oops, there was an error in the prepare command
return Err(MySqlError(ErrPacket::decode(packet)?).into());
}
ComStmtPrepareOk::decode(packet)
}
async fn prepare_with_cache(&mut self, query: &str) -> crate::Result<u32> {
if let Some(&id) = self.statement_cache.get(query) {
Ok(id)
} else {
let prepare_ok = self.prepare(query).await?;
// Remember our statement ID, so we don't do this again the next time
self.statement_cache
.put(query.to_owned(), prepare_ok.statement_id);
// Ignore input parameters
self.ignore_columns(prepare_ok.params as usize).await?;
// Collect output parameter names
let mut columns = HashMap::with_capacity(prepare_ok.columns as usize);
let mut index = 0_usize;
for _ in 0..prepare_ok.columns {
let column = ColumnDefinition::decode(self.receive().await?)?;
if let Some(name) = column.column_alias.or(column.column) {
columns.insert(name, index);
}
index += 1;
}
if prepare_ok.columns > 0 {
self.receive_eof().await?;
}
// Remember our column map in the statement cache
self.statement_cache
.put_columns(prepare_ok.statement_id, columns);
Ok(prepare_ok.statement_id)
}
}
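A minimal sketch of the cache-or-prepare pattern used above, assuming a plain HashMap<String, u32> in place of StatementCache and a synchronous prepare closure; the helper name is illustrative and not part of this commit.
use std::collections::HashMap;

// Illustrative only: return the cached statement id, or prepare and remember it.
fn cached_statement_id(
    cache: &mut HashMap<String, u32>,
    query: &str,
    prepare: impl FnOnce() -> u32,
) -> u32 {
    if let Some(&id) = cache.get(query) {
        id
    } else {
        let id = prepare();
        cache.insert(query.to_owned(), id);
        id
    }
}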
// [COM_STMT_EXECUTE]
async fn execute_statement(&mut self, id: u32, args: MySqlArguments) -> crate::Result<()> {
self.begin_command_phase();
self.ready = false;
self.write(ComStmtExecute {
cursor: Cursor::NO_CURSOR,
statement_id: id,
params: &args.params,
null_bitmap: &args.null_bitmap,
param_types: &args.param_types,
});
self.stream.flush().await?;
Ok(())
}
async fn step(&mut self, columns: &[Type], binary: bool) -> crate::Result<Option<Step>> {
let capabilities = self.capabilities;
let packet = ret_if_none!(self.try_receive().await?);
match packet[0] {
0xfe if packet.len() < 0xffffff => {
// Resultset row can begin with 0xfe byte (when using text protocol
// with a field length > 0xffffff)
if !capabilities.contains(Capabilities::DEPRECATE_EOF) {
let _eof = EofPacket::decode(packet)?;
self.ready = true;
return Ok(None);
} else {
let ok = OkPacket::decode(packet)?;
self.ready = true;
return Ok(Some(Step::Command(ok.affected_rows)));
}
}
0xff => {
let err = ErrPacket::decode(packet)?;
self.ready = true;
return Err(MySqlError(err).into());
}
_ => {
return Ok(Some(Step::Row(Row::decode(packet, columns, binary)?)));
}
}
}
}
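As a standalone illustration of the length guard used in step above: a packet starting with 0xfe is only an OK/EOF terminator when its payload is shorter than 0xff_ff_ff; otherwise it is a row whose first length-encoded field happens to begin with 0xfe. The helper name is illustrative, not part of this commit.
// Illustrative only: distinguish the end-of-result-set marker from a data row.
fn is_result_set_terminator(packet: &[u8]) -> bool {
    !packet.is_empty() && packet[0] == 0xfe && packet.len() < 0xff_ff_ff
}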
impl Executor for Connection {
type Backend = MySql;
impl MySqlConnection {
async fn send(&mut self, query: &str) -> crate::Result<()> {
self.wait_for_ready().await?;
fn ping(&mut self) -> BoxFuture<crate::Result<()>> {
Box::pin(self.conn.ping())
self.begin_command_phase();
self.ready = false;
// Send the query using the text protocol (COM_QUERY)
self.write(ComQuery { query });
self.stream.flush().await?;
// COM_QUERY can terminate before the result set with an ERR or OK packet
let num_columns = match self.receive_ok_or_column_count().await? {
OkOrResultSet::Ok(_) => {
return Ok(());
}
OkOrResultSet::ResultSet(cc) => cc.columns as usize,
};
let columns = self.receive_column_types(num_columns as usize).await?;
while let Some(_step) = self.step(&columns, false).await? {
// Drop all responses
}
Ok(())
}
async fn execute(&mut self, query: &str, args: MySqlArguments) -> crate::Result<u64> {
self.wait_for_ready().await?;
let statement_id = self.prepare_with_cache(query).await?;
self.execute_statement(statement_id, args).await?;
// COM_STMT_EXECUTE can terminate before the result set with an ERR or OK packet
let num_columns = match self.receive_ok_or_column_count().await? {
OkOrResultSet::Ok(ok) => {
return Ok(ok.affected_rows);
}
OkOrResultSet::ResultSet(cc) => cc.columns as usize,
};
self.ignore_columns(num_columns).await?;
let mut res = 0;
while let Some(step) = self.step(&[], true).await? {
if let Step::Command(affected) = step {
res = affected;
}
}
Ok(res)
}
async fn describe(&mut self, query: &str) -> crate::Result<Describe<MySql>> {
self.wait_for_ready().await?;
let prepare_ok = self.prepare(query).await?;
let mut param_types = Vec::with_capacity(prepare_ok.params as usize);
let mut result_columns = Vec::with_capacity(prepare_ok.columns as usize);
for _ in 0..prepare_ok.params {
let param = ColumnDefinition::decode(self.receive().await?)?;
param_types.push(param.r#type.0);
}
if prepare_ok.params > 0 {
self.receive_eof().await?;
}
for _ in 0..prepare_ok.columns {
let column = ColumnDefinition::decode(self.receive().await?)?;
result_columns.push(Column::<MySql> {
name: column.column_alias.or(column.column),
table_id: column.table_alias.or(column.table),
type_id: column.r#type.0,
_non_exhaustive: (),
});
}
if prepare_ok.columns > 0 {
self.receive_eof().await?;
}
Ok(Describe {
param_types: param_types.into_boxed_slice(),
result_columns: result_columns.into_boxed_slice(),
_non_exhaustive: (),
})
}
fn fetch<'e, 'q: 'e>(
&'e mut self,
query: &'q str,
args: MySqlArguments,
) -> BoxStream<'e, crate::Result<MySqlRow>> {
Box::pin(async_stream::try_stream! {
self.wait_for_ready().await?;
let statement_id = self.prepare_with_cache(query).await?;
let columns = self.statement_cache.get_columns(statement_id);
self.execute_statement(statement_id, args).await?;
// COM_STMT_EXECUTE can terminate before the result set with an ERR or OK packet
let num_columns = match self.receive_ok_or_column_count().await? {
OkOrResultSet::Ok(_) => {
return;
}
OkOrResultSet::ResultSet(cc) => {
cc.columns as usize
}
};
let column_types = self.receive_column_types(num_columns).await?;
while let Some(Step::Row(row)) = self.step(&column_types, true).await? {
yield MySqlRow { row, columns: Arc::clone(&columns) };
}
})
}
}
impl Executor for MySqlConnection {
type Database = super::MySql;
fn send<'e, 'q: 'e>(&'e mut self, query: &'q str) -> BoxFuture<'e, crate::Result<()>> {
Box::pin(self.send(query))
}
fn execute<'e, 'q: 'e>(
&'e mut self,
query: &'q str,
params: MySqlDbParameters,
args: MySqlArguments,
) -> BoxFuture<'e, crate::Result<u64>> {
Box::pin(async move {
let statement_id = self.prepare_cached(query).await?;
self.conn.send_execute(statement_id, params).await?;
let columns = self.conn.result_column_defs().await?;
let capabilities = self.conn.capabilities;
// For each row in the result set we will receive a ResultRow packet.
// We may receive an [OkPacket], [EofPacket], or [ErrPacket] (depending on if EOFs are enabled) to finalize the iteration.
let mut rows = 0u64;
loop {
let packet = self.conn.receive().await?;
if packet[0] == 0xFE && packet.len() < 0xFF_FF_FF {
// NOTE: It's possible for a ResultRow to start with 0xFE (which would normally signify end-of-rows)
// but it's not possible for an Ok/Eof to be larger than 0xFF_FF_FF.
if !capabilities.contains(Capabilities::CLIENT_DEPRECATE_EOF) {
let _eof = EofPacket::decode(packet)?;
} else {
let _ok = OkPacket::decode(packet, capabilities)?;
}
break;
} else if packet[0] == 0xFF {
let err = ErrPacket::decode(packet)?;
panic!("received db err = {:?}", err);
} else {
// Ignore result rows; exec only returns number of affected rows;
let _ = ResultRow::decode(packet, &columns)?;
// For every row we decode we increment counter
rows = rows + 1;
}
}
Ok(rows)
})
Box::pin(self.execute(query, args))
}
fn fetch<'e, 'q: 'e, T: 'e>(
fn fetch<'e, 'q: 'e>(
&'e mut self,
query: &'q str,
params: MySqlDbParameters,
) -> BoxStream<'e, crate::Result<T>>
where
T: FromRow<Self::Backend> + Send + Unpin,
{
Box::pin(async_stream::try_stream! {
let prepare = self.prepare_cached(query).await?;
self.conn.send_execute(prepare, params).await?;
let columns = self.conn.result_column_defs().await?;
let capabilities = self.conn.capabilities;
loop {
let packet = self.conn.receive().await?;
if packet[0] == 0xFE && packet.len() < 0xFF_FF_FF {
// NOTE: It's possible for a ResultRow to start with 0xFE (which would normally signify end-of-rows)
// but it's not possible for an Ok/Eof to be larger than 0xFF_FF_FF.
if !capabilities.contains(Capabilities::CLIENT_DEPRECATE_EOF) {
let _eof = EofPacket::decode(packet)?;
} else {
let _ok = OkPacket::decode(packet, capabilities)?;
}
break;
} else if packet[0] == 0xFF {
let _err = ErrPacket::decode(packet)?;
panic!("ErrPacket received");
} else {
let row = ResultRow::decode(packet, &columns)?;
yield FromRow::from_row(row);
}
}
})
}
fn fetch_optional<'e, 'q: 'e, T: 'e>(
&'e mut self,
query: &'q str,
params: MySqlDbParameters,
) -> BoxFuture<'e, crate::Result<Option<T>>>
where
T: FromRow<Self::Backend> + Send,
{
Box::pin(async move {
let statement_id = self.prepare_cached(query).await?;
self.conn.send_execute(statement_id, params).await?;
let columns = self.conn.result_column_defs().await?;
let capabilities = self.conn.capabilities;
let mut row = None;
loop {
let packet = self.conn.receive().await?;
if packet[0] == 0xFE && packet.len() < 0xFF_FF_FF {
// NOTE: It's possible for a ResultRow to start with 0xFE (which would normally signify end-of-rows)
// but it's not possible for an Ok/Eof to be larger than 0xFF_FF_FF.
if !capabilities.contains(Capabilities::CLIENT_DEPRECATE_EOF) {
let _eof = EofPacket::decode(packet)?;
} else {
let _ok = OkPacket::decode(packet, capabilities)?;
}
break;
} else if packet[0] == 0xFF {
let _err = ErrPacket::decode(packet)?;
panic!("Received error packet: {:?}", _err);
} else {
row = Some(FromRow::from_row(ResultRow::decode(packet, &columns)?));
}
}
Ok(row)
})
args: MySqlArguments,
) -> BoxStream<'e, crate::Result<MySqlRow>> {
self.fetch(query, args)
}
fn describe<'e, 'q: 'e>(
&'e mut self,
query: &'q str,
) -> BoxFuture<'e, crate::Result<Describe<Self::Backend>>> {
Box::pin(self.conn.prepare_describe(query))
}
fn send<'e, 'q: 'e>(&'e mut self, commands: &'q str) -> BoxFuture<'e, crate::Result<()>> {
Box::pin(self.conn.send_raw(commands))
) -> BoxFuture<'e, crate::Result<Describe<Self::Database>>> {
Box::pin(self.describe(query))
}
}

View File

@ -1,52 +1,35 @@
use crate::io::Buf;
use byteorder::ByteOrder;
use std::io;
use byteorder::ByteOrder;
use crate::io::Buf;
pub trait BufExt {
fn get_uint<T: ByteOrder>(&mut self, n: usize) -> io::Result<u64>;
fn get_uint_lenenc<T: ByteOrder>(&mut self) -> io::Result<Option<u64>>;
fn get_str_eof(&mut self) -> io::Result<&str>;
fn get_str_lenenc<T: ByteOrder>(&mut self) -> io::Result<Option<&str>>;
fn get_bytes(&mut self, n: usize) -> io::Result<&[u8]>;
fn get_bytes_lenenc<T: ByteOrder>(&mut self) -> io::Result<Option<&[u8]>>;
}
impl<'a> BufExt for &'a [u8] {
fn get_uint<T: ByteOrder>(&mut self, n: usize) -> io::Result<u64> {
let val = T::read_uint(*self, n);
self.advance(n);
Ok(val)
}
impl BufExt for &'_ [u8] {
fn get_uint_lenenc<T: ByteOrder>(&mut self) -> io::Result<Option<u64>> {
Ok(match self.get_u8()? {
0xFB => None,
0xFC => Some(u64::from(self.get_u16::<T>()?)),
0xFD => Some(u64::from(self.get_u24::<T>()?)),
0xFE => Some(self.get_u64::<T>()?),
// ? 0xFF => panic!("int<lenenc> unprocessable first byte 0xFF"),
value => Some(u64::from(value)),
})
}
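For reference, a self-contained sketch of the int<lenenc> rules matched above, returning the decoded value (None for the 0xFB NULL marker) together with the number of bytes consumed. The helper assumes little-endian order and is illustrative, not part of this commit.
// Illustrative only: decode a length-encoded integer from the front of `buf`.
fn decode_int_lenenc(buf: &[u8]) -> (Option<u64>, usize) {
    match buf[0] {
        0xFB => (None, 1),
        0xFC => (Some(u64::from(u16::from_le_bytes([buf[1], buf[2]]))), 3),
        0xFD => (
            Some(buf[1] as u64 | (buf[2] as u64) << 8 | (buf[3] as u64) << 16),
            4,
        ),
        0xFE => (
            Some(u64::from_le_bytes([
                buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8],
            ])),
            9,
        ),
        value => (Some(u64::from(value)), 1),
    }
}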
fn get_str_eof(&mut self) -> io::Result<&str> {
self.get_str(self.len())
}
fn get_str_lenenc<T: ByteOrder>(&mut self) -> io::Result<Option<&str>> {
self.get_uint_lenenc::<T>()?
.map(move |len| self.get_str(len as usize))
.transpose()
}
fn get_bytes(&mut self, n: usize) -> io::Result<&[u8]> {
let buf = &self[..n];
self.advance(n);
Ok(buf)
}
fn get_bytes_lenenc<T: ByteOrder>(&mut self) -> io::Result<Option<&[u8]>> {
self.get_uint_lenenc::<T>()?
.map(move |len| self.get_bytes(len as usize))

View File

@ -1,16 +1,14 @@
use crate::io::BufMut;
use byteorder::ByteOrder;
use std::{u16, u32, u64, u8};
use byteorder::ByteOrder;
use crate::io::BufMut;
pub trait BufMutExt {
fn put_uint_lenenc<T: ByteOrder, U: Into<Option<u64>>>(&mut self, val: U);
fn put_str_lenenc<T: ByteOrder>(&mut self, val: &str);
fn put_str(&mut self, val: &str);
fn put_bytes(&mut self, val: &[u8]);
fn put_bytes_lenenc<T: ByteOrder>(&mut self, val: &[u8]);
}
@ -49,23 +47,11 @@ impl BufMutExt for Vec<u8> {
}
}
#[inline]
fn put_str(&mut self, val: &str) {
self.put_bytes(val.as_bytes());
}
#[inline]
fn put_str_lenenc<T: ByteOrder>(&mut self, val: &str) {
self.put_uint_lenenc::<T, _>(val.len() as u64);
self.extend_from_slice(val.as_bytes());
}
#[inline]
fn put_bytes(&mut self, val: &[u8]) {
self.extend_from_slice(val);
}
#[inline]
fn put_bytes_lenenc<T: ByteOrder>(&mut self, val: &[u8]) {
self.put_uint_lenenc::<T, _>(val.len() as u64);
self.extend_from_slice(val);
@ -74,28 +60,9 @@ impl BufMutExt for Vec<u8> {
#[cfg(test)]
mod tests {
use super::BufMutExt;
use crate::io::BufMut;
use super::{BufMut, BufMutExt};
use byteorder::LittleEndian;
// [X] it_encodes_int_lenenc_u64
// [X] it_encodes_int_lenenc_u32
// [X] it_encodes_int_lenenc_u24
// [X] it_encodes_int_lenenc_u16
// [X] it_encodes_int_lenenc_u8
// [X] it_encodes_int_u64
// [X] it_encodes_int_u32
// [X] it_encodes_int_u24
// [X] it_encodes_int_u16
// [X] it_encodes_int_u8
// [X] it_encodes_string_lenenc
// [X] it_encodes_string_fix
// [X] it_encodes_string_null
// [X] it_encodes_string_eof
// [X] it_encodes_byte_lenenc
// [X] it_encodes_byte_fix
// [X] it_encodes_byte_eof
#[test]
fn it_encodes_int_lenenc_none() {
let mut buf = Vec::with_capacity(1024);

View File

@ -1,5 +1,5 @@
pub mod buf_ext;
pub mod buf_mut_ext;
mod buf_ext;
mod buf_mut_ext;
pub use buf_ext::BufExt;
pub use buf_mut_ext::BufMutExt;

View File

@ -1,36 +1,17 @@
mod backend;
//! **MySQL** database and connection types.
mod arguments;
mod connection;
mod database;
mod error;
mod establish;
mod executor;
mod io;
mod protocol;
mod query;
mod row;
pub mod types;
mod types;
use self::connection::Connection as RawConnection;
use crate::cache::StatementCache;
use futures_core::future::BoxFuture;
use crate::Backend;
/// Backend for MySQL.
pub enum MySql {}
impl MySql {
/// An alias for [Backend::connect()](../trait.Backend.html#method.connect)
pub async fn connect(url: &str) -> crate::Result<Connection> {
<Self as Backend>::connect(url).await
}
}
pub struct Connection {
conn: RawConnection,
cache: StatementCache<u32>,
}
impl crate::Connection for Connection {
fn close(self) -> BoxFuture<'static, crate::Result<()>> {
Box::pin(self.conn.close())
}
}
pub use arguments::MySqlArguments;
pub use connection::MySqlConnection;
pub use database::MySql;
// pub use error::DatabaseError;
pub use row::MySqlRow;

View File

@ -1,38 +0,0 @@
use crate::{
io::BufMut,
mysql::{
io::BufMutExt,
protocol::{binary::BinaryProtocol, Capabilities, Encode},
},
};
use byteorder::LittleEndian;
/// Closes a previously prepared statement.
#[derive(Debug)]
pub struct ComStmtClose {
statement_id: i32,
}
impl Encode for ComStmtClose {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_STMT_CLOSE : int<1>
buf.put_u8(BinaryProtocol::ComStmtClose as u8);
// statement_id : int<4>
buf.put_i32::<LittleEndian>(self.statement_id);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_stmt_close() {
let mut buf = Vec::new();
ComStmtClose { statement_id: 1 }.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x19\x01\0\0\0");
}
}

View File

@ -1,89 +0,0 @@
use crate::{
io::BufMut,
mysql::{
io::BufMutExt,
protocol::{binary::BinaryProtocol, Capabilities, Encode},
types::MySqlTypeMetadata,
},
};
use byteorder::LittleEndian;
bitflags::bitflags! {
// https://mariadb.com/kb/en/library/com_stmt_execute/#flag
pub struct StmtExecFlag: u8 {
const NO_CURSOR = 0;
const READ_ONLY = 1;
const CURSOR_FOR_UPDATE = 2;
const SCROLLABLE_CURSOR = 4;
}
}
// https://mariadb.com/kb/en/library/com_stmt_execute
/// Executes a previously prepared statement.
#[derive(Debug)]
pub struct ComStmtExecute<'a> {
pub statement_id: u32,
pub flags: StmtExecFlag,
pub params: &'a [u8],
pub null: &'a [u8],
pub param_types: &'a [MySqlTypeMetadata],
}
impl Encode for ComStmtExecute<'_> {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_STMT_EXECUTE : int<1>
buf.put_u8(BinaryProtocol::ComStmtExec as u8);
// statement id : int<4>
buf.put_u32::<LittleEndian>(self.statement_id);
// flags : int<1>
buf.put_u8(self.flags.bits());
// Iteration count (always 1) : int<4>
buf.put_u32::<LittleEndian>(1);
// if (param_count > 0)
if self.param_types.len() > 0 {
// null bitmap : byte<(param_count + 7)/8>
buf.put_bytes(self.null);
// send type to server (0 / 1) : byte<1>
buf.put_u8(1);
// for each parameter :
for param_type in self.param_types {
// field type : byte<1>
buf.put_u8(param_type.field_type.0);
// parameter flag : byte<1>
buf.put_u8(param_type.param_flag.bits());
}
// for each parameter (i.e param_count times)
// byte<n> binary parameter value
buf.put_bytes(self.params);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_stmt_exec() {
let mut buf = Vec::new();
ComStmtExecute {
statement_id: 1,
flags: StmtExecFlag::NO_CURSOR,
null: &vec![],
params: &vec![],
param_types: &vec![],
}
.encode(&mut buf, Capabilities::empty());
// TODO: Add a regression test
}
}

View File

@ -1,44 +0,0 @@
use crate::{
io::BufMut,
mysql::protocol::{binary::BinaryProtocol, Capabilities, Encode},
};
use byteorder::LittleEndian;
// https://mariadb.com/kb/en/library/com_stmt_fetch/
/// Fetch rows from a prepared statement.
#[derive(Debug)]
pub struct ComStmtFetch {
pub statement_id: u32,
pub rows: u32,
}
impl Encode for ComStmtFetch {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_STMT_FETCH : int<1>
buf.put_u8(BinaryProtocol::ComStmtFetch as u8);
// statement id : int<4>
buf.put_u32::<LittleEndian>(self.statement_id);
// number of rows to fetch : int<4>
buf.put_u32::<LittleEndian>(self.rows);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_stmt_fetch() {
let mut buf = Vec::new();
ComStmtFetch {
statement_id: 1,
rows: 10,
}
.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x1C\x01\0\0\0\x0A\0\0\0");
}
}

View File

@ -1,39 +0,0 @@
use crate::{
io::BufMut,
mysql::{
io::BufMutExt,
protocol::{Capabilities, Encode},
},
};
#[derive(Debug)]
pub struct ComStmtPrepare<'a> {
pub statement: &'a str,
}
impl Encode for ComStmtPrepare<'_> {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_STMT_PREPARE : int<1>
buf.put_u8(super::BinaryProtocol::ComStmtPrepare as u8);
// SQL Statement : string<EOF>
buf.put_str(&self.statement);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_stmt_prepare() {
let mut buf = Vec::new();
ComStmtPrepare {
statement: "SELECT * FROM users WHERE username = ?",
}
.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], &b"\x16SELECT * FROM users WHERE username = ?"[..]);
}
}

View File

@ -1,81 +0,0 @@
use crate::io::Buf;
use byteorder::LittleEndian;
use std::io;
// https://mariadb.com/kb/en/library/com_stmt_prepare/#com_stmt_prepare_ok
#[derive(Debug)]
pub struct ComStmtPrepareOk {
pub statement_id: u32,
/// Number of columns in the returned result set (or 0 if statement does not return result set).
pub columns: u16,
/// Number of prepared statement parameters ('?' placeholders).
pub params: u16,
/// Number of warnings.
pub warnings: u16,
}
impl ComStmtPrepareOk {
pub(crate) fn decode(mut buf: &[u8]) -> crate::Result<Self> {
let header = buf.get_u8()?;
if header != 0x00 {
return Err(
protocol_err!("expected COM_STMT_PREPARE_OK (0x00); received {}", header).into(),
);
}
let statement_id = buf.get_u32::<LittleEndian>()?;
let columns = buf.get_u16::<LittleEndian>()?;
let params = buf.get_u16::<LittleEndian>()?;
// Skip 1 unused byte
// -not used- : string<1>
buf.advance(1);
let warnings = buf.get_u16::<LittleEndian>()?;
Ok(Self {
statement_id,
columns,
params,
warnings,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::__bytes_builder;
#[test]
fn it_decodes_com_stmt_prepare_ok() -> crate::Result<()> {
#[rustfmt::skip]
let buf = &__bytes_builder!(
// int<1> 0x00 COM_STMT_PREPARE_OK header
0u8,
// int<4> statement id
1u8, 0u8, 0u8, 0u8,
// int<2> number of columns in the returned result set (or 0 if statement does not return result set)
10u8, 0u8,
// int<2> number of prepared statement parameters ('?' placeholders)
1u8, 0u8,
// string<1> -not used-
0u8,
// int<2> number of warnings
0u8, 0u8
)[..];
let message = ComStmtPrepareOk::decode(&buf)?;
assert_eq!(message.statement_id, 1);
assert_eq!(message.columns, 10);
assert_eq!(message.params, 1);
assert_eq!(message.warnings, 0);
Ok(())
}
}

View File

@ -1,34 +0,0 @@
use crate::{
io::BufMut,
mysql::protocol::{binary::BinaryProtocol, Capabilities, Encode},
};
use byteorder::LittleEndian;
#[derive(Debug)]
pub struct ComStmtReset {
pub statement_id: u32,
}
impl Encode for ComStmtReset {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_STMT_RESET : int<1>
buf.put_u8(BinaryProtocol::ComStmtReset as u8);
// statement_id : int<4>
buf.put_u32::<LittleEndian>(self.statement_id);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_stmt_reset() {
let mut buf = Vec::new();
ComStmtReset { statement_id: 1 }.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x1A\x01\0\0\0");
}
}

View File

@ -1,21 +0,0 @@
pub mod com_stmt_close;
pub mod com_stmt_exec;
pub mod com_stmt_fetch;
pub mod com_stmt_prepare;
pub mod com_stmt_prepare_ok;
pub mod com_stmt_reset;
pub use com_stmt_close::ComStmtClose;
pub use com_stmt_exec::{ComStmtExecute, StmtExecFlag};
pub use com_stmt_fetch::ComStmtFetch;
pub use com_stmt_prepare::ComStmtPrepare;
pub use com_stmt_prepare_ok::ComStmtPrepareOk;
pub use com_stmt_reset::ComStmtReset;
pub enum BinaryProtocol {
ComStmtPrepare = 0x16,
ComStmtExec = 0x17,
ComStmtClose = 0x19,
ComStmtReset = 0x1A,
ComStmtFetch = 0x1C,
}

View File

@ -1,65 +1,86 @@
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/group__group__cs__capabilities__flags.html
// https://mariadb.com/kb/en/library/connection/#capabilities
bitflags::bitflags! {
pub struct Capabilities: u128 {
const CLIENT_MYSQL = 1;
pub struct Capabilities: u64 {
// [MariaDB] MySQL compatibility
const MYSQL = 1;
// [*] Send found rows instead of affected rows in EOF_Packet.
const FOUND_ROWS = 2;
// One can specify db on connect
// Get all column flags.
const LONG_FLAG = 4;
// [*] Database (schema) name can be specified on connect in Handshake Response Packet.
const CONNECT_WITH_DB = 8;
// Can use compression protocol
// Don't allow database.table.column
const NO_SCHEMA = 16;
// [*] Compression protocol supported
const COMPRESS = 32;
// Special handling of ODBC behavior.
const ODBC = 64;
// Can use LOAD DATA LOCAL
const LOCAL_FILES = 128;
// Ignore spaces before '('
// [*] Ignore spaces before '('
const IGNORE_SPACE = 256;
// 4.1+ protocol
const CLIENT_PROTOCOL_41 = 1 << 9;
// [*] New 4.1+ protocol
const PROTOCOL_41 = 512;
const CLIENT_INTERACTIVE = 1 << 10;
// This is an interactive client
const INTERACTIVE = 1024;
// Can use SSL
const SSL = 1 << 11;
// Use SSL encryption for this session
const SSL = 2048;
const TRANSACTIONS = 1 << 12;
// Client knows about transactions
const TRANSACTIONS = 8192;
// 4.1+ authentication
const SECURE_CONNECTION = 1 << 13;
const SECURE_CONNECTION = (1 << 13);
// Enable/disable multi-stmt support
const MULTI_STATEMENTS = 1 << 16;
// Enable/disable multi-statement support for COM_QUERY *and* COM_STMT_PREPARE
const MULTI_STATEMENTS = (1 << 16);
// Enable/disable multi-results
const MULTI_RESULTS = 1 << 17;
// Enable/disable multi-results for COM_QUERY
const MULTI_RESULTS = (1 << 17);
// Enable/disable multi-results for PrepareStatement
const PS_MULTI_RESULTS = 1 << 18;
// Enable/disable multi-results for COM_STMT_PREPARE
const PS_MULTI_RESULTS = (1 << 18);
// Client supports plugin authentication
const PLUGIN_AUTH = 1 << 19;
const PLUGIN_AUTH = (1 << 19);
// Client send connection attributes
const CONNECT_ATTRS = 1 << 20;
// Client supports connection attributes
const CONNECT_ATTRS = (1 << 20);
// Enable authentication response packet to be larger than 255 bytes
const PLUGIN_AUTH_LENENC_CLIENT_DATA = 1 << 21;
// Enable authentication response packet to be larger than 255 bytes.
const PLUGIN_AUTH_LENENC_DATA = (1 << 21);
// Enable/disable session tracking in OK_Packet
const CLIENT_SESSION_TRACK = 1 << 23;
// Don't close the connection for a user account with expired password.
const CAN_HANDLE_EXPIRED_PASSWORDS = (1 << 22);
// EOF_Packet deprecation
const CLIENT_DEPRECATE_EOF = 1 << 24;
// Capable of handling server state change information.
const SESSION_TRACK = (1 << 23);
// Client support progress indicator (since 10.2)
const MARIA_DB_CLIENT_PROGRESS = 1 << 32;
// Client no longer needs EOF_Packet and will use OK_Packet instead.
const DEPRECATE_EOF = (1 << 24);
// Permit COM_MULTI protocol
const MARIA_DB_CLIENT_COM_MULTI = 1 << 33;
// Support ZSTD protocol compression
const ZSTD_COMPRESSION_ALGORITHM = (1 << 26);
// Permit bulk insert
const MARIA_CLIENT_STMT_BULK_OPERATIONS = 1 << 34;
// Verify server certificate
const SSL_VERIFY_SERVER_CERT = (1 << 30);
// The client can handle optional metadata information in the resultset
const OPTIONAL_RESULTSET_METADATA = (1 << 25);
// Don't reset the options after an unsuccessful connect
const REMEMBER_OPTIONS = (1 << 31);
}
}
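A tiny sketch of the negotiation performed at connect time: the usable set is the intersection of what the client requests and what the server advertises, with PROTOCOL_41 always kept. Plain u64 masks stand in for the bitflags type; everything below is illustrative and not part of this commit.
// Illustrative only: capability negotiation with plain bit masks.
const PROTOCOL_41: u64 = 512;
const CONNECT_WITH_DB: u64 = 8;
const DEPRECATE_EOF: u64 = 1 << 24;

fn negotiate(client: u64, server: u64) -> u64 {
    // Keep only what both sides support, but always insist on the 4.1 protocol.
    (client & server) | PROTOCOL_41
}

#[test]
fn server_without_deprecate_eof_forces_legacy_eof_path() {
    let caps = negotiate(
        PROTOCOL_41 | CONNECT_WITH_DB | DEPRECATE_EOF,
        PROTOCOL_41 | CONNECT_WITH_DB,
    );
    assert_eq!(caps & DEPRECATE_EOF, 0);
}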

View File

@ -0,0 +1,18 @@
use byteorder::LittleEndian;
use crate::io::Buf;
use crate::mysql::io::BufExt;
use crate::mysql::protocol::Decode;
#[derive(Debug)]
pub struct ColumnCount {
pub columns: u64,
}
impl Decode for ColumnCount {
fn decode(mut buf: &[u8]) -> crate::Result<Self> {
let columns = buf.get_uint_lenenc::<LittleEndian>()?.unwrap_or(0);
Ok(Self { columns })
}
}

View File

@ -0,0 +1,77 @@
use byteorder::LittleEndian;
use crate::io::Buf;
use crate::mysql::io::BufExt;
use crate::mysql::protocol::{Decode, FieldFlags, Type};
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_query_response_text_resultset_column_definition.html
// https://mariadb.com/kb/en/resultset/#column-definition-packet
#[derive(Debug)]
pub struct ColumnDefinition {
pub schema: Option<Box<str>>,
pub table_alias: Option<Box<str>>,
pub table: Option<Box<str>>,
pub column_alias: Option<Box<str>>,
pub column: Option<Box<str>>,
pub char_set: u16,
pub max_size: u32,
pub r#type: Type,
pub flags: FieldFlags,
pub decimals: u8,
}
impl Decode for ColumnDefinition {
fn decode(mut buf: &[u8]) -> crate::Result<Self> {
// catalog : string<lenenc>
let catalog = buf.get_str_lenenc::<LittleEndian>()?;
if catalog != Some("def") {
return Err(protocol_err!(
"expected ColumnDefinition (\"def\"); received {:?}",
catalog
))?;
}
let schema = buf.get_str_lenenc::<LittleEndian>()?.map(Into::into);
let table_alias = buf.get_str_lenenc::<LittleEndian>()?.map(Into::into);
let table = buf.get_str_lenenc::<LittleEndian>()?.map(Into::into);
let column_alias = buf.get_str_lenenc::<LittleEndian>()?.map(Into::into);
let column = buf.get_str_lenenc::<LittleEndian>()?.map(Into::into);
let len_fixed_fields = buf.get_uint_lenenc::<LittleEndian>()?.unwrap_or(0);
if len_fixed_fields != 0x0c {
return Err(protocol_err!(
"expected ColumnDefinition (0x0c); received {:?}",
len_fixed_fields
))?;
}
let char_set = buf.get_u16::<LittleEndian>()?;
let max_size = buf.get_u32::<LittleEndian>()?;
let r#type = buf.get_u8()?;
let flags = buf.get_u16::<LittleEndian>()?;
let decimals = buf.get_u8()?;
Ok(Self {
schema,
table,
table_alias,
column,
column_alias,
char_set,
max_size,
r#type: Type(r#type),
flags: FieldFlags::from_bits_truncate(flags),
decimals,
})
}
}

View File

@ -0,0 +1,21 @@
use byteorder::LittleEndian;
use crate::io::BufMut;
use crate::mysql::io::BufMutExt;
use crate::mysql::protocol::{Capabilities, Encode};
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_query.html
#[derive(Debug)]
pub struct ComQuery<'a> {
pub query: &'a str,
}
impl Encode for ComQuery<'_> {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_QUERY : int<1>
buf.put_u8(0x03);
// query : string<EOF>
buf.put_str(self.query);
}
}

View File

@ -0,0 +1,29 @@
use byteorder::LittleEndian;
use crate::io::BufMut;
use crate::mysql::io::BufMutExt;
use crate::mysql::protocol::{Capabilities, Encode};
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/mysql__com_8h.html#a53f60000da139fc7d547db96635a2c02
#[derive(Debug, Copy, Clone)]
#[repr(u16)]
pub enum SetOption {
MultiStatementsOn = 0x00,
MultiStatementsOff = 0x01,
}
// https://dev.mysql.com/doc/internals/en/com-set-option.html
#[derive(Debug)]
pub struct ComSetOption {
pub option: SetOption,
}
impl Encode for ComSetOption {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_SET_OPTION : int<1>
buf.put_u8(0x1a);
// option : int<2>
buf.put_u16::<LittleEndian>(self.option as u16);
}
}

View File

@ -0,0 +1,62 @@
use byteorder::LittleEndian;
use crate::io::BufMut;
use crate::mysql::io::BufMutExt;
use crate::mysql::protocol::{Capabilities, Encode};
use crate::mysql::types::MySqlTypeMetadata;
bitflags::bitflags! {
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/mysql__com_8h.html#a3e5e9e744ff6f7b989a604fd669977da
// https://mariadb.com/kb/en/library/com_stmt_execute/#flag
pub struct Cursor: u8 {
const NO_CURSOR = 0;
const READ_ONLY = 1;
const FOR_UPDATE = 2;
const SCROLLABLE = 4;
}
}
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_stmt_execute.html
#[derive(Debug)]
pub struct ComStmtExecute<'a> {
pub statement_id: u32,
pub cursor: Cursor,
pub params: &'a [u8],
pub null_bitmap: &'a [u8],
pub param_types: &'a [MySqlTypeMetadata],
}
impl Encode for ComStmtExecute<'_> {
fn encode(&self, buf: &mut Vec<u8>, capabilities: Capabilities) {
// COM_STMT_EXECUTE : int<1>
buf.put_u8(0x17);
// statement_id : int<4>
buf.put_u32::<LittleEndian>(self.statement_id);
// cursor : int<1>
buf.put_u8(self.cursor.bits());
// iterations (always 1) : int<4>
buf.put_u32::<LittleEndian>(1);
if self.param_types.len() > 0 {
// null bitmap : byte<(param_count + 7)/8>
buf.put_bytes(self.null_bitmap);
// send type to server (0 / 1) : byte<1>
buf.put_u8(1);
for ty in self.param_types {
// field type : byte<1>
buf.put_u8(ty.r#type.0);
// parameter flag : byte<1>
buf.put_u8(ty.flag);
}
// byte<n> binary parameter value
buf.put_bytes(self.params);
}
}
}
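To make the "(param_count + 7) / 8" comment above concrete, a sketch of how the NULL bitmap could be built: one bit per parameter, with bit i % 8 of byte i / 8 set when parameter i is NULL. The helper name is illustrative and not part of this commit.
// Illustrative only: build the COM_STMT_EXECUTE NULL bitmap for a parameter list.
fn null_bitmap(params_are_null: &[bool]) -> Vec<u8> {
    let mut bitmap = vec![0u8; (params_are_null.len() + 7) / 8];
    for (i, is_null) in params_are_null.iter().enumerate() {
        if *is_null {
            bitmap[i / 8] |= 1 << (i % 8);
        }
    }
    bitmap
}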

View File

@ -0,0 +1,21 @@
use byteorder::LittleEndian;
use crate::io::BufMut;
use crate::mysql::io::BufMutExt;
use crate::mysql::protocol::{Capabilities, Encode};
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_stmt_prepare.html
#[derive(Debug)]
pub struct ComStmtPrepare<'a> {
pub query: &'a str,
}
impl Encode for ComStmtPrepare<'_> {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_STMT_PREPARE : int<1>
buf.put_u8(0x16);
// query : string<EOF>
buf.put_str(self.query);
}
}

View File

@ -0,0 +1,49 @@
use byteorder::LittleEndian;
use crate::io::Buf;
use crate::mysql::io::BufExt;
use crate::mysql::protocol::Decode;
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_com_stmt_prepare.html#sect_protocol_com_stmt_prepare_response_ok
#[derive(Debug)]
pub struct ComStmtPrepareOk {
pub statement_id: u32,
/// Number of columns in the returned result set (or 0 if statement does not return result set).
pub columns: u16,
/// Number of prepared statement parameters ('?' placeholders).
pub params: u16,
/// Number of warnings.
pub warnings: u16,
}
impl Decode for ComStmtPrepareOk {
fn decode(mut buf: &[u8]) -> crate::Result<Self> {
let header = buf.get_u8()?;
if header != 0x00 {
return Err(protocol_err!(
"expected COM_STMT_PREPARE_OK (0x00); received 0x{:X}",
header
))?;
}
let statement_id = buf.get_u32::<LittleEndian>()?;
let columns = buf.get_u16::<LittleEndian>()?;
let params = buf.get_u16::<LittleEndian>()?;
// -not used- : string<1>
buf.advance(1);
let warnings = buf.get_u16::<LittleEndian>()?;
Ok(Self {
statement_id,
columns,
params,
warnings,
})
}
}

View File

@ -1,21 +0,0 @@
use crate::{
io::BufMut,
mysql::{
io::BufMutExt,
protocol::{Capabilities, Encode},
},
};
#[derive(Default, Debug)]
pub struct AuthenticationSwitchRequest<'a> {
pub auth_plugin_name: &'a str,
pub auth_plugin_data: &'a [u8],
}
impl Encode for AuthenticationSwitchRequest<'_> {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
buf.put_u8(0xFE);
buf.put_str_nul(&self.auth_plugin_name);
buf.put_bytes(&self.auth_plugin_data);
}
}

View File

@ -1,164 +0,0 @@
use crate::{
io::Buf,
mysql::{
io::BufExt,
protocol::{Capabilities, ServerStatusFlag},
},
};
use byteorder::LittleEndian;
use std::io;
#[derive(Debug)]
pub struct InitialHandshakePacket {
pub protocol_version: u8,
pub server_version: String,
pub server_status: ServerStatusFlag,
pub server_default_collation: u8,
pub connection_id: u32,
pub scramble: Box<[u8]>,
pub capabilities: Capabilities,
pub auth_plugin_name: Option<String>,
}
impl InitialHandshakePacket {
pub(crate) fn decode(mut buf: &[u8]) -> io::Result<Self> {
let protocol_version = buf.get_u8()?;
let server_version = buf.get_str_nul()?.to_owned();
let connection_id = buf.get_u32::<LittleEndian>()?;
let mut scramble = Vec::with_capacity(8);
// scramble 1st part (authentication seed) : string<8>
scramble.extend_from_slice(&buf[..8]);
buf.advance(8);
// reserved : string<1>
buf.advance(1);
// server capabilities (1st part) : int<2>
let capabilities_1 = buf.get_u16::<LittleEndian>()?;
let mut capabilities = Capabilities::from_bits_truncate(capabilities_1.into());
// server default collation : int<1>
let server_default_collation = buf.get_u8()?;
// status flags : int<2>
let server_status = buf.get_u16::<LittleEndian>()?;
// server capabilities (2nd part) : int<2>
let capabilities_2 = buf.get_u16::<LittleEndian>()?;
capabilities |= Capabilities::from_bits_truncate(((capabilities_2 as u32) << 16).into());
// if (server_capabilities & PLUGIN_AUTH)
let plugin_data_length = if capabilities.contains(Capabilities::PLUGIN_AUTH) {
// plugin data length : int<1>
buf.get_u8()?
} else {
// 0x00 : int<1>
buf.advance(1);
0
};
// filler : string<6>
buf.advance(6);
// if (server_capabilities & CLIENT_MYSQL)
if capabilities.contains(Capabilities::CLIENT_MYSQL) {
// filler : string<4>
buf.advance(4);
} else {
// server capabilities 3rd part . MariaDB specific flags : int<4>
let capabilities_3 = buf.get_u32::<LittleEndian>()?;
capabilities |= Capabilities::from_bits_truncate((capabilities_3 as u128) << 32);
}
// if (server_capabilities & CLIENT_SECURE_CONNECTION)
if capabilities.contains(Capabilities::SECURE_CONNECTION) {
// scramble 2nd part . Length = max(12, plugin data length - 9) : string<N>
let len = ((plugin_data_length as isize) - 9).max(12) as usize;
scramble.extend_from_slice(&buf[..len]);
buf.advance(len);
// reserved byte : string<1>
buf.advance(1);
}
// if (server_capabilities & PLUGIN_AUTH)
let auth_plugin_name = if capabilities.contains(Capabilities::PLUGIN_AUTH) {
Some(buf.get_str_nul()?.to_owned())
} else {
None
};
Ok(Self {
protocol_version,
server_version,
server_default_collation,
server_status: ServerStatusFlag::from_bits_truncate(server_status),
connection_id,
scramble: scramble.into_boxed_slice(),
capabilities,
auth_plugin_name,
})
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::__bytes_builder;
#[test]
fn it_decodes_initial_handshake_packet() -> io::Result<()> {
#[rustfmt::skip]
let buf = __bytes_builder!(
// int<3> length
1u8, 0u8, 0u8,
// int<1> seq_no
0u8,
//int<1> protocol version
10u8,
//string<NUL> server version (MariaDB server version is by default prefixed by "5.5.5-")
b"5.5.5-10.4.6-Mysql-1:10.4.6+maria~bionic\0",
//int<4> connection id
13u8, 0u8, 0u8, 0u8,
//string<8> scramble 1st part (authentication seed)
b"?~~|vZAu",
//string<1> reserved byte
0u8,
//int<2> server capabilities (1st part)
0xFEu8, 0xF7u8,
//int<1> server default collation
8u8,
//int<2> status flags
2u8, 0u8,
//int<2> server capabilities (2nd part)
0xFF_u8, 0x81_u8,
//if (server_capabilities & PLUGIN_AUTH)
// int<1> plugin data length
15u8,
//else
// int<1> 0x00
//string<6> filler
0u8, 0u8, 0u8, 0u8, 0u8, 0u8,
//if (server_capabilities & CLIENT_MYSQL)
// string<4> filler
//else
// int<4> server capabilities 3rd part . MariaDB specific flags /* MariaDB 10.2 or later */
7u8, 0u8, 0u8, 0u8,
//if (server_capabilities & CLIENT_SECURE_CONNECTION)
// string<n> scramble 2nd part . Length = max(12, plugin data length - 9)
b"JQ8cihP4Q}Dx",
// string<1> reserved byte
0u8,
//if (server_capabilities & PLUGIN_AUTH)
// string<NUL> authentication plugin name
b"mysql_native_password\0"
);
let _message = InitialHandshakePacket::decode(&buf)?;
Ok(())
}
}

View File

@ -1,9 +0,0 @@
mod auth_switch_request;
mod initial;
mod response;
mod ssl_request;
pub use auth_switch_request::AuthenticationSwitchRequest;
pub use initial::InitialHandshakePacket;
pub use response::HandshakeResponsePacket;
pub use ssl_request::SslRequest;

View File

@ -1,86 +0,0 @@
use crate::{
io::BufMut,
mysql::{
io::BufMutExt,
protocol::{Capabilities, Encode},
},
};
use byteorder::LittleEndian;
#[derive(Debug)]
pub struct HandshakeResponsePacket<'a> {
pub capabilities: Capabilities,
pub max_packet_size: u32,
pub client_collation: u8,
pub username: &'a str,
pub database: &'a str,
pub auth_data: Option<&'a [u8]>,
pub auth_plugin_name: Option<&'a str>,
pub connection_attrs: &'a [(&'a str, &'a str)],
}
impl<'a> Encode for HandshakeResponsePacket<'a> {
fn encode(&self, buf: &mut Vec<u8>, capabilities: Capabilities) {
// client capabilities : int<4>
buf.put_u32::<LittleEndian>(self.capabilities.bits() as u32);
// max packet size : int<4>
buf.put_u32::<LittleEndian>(self.max_packet_size);
// client character collation : int<1>
buf.put_u8(self.client_collation);
// reserved : string<19>
buf.advance(19);
// if not (capabilities & CLIENT_MYSQL)
if !capabilities.contains(Capabilities::CLIENT_MYSQL) {
// extended client capabilities : int<4>
buf.put_u32::<LittleEndian>((self.capabilities.bits() >> 32) as u32);
} else {
// reserved : int<4>
buf.advance(4);
}
// username : string<NUL>
buf.put_str_nul(self.username);
// if (capabilities & PLUGIN_AUTH_LENENC_CLIENT_DATA)
let auth_data = self.auth_data.unwrap_or_default();
if capabilities.contains(Capabilities::PLUGIN_AUTH_LENENC_CLIENT_DATA) {
// authentication data : string<lenenc>
buf.put_bytes_lenenc::<LittleEndian>(auth_data);
} else if capabilities.contains(Capabilities::SECURE_CONNECTION) {
// length of authentication response : int<1>
// authentication response (length is indicated by previous field) : string<fix>
buf.put_u8(auth_data.len() as u8);
buf.put_bytes(auth_data);
} else {
// 0x00 : int<1>
buf.put_u8(0);
}
// if (capabilities & CLIENT_CONNECT_WITH_DB)
if capabilities.contains(Capabilities::CONNECT_WITH_DB) {
// default database name : string<NUL>
buf.put_str_nul(self.database);
}
// if (capabilities & CLIENT_PLUGIN_AUTH)
if capabilities.contains(Capabilities::PLUGIN_AUTH) {
// authentication plugin name : string<NUL>
buf.put_str_nul(self.auth_plugin_name.unwrap_or_default());
}
// if (capabilities & CLIENT_CONNECT_ATTRS)
if capabilities.contains(Capabilities::CONNECT_ATTRS) {
// size of connection attributes : int<lenenc>
buf.put_uint_lenenc::<LittleEndian, _>(self.connection_attrs.len() as u64);
for (key, value) in self.connection_attrs {
buf.put_str_lenenc::<LittleEndian>(key);
buf.put_str_lenenc::<LittleEndian>(value);
}
}
}
}

View File

@ -1,40 +0,0 @@
use crate::{
io::BufMut,
mysql::{
io::BufMutExt,
protocol::{Capabilities, Encode},
},
};
use byteorder::LittleEndian;
#[derive(Debug)]
pub struct SslRequest {
pub capabilities: Capabilities,
pub max_packet_size: u32,
pub client_collation: u8,
}
impl Encode for SslRequest {
fn encode(&self, buf: &mut Vec<u8>, capabilities: Capabilities) {
// client capabilities : int<4>
buf.put_u32::<LittleEndian>(self.capabilities.bits() as u32);
// max packet size : int<4>
buf.put_u32::<LittleEndian>(self.max_packet_size);
// client character collation : int<1>
buf.put_u8(self.client_collation);
// reserved : string<19>
buf.advance(19);
// if not (capabilities & CLIENT_MYSQL)
if !capabilities.contains(Capabilities::CLIENT_MYSQL) {
// extended client capabilities : int<4>
buf.put_u32::<LittleEndian>((self.capabilities.bits() >> 32) as u32);
} else {
// reserved : int<4>
buf.advance(4);
}
}
}

View File

@ -0,0 +1,7 @@
use std::io;
pub trait Decode {
fn decode(buf: &[u8]) -> crate::Result<Self>
where
Self: Sized;
}

View File

@ -1,4 +1,4 @@
use super::Capabilities;
use crate::mysql::protocol::Capabilities;
pub trait Encode {
fn encode(&self, buf: &mut Vec<u8>, capabilities: Capabilities);

View File

@ -0,0 +1,52 @@
use byteorder::LittleEndian;
use crate::io::Buf;
use crate::mysql::io::BufExt;
use crate::mysql::protocol::{Capabilities, Decode, Status};
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_basic_eof_packet.html
// https://mariadb.com/kb/en/eof_packet/
#[derive(Debug)]
pub struct EofPacket {
warnings: u16,
status: Status,
}
impl Decode for EofPacket {
fn decode(mut buf: &[u8]) -> crate::Result<Self>
where
Self: Sized,
{
let header = buf.get_u8()?;
if header != 0xFE {
return Err(protocol_err!(
"expected EOF (0xFE); received 0x{:X}",
header
))?;
}
let warnings = buf.get_u16::<LittleEndian>()?;
let status = buf.get_u16::<LittleEndian>()?;
Ok(Self {
warnings,
status: Status::from_bits_truncate(status),
})
}
}
//#[cfg(test)]
//mod tests {
// use super::{Capabilities, Decode, ErrPacket, Status};
//
// const ERR_HANDSHAKE_UNKNOWN_DB: &[u8] = b"\xff\x19\x04#42000Unknown database \'unknown\'";
//
// #[test]
// fn it_decodes_ok_handshake() {
// let mut p = ErrPacket::decode(ERR_HANDSHAKE_UNKNOWN_DB).unwrap();
//
// assert_eq!(p.error_code, 1049);
// assert_eq!(&*p.sql_state, "42000");
// assert_eq!(&*p.error_message, "Unknown database \'unknown\'");
// }
//}

View File

@ -0,0 +1,55 @@
use byteorder::LittleEndian;
use crate::io::Buf;
use crate::mysql::io::BufExt;
use crate::mysql::protocol::{Capabilities, Decode, Status};
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_basic_err_packet.html
// https://mariadb.com/kb/en/err_packet/
#[derive(Debug)]
pub struct ErrPacket {
pub error_code: u16,
pub sql_state: Box<str>,
pub error_message: Box<str>,
}
impl Decode for ErrPacket {
fn decode(mut buf: &[u8]) -> crate::Result<Self>
where
Self: Sized,
{
let header = buf.get_u8()?;
if header != 0xFF {
return Err(protocol_err!("expected 0xFF; received 0x{:X}", header))?;
}
let error_code = buf.get_u16::<LittleEndian>()?;
let _sql_state_marker: u8 = buf.get_u8()?;
let sql_state = buf.get_str(5)?.into();
let error_message = buf.get_str(buf.len())?.into();
Ok(Self {
error_code,
sql_state,
error_message,
})
}
}
#[cfg(test)]
mod tests {
use super::{Capabilities, Decode, ErrPacket, Status};
const ERR_HANDSHAKE_UNKNOWN_DB: &[u8] = b"\xff\x19\x04#42000Unknown database \'unknown\'";
#[test]
fn it_decodes_ok_handshake() {
let mut p = ErrPacket::decode(ERR_HANDSHAKE_UNKNOWN_DB).unwrap();
assert_eq!(p.error_code, 1049);
assert_eq!(&*p.sql_state, "42000");
assert_eq!(&*p.error_message, "Unknown database \'unknown\'");
}
}

View File

@ -1,997 +0,0 @@
use std::fmt;
#[derive(Default, Debug)]
pub struct ErrorCode(pub(crate) u16);
use crate::error::DatabaseError;
macro_rules! error_code_impl {
($(const $name:ident: ErrorCode = ErrorCode($code:expr));*;) => {
impl ErrorCode {
$(const $name: ErrorCode = ErrorCode($code);)*
pub fn code_name(&self) -> &'static str {
match self.0 {
$($code => stringify!($name),)*
_ => "<unknown error>"
}
}
}
}
}
impl fmt::Display for ErrorCode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} ({})", self.code_name(), self.0)
}
}
// Values from https://mariadb.com/kb/en/library/mysql-error-codes/
error_code_impl! {
const ER_ABORTING_CONNECTION: ErrorCode = ErrorCode(1152);
const ER_ACCESS_DENIED_CHANGE_USER_ERROR: ErrorCode = ErrorCode(1873);
const ER_ACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1045);
const ER_ACCESS_DENIED_NO_PASSWORD_ERROR: ErrorCode = ErrorCode(1698);
const ER_ADD_PARTITION_NO_NEW_PARTITION: ErrorCode = ErrorCode(1514);
const ER_ADD_PARTITION_SUBPART_ERROR: ErrorCode = ErrorCode(1513);
const ER_ADMIN_WRONG_MRG_TABLE: ErrorCode = ErrorCode(1472);
const ER_AES_INVALID_IV: ErrorCode = ErrorCode(1882);
const ER_ALTER_FILEGROUP_FAILED: ErrorCode = ErrorCode(1533);
const ER_ALTER_INF: ErrorCode = ErrorCode(1088);
const ER_ALTER_OPERATION_NOT_SUPPORTED: ErrorCode = ErrorCode(1845);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON: ErrorCode = ErrorCode(1846);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC: ErrorCode = ErrorCode(1854);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS: ErrorCode = ErrorCode(1856);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE: ErrorCode = ErrorCode(1850);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY: ErrorCode = ErrorCode(1847);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK: ErrorCode = ErrorCode(1851);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME: ErrorCode = ErrorCode(1849);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS: ErrorCode = ErrorCode(1857);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS: ErrorCode = ErrorCode(1855);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE: ErrorCode = ErrorCode(1852);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK: ErrorCode = ErrorCode(1853);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL: ErrorCode = ErrorCode(1861);
const ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION: ErrorCode = ErrorCode(1848);
const ER_AMBIGUOUS_FIELD_TERM: ErrorCode = ErrorCode(1475);
const ER_AUTOINC_READ_FAILED: ErrorCode = ErrorCode(1467);
const ER_AUTO_CONVERT: ErrorCode = ErrorCode(1246);
const ER_AUTO_INCREMENT_CONFLICT: ErrorCode = ErrorCode(1869);
const ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON: ErrorCode = ErrorCode(1777);
const ER_BAD_BASE64_DATA: ErrorCode = ErrorCode(1958);
const ER_BAD_DATA: ErrorCode = ErrorCode(1918);
const ER_BAD_DB_ERROR: ErrorCode = ErrorCode(1049);
const ER_BAD_FIELD_ERROR: ErrorCode = ErrorCode(1054);
const ER_BAD_FT_COLUMN: ErrorCode = ErrorCode(1283);
const ER_BAD_HOST_ERROR: ErrorCode = ErrorCode(1042);
const ER_BAD_LOG_STATEMENT: ErrorCode = ErrorCode(1580);
const ER_BAD_NULL_ERROR: ErrorCode = ErrorCode(1048);
const ER_BAD_OPTION_VALUE: ErrorCode = ErrorCode(1912);
const ER_BAD_SLAVE: ErrorCode = ErrorCode(1200);
const ER_BAD_SLAVE_AUTO_POSITION: ErrorCode = ErrorCode(1776);
const ER_BAD_SLAVE_UNTIL_COND: ErrorCode = ErrorCode(1277);
const ER_BAD_TABLE_ERROR: ErrorCode = ErrorCode(1051);
const ER_BASE64_DECODE_ERROR: ErrorCode = ErrorCode(1575);
const ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX: ErrorCode = ErrorCode(1738);
const ER_BINLOG_CREATE_ROUTINE_NEED_SUPER: ErrorCode = ErrorCode(1419);
const ER_BINLOG_LOGGING_IMPOSSIBLE: ErrorCode = ErrorCode(1598);
const ER_BINLOG_LOGICAL_CORRUPTION: ErrorCode = ErrorCode(1866);
const ER_BINLOG_MULTIPLE_ENGINES: ErrorCode = ErrorCode(1667);
const ER_BINLOG_MUST_BE_EMPTY: ErrorCode = ErrorCode(1956);
const ER_BINLOG_PURGE_EMFILE: ErrorCode = ErrorCode(1587);
const ER_BINLOG_PURGE_FATAL_ERR: ErrorCode = ErrorCode(1377);
const ER_BINLOG_PURGE_PROHIBITED: ErrorCode = ErrorCode(1375);
const ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE: ErrorCode = ErrorCode(1744);
const ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE: ErrorCode = ErrorCode(1661);
const ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE: ErrorCode = ErrorCode(1664);
const ER_BINLOG_ROW_INJECTION_AND_STMT_MODE: ErrorCode = ErrorCode(1666);
const ER_BINLOG_ROW_LOGGING_FAILED: ErrorCode = ErrorCode(1534);
const ER_BINLOG_ROW_MODE_AND_STMT_ENGINE: ErrorCode = ErrorCode(1662);
const ER_BINLOG_ROW_RBR_TO_SBR: ErrorCode = ErrorCode(1536);
const ER_BINLOG_ROW_WRONG_TABLE_DEF: ErrorCode = ErrorCode(1535);
const ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX: ErrorCode = ErrorCode(1745);
const ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES: ErrorCode = ErrorCode(1844);
const ER_BINLOG_STMT_MODE_AND_ROW_ENGINE: ErrorCode = ErrorCode(1665);
const ER_BINLOG_UNSAFE_AND_STMT_ENGINE: ErrorCode = ErrorCode(1663);
const ER_BINLOG_UNSAFE_AUTOINC_COLUMNS: ErrorCode = ErrorCode(1671);
const ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST: ErrorCode = ErrorCode(1727);
const ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT: ErrorCode = ErrorCode(1717);
const ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT: ErrorCode = ErrorCode(1718);
const ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC: ErrorCode = ErrorCode(1723);
const ER_BINLOG_UNSAFE_INSERT_DELAYED: ErrorCode = ErrorCode(1669);
const ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT: ErrorCode = ErrorCode(1714);
const ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE: ErrorCode = ErrorCode(1715);
const ER_BINLOG_UNSAFE_INSERT_TWO_KEYS: ErrorCode = ErrorCode(1724);
const ER_BINLOG_UNSAFE_LIMIT: ErrorCode = ErrorCode(1668);
const ER_BINLOG_UNSAFE_MIXED_STATEMENT: ErrorCode = ErrorCode(1693);
const ER_BINLOG_UNSAFE_MULTIPLE_ENGINES: ErrorCode = ErrorCode(1692);
const ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS: ErrorCode = ErrorCode(1675);
const ER_BINLOG_UNSAFE_REPLACE_SELECT: ErrorCode = ErrorCode(1716);
const ER_BINLOG_UNSAFE_ROUTINE: ErrorCode = ErrorCode(1418);
const ER_BINLOG_UNSAFE_STATEMENT: ErrorCode = ErrorCode(1592);
const ER_BINLOG_UNSAFE_SYSTEM_FUNCTION: ErrorCode = ErrorCode(1674);
const ER_BINLOG_UNSAFE_SYSTEM_TABLE: ErrorCode = ErrorCode(1670);
const ER_BINLOG_UNSAFE_SYSTEM_VARIABLE: ErrorCode = ErrorCode(1673);
const ER_BINLOG_UNSAFE_UDF: ErrorCode = ErrorCode(1672);
const ER_BINLOG_UNSAFE_UPDATE_IGNORE: ErrorCode = ErrorCode(1719);
const ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT: ErrorCode = ErrorCode(1722);
const ER_BLOBS_AND_NO_TERMINATED: ErrorCode = ErrorCode(1084);
const ER_BLOB_CANT_HAVE_DEFAULT: ErrorCode = ErrorCode(1101);
const ER_BLOB_FIELD_IN_PART_FUNC_ERROR: ErrorCode = ErrorCode(1502);
const ER_BLOB_KEY_WITHOUT_LENGTH: ErrorCode = ErrorCode(1170);
const ER_BLOB_USED_AS_KEY: ErrorCode = ErrorCode(1073);
const ER_CANNOT_ADD_FOREIGN: ErrorCode = ErrorCode(1215);
const ER_CANNOT_CONVERT_CHARACTER: ErrorCode = ErrorCode(1977);
const ER_CANNOT_GRANT_ROLE: ErrorCode = ErrorCode(1961);
const ER_CANNOT_LOAD_FROM_TABLE: ErrorCode = ErrorCode(1548);
const ER_CANNOT_LOAD_FROM_TABLE_V2: ErrorCode = ErrorCode(1728);
const ER_CANNOT_LOAD_SLAVE_GTID_STATE: ErrorCode = ErrorCode(1946);
const ER_CANNOT_REVOKE_ROLE: ErrorCode = ErrorCode(1962);
const ER_CANNOT_UPDATE_GTID_STATE: ErrorCode = ErrorCode(1942);
const ER_CANNOT_USER: ErrorCode = ErrorCode(1396);
const ER_CANT_ACTIVATE_LOG: ErrorCode = ErrorCode(1573);
const ER_CANT_AGGREGATE2_COLLATIONS: ErrorCode = ErrorCode(1267);
const ER_CANT_AGGREGATE3_COLLATIONS: ErrorCode = ErrorCode(1270);
const ER_CANT_AGGREGATE_NCOLLATIONS: ErrorCode = ErrorCode(1271);
const ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL: ErrorCode =
ErrorCode(1768);
const ER_CANT_CHANGE_TX_ISOLATION: ErrorCode = ErrorCode(1568);
const ER_CANT_CREATE_DB: ErrorCode = ErrorCode(1006);
const ER_CANT_CREATE_FEDERATED_TABLE: ErrorCode = ErrorCode(1434);
const ER_CANT_CREATE_FILE: ErrorCode = ErrorCode(1004);
const ER_CANT_CREATE_GEOMETRY_OBJECT: ErrorCode = ErrorCode(1416);
const ER_CANT_CREATE_HANDLER_FILE: ErrorCode = ErrorCode(1501);
const ER_CANT_CREATE_SROUTINE: ErrorCode = ErrorCode(1607);
const ER_CANT_CREATE_TABLE: ErrorCode = ErrorCode(1005);
const ER_CANT_CREATE_THREAD: ErrorCode = ErrorCode(1135);
const ER_CANT_CREATE_USER_WITH_GRANT: ErrorCode = ErrorCode(1410);
const ER_CANT_DELETE_FILE: ErrorCode = ErrorCode(1011);
const ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET: ErrorCode = ErrorCode(1778);
const ER_CANT_DO_ONLINE: ErrorCode = ErrorCode(1915);
const ER_CANT_DO_THIS_DURING_AN_TRANSACTION: ErrorCode = ErrorCode(1179);
const ER_CANT_DROP_FIELD_OR_KEY: ErrorCode = ErrorCode(1091);
const ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION: ErrorCode = ErrorCode(1792);
const ER_CANT_FIND_DL_ENTRY: ErrorCode = ErrorCode(1127);
const ER_CANT_FIND_SYSTEM_REC: ErrorCode = ErrorCode(1012);
const ER_CANT_FIND_UDF: ErrorCode = ErrorCode(1122);
const ER_CANT_GET_STAT: ErrorCode = ErrorCode(1013);
const ER_CANT_GET_WD: ErrorCode = ErrorCode(1014);
const ER_CANT_INITIALIZE_UDF: ErrorCode = ErrorCode(1123);
const ER_CANT_LOCK: ErrorCode = ErrorCode(1015);
const ER_CANT_LOCK_LOG_TABLE: ErrorCode = ErrorCode(1556);
const ER_CANT_OPEN_FILE: ErrorCode = ErrorCode(1016);
const ER_CANT_OPEN_LIBRARY: ErrorCode = ErrorCode(1126);
const ER_CANT_READ_DIR: ErrorCode = ErrorCode(1018);
const ER_CANT_REMOVE_ALL_FIELDS: ErrorCode = ErrorCode(1090);
const ER_CANT_RENAME_LOG_TABLE: ErrorCode = ErrorCode(1581);
const ER_CANT_REOPEN_TABLE: ErrorCode = ErrorCode(1137);
const ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF: ErrorCode = ErrorCode(1783);
const ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON: ErrorCode = ErrorCode(1782);
const ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF: ErrorCode = ErrorCode(1781);
const ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID: ErrorCode = ErrorCode(1790);
const ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY: ErrorCode = ErrorCode(1840);
const ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF: ErrorCode = ErrorCode(1839);
const ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY: ErrorCode = ErrorCode(1841);
const ER_CANT_SET_WD: ErrorCode = ErrorCode(1019);
const ER_CANT_START_STOP_SLAVE: ErrorCode = ErrorCode(1936);
const ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT: ErrorCode = ErrorCode(1746);
const ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG: ErrorCode = ErrorCode(1442);
const ER_CANT_UPDATE_WITH_READLOCK: ErrorCode = ErrorCode(1223);
const ER_CANT_USE_OPTION_HERE: ErrorCode = ErrorCode(1234);
const ER_CANT_WRITE_LOCK_LOG_TABLE: ErrorCode = ErrorCode(1555);
const ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE: ErrorCode = ErrorCode(1750);
const ER_CHANGE_SLAVE_PARALLEL_THREADS_ACTIVE: ErrorCode = ErrorCode(1963);
const ER_CHECKREAD: ErrorCode = ErrorCode(1020);
const ER_CHECK_NOT_IMPLEMENTED: ErrorCode = ErrorCode(1178);
const ER_CHECK_NO_SUCH_TABLE: ErrorCode = ErrorCode(1177);
const ER_COALESCE_ONLY_ON_HASH_PARTITION: ErrorCode = ErrorCode(1509);
const ER_COALESCE_PARTITION_NO_PARTITION: ErrorCode = ErrorCode(1515);
const ER_COLLATION_CHARSET_MISMATCH: ErrorCode = ErrorCode(1253);
const ER_COLUMNACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1143);
const ER_COL_COUNT_DOESNT_MATCH_CORRUPTED: ErrorCode = ErrorCode(1547);
const ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2: ErrorCode = ErrorCode(1805);
const ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE: ErrorCode = ErrorCode(1558);
const ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG: ErrorCode = ErrorCode(1422);
const ER_COND_ITEM_TOO_LONG: ErrorCode = ErrorCode(1648);
const ER_CONFLICTING_DECLARATIONS: ErrorCode = ErrorCode(1302);
const ER_CONFLICT_FN_PARSE_ERROR: ErrorCode = ErrorCode(1626);
const ER_CONNECTION_ALREADY_EXISTS: ErrorCode = ErrorCode(1934);
const ER_CONNECTION_KILLED: ErrorCode = ErrorCode(1927);
const ER_CONNECT_TO_FOREIGN_DATA_SOURCE: ErrorCode = ErrorCode(1429);
const ER_CONNECT_TO_MASTER: ErrorCode = ErrorCode(1218);
const ER_CONSECUTIVE_REORG_PARTITIONS: ErrorCode = ErrorCode(1519);
const ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR: ErrorCode = ErrorCode(1486);
const ER_CONST_EXPR_IN_VCOL: ErrorCode = ErrorCode(1908);
const ER_CON_COUNT_ERROR: ErrorCode = ErrorCode(1040);
const ER_CORRUPT_HELP_DB: ErrorCode = ErrorCode(1244);
const ER_CRASHED_ON_REPAIR: ErrorCode = ErrorCode(1195);
const ER_CRASHED_ON_USAGE: ErrorCode = ErrorCode(1194);
const ER_CREATE_DB_WITH_READ_LOCK: ErrorCode = ErrorCode(1209);
const ER_CREATE_FILEGROUP_FAILED: ErrorCode = ErrorCode(1528);
const ER_CUT_VALUE_GROUP_CONCAT: ErrorCode = ErrorCode(1260);
const ER_CYCLIC_REFERENCE: ErrorCode = ErrorCode(1245);
const ER_DATABASE_NAME: ErrorCode = ErrorCode(1631);
const ER_DATA_CONVERSION_ERROR_FOR_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1902);
const ER_DATA_OUT_OF_RANGE: ErrorCode = ErrorCode(1690);
const ER_DATA_OVERFLOW: ErrorCode = ErrorCode(1916);
const ER_DATA_TOO_LONG: ErrorCode = ErrorCode(1406);
const ER_DATA_TRUNCATED: ErrorCode = ErrorCode(1917);
const ER_DATETIME_FUNCTION_OVERFLOW: ErrorCode = ErrorCode(1441);
const ER_DA_INVALID_CONDITION_NUMBER: ErrorCode = ErrorCode(1758);
const ER_DBACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1044);
const ER_DB_CREATE_EXISTS: ErrorCode = ErrorCode(1007);
const ER_DB_DROP_DELETE: ErrorCode = ErrorCode(1009);
const ER_DB_DROP_EXISTS: ErrorCode = ErrorCode(1008);
const ER_DB_DROP_RMDIR: ErrorCode = ErrorCode(1010);
const ER_DDL_LOG_ERROR: ErrorCode = ErrorCode(1565);
const ER_DEBUG_SYNC_HIT_LIMIT: ErrorCode = ErrorCode(1640);
const ER_DEBUG_SYNC_TIMEOUT: ErrorCode = ErrorCode(1639);
const ER_DELAYED_CANT_CHANGE_LOCK: ErrorCode = ErrorCode(1150);
const ER_DELAYED_INSERT_TABLE_LOCKED: ErrorCode = ErrorCode(1165);
const ER_DELAYED_NOT_SUPPORTED: ErrorCode = ErrorCode(1616);
const ER_DERIVED_MUST_HAVE_ALIAS: ErrorCode = ErrorCode(1248);
const ER_DIFF_GROUPS_PROC: ErrorCode = ErrorCode(1384);
const ER_DISCARD_FK_CHECKS_RUNNING: ErrorCode = ErrorCode(1807);
const ER_DISK_FULL: ErrorCode = ErrorCode(1021);
const ER_DIVISION_BY_ZER: ErrorCode = ErrorCode(1365);
const ER_DROP_DB_WITH_READ_LOCK: ErrorCode = ErrorCode(1208);
const ER_DROP_FILEGROUP_FAILED: ErrorCode = ErrorCode(1529);
const ER_DROP_INDEX_FK: ErrorCode = ErrorCode(1553);
const ER_DROP_LAST_PARTITION: ErrorCode = ErrorCode(1508);
const ER_DROP_PARTITION_NON_EXISTENT: ErrorCode = ErrorCode(1507);
const ER_DROP_USER: ErrorCode = ErrorCode(1268);
const ER_DUMP_NOT_IMPLEMENTED: ErrorCode = ErrorCode(1185);
const ER_DUPLICATED_VALUE_IN_TYPE: ErrorCode = ErrorCode(1291);
const ER_DUPLICATE_GTID_DOMAIN: ErrorCode = ErrorCode(1943);
const ER_DUP_ARGUMENT: ErrorCode = ErrorCode(1225);
const ER_DUP_ENTRY: ErrorCode = ErrorCode(1062);
const ER_DUP_ENTRY_AUTOINCREMENT_CASE: ErrorCode = ErrorCode(1569);
const ER_DUP_ENTRY_WITH_KEY_NAME: ErrorCode = ErrorCode(1586);
const ER_DUP_FIELDNAME: ErrorCode = ErrorCode(1060);
const ER_DUP_INDEX: ErrorCode = ErrorCode(1831);
const ER_DUP_KEY: ErrorCode = ErrorCode(1022);
const ER_DUP_KEYNAME: ErrorCode = ErrorCode(1061);
const ER_DUP_SIGNAL_SET: ErrorCode = ErrorCode(1641);
const ER_DUP_UNIQUE: ErrorCode = ErrorCode(1169);
const ER_DUP_UNKNOWN_IN_INDEX: ErrorCode = ErrorCode(1859);
const ER_DYN_COL_DATA: ErrorCode = ErrorCode(1921);
const ER_DYN_COL_IMPLEMENTATION_LIMIT: ErrorCode = ErrorCode(1920);
const ER_DYN_COL_WRONG_CHARSET: ErrorCode = ErrorCode(1922);
const ER_DYN_COL_WRONG_FORMAT: ErrorCode = ErrorCode(1919);
const ER_EMPTY_QUERY: ErrorCode = ErrorCode(1065);
const ER_ERROR_DURING_CHECKPOINT: ErrorCode = ErrorCode(1183);
const ER_ERROR_DURING_COMMIT: ErrorCode = ErrorCode(1180);
const ER_ERROR_DURING_FLUSH_LOGS: ErrorCode = ErrorCode(1182);
const ER_ERROR_DURING_ROLLBACK: ErrorCode = ErrorCode(1181);
const ER_ERROR_IN_TRIGGER_BODY: ErrorCode = ErrorCode(1710);
const ER_ERROR_IN_UNKNOWN_TRIGGER_BODY: ErrorCode = ErrorCode(1711);
const ER_ERROR_ON_CLOSE: ErrorCode = ErrorCode(1023);
const ER_ERROR_ON_READ: ErrorCode = ErrorCode(1024);
const ER_ERROR_ON_RENAME: ErrorCode = ErrorCode(1025);
const ER_ERROR_ON_WRITE: ErrorCode = ErrorCode(1026);
const ER_ERROR_WHEN_EXECUTING_COMMAND: ErrorCode = ErrorCode(1220);
const ER_EVENTS_DB_ERROR: ErrorCode = ErrorCode(1577);
const ER_EVENT_ALREADY_EXISTS: ErrorCode = ErrorCode(1537);
const ER_EVENT_CANNOT_ALTER_IN_THE_PAST: ErrorCode = ErrorCode(1589);
const ER_EVENT_CANNOT_CREATE_IN_THE_PAST: ErrorCode = ErrorCode(1588);
const ER_EVENT_CANNOT_DELETE: ErrorCode = ErrorCode(1549);
const ER_EVENT_CANT_ALTER: ErrorCode = ErrorCode(1540);
const ER_EVENT_COMPILE_ERROR: ErrorCode = ErrorCode(1550);
const ER_EVENT_DATA_TOO_LONG: ErrorCode = ErrorCode(1552);
const ER_EVENT_DOES_NOT_EXIST: ErrorCode = ErrorCode(1539);
const ER_EVENT_DROP_FAILED: ErrorCode = ErrorCode(1541);
const ER_EVENT_ENDS_BEFORE_STARTS: ErrorCode = ErrorCode(1543);
const ER_EVENT_EXEC_TIME_IN_THE_PAST: ErrorCode = ErrorCode(1544);
const ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG: ErrorCode = ErrorCode(1542);
const ER_EVENT_INVALID_CREATION_CTX: ErrorCode = ErrorCode(1605);
const ER_EVENT_MODIFY_QUEUE_ERROR: ErrorCode = ErrorCode(1570);
const ER_EVENT_NEITHER_M_EXPR_NOR_M_AT: ErrorCode = ErrorCode(1546);
const ER_EVENT_OPEN_TABLE_FAILED: ErrorCode = ErrorCode(1545);
const ER_EVENT_RECURSION_FORBIDDEN: ErrorCode = ErrorCode(1576);
const ER_EVENT_SAME_NAME: ErrorCode = ErrorCode(1551);
const ER_EVENT_SET_VAR_ERROR: ErrorCode = ErrorCode(1571);
const ER_EVENT_STORE_FAILED: ErrorCode = ErrorCode(1538);
const ER_EXCEPTIONS_WRITE_ERROR: ErrorCode = ErrorCode(1627);
const ER_EXEC_STMT_WITH_OPEN_CURSOR: ErrorCode = ErrorCode(1420);
const ER_FAILED_GTID_STATE_INIT: ErrorCode = ErrorCode(1940);
const ER_FAILED_READ_FROM_PAR_FILE: ErrorCode = ErrorCode(1696);
const ER_FAILED_ROUTINE_BREAK_BINLOG: ErrorCode = ErrorCode(1417);
const ER_FEATURE_DISABLED: ErrorCode = ErrorCode(1289);
const ER_FIELD_NOT_FOUND_PART_ERROR: ErrorCode = ErrorCode(1488);
const ER_FIELD_SPECIFIED_TWICE: ErrorCode = ErrorCode(1110);
const ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD: ErrorCode = ErrorCode(1659);
const ER_FILEGROUP_OPTION_ONLY_ONCE: ErrorCode = ErrorCode(1527);
const ER_FILE_EXISTS_ERROR: ErrorCode = ErrorCode(1086);
const ER_FILE_NOT_FOUND: ErrorCode = ErrorCode(1017);
const ER_FILE_USED: ErrorCode = ErrorCode(1027);
const ER_FILSORT_ABORT: ErrorCode = ErrorCode(1028);
const ER_FK_CANNOT_DELETE_PARENT: ErrorCode = ErrorCode(1834);
const ER_FK_CANNOT_OPEN_PARENT: ErrorCode = ErrorCode(1824);
const ER_FK_COLUMN_CANNOT_CHANGE: ErrorCode = ErrorCode(1832);
const ER_FK_COLUMN_CANNOT_CHANGE_CHILD: ErrorCode = ErrorCode(1833);
const ER_FK_COLUMN_CANNOT_DROP: ErrorCode = ErrorCode(1828);
const ER_FK_COLUMN_CANNOT_DROP_CHILD: ErrorCode = ErrorCode(1829);
const ER_FK_COLUMN_NOT_NULL: ErrorCode = ErrorCode(1830);
const ER_FK_DUP_NAME: ErrorCode = ErrorCode(1826);
const ER_FK_FAIL_ADD_SYSTEM: ErrorCode = ErrorCode(1823);
const ER_FK_INCORRECT_OPTION: ErrorCode = ErrorCode(1825);
const ER_FK_NO_INDEX_CHILD: ErrorCode = ErrorCode(1821);
const ER_FK_NO_INDEX_PARENT: ErrorCode = ErrorCode(1822);
const ER_FLUSH_MASTER_BINLOG_CLOSED: ErrorCode = ErrorCode(1186);
const ER_FORBID_SCHEMA_CHANGE: ErrorCode = ErrorCode(1450);
const ER_FORCING_CLOSE: ErrorCode = ErrorCode(1080);
const ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST: ErrorCode = ErrorCode(1431);
const ER_FOREIGN_DATA_STRING_INVALID: ErrorCode = ErrorCode(1433);
const ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE: ErrorCode = ErrorCode(1432);
const ER_FOREIGN_DUPLICATE_KEY: ErrorCode = ErrorCode(1557);
const ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO: ErrorCode = ErrorCode(1762);
const ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO: ErrorCode = ErrorCode(1761);
const ER_FOREIGN_KEY_ON_PARTITIONED: ErrorCode = ErrorCode(1506);
const ER_FOREIGN_SERVER_DOESNT_EXIST: ErrorCode = ErrorCode(1477);
const ER_FOREIGN_SERVER_EXISTS: ErrorCode = ErrorCode(1476);
const ER_FORM_NOT_FOUND: ErrorCode = ErrorCode(1029);
const ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF: ErrorCode = ErrorCode(1784);
const ER_FPARSER_BAD_HEADER: ErrorCode = ErrorCode(1341);
const ER_FPARSER_EOF_IN_COMMENT: ErrorCode = ErrorCode(1342);
const ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER: ErrorCode = ErrorCode(1344);
const ER_FPARSER_ERROR_IN_PARAMETER: ErrorCode = ErrorCode(1343);
const ER_FPARSER_TOO_BIG_FILE: ErrorCode = ErrorCode(1340);
const ER_FRM_UNKNOWN_TYPE: ErrorCode = ErrorCode(1346);
const ER_FSEEK_FAIL: ErrorCode = ErrorCode(1376);
const ER_FT_MATCHING_KEY_NOT_FOUND: ErrorCode = ErrorCode(1191);
const ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING: ErrorCode = ErrorCode(1757);
const ER_FUNCTION_NOT_DEFINED: ErrorCode = ErrorCode(1128);
const ER_FUNC_INEXISTENT_NAME_COLLISION: ErrorCode = ErrorCode(1630);
const ER_GET_ERRMSG: ErrorCode = ErrorCode(1296);
const ER_GET_ERRN: ErrorCode = ErrorCode(1030);
const ER_GET_TEMPORARY_ERRMSG: ErrorCode = ErrorCode(1297);
const ER_GLOBAL_VARIABLE: ErrorCode = ErrorCode(1229);
const ER_GNO_EXHAUSTED: ErrorCode = ErrorCode(1775);
const ER_GOT_SIGNAL: ErrorCode = ErrorCode(1078);
const ER_GRANT_PLUGIN_USER_EXISTS: ErrorCode = ErrorCode(1700);
const ER_GRANT_WRONG_HOST_OR_USER: ErrorCode = ErrorCode(1145);
const ER_GTID_EXECUTED_WAS_CHANGED: ErrorCode = ErrorCode(1843);
const ER_GTID_MODE2_OR3_REQUIRES_DISABLE_GTID_UNSAFE_STATEMENTS_ON: ErrorCode = ErrorCode(1779);
const ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME: ErrorCode = ErrorCode(1788);
const ER_GTID_MODE_REQUIRES_BINLOG: ErrorCode = ErrorCode(1780);
const ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL: ErrorCode = ErrorCode(1770);
const ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST: ErrorCode = ErrorCode(1767);
const ER_GTID_NEXT_TYPE_UNDEFINED_GROUP: ErrorCode = ErrorCode(1837);
const ER_GTID_OPEN_TABLE_FAILED: ErrorCode = ErrorCode(1944);
const ER_GTID_POSITION_NOT_FOUND_IN_BINLOG: ErrorCode = ErrorCode(1945);
const ER_GTID_POSITION_NOT_FOUND_IN_BINLOG2: ErrorCode = ErrorCode(1955);
const ER_GTID_PURGED_WAS_CHANGED: ErrorCode = ErrorCode(1842);
const ER_GTID_START_FROM_BINLOG_HOLE: ErrorCode = ErrorCode(1951);
const ER_GTID_STRICT_OUT_OF_ORDER: ErrorCode = ErrorCode(1950);
const ER_GTID_UNSAFE_BINLOG_SPLITTABLE_STATEMENT_AND_GTID_GROUP: ErrorCode = ErrorCode(1884);
const ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION: ErrorCode = ErrorCode(1787);
const ER_GTID_UNSAFE_CREATE_SELECT: ErrorCode = ErrorCode(1786);
const ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE: ErrorCode = ErrorCode(1785);
const ER_HANDSHAKE_ERROR: ErrorCode = ErrorCode(1043);
const ER_HASHCHK: ErrorCode = ErrorCode(1000);
const ER_HOSTNAME: ErrorCode = ErrorCode(1469);
const ER_HOST_IS_BLOCKED: ErrorCode = ErrorCode(1129);
const ER_HOST_NOT_PRIVILEGED: ErrorCode = ErrorCode(1130);
const ER_IDENT_CAUSES_TOO_LONG_PATH: ErrorCode = ErrorCode(1860);
const ER_ILLEGAL_GRANT_FOR_TABLE: ErrorCode = ErrorCode(1144);
const ER_ILLEGAL_HA: ErrorCode = ErrorCode(1031);
const ER_ILLEGAL_HA_CREATE_OPTION: ErrorCode = ErrorCode(1478);
const ER_ILLEGAL_REFERENCE: ErrorCode = ErrorCode(1247);
const ER_ILLEGAL_SUBQUERY_OPTIMIZER_SWITCHES: ErrorCode = ErrorCode(1923);
const ER_ILLEGAL_VALUE_FOR_TYPE: ErrorCode = ErrorCode(1367);
const ER_INCONSISTENT_PARTITION_INFO_ERROR: ErrorCode = ErrorCode(1490);
const ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR: ErrorCode = ErrorCode(1494);
const ER_INCORRECT_GLOBAL_LOCAL_VAR: ErrorCode = ErrorCode(1238);
const ER_INCORRECT_GTID_STATE: ErrorCode = ErrorCode(1941);
const ER_INDEX_COLUMN_TOO_LONG: ErrorCode = ErrorCode(1709);
const ER_INDEX_CORRUPT: ErrorCode = ErrorCode(1712);
const ER_INDEX_REBUILD: ErrorCode = ErrorCode(1187);
const ER_INNODB_FORCED_RECOVERY: ErrorCode = ErrorCode(1881);
const ER_INNODB_FT_AUX_NOT_HEX_ID: ErrorCode = ErrorCode(1879);
const ER_INNODB_FT_LIMIT: ErrorCode = ErrorCode(1795);
const ER_INNODB_FT_WRONG_DOCID_COLUMN: ErrorCode = ErrorCode(1797);
const ER_INNODB_FT_WRONG_DOCID_INDEX: ErrorCode = ErrorCode(1798);
const ER_INNODB_IMPORT_ERROR: ErrorCode = ErrorCode(1816);
const ER_INNODB_INDEX_CORRUPT: ErrorCode = ErrorCode(1817);
const ER_INNODB_NO_FT_TEMP_TABLE: ErrorCode = ErrorCode(1796);
const ER_INNODB_NO_FT_USES_PARSER: ErrorCode = ErrorCode(1865);
const ER_INNODB_ONLINE_LOG_TOO_BIG: ErrorCode = ErrorCode(1799);
const ER_INNODB_READ_ONLY: ErrorCode = ErrorCode(1874);
const ER_INSECURE_CHANGE_MASTER: ErrorCode = ErrorCode(1760);
const ER_INSECURE_PLAIN_TEXT: ErrorCode = ErrorCode(1759);
const ER_INSERT_INF: ErrorCode = ErrorCode(1092);
const ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT: ErrorCode = ErrorCode(1685);
const ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT: ErrorCode = ErrorCode(1679);
const ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO: ErrorCode = ErrorCode(1953);
const ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION: ErrorCode = ErrorCode(1929);
const ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN: ErrorCode = ErrorCode(1694);
const ER_INTERNAL_ERROR: ErrorCode = ErrorCode(1815);
const ER_INVALID_CHARACTER_STRING: ErrorCode = ErrorCode(1300);
const ER_INVALID_CURRENT_USER: ErrorCode = ErrorCode(1960);
const ER_INVALID_DEFAULT: ErrorCode = ErrorCode(1067);
const ER_INVALID_DEFAULT_VALUE_FOR_FIELD: ErrorCode = ErrorCode(1978);
const ER_INVALID_GROUP_FUNC_USE: ErrorCode = ErrorCode(1111);
const ER_INVALID_ON_UPDATE: ErrorCode = ErrorCode(1294);
const ER_INVALID_ROLE: ErrorCode = ErrorCode(1959);
const ER_INVALID_USE_OF_NULL: ErrorCode = ErrorCode(1138);
const ER_INVALID_YEAR_COLUMN_LENGTH: ErrorCode = ErrorCode(1818);
const ER_IO_ERR_LOG_INDEX_READ: ErrorCode = ErrorCode(1374);
const ER_IO_READ_ERROR: ErrorCode = ErrorCode(1810);
const ER_IO_WRITE_ERROR: ErrorCode = ErrorCode(1811);
const ER_IPSOCK_ERROR: ErrorCode = ErrorCode(1081);
const ER_IT_IS_A_VIEW: ErrorCode = ErrorCode(1965);
const ER_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1904);
const ER_KEY_COLUMN_DOES_NOT_EXITS: ErrorCode = ErrorCode(1072);
const ER_KEY_DOES_NOT_EXITS: ErrorCode = ErrorCode(1176);
const ER_KEY_NOT_FOUND: ErrorCode = ErrorCode(1032);
const ER_KEY_PART0: ErrorCode = ErrorCode(1391);
const ER_KEY_REF_DO_NOT_MATCH_TABLE_REF: ErrorCode = ErrorCode(1240);
const ER_KILL_DENIED_ERROR: ErrorCode = ErrorCode(1095);
const ER_KILL_QUERY_DENIED_ERROR: ErrorCode = ErrorCode(1979);
const ER_LIMITED_PART_RANGE: ErrorCode = ErrorCode(1523);
const ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR: ErrorCode = ErrorCode(1489);
const ER_LOAD_DATA_INVALID_COLUMN: ErrorCode = ErrorCode(1611);
const ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR: ErrorCode = ErrorCode(1409);
const ER_LOAD_INF: ErrorCode = ErrorCode(1087);
const ER_LOCAL_VARIABLE: ErrorCode = ErrorCode(1228);
const ER_LOCK_ABORTED: ErrorCode = ErrorCode(1689);
const ER_LOCK_DEADLOCK: ErrorCode = ErrorCode(1213);
const ER_LOCK_OR_ACTIVE_TRANSACTION: ErrorCode = ErrorCode(1192);
const ER_LOCK_TABLE_FULL: ErrorCode = ErrorCode(1206);
const ER_LOCK_WAIT_TIMEOUT: ErrorCode = ErrorCode(1205);
const ER_LOGGING_PROHIBIT_CHANGING_OF: ErrorCode = ErrorCode(1387);
const ER_LOG_IN_USE: ErrorCode = ErrorCode(1378);
const ER_LOG_PURGE_NO_FILE: ErrorCode = ErrorCode(1612);
const ER_LOG_PURGE_UNKNOWN_ERR: ErrorCode = ErrorCode(1379);
const ER_MALFORMED_DEFINER: ErrorCode = ErrorCode(1446);
const ER_MALFORMED_GTID_SET_ENCODING: ErrorCode = ErrorCode(1773);
const ER_MALFORMED_GTID_SET_SPECIFICATION: ErrorCode = ErrorCode(1772);
const ER_MALFORMED_GTID_SPECIFICATION: ErrorCode = ErrorCode(1774);
const ER_MALFORMED_PACKET: ErrorCode = ErrorCode(1835);
const ER_MASTER: ErrorCode = ErrorCode(1188);
const ER_MASTER_DELAY_VALUE_OUT_OF_RANGE: ErrorCode = ErrorCode(1729);
const ER_MASTER_FATAL_ERROR_READING_BINLOG: ErrorCode = ErrorCode(1236);
const ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG: ErrorCode = ErrorCode(1947);
const ER_MASTER_GTID_POS_MISSING_DOMAIN: ErrorCode = ErrorCode(1948);
const ER_MASTER_HAS_PURGED_REQUIRED_GTIDS: ErrorCode = ErrorCode(1789);
const ER_MASTER_INF: ErrorCode = ErrorCode(1201);
const ER_MASTER_LOG_PREFIX: ErrorCode = ErrorCode(1935);
const ER_MASTER_NET_READ: ErrorCode = ErrorCode(1189);
const ER_MASTER_NET_WRITE: ErrorCode = ErrorCode(1190);
const ER_MAXVALUE_IN_VALUES_IN: ErrorCode = ErrorCode(1656);
const ER_MAX_PREPARED_STMT_COUNT_REACHED: ErrorCode = ErrorCode(1461);
const ER_MESSAGE_AND_STATEMENT: ErrorCode = ErrorCode(1676);
const ER_MISSING_SKIP_SLAVE: ErrorCode = ErrorCode(1278);
const ER_MIXING_NOT_ALLOWED: ErrorCode = ErrorCode(1224);
const ER_MIX_HANDLER_ERROR: ErrorCode = ErrorCode(1497);
const ER_MIX_OF_GROUP_FUNC_AND_FIELDS: ErrorCode = ErrorCode(1140);
const ER_MTS_CANT_PARALLEL: ErrorCode = ErrorCode(1755);
const ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS: ErrorCode = ErrorCode(1802);
const ER_MTS_EVENT_BIGGER_PENDING_JOBS_SIZE_MAX: ErrorCode = ErrorCode(1864);
const ER_MTS_FEATURE_IS_NOT_SUPPORTED: ErrorCode = ErrorCode(1753);
const ER_MTS_INCONSISTENT_DATA: ErrorCode = ErrorCode(1756);
const ER_MTS_RECOVERY_FAILURE: ErrorCode = ErrorCode(1803);
const ER_MTS_RESET_WORKERS: ErrorCode = ErrorCode(1804);
const ER_MTS_UPDATED_DBS_GREATER_MAX: ErrorCode = ErrorCode(1754);
const ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR: ErrorCode = ErrorCode(1495);
const ER_MULTIPLE_PRI_KEY: ErrorCode = ErrorCode(1068);
const ER_MULTI_UPDATE_KEY_CONFLICT: ErrorCode = ErrorCode(1706);
const ER_MUST_CHANGE_PASSWORD: ErrorCode = ErrorCode(1820);
const ER_MUST_CHANGE_PASSWORD_LOGIN: ErrorCode = ErrorCode(1862);
const ER_M_BIGGER_THAN_D: ErrorCode = ErrorCode(1427);
const ER_NAME_BECOMES_EMPTY: ErrorCode = ErrorCode(1474);
const ER_NATIVE_FCT_NAME_COLLISION: ErrorCode = ErrorCode(1585);
const ER_NDB_CANT_SWITCH_BINLOG_FORMAT: ErrorCode = ErrorCode(1561);
const ER_NDB_REPLICATION_SCHEMA_ERROR: ErrorCode = ErrorCode(1625);
const ER_NEED_REPREPARE: ErrorCode = ErrorCode(1615);
const ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE: ErrorCode = ErrorCode(1743);
const ER_NET_ERROR_ON_WRITE: ErrorCode = ErrorCode(1160);
const ER_NET_FCNTL_ERROR: ErrorCode = ErrorCode(1155);
const ER_NET_PACKETS_OUT_OF_ORDER: ErrorCode = ErrorCode(1156);
const ER_NET_PACKET_TOO_LARGE: ErrorCode = ErrorCode(1153);
const ER_NET_READ_ERROR: ErrorCode = ErrorCode(1158);
const ER_NET_READ_ERROR_FROM_PIPE: ErrorCode = ErrorCode(1154);
const ER_NET_READ_INTERRUPTED: ErrorCode = ErrorCode(1159);
const ER_NET_UNCOMPRESS_ERROR: ErrorCode = ErrorCode(1157);
const ER_NET_WRITE_INTERRUPTED: ErrorCode = ErrorCode(1161);
const ER_NEW_ABORTING_CONNECTION: ErrorCode = ErrorCode(1184);
const ER_NISAMCHK: ErrorCode = ErrorCode(1001);
const ER_NO: ErrorCode = ErrorCode(1002);
const ER_NONEXISTING_GRANT: ErrorCode = ErrorCode(1141);
const ER_NONEXISTING_PROC_GRANT: ErrorCode = ErrorCode(1403);
const ER_NONEXISTING_TABLE_GRANT: ErrorCode = ErrorCode(1147);
const ER_NONUNIQ_TABLE: ErrorCode = ErrorCode(1066);
const ER_NONUPDATEABLE_COLUMN: ErrorCode = ErrorCode(1348);
const ER_NON_GROUPING_FIELD_USED: ErrorCode = ErrorCode(1463);
const ER_NON_INSERTABLE_TABLE: ErrorCode = ErrorCode(1471);
const ER_NON_UNIQ_ERROR: ErrorCode = ErrorCode(1052);
const ER_NON_UPDATABLE_TABLE: ErrorCode = ErrorCode(1288);
const ER_NORMAL_SHUTDOWN: ErrorCode = ErrorCode(1077);
const ER_NOT_ALLOWED_COMMAND: ErrorCode = ErrorCode(1148);
const ER_NOT_FORM_FILE: ErrorCode = ErrorCode(1033);
const ER_NOT_KEYFILE: ErrorCode = ErrorCode(1034);
const ER_NOT_SUPPORTED_AUTH_MODE: ErrorCode = ErrorCode(1251);
const ER_NOT_SUPPORTED_YET: ErrorCode = ErrorCode(1235);
const ER_NOT_VALID_PASSWORD: ErrorCode = ErrorCode(1819);
const ER_NO_BINARY_LOGGING: ErrorCode = ErrorCode(1381);
const ER_NO_BINLOG_ERROR: ErrorCode = ErrorCode(1518);
const ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR: ErrorCode = ErrorCode(1487);
const ER_NO_DB_ERROR: ErrorCode = ErrorCode(1046);
const ER_NO_DEFAULT: ErrorCode = ErrorCode(1230);
const ER_NO_DEFAULT_FOR_FIELD: ErrorCode = ErrorCode(1364);
const ER_NO_DEFAULT_FOR_VIEW_FIELD: ErrorCode = ErrorCode(1423);
const ER_NO_EIS_FOR_FIELD: ErrorCode = ErrorCode(1980);
const ER_NO_FILE_MAPPING: ErrorCode = ErrorCode(1388);
const ER_NO_FORMAT_DESCRIPTION_EVENT: ErrorCode = ErrorCode(1609);
const ER_NO_GROUP_FOR_PROC: ErrorCode = ErrorCode(1385);
const ER_NO_PARTITION_FOR_GIVEN_VALUE: ErrorCode = ErrorCode(1526);
const ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT: ErrorCode = ErrorCode(1591);
const ER_NO_PARTS_ERROR: ErrorCode = ErrorCode(1504);
const ER_NO_PERMISSION_TO_CREATE_USER: ErrorCode = ErrorCode(1211);
const ER_NO_RAID_COMPILED: ErrorCode = ErrorCode(1174);
const ER_NO_REFERENCED_ROW: ErrorCode = ErrorCode(1216);
const ER_NO_REFERENCED_ROW2: ErrorCode = ErrorCode(1452);
const ER_NO_SUCH_INDEX: ErrorCode = ErrorCode(1082);
const ER_NO_SUCH_KEY_VALUE: ErrorCode = ErrorCode(1741);
const ER_NO_SUCH_PARTITION_UNUSED: ErrorCode = ErrorCode(1749);
const ER_NO_SUCH_QUERY: ErrorCode = ErrorCode(1957);
const ER_NO_SUCH_TABLE: ErrorCode = ErrorCode(1146);
const ER_NO_SUCH_TABLE_IN_ENGINE: ErrorCode = ErrorCode(1932);
const ER_NO_SUCH_THREAD: ErrorCode = ErrorCode(1094);
const ER_NO_SUCH_USER: ErrorCode = ErrorCode(1449);
const ER_NO_TABLES_USED: ErrorCode = ErrorCode(1096);
const ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA: ErrorCode = ErrorCode(1465);
const ER_NO_UNIQUE_LOGFILE: ErrorCode = ErrorCode(1098);
const ER_NULL_COLUMN_IN_INDEX: ErrorCode = ErrorCode(1121);
const ER_NULL_IN_VALUES_LESS_THAN: ErrorCode = ErrorCode(1566);
const ER_OLD_FILE_FORMAT: ErrorCode = ErrorCode(1455);
const ER_OLD_KEYFILE: ErrorCode = ErrorCode(1035);
const ER_OLD_TEMPORALS_UPGRADED: ErrorCode = ErrorCode(1880);
const ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT: ErrorCode = ErrorCode(1730);
const ER_ONLY_INTEGERS_ALLOWED: ErrorCode = ErrorCode(1578);
const ER_ONLY_ON_RANGE_LIST_PARTITION: ErrorCode = ErrorCode(1512);
const ER_OPEN_AS_READONLY: ErrorCode = ErrorCode(1036);
const ER_OPERAND_COLUMNS: ErrorCode = ErrorCode(1241);
const ER_OPTION_PREVENTS_STATEMENT: ErrorCode = ErrorCode(1290);
const ER_ORDER_WITH_PROC: ErrorCode = ErrorCode(1386);
const ER_OUTOFMEMORY: ErrorCode = ErrorCode(1037);
const ER_OUT_OF_RESOURCES: ErrorCode = ErrorCode(1041);
const ER_OUT_OF_SORTMEMORY: ErrorCode = ErrorCode(1038);
const ER_PARSE_ERROR: ErrorCode = ErrorCode(1064);
const ER_PARTITIONS_MUST_BE_DEFINED_ERROR: ErrorCode = ErrorCode(1492);
const ER_PARTITION_CLAUSE_ON_NONPARTITIONED: ErrorCode = ErrorCode(1747);
const ER_PARTITION_COLUMN_LIST_ERROR: ErrorCode = ErrorCode(1653);
const ER_PARTITION_CONST_DOMAIN_ERROR: ErrorCode = ErrorCode(1563);
const ER_PARTITION_ENTRY_ERROR: ErrorCode = ErrorCode(1496);
const ER_PARTITION_EXCHANGE_DIFFERENT_OPTION: ErrorCode = ErrorCode(1731);
const ER_PARTITION_EXCHANGE_FOREIGN_KEY: ErrorCode = ErrorCode(1740);
const ER_PARTITION_EXCHANGE_PART_TABLE: ErrorCode = ErrorCode(1732);
const ER_PARTITION_EXCHANGE_TEMP_TABLE: ErrorCode = ErrorCode(1733);
const ER_PARTITION_FIELDS_TOO_LONG: ErrorCode = ErrorCode(1660);
const ER_PARTITION_FUNCTION_FAILURE: ErrorCode = ErrorCode(1521);
const ER_PARTITION_FUNCTION_IS_NOT_ALLOWED: ErrorCode = ErrorCode(1564);
const ER_PARTITION_FUNC_NOT_ALLOWED_ERROR: ErrorCode = ErrorCode(1491);
const ER_PARTITION_INSTEAD_OF_SUBPARTITION: ErrorCode = ErrorCode(1734);
const ER_PARTITION_MAXVALUE_ERROR: ErrorCode = ErrorCode(1481);
const ER_PARTITION_MERGE_ERROR: ErrorCode = ErrorCode(1572);
const ER_PARTITION_MGMT_ON_NONPARTITIONED: ErrorCode = ErrorCode(1505);
const ER_PARTITION_NAME: ErrorCode = ErrorCode(1633);
const ER_PARTITION_NOT_DEFINED_ERROR: ErrorCode = ErrorCode(1498);
const ER_PARTITION_NO_TEMPORARY: ErrorCode = ErrorCode(1562);
const ER_PARTITION_REQUIRES_VALUES_ERROR: ErrorCode = ErrorCode(1479);
const ER_PARTITION_SUBPARTITION_ERROR: ErrorCode = ErrorCode(1482);
const ER_PARTITION_SUBPART_MIX_ERROR: ErrorCode = ErrorCode(1483);
const ER_PARTITION_WRONG_NO_PART_ERROR: ErrorCode = ErrorCode(1484);
const ER_PARTITION_WRONG_NO_SUBPART_ERROR: ErrorCode = ErrorCode(1485);
const ER_PARTITION_WRONG_VALUES_ERROR: ErrorCode = ErrorCode(1480);
const ER_PART_STATE_ERROR: ErrorCode = ErrorCode(1522);
const ER_PASSWD_LENGTH: ErrorCode = ErrorCode(1372);
const ER_PASSWORD_ANONYMOUS_USER: ErrorCode = ErrorCode(1131);
const ER_PASSWORD_FORMAT: ErrorCode = ErrorCode(1827);
const ER_PASSWORD_NOT_ALLOWED: ErrorCode = ErrorCode(1132);
const ER_PASSWORD_NO_MATCH: ErrorCode = ErrorCode(1133);
const ER_PATH_LENGTH: ErrorCode = ErrorCode(1680);
const ER_PLUGIN_CANNOT_BE_UNINSTALLED: ErrorCode = ErrorCode(1883);
const ER_PLUGIN_INSTALLED: ErrorCode = ErrorCode(1968);
const ER_PLUGIN_IS_NOT_LOADED: ErrorCode = ErrorCode(1524);
const ER_PLUGIN_IS_PERMANENT: ErrorCode = ErrorCode(1702);
const ER_PLUGIN_NO_INSTALL: ErrorCode = ErrorCode(1721);
const ER_PLUGIN_NO_UNINSTALL: ErrorCode = ErrorCode(1720);
const ER_PRIMARY_CANT_HAVE_NULL: ErrorCode = ErrorCode(1171);
const ER_PRIMARY_KEY_BASED_ON_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1903);
const ER_PRIOR_COMMIT_FAILED: ErrorCode = ErrorCode(1964);
const ER_PROCACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1370);
const ER_PROC_AUTO_GRANT_FAIL: ErrorCode = ErrorCode(1404);
const ER_PROC_AUTO_REVOKE_FAIL: ErrorCode = ErrorCode(1405);
const ER_PS_MANY_PARAM: ErrorCode = ErrorCode(1390);
const ER_PS_NO_RECURSION: ErrorCode = ErrorCode(1444);
const ER_QUERY_CACHE_DISABLED: ErrorCode = ErrorCode(1651);
const ER_QUERY_CACHE_IS_DISABLED: ErrorCode = ErrorCode(1924);
const ER_QUERY_CACHE_IS_GLOBALY_DISABLED: ErrorCode = ErrorCode(1925);
const ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT: ErrorCode = ErrorCode(1931);
const ER_QUERY_INTERRUPTED: ErrorCode = ErrorCode(1317);
const ER_QUERY_ON_FOREIGN_DATA_SOURCE: ErrorCode = ErrorCode(1430);
const ER_QUERY_ON_MASTER: ErrorCode = ErrorCode(1219);
const ER_RANGE_NOT_INCREASING_ERROR: ErrorCode = ErrorCode(1493);
const ER_RBR_NOT_AVAILABLE: ErrorCode = ErrorCode(1574);
const ER_READY: ErrorCode = ErrorCode(1076);
const ER_READ_ONLY_MODE: ErrorCode = ErrorCode(1836);
const ER_READ_ONLY_TRANSACTION: ErrorCode = ErrorCode(1207);
const ER_RECORD_FILE_FULL: ErrorCode = ErrorCode(1114);
const ER_REGEXP_ERROR: ErrorCode = ErrorCode(1139);
const ER_RELAY_LOG_FAIL: ErrorCode = ErrorCode(1371);
const ER_RELAY_LOG_INIT: ErrorCode = ErrorCode(1380);
const ER_REMOVED_SPACES: ErrorCode = ErrorCode(1466);
const ER_RENAMED_NAME: ErrorCode = ErrorCode(1636);
const ER_REORG_HASH_ONLY_ON_SAME_N: ErrorCode = ErrorCode(1510);
const ER_REORG_NO_PARAM_ERROR: ErrorCode = ErrorCode(1511);
const ER_REORG_OUTSIDE_RANGE: ErrorCode = ErrorCode(1520);
const ER_REORG_PARTITION_NOT_EXIST: ErrorCode = ErrorCode(1516);
const ER_REQUIRES_PRIMARY_KEY: ErrorCode = ErrorCode(1173);
const ER_RESERVED_SYNTAX: ErrorCode = ErrorCode(1382);
const ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER: ErrorCode = ErrorCode(1645);
const ER_REVOKE_GRANTS: ErrorCode = ErrorCode(1269);
const ER_ROLE_CREATE_EXISTS: ErrorCode = ErrorCode(1975);
const ER_ROLE_DROP_EXISTS: ErrorCode = ErrorCode(1976);
const ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET: ErrorCode = ErrorCode(1748);
const ER_ROW_DOES_NOT_MATCH_PARTITION: ErrorCode = ErrorCode(1737);
const ER_ROW_EXPR_FOR_VCOL: ErrorCode = ErrorCode(1909);
const ER_ROW_IN_WRONG_PARTITION: ErrorCode = ErrorCode(1863);
const ER_ROW_IS_REFERENCED: ErrorCode = ErrorCode(1217);
const ER_ROW_IS_REFERENCED2: ErrorCode = ErrorCode(1451);
const ER_ROW_SINGLE_PARTITION_FIELD_ERROR: ErrorCode = ErrorCode(1658);
const ER_RPL_INFO_DATA_TOO_LONG: ErrorCode = ErrorCode(1742);
const ER_SAME_NAME_PARTITION: ErrorCode = ErrorCode(1517);
const ER_SAME_NAME_PARTITION_FIELD: ErrorCode = ErrorCode(1652);
const ER_SELECT_REDUCED: ErrorCode = ErrorCode(1249);
const ER_SERVER_IS_IN_SECURE_AUTH_MODE: ErrorCode = ErrorCode(1275);
const ER_SERVER_SHUTDOWN: ErrorCode = ErrorCode(1053);
const ER_SET_CONSTANTS_ONLY: ErrorCode = ErrorCode(1204);
const ER_SET_PASSWORD_AUTH_PLUGIN: ErrorCode = ErrorCode(1699);
const ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION: ErrorCode = ErrorCode(1769);
const ER_SET_STATEMENT_NOT_SUPPORTED: ErrorCode = ErrorCode(1971);
const ER_SHUTDOWN_COMPLETE: ErrorCode = ErrorCode(1079);
const ER_SIGNAL_BAD_CONDITION_TYPE: ErrorCode = ErrorCode(1646);
const ER_SIGNAL_EXCEPTION: ErrorCode = ErrorCode(1644);
const ER_SIGNAL_NOT_FOUND: ErrorCode = ErrorCode(1643);
const ER_SIGNAL_WARN: ErrorCode = ErrorCode(1642);
const ER_SIZE_OVERFLOW_ERROR: ErrorCode = ErrorCode(1532);
const ER_SKIPPING_LOGGED_TRANSACTION: ErrorCode = ErrorCode(1771);
const ER_SLAVE_CANT_CREATE_CONVERSION: ErrorCode = ErrorCode(1678);
const ER_SLAVE_CONFIGURATION: ErrorCode = ErrorCode(1794);
const ER_SLAVE_CONVERSION_FAILED: ErrorCode = ErrorCode(1677);
const ER_SLAVE_CORRUPT_EVENT: ErrorCode = ErrorCode(1610);
const ER_SLAVE_CREATE_EVENT_FAILURE: ErrorCode = ErrorCode(1596);
const ER_SLAVE_FATAL_ERROR: ErrorCode = ErrorCode(1593);
const ER_SLAVE_HAS_MORE_GTIDS_THAN_MASTER: ErrorCode = ErrorCode(1885);
const ER_SLAVE_HEARTBEAT_FAILURE: ErrorCode = ErrorCode(1623);
const ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE: ErrorCode = ErrorCode(1624);
const ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX: ErrorCode = ErrorCode(1704);
const ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN: ErrorCode = ErrorCode(1703);
const ER_SLAVE_IGNORED_SSL_PARAMS: ErrorCode = ErrorCode(1274);
const ER_SLAVE_IGNORED_TABLE: ErrorCode = ErrorCode(1237);
const ER_SLAVE_IGNORE_SERVER_IDS: ErrorCode = ErrorCode(1650);
const ER_SLAVE_INCIDENT: ErrorCode = ErrorCode(1590);
const ER_SLAVE_MASTER_COM_FAILURE: ErrorCode = ErrorCode(1597);
const ER_SLAVE_MI_INIT_REPOSITORY: ErrorCode = ErrorCode(1871);
const ER_SLAVE_MUST_STOP: ErrorCode = ErrorCode(1198);
const ER_SLAVE_NOT_RUNNING: ErrorCode = ErrorCode(1199);
const ER_SLAVE_RELAY_LOG_READ_FAILURE: ErrorCode = ErrorCode(1594);
const ER_SLAVE_RELAY_LOG_WRITE_FAILURE: ErrorCode = ErrorCode(1595);
const ER_SLAVE_RLI_INIT_REPOSITORY: ErrorCode = ErrorCode(1872);
const ER_SLAVE_SILENT_RETRY_TRANSACTION: ErrorCode = ErrorCode(1806);
const ER_SLAVE_SKIP_NOT_IN_GTID: ErrorCode = ErrorCode(1966);
const ER_SLAVE_STARTED: ErrorCode = ErrorCode(1937);
const ER_SLAVE_STOPPED: ErrorCode = ErrorCode(1938);
const ER_SLAVE_THREAD: ErrorCode = ErrorCode(1202);
const ER_SLAVE_UNEXPECTED_MASTER_SWITCH: ErrorCode = ErrorCode(1952);
const ER_SLAVE_WAS_NOT_RUNNING: ErrorCode = ErrorCode(1255);
const ER_SLAVE_WAS_RUNNING: ErrorCode = ErrorCode(1254);
const ER_SPATIAL_CANT_HAVE_NULL: ErrorCode = ErrorCode(1252);
const ER_SPATIAL_MUST_HAVE_GEOM_COL: ErrorCode = ErrorCode(1687);
const ER_SPECIFIC_ACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1227);
const ER_SP_ALREADY_EXISTS: ErrorCode = ErrorCode(1304);
const ER_SP_BADRETURN: ErrorCode = ErrorCode(1313);
const ER_SP_BADSELECT: ErrorCode = ErrorCode(1312);
const ER_SP_BADSTATEMENT: ErrorCode = ErrorCode(1314);
const ER_SP_BAD_CURSOR_QUERY: ErrorCode = ErrorCode(1322);
const ER_SP_BAD_CURSOR_SELECT: ErrorCode = ErrorCode(1323);
const ER_SP_BAD_SQLSTATE: ErrorCode = ErrorCode(1407);
const ER_SP_BAD_VAR_SHADOW: ErrorCode = ErrorCode(1453);
const ER_SP_CANT_ALTER: ErrorCode = ErrorCode(1334);
const ER_SP_CANT_SET_AUTOCOMMIT: ErrorCode = ErrorCode(1445);
const ER_SP_CASE_NOT_FOUND: ErrorCode = ErrorCode(1339);
const ER_SP_COND_MISMATCH: ErrorCode = ErrorCode(1319);
const ER_SP_CURSOR_AFTER_HANDLER: ErrorCode = ErrorCode(1338);
const ER_SP_CURSOR_ALREADY_OPEN: ErrorCode = ErrorCode(1325);
const ER_SP_CURSOR_MISMATCH: ErrorCode = ErrorCode(1324);
const ER_SP_CURSOR_NOT_OPEN: ErrorCode = ErrorCode(1326);
const ER_SP_DOES_NOT_EXIST: ErrorCode = ErrorCode(1305);
const ER_SP_DROP_FAILED: ErrorCode = ErrorCode(1306);
const ER_SP_DUP_COND: ErrorCode = ErrorCode(1332);
const ER_SP_DUP_CURS: ErrorCode = ErrorCode(1333);
const ER_SP_DUP_HANDLER: ErrorCode = ErrorCode(1413);
const ER_SP_DUP_PARAM: ErrorCode = ErrorCode(1330);
const ER_SP_DUP_VAR: ErrorCode = ErrorCode(1331);
const ER_SP_FETCH_NO_DATA: ErrorCode = ErrorCode(1329);
const ER_SP_GOTO_IN_HNDLR: ErrorCode = ErrorCode(1358);
const ER_SP_LABEL_MISMATCH: ErrorCode = ErrorCode(1310);
const ER_SP_LABEL_REDEFINE: ErrorCode = ErrorCode(1309);
const ER_SP_LILABEL_MISMATCH: ErrorCode = ErrorCode(1308);
const ER_SP_NORETURN: ErrorCode = ErrorCode(1320);
const ER_SP_NORETURNEND: ErrorCode = ErrorCode(1321);
const ER_SP_NOT_VAR_ARG: ErrorCode = ErrorCode(1414);
const ER_SP_NO_AGGREGATE: ErrorCode = ErrorCode(1460);
const ER_SP_NO_DROP_SP: ErrorCode = ErrorCode(1357);
const ER_SP_NO_RECURSION: ErrorCode = ErrorCode(1424);
const ER_SP_NO_RECURSIVE_CREATE: ErrorCode = ErrorCode(1303);
const ER_SP_NO_RETSET: ErrorCode = ErrorCode(1415);
const ER_SP_PROC_TABLE_CORRUPT: ErrorCode = ErrorCode(1457);
const ER_SP_RECURSION_LIMIT: ErrorCode = ErrorCode(1456);
const ER_SP_STORE_FAILED: ErrorCode = ErrorCode(1307);
const ER_SP_SUBSELECT_NYI: ErrorCode = ErrorCode(1335);
const ER_SP_UNDECLARED_VAR: ErrorCode = ErrorCode(1327);
const ER_SP_UNINIT_VAR: ErrorCode = ErrorCode(1311);
const ER_SP_VARCOND_AFTER_CURSHNDLR: ErrorCode = ErrorCode(1337);
const ER_SP_WRONG_NAME: ErrorCode = ErrorCode(1458);
const ER_SP_WRONG_NO_OF_ARGS: ErrorCode = ErrorCode(1318);
const ER_SP_WRONG_NO_OF_FETCH_ARGS: ErrorCode = ErrorCode(1328);
const ER_SQLTHREAD_WITH_SECURE_SLAVE: ErrorCode = ErrorCode(1763);
const ER_SQL_DISCOVER_ERROR: ErrorCode = ErrorCode(1939);
const ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE: ErrorCode = ErrorCode(1858);
const ER_SR_INVALID_CREATION_CTX: ErrorCode = ErrorCode(1601);
const ER_STACK_OVERRUN: ErrorCode = ErrorCode(1119);
const ER_STACK_OVERRUN_NEED_MORE: ErrorCode = ErrorCode(1436);
const ER_STARTUP: ErrorCode = ErrorCode(1408);
const ER_STATEMENT_TIMEOUT: ErrorCode = ErrorCode(1969);
const ER_STMT_CACHE_FULL: ErrorCode = ErrorCode(1705);
const ER_STMT_HAS_NO_OPEN_CURSOR: ErrorCode = ErrorCode(1421);
const ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG: ErrorCode = ErrorCode(1336);
const ER_STOP_SLAVE_IO_THREAD_TIMEOUT: ErrorCode = ErrorCode(1876);
const ER_STOP_SLAVE_SQL_THREAD_TIMEOUT: ErrorCode = ErrorCode(1875);
const ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT: ErrorCode = ErrorCode(1686);
const ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT: ErrorCode = ErrorCode(1560);
const ER_STORED_FUNCTION_PREVENTS_SWITCH_GTID_DOMAIN_ID_SEQ_NO: ErrorCode = ErrorCode(1954);
const ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION: ErrorCode = ErrorCode(1930);
const ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN: ErrorCode = ErrorCode(1695);
const ER_SUBPARTITION_ERROR: ErrorCode = ErrorCode(1500);
const ER_SUBPARTITION_NAME: ErrorCode = ErrorCode(1634);
const ER_SUBQUERIES_NOT_SUPPORTED: ErrorCode = ErrorCode(1970);
const ER_SUBQUERY_NO1_ROW: ErrorCode = ErrorCode(1242);
const ER_SYNTAX_ERROR: ErrorCode = ErrorCode(1149);
const ER_TABLEACCESS_DENIED_ERROR: ErrorCode = ErrorCode(1142);
const ER_TABLENAME_NOT_ALLOWED_HERE: ErrorCode = ErrorCode(1250);
const ER_TABLESPACE_AUTO_EXTEND_ERROR: ErrorCode = ErrorCode(1530);
const ER_TABLESPACE_DISCARDED: ErrorCode = ErrorCode(1814);
const ER_TABLESPACE_EXISTS: ErrorCode = ErrorCode(1813);
const ER_TABLESPACE_MISSING: ErrorCode = ErrorCode(1812);
const ER_TABLES_DIFFERENT_METADATA: ErrorCode = ErrorCode(1736);
const ER_TABLE_CANT_HANDLE_AUTO_INCREMENT: ErrorCode = ErrorCode(1164);
const ER_TABLE_CANT_HANDLE_BLOB: ErrorCode = ErrorCode(1163);
const ER_TABLE_CANT_HANDLE_FT: ErrorCode = ErrorCode(1214);
const ER_TABLE_CANT_HANDLE_SPKEYS: ErrorCode = ErrorCode(1464);
const ER_TABLE_CORRUPT: ErrorCode = ErrorCode(1877);
const ER_TABLE_DEFINITION_TOO_BIG: ErrorCode = ErrorCode(1967);
const ER_TABLE_DEF_CHANGED: ErrorCode = ErrorCode(1412);
const ER_TABLE_EXISTS_ERROR: ErrorCode = ErrorCode(1050);
const ER_TABLE_HAS_NO_FT: ErrorCode = ErrorCode(1764);
const ER_TABLE_IN_FK_CHECK: ErrorCode = ErrorCode(1725);
const ER_TABLE_IN_SYSTEM_TABLESPACE: ErrorCode = ErrorCode(1809);
const ER_TABLE_MUST_HAVE_COLUMNS: ErrorCode = ErrorCode(1113);
const ER_TABLE_NAME: ErrorCode = ErrorCode(1632);
const ER_TABLE_NEEDS_REBUILD: ErrorCode = ErrorCode(1707);
const ER_TABLE_NEEDS_UPGRADE: ErrorCode = ErrorCode(1459);
const ER_TABLE_NOT_LOCKED: ErrorCode = ErrorCode(1100);
const ER_TABLE_NOT_LOCKED_FOR_WRITE: ErrorCode = ErrorCode(1099);
const ER_TABLE_SCHEMA_MISMATCH: ErrorCode = ErrorCode(1808);
const ER_TARGET_NOT_EXPLAINABLE: ErrorCode = ErrorCode(1933);
const ER_TEMPORARY_NAME: ErrorCode = ErrorCode(1635);
const ER_TEMP_FILE_WRITE_FAILURE: ErrorCode = ErrorCode(1878);
const ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR: ErrorCode = ErrorCode(1559);
const ER_TEXTFILE_NOT_READABLE: ErrorCode = ErrorCode(1085);
const ER_TOO_BIG_DISPLAYWIDTH: ErrorCode = ErrorCode(1439);
const ER_TOO_BIG_FIELDLENGTH: ErrorCode = ErrorCode(1074);
const ER_TOO_BIG_FOR_UNCOMPRESS: ErrorCode = ErrorCode(1256);
const ER_TOO_BIG_PRECISION: ErrorCode = ErrorCode(1426);
const ER_TOO_BIG_ROWSIZE: ErrorCode = ErrorCode(1118);
const ER_TOO_BIG_SCALE: ErrorCode = ErrorCode(1425);
const ER_TOO_BIG_SELECT: ErrorCode = ErrorCode(1104);
const ER_TOO_BIG_SET: ErrorCode = ErrorCode(1097);
const ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT: ErrorCode = ErrorCode(1473);
const ER_TOO_LONG_BODY: ErrorCode = ErrorCode(1437);
const ER_TOO_LONG_FIELD_COMMENT: ErrorCode = ErrorCode(1629);
const ER_TOO_LONG_IDENT: ErrorCode = ErrorCode(1059);
const ER_TOO_LONG_INDEX_COMMENT: ErrorCode = ErrorCode(1688);
const ER_TOO_LONG_KEY: ErrorCode = ErrorCode(1071);
const ER_TOO_LONG_STRING: ErrorCode = ErrorCode(1162);
const ER_TOO_LONG_TABLE_COMMENT: ErrorCode = ErrorCode(1628);
const ER_TOO_LONG_TABLE_PARTITION_COMMENT: ErrorCode = ErrorCode(1793);
const ER_TOO_MANY_CONCURRENT_TRXS: ErrorCode = ErrorCode(1637);
const ER_TOO_MANY_DELAYED_THREADS: ErrorCode = ErrorCode(1151);
const ER_TOO_MANY_FIELDS: ErrorCode = ErrorCode(1117);
const ER_TOO_MANY_KEYS: ErrorCode = ErrorCode(1069);
const ER_TOO_MANY_KEY_PARTS: ErrorCode = ErrorCode(1070);
const ER_TOO_MANY_PARTITIONS_ERROR: ErrorCode = ErrorCode(1499);
const ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR: ErrorCode = ErrorCode(1655);
const ER_TOO_MANY_ROWS: ErrorCode = ErrorCode(1172);
const ER_TOO_MANY_TABLES: ErrorCode = ErrorCode(1116);
const ER_TOO_MANY_USER_CONNECTIONS: ErrorCode = ErrorCode(1203);
const ER_TOO_MANY_VALUES_ERROR: ErrorCode = ErrorCode(1657);
const ER_TOO_MUCH_AUTO_TIMESTAMP_COLS: ErrorCode = ErrorCode(1293);
const ER_TRANS_CACHE_FULL: ErrorCode = ErrorCode(1197);
const ER_TRG_ALREADY_EXISTS: ErrorCode = ErrorCode(1359);
const ER_TRG_CANT_CHANGE_ROW: ErrorCode = ErrorCode(1362);
const ER_TRG_CANT_OPEN_TABLE: ErrorCode = ErrorCode(1606);
const ER_TRG_CORRUPTED_FILE: ErrorCode = ErrorCode(1602);
const ER_TRG_DOES_NOT_EXIST: ErrorCode = ErrorCode(1360);
const ER_TRG_INVALID_CREATION_CTX: ErrorCode = ErrorCode(1604);
const ER_TRG_IN_WRONG_SCHEMA: ErrorCode = ErrorCode(1435);
const ER_TRG_NO_CREATION_CTX: ErrorCode = ErrorCode(1603);
const ER_TRG_NO_DEFINER: ErrorCode = ErrorCode(1454);
const ER_TRG_NO_SUCH_ROW_IN_TRG: ErrorCode = ErrorCode(1363);
const ER_TRG_ON_VIEW_OR_TEMP_TABLE: ErrorCode = ErrorCode(1361);
const ER_TRUNCATED_WRONG_VALUE: ErrorCode = ErrorCode(1292);
const ER_TRUNCATED_WRONG_VALUE_FOR_FIELD: ErrorCode = ErrorCode(1366);
const ER_TRUNCATE_ILLEGAL_FK: ErrorCode = ErrorCode(1701);
const ER_UDF_EXISTS: ErrorCode = ErrorCode(1125);
const ER_UDF_NO_PATHS: ErrorCode = ErrorCode(1124);
const ER_UNDO_RECORD_TOO_BIG: ErrorCode = ErrorCode(1713);
const ER_UNEXPECTED_EOF: ErrorCode = ErrorCode(1039);
const ER_UNION_TABLES_IN_DIFFERENT_DIR: ErrorCode = ErrorCode(1212);
const ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF: ErrorCode = ErrorCode(1503);
const ER_UNKNOWN_ALTER_ALGORITHM: ErrorCode = ErrorCode(1800);
const ER_UNKNOWN_ALTER_LOCK: ErrorCode = ErrorCode(1801);
const ER_UNKNOWN_CHARACTER_SET: ErrorCode = ErrorCode(1115);
const ER_UNKNOWN_COLLATION: ErrorCode = ErrorCode(1273);
const ER_UNKNOWN_COM_ERROR: ErrorCode = ErrorCode(1047);
const ER_UNKNOWN_ERROR: ErrorCode = ErrorCode(1105);
const ER_UNKNOWN_EXPLAIN_FORMAT: ErrorCode = ErrorCode(1791);
const ER_UNKNOWN_KEY_CACHE: ErrorCode = ErrorCode(1284);
const ER_UNKNOWN_LOCALE: ErrorCode = ErrorCode(1649);
const ER_UNKNOWN_OPTION: ErrorCode = ErrorCode(1911);
const ER_UNKNOWN_PARTITION: ErrorCode = ErrorCode(1735);
const ER_UNKNOWN_PROCEDURE: ErrorCode = ErrorCode(1106);
const ER_UNKNOWN_STMT_HANDLER: ErrorCode = ErrorCode(1243);
const ER_UNKNOWN_STORAGE_ENGINE: ErrorCode = ErrorCode(1286);
const ER_UNKNOWN_SYSTEM_VARIABLE: ErrorCode = ErrorCode(1193);
const ER_UNKNOWN_TABLE: ErrorCode = ErrorCode(1109);
const ER_UNKNOWN_TARGET_BINLOG: ErrorCode = ErrorCode(1373);
const ER_UNKNOWN_TIME_ZONE: ErrorCode = ErrorCode(1298);
const ER_UNSUPORTED_LOG_ENGINE: ErrorCode = ErrorCode(1579);
const ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1907);
const ER_UNSUPPORTED_ENGINE: ErrorCode = ErrorCode(1726);
const ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS: ErrorCode = ErrorCode(1910);
const ER_UNSUPPORTED_EXTENSION: ErrorCode = ErrorCode(1112);
const ER_UNSUPPORTED_PS: ErrorCode = ErrorCode(1295);
const ER_UNTIL_COND_IGNORED: ErrorCode = ErrorCode(1279);
const ER_UNTIL_REQUIRES_USING_GTID: ErrorCode = ErrorCode(1949);
const ER_UNUSED11: ErrorCode = ErrorCode(1608);
const ER_UNUSED17: ErrorCode = ErrorCode(1972);
const ER_UPDATE_INF: ErrorCode = ErrorCode(1134);
const ER_UPDATE_LOG_DEPRECATED_IGNORED: ErrorCode = ErrorCode(1315);
const ER_UPDATE_LOG_DEPRECATED_TRANSLATED: ErrorCode = ErrorCode(1316);
const ER_UPDATE_TABLE_USED: ErrorCode = ErrorCode(1093);
const ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE: ErrorCode = ErrorCode(1175);
const ER_USERNAME: ErrorCode = ErrorCode(1468);
const ER_USER_CREATE_EXISTS: ErrorCode = ErrorCode(1973);
const ER_USER_DROP_EXISTS: ErrorCode = ErrorCode(1974);
const ER_USER_LIMIT_REACHED: ErrorCode = ErrorCode(1226);
const ER_VALUES_IS_NOT_INT_TYPE_ERROR: ErrorCode = ErrorCode(1697);
const ER_VARIABLE_IS_NOT_STRUCT: ErrorCode = ErrorCode(1272);
const ER_VARIABLE_IS_READONLY: ErrorCode = ErrorCode(1621);
const ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER: ErrorCode = ErrorCode(1765);
const ER_VARIABLE_NOT_SETTABLE_IN_SP: ErrorCode = ErrorCode(1838);
const ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION: ErrorCode = ErrorCode(1766);
const ER_VAR_CANT_BE_READ: ErrorCode = ErrorCode(1233);
const ER_VCOL_BASED_ON_VCOL: ErrorCode = ErrorCode(1900);
const ER_VIEW_CHECKSUM: ErrorCode = ErrorCode(1392);
const ER_VIEW_CHECK_FAILED: ErrorCode = ErrorCode(1369);
const ER_VIEW_DELETE_MERGE_VIEW: ErrorCode = ErrorCode(1395);
const ER_VIEW_FRM_NO_USER: ErrorCode = ErrorCode(1447);
const ER_VIEW_INVALID: ErrorCode = ErrorCode(1356);
const ER_VIEW_INVALID_CREATION_CTX: ErrorCode = ErrorCode(1600);
const ER_VIEW_MULTIUPDATE: ErrorCode = ErrorCode(1393);
const ER_VIEW_NONUPD_CHECK: ErrorCode = ErrorCode(1368);
const ER_VIEW_NO_CREATION_CTX: ErrorCode = ErrorCode(1599);
const ER_VIEW_NO_EXPLAIN: ErrorCode = ErrorCode(1345);
const ER_VIEW_NO_INSERT_FIELD_LIST: ErrorCode = ErrorCode(1394);
const ER_VIEW_ORDERBY_IGNORED: ErrorCode = ErrorCode(1926);
const ER_VIEW_OTHER_USER: ErrorCode = ErrorCode(1448);
const ER_VIEW_PREVENT_UPDATE: ErrorCode = ErrorCode(1443);
const ER_VIEW_RECURSIVE: ErrorCode = ErrorCode(1462);
const ER_VIEW_SELECT_CLAUSE: ErrorCode = ErrorCode(1350);
const ER_VIEW_SELECT_DERIVED: ErrorCode = ErrorCode(1349);
const ER_VIEW_SELECT_TMPTABLE: ErrorCode = ErrorCode(1352);
const ER_VIEW_SELECT_VARIABLE: ErrorCode = ErrorCode(1351);
const ER_VIEW_WRONG_LIST: ErrorCode = ErrorCode(1353);
const ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED: ErrorCode = ErrorCode(1901);
const ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1906);
const ER_WARNING_NOT_COMPLETE_ROLLBACK: ErrorCode = ErrorCode(1196);
const ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE: ErrorCode = ErrorCode(1751);
const ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE: ErrorCode = ErrorCode(1752);
const ER_WARN_AGGFUNC_DEPENDENCE: ErrorCode = ErrorCode(1981);
const ER_WARN_ALLOWED_PACKET_OVERFLOWED: ErrorCode = ErrorCode(1301);
const ER_WARN_CANT_DROP_DEFAULT_KEYCACHE: ErrorCode = ErrorCode(1438);
const ER_WARN_DATA_OUT_OF_RANGE: ErrorCode = ErrorCode(1264);
const ER_WARN_DEPRECATED_SYNTAX: ErrorCode = ErrorCode(1287);
const ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT: ErrorCode = ErrorCode(1681);
const ER_WARN_DEPRECATED_SYNTAX_WITH_VER: ErrorCode = ErrorCode(1554);
const ER_WARN_ENGINE_TRANSACTION_ROLLBACK: ErrorCode = ErrorCode(1622);
const ER_WARN_FIELD_RESOLVED: ErrorCode = ErrorCode(1276);
const ER_WARN_HOSTNAME_WONT_WORK: ErrorCode = ErrorCode(1285);
const ER_WARN_INDEX_NOT_APPLICABLE: ErrorCode = ErrorCode(1739);
const ER_WARN_INVALID_TIMESTAMP: ErrorCode = ErrorCode(1299);
const ER_WARN_IS_SKIPPED_TABLE: ErrorCode = ErrorCode(1684);
const ER_WARN_NULL_TO_NOTNULL: ErrorCode = ErrorCode(1263);
const ER_WARN_PURGE_LOG_IN_USE: ErrorCode = ErrorCode(1867);
const ER_WARN_PURGE_LOG_IS_ACTIVE: ErrorCode = ErrorCode(1868);
const ER_WARN_QC_RESIZE: ErrorCode = ErrorCode(1282);
const ER_WARN_TOO_FEW_RECORDS: ErrorCode = ErrorCode(1261);
const ER_WARN_TOO_MANY_RECORDS: ErrorCode = ErrorCode(1262);
const ER_WARN_USING_OTHER_HANDLER: ErrorCode = ErrorCode(1266);
const ER_WARN_VIEW_MERGE: ErrorCode = ErrorCode(1354);
const ER_WARN_VIEW_WITHOUT_KEY: ErrorCode = ErrorCode(1355);
const ER_WRONG_ARGUMENTS: ErrorCode = ErrorCode(1210);
const ER_WRONG_AUTO_KEY: ErrorCode = ErrorCode(1075);
const ER_WRONG_COLUMN_NAME: ErrorCode = ErrorCode(1166);
const ER_WRONG_DB_NAME: ErrorCode = ErrorCode(1102);
const ER_WRONG_FIELD_SPEC: ErrorCode = ErrorCode(1063);
const ER_WRONG_FIELD_TERMINATORS: ErrorCode = ErrorCode(1083);
const ER_WRONG_FIELD_WITH_GROUP: ErrorCode = ErrorCode(1055);
const ER_WRONG_FK_DEF: ErrorCode = ErrorCode(1239);
const ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN: ErrorCode = ErrorCode(1905);
const ER_WRONG_GROUP_FIELD: ErrorCode = ErrorCode(1056);
const ER_WRONG_KEY_COLUMN: ErrorCode = ErrorCode(1167);
const ER_WRONG_LOCK_OF_SYSTEM_TABLE: ErrorCode = ErrorCode(1428);
const ER_WRONG_MAGIC: ErrorCode = ErrorCode(1389);
const ER_WRONG_MRG_TABLE: ErrorCode = ErrorCode(1168);
const ER_WRONG_NAME_FOR_CATALOG: ErrorCode = ErrorCode(1281);
const ER_WRONG_NAME_FOR_INDEX: ErrorCode = ErrorCode(1280);
const ER_WRONG_NATIVE_TABLE_STRUCTURE: ErrorCode = ErrorCode(1682);
const ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT: ErrorCode = ErrorCode(1222);
const ER_WRONG_OBJECT: ErrorCode = ErrorCode(1347);
const ER_WRONG_OUTER_JOIN: ErrorCode = ErrorCode(1120);
const ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT: ErrorCode = ErrorCode(1582);
const ER_WRONG_PARAMCOUNT_TO_PROCEDURE: ErrorCode = ErrorCode(1107);
const ER_WRONG_PARAMETERS_TO_NATIVE_FCT: ErrorCode = ErrorCode(1583);
const ER_WRONG_PARAMETERS_TO_PROCEDURE: ErrorCode = ErrorCode(1108);
const ER_WRONG_PARAMETERS_TO_STORED_FCT: ErrorCode = ErrorCode(1584);
const ER_WRONG_PARTITION_NAME: ErrorCode = ErrorCode(1567);
const ER_WRONG_PERFSCHEMA_USAGE: ErrorCode = ErrorCode(1683);
const ER_WRONG_SIZE_NUMBER: ErrorCode = ErrorCode(1531);
const ER_WRONG_SPVAR_TYPE_IN_LIMIT: ErrorCode = ErrorCode(1691);
const ER_WRONG_STRING_LENGTH: ErrorCode = ErrorCode(1470);
const ER_WRONG_SUB_KEY: ErrorCode = ErrorCode(1089);
const ER_WRONG_SUM_SELECT: ErrorCode = ErrorCode(1057);
const ER_WRONG_TABLE_NAME: ErrorCode = ErrorCode(1103);
const ER_WRONG_TYPE_COLUMN_VALUE_ERROR: ErrorCode = ErrorCode(1654);
const ER_WRONG_TYPE_FOR_VAR: ErrorCode = ErrorCode(1232);
const ER_WRONG_USAGE: ErrorCode = ErrorCode(1221);
const ER_WRONG_VALUE: ErrorCode = ErrorCode(1525);
const ER_WRONG_VALUE_COUNT: ErrorCode = ErrorCode(1058);
const ER_WRONG_VALUE_COUNT_ON_ROW: ErrorCode = ErrorCode(1136);
const ER_WRONG_VALUE_FOR_TYPE: ErrorCode = ErrorCode(1411);
const ER_WRONG_VALUE_FOR_VAR: ErrorCode = ErrorCode(1231);
const ER_WSAS_FAILED: ErrorCode = ErrorCode(1383);
const ER_XAER_DUPID: ErrorCode = ErrorCode(1440);
const ER_XAER_INVAL: ErrorCode = ErrorCode(1398);
const ER_XAER_NOTA: ErrorCode = ErrorCode(1397);
const ER_XAER_OUTSIDE: ErrorCode = ErrorCode(1400);
const ER_XAER_RMERR: ErrorCode = ErrorCode(1401);
const ER_XAER_RMFAIL: ErrorCode = ErrorCode(1399);
const ER_XA_RBDEADLOCK: ErrorCode = ErrorCode(1614);
const ER_XA_RBROLLBACK: ErrorCode = ErrorCode(1402);
const ER_XA_RBTIMEOUT: ErrorCode = ErrorCode(1613);
const ER_YES: ErrorCode = ErrorCode(1003);
const ER_ZLIB_Z_BUF_ERROR: ErrorCode = ErrorCode(1258);
const ER_ZLIB_Z_DATA_ERROR: ErrorCode = ErrorCode(1259);
const ER_ZLIB_Z_MEM_ERROR: ErrorCode = ErrorCode(1257);
const WARN_COND_ITEM_TRUNCATED: ErrorCode = ErrorCode(1647);
const WARN_DATA_TRUNCATED: ErrorCode = ErrorCode(1265);
const WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED: ErrorCode = ErrorCode(1638);
const WARN_NO_MASTER_INF: ErrorCode = ErrorCode(1617);
const WARN_ON_BLOCKHOLE_IN_RBR: ErrorCode = ErrorCode(1870);
const WARN_OPTION_BELOW_LIMIT: ErrorCode = ErrorCode(1708);
const WARN_OPTION_IGNORED: ErrorCode = ErrorCode(1618);
const WARN_PLUGIN_BUSY: ErrorCode = ErrorCode(1620);
const WARN_PLUGIN_DELETE_BUILTIN: ErrorCode = ErrorCode(1619);
}

View File

@ -1,65 +1,50 @@
// https://mariadb.com/kb/en/library/resultset/#field-types
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct FieldType(pub u8);
impl FieldType {
pub const MYSQL_TYPE_BIT: FieldType = FieldType(16);
pub const MYSQL_TYPE_BLOB: FieldType = FieldType(252);
pub const MYSQL_TYPE_DATE: FieldType = FieldType(10);
pub const MYSQL_TYPE_DATETIME: FieldType = FieldType(12);
pub const MYSQL_TYPE_DATETIME2: FieldType = FieldType(18);
pub const MYSQL_TYPE_DECIMAL: FieldType = FieldType(0);
pub const MYSQL_TYPE_DOUBLE: FieldType = FieldType(5);
pub const MYSQL_TYPE_ENUM: FieldType = FieldType(247);
pub const MYSQL_TYPE_FLOAT: FieldType = FieldType(4);
pub const MYSQL_TYPE_GEOMETRY: FieldType = FieldType(255);
pub const MYSQL_TYPE_INT24: FieldType = FieldType(9);
pub const MYSQL_TYPE_JSON: FieldType = FieldType(245);
pub const MYSQL_TYPE_LONG: FieldType = FieldType(3);
pub const MYSQL_TYPE_LONGLONG: FieldType = FieldType(8);
pub const MYSQL_TYPE_LONG_BLOB: FieldType = FieldType(251);
pub const MYSQL_TYPE_MEDIUM_BLOB: FieldType = FieldType(250);
pub const MYSQL_TYPE_NEWDATE: FieldType = FieldType(14);
pub const MYSQL_TYPE_NEWDECIMAL: FieldType = FieldType(246);
pub const MYSQL_TYPE_NULL: FieldType = FieldType(6);
pub const MYSQL_TYPE_SET: FieldType = FieldType(248);
pub const MYSQL_TYPE_SHORT: FieldType = FieldType(2);
pub const MYSQL_TYPE_STRING: FieldType = FieldType(254);
pub const MYSQL_TYPE_TIME: FieldType = FieldType(11);
pub const MYSQL_TYPE_TIME2: FieldType = FieldType(19);
pub const MYSQL_TYPE_TIMESTAMP: FieldType = FieldType(7);
pub const MYSQL_TYPE_TIMESTAMP2: FieldType = FieldType(17);
pub const MYSQL_TYPE_TINY: FieldType = FieldType(1);
pub const MYSQL_TYPE_TINY_BLOB: FieldType = FieldType(249);
pub const MYSQL_TYPE_VARCHAR: FieldType = FieldType(15);
pub const MYSQL_TYPE_VAR_STRING: FieldType = FieldType(253);
pub const MYSQL_TYPE_YEAR: FieldType = FieldType(13);
}
// https://mariadb.com/kb/en/library/com_stmt_execute/#parameter-flag
bitflags::bitflags! {
pub struct ParameterFlag: u8 {
const UNSIGNED = 128;
}
}
// https://mariadb.com/kb/en/library/resultset/#field-detail-flag
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/group__group__cs__column__definition__flags.html
bitflags::bitflags! {
pub struct FieldDetailFlag: u16 {
pub struct FieldFlags: u16 {
/// Field cannot be NULL
const NOT_NULL = 1;
/// Field is **part of** a primary key
const PRIMARY_KEY = 2;
/// Field is **part of** a unique key/constraint
const UNIQUE_KEY = 4;
/// Field is **part of** a unique or primary key
const MULTIPLE_KEY = 8;
/// Field is a blob.
const BLOB = 16;
const UNSIGNED = 32;
const ZEROFILL_FLAG = 64;
const BINARY_COLLATION = 128;
/// Field is unsigned
const UNSIGNED = 32;
/// Field is zero filled.
const ZEROFILL = 64;
/// Field is binary (set for strings)
const BINARY = 128;
/// Field is an enumeration
const ENUM = 256;
/// Field is an auto-increment field
const AUTO_INCREMENT = 512;
/// Field is a timestamp
const TIMESTAMP = 1024;
/// Field is a set
const SET = 2048;
const NO_DEFAULT_VALUE_FLAG = 4096;
const ON_UPDATE_NOW_FLAG = 8192;
const NUM_FLAG = 32768;
/// Field does not have a default value
const NO_DEFAULT_VALUE = 4096;
/// Field is set to NOW on UPDATE
const ON_UPDATE_NOW = 8192;
/// Field is a number
const NUM = 32768;
}
}
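
As a quick orientation (an editor's sketch, not part of the diff): the two flags most drivers consult from a column definition are NOT_NULL and the unsigned bit. A minimal standalone example using plain u16 masks and hypothetical helper names (`is_nullable`, `is_unsigned`) rather than the generated FieldFlags type:

// Mask values copied from the flag definitions above.
const NOT_NULL: u16 = 1;
const UNSIGNED: u16 = 32;

// Hypothetical helpers; not part of the crate.
fn is_nullable(flags: u16) -> bool {
    flags & NOT_NULL == 0
}

fn is_unsigned(flags: u16) -> bool {
    flags & UNSIGNED != 0
}

fn main() {
    // e.g. a nullable `INT UNSIGNED` column
    let flags = UNSIGNED;
    assert!(is_nullable(flags));
    assert!(is_unsigned(flags));
}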

View File

@ -0,0 +1,159 @@
use byteorder::LittleEndian;
use crate::io::Buf;
use crate::mysql::protocol::{Capabilities, Decode, Status};
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_connection_phase_packets_protocol_handshake_v10.html
// https://mariadb.com/kb/en/connection/#initial-handshake-packet
#[derive(Debug)]
pub struct Handshake {
pub protocol_version: u8,
pub server_version: Box<str>,
pub connection_id: u32,
pub server_capabilities: Capabilities,
pub server_default_collation: u8,
pub status: Status,
pub auth_plugin_name: Option<Box<str>>,
pub auth_plugin_data: Box<[u8]>,
}
impl Decode for Handshake {
fn decode(mut buf: &[u8]) -> crate::Result<Self>
where
Self: Sized,
{
let protocol_version = buf.get_u8()?;
let server_version = buf.get_str_nul()?.into();
let connection_id = buf.get_u32::<LittleEndian>()?;
let mut scramble = Vec::with_capacity(8);
// scramble first part : string<8>
scramble.extend_from_slice(&buf[..8]);
buf.advance(8);
// reserved : string<1>
buf.advance(1);
// capability_flags_1 : int<2>
let capabilities_1 = buf.get_u16::<LittleEndian>()?;
let mut capabilities = Capabilities::from_bits_truncate(capabilities_1.into());
// character_set : int<1>
let char_set = buf.get_u8()?;
// status_flags : int<2>
let status = buf.get_u16::<LittleEndian>()?;
let status = Status::from_bits_truncate(status);
// capability_flags_2 : int<2>
let capabilities_2 = buf.get_u16::<LittleEndian>()?;
capabilities |= Capabilities::from_bits_truncate(((capabilities_2 as u32) << 16).into());
let auth_plugin_data_len = if capabilities.contains(Capabilities::PLUGIN_AUTH) {
// plugin data length : int<1>
buf.get_u8()?
} else {
// 0x00 : int<1>
buf.advance(1);
0
};
// reserved: string<6>
buf.advance(6);
if capabilities.contains(Capabilities::MYSQL) {
// reserved: string<4>
buf.advance(4);
} else {
// capability_flags_3 : int<4>
let capabilities_3 = buf.get_u32::<LittleEndian>()?;
capabilities |= Capabilities::from_bits_truncate((capabilities_3 as u64) << 32);
}
if capabilities.contains(Capabilities::SECURE_CONNECTION) {
// scramble 2nd part : string<n> ( Length = max(12, plugin data length - 9) )
let len = ((auth_plugin_data_len as isize) - 9).max(12) as usize;
scramble.extend_from_slice(&buf[..len]);
buf.advance(len);
// reserved : string<1>
buf.advance(1);
}
let auth_plugin_name = if capabilities.contains(Capabilities::PLUGIN_AUTH) {
Some(buf.get_str_nul()?.to_owned().into())
} else {
None
};
Ok(Self {
protocol_version,
server_capabilities: capabilities,
server_version,
server_default_collation: char_set,
connection_id,
auth_plugin_data: scramble.into_boxed_slice(),
auth_plugin_name,
status,
})
}
}
#[cfg(test)]
mod tests {
use super::{Capabilities, Decode, Handshake, Status};
const HANDSHAKE_MARIA_DB_10_4_7: &[u8] = b"\n5.5.5-10.4.7-MariaDB-1:10.4.7+maria~bionic\x00\x0b\x00\x00\x00t6L\\j\"dS\x00\xfe\xf7\x08\x02\x00\xff\x81\x15\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00U14Oph9\"<H5n\x00mysql_native_password\x00";
#[test]
fn it_decodes_handshake_mariadb_10_4_7() {
let mut p = Handshake::decode(HANDSHAKE_MARIA_DB_10_4_7).unwrap();
assert_eq!(p.protocol_version, 10);
assert_eq!(
&*p.server_version,
"5.5.5-10.4.7-MariaDB-1:10.4.7+maria~bionic"
);
p.server_capabilities.toggle(
Capabilities::FOUND_ROWS
| Capabilities::LONG_FLAG
| Capabilities::CONNECT_WITH_DB
| Capabilities::NO_SCHEMA
| Capabilities::COMPRESS
| Capabilities::ODBC
| Capabilities::LOCAL_FILES
| Capabilities::IGNORE_SPACE
| Capabilities::PROTOCOL_41
| Capabilities::INTERACTIVE
| Capabilities::TRANSACTIONS
| Capabilities::SECURE_CONNECTION
| Capabilities::MULTI_STATEMENTS
| Capabilities::MULTI_RESULTS
| Capabilities::PS_MULTI_RESULTS
| Capabilities::PLUGIN_AUTH
| Capabilities::CONNECT_ATTRS
| Capabilities::PLUGIN_AUTH_LENENC_DATA
| Capabilities::CAN_HANDLE_EXPIRED_PASSWORDS
| Capabilities::SESSION_TRACK
| Capabilities::DEPRECATE_EOF
| Capabilities::REMEMBER_OPTIONS,
);
assert!(p.server_capabilities.is_empty());
assert_eq!(p.server_default_collation, 8);
assert!(p.status.contains(Status::SERVER_STATUS_AUTOCOMMIT));
assert_eq!(p.auth_plugin_name.as_deref(), Some("mysql_native_password"));
assert_eq!(
&*p.auth_plugin_data,
&[
116, 54, 76, 92, 106, 34, 100, 83, 85, 49, 52, 79, 112, 104, 57, 34, 60, 72, 53,
110,
]
);
}
}
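
The `max(12, plugin data length - 9)` rule for the second scramble part is easy to get wrong, so here is a standalone sketch of just that computation (editor's example; `scramble_second_part_len` is a hypothetical name), checked against the common mysql_native_password case where the plugin data length is 21:

// Sketch of the scramble-length rule only, not the decoder above.
fn scramble_second_part_len(auth_plugin_data_len: u8) -> usize {
    ((auth_plugin_data_len as isize) - 9).max(12) as usize
}

fn main() {
    // mysql_native_password advertises 21 bytes of plugin data: 8 + 12 = 20-byte scramble.
    assert_eq!(scramble_second_part_len(21), 12);
    // With no PLUGIN_AUTH the length byte is 0 and the rule clamps at 12.
    assert_eq!(scramble_second_part_len(0), 12);
}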

View File

@ -0,0 +1,57 @@
use byteorder::LittleEndian;
use crate::io::BufMut;
use crate::mysql::io::BufMutExt;
use crate::mysql::protocol::{Capabilities, Encode};
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_connection_phase_packets_protocol_handshake_response.html
// https://mariadb.com/kb/en/connection/#handshake-response-packet
#[derive(Debug)]
pub struct HandshakeResponse<'a> {
pub max_packet_size: u32,
pub client_collation: u8,
pub username: &'a str,
pub database: &'a str,
}
impl Encode for HandshakeResponse<'_> {
fn encode(&self, buf: &mut Vec<u8>, capabilities: Capabilities) {
// client capabilities : int<4>
buf.put_u32::<LittleEndian>(capabilities.bits() as u32);
// max packet size : int<4>
buf.put_u32::<LittleEndian>(self.max_packet_size);
// client character collation : int<1>
buf.put_u8(self.client_collation);
// reserved : string<19>
buf.advance(19);
if capabilities.contains(Capabilities::MYSQL) {
// reserved : string<4>
buf.advance(4);
} else {
// extended client capabilities : int<4>
buf.put_u32::<LittleEndian>((capabilities.bits() >> 32) as u32);
}
// username : string<NUL>
buf.put_str_nul(self.username);
if capabilities.contains(Capabilities::PLUGIN_AUTH_LENENC_DATA) {
// auth_response : string<lenenc>
buf.put_str_lenenc::<LittleEndian>("");
} else {
// auth_response_length : int<1>
buf.put_u8(0);
// auth_response : string<{auth_response_length}>
}
if capabilities.contains(Capabilities::CONNECT_WITH_DB) {
// database : string<NUL>
buf.put_str_nul(self.database);
}
}
}
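
The two 4-byte capability writes above split a single flag word into low and high halves (the `>> 32` implies the Capabilities bits are backed by a u64). A standalone check of that split, using plain u64 arithmetic rather than the bitflags type (editor's sketch):

fn main() {
    // Assume a 64-bit capability word with one bit set in each half.
    let caps: u64 = 0x0000_0001_0000_0200;
    let low = caps as u32;           // written as "client capabilities"
    let high = (caps >> 32) as u32;  // written as "extended client capabilities"
    assert_eq!(low, 0x0000_0200);
    assert_eq!(high, 0x0000_0001);
    assert_eq!(((high as u64) << 32) | (low as u64), caps);
}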

View File

@ -1,33 +1,49 @@
// Many protocol types are implemented but unused (currently). The hope is to eventually
// work them all into the (raw) connection type.
// Much of the protocol is not yet used. As we mature, we'll trim this module
// down to exactly what is necessary.
#![allow(unused)]
// Reference: https://mariadb.com/kb/en/library/connection
// Packets: https://mariadb.com/kb/en/library/0-packet
mod binary;
mod capabilities;
mod connect;
mod decode;
mod encode;
mod error_code;
mod field;
mod response;
mod server_status;
mod text;
pub use binary::{
ComStmtClose, ComStmtExecute, ComStmtFetch, ComStmtPrepare, ComStmtPrepareOk, ComStmtReset,
StmtExecFlag,
};
pub use capabilities::Capabilities;
pub use connect::{
AuthenticationSwitchRequest, HandshakeResponsePacket, InitialHandshakePacket, SslRequest,
};
pub use decode::Decode;
pub use encode::Encode;
pub use error_code::ErrorCode;
pub use field::{FieldDetailFlag, FieldType, ParameterFlag};
pub use response::{
ColumnCountPacket, ColumnDefinitionPacket, EofPacket, ErrPacket, OkPacket, ResultRow,
};
pub use server_status::ServerStatusFlag;
pub use text::{ComDebug, ComInitDb, ComPing, ComProcessKill, ComQuery, ComQuit, ComSetOption, SetOptionOptions};
mod capabilities;
mod field;
mod status;
mod r#type;
pub use capabilities::Capabilities;
pub use field::FieldFlags;
pub use r#type::Type;
pub use status::Status;
mod com_query;
mod com_set_option;
mod com_stmt_execute;
mod com_stmt_prepare;
mod handshake;
pub use com_query::ComQuery;
pub use com_set_option::{ComSetOption, SetOption};
pub use com_stmt_execute::{ComStmtExecute, Cursor};
pub use com_stmt_prepare::ComStmtPrepare;
pub use handshake::Handshake;
mod column_count;
mod column_def;
mod com_stmt_prepare_ok;
mod eof;
mod err;
mod handshake_response;
mod ok;
mod row;
pub use column_count::ColumnCount;
pub use column_def::ColumnDefinition;
pub use com_stmt_prepare_ok::ComStmtPrepareOk;
pub use eof::EofPacket;
pub use err::ErrPacket;
pub use handshake_response::HandshakeResponse;
pub use ok::OkPacket;
pub use row::Row;

View File

@ -0,0 +1,64 @@
use byteorder::LittleEndian;
use crate::io::Buf;
use crate::mysql::io::BufExt;
use crate::mysql::protocol::{Capabilities, Decode, Status};
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/page_protocol_basic_ok_packet.html
// https://mariadb.com/kb/en/ok_packet/
#[derive(Debug)]
pub struct OkPacket {
pub affected_rows: u64,
pub last_insert_id: u64,
pub status: Status,
pub warnings: u16,
pub info: Box<str>,
}
impl Decode for OkPacket {
fn decode(mut buf: &[u8]) -> crate::Result<Self>
where
Self: Sized,
{
let header = buf.get_u8()?;
if header != 0 && header != 0xFE {
return Err(protocol_err!(
"expected 0x00 or 0xFE; received 0x{:X}",
header
))?;
}
let affected_rows = buf.get_uint_lenenc::<LittleEndian>()?.unwrap_or(0);
let last_insert_id = buf.get_uint_lenenc::<LittleEndian>()?.unwrap_or(0);
let status = Status::from_bits_truncate(buf.get_u16::<LittleEndian>()?);
let warnings = buf.get_u16::<LittleEndian>()?;
let info = buf.get_str(buf.len())?.into();
Ok(Self {
affected_rows,
last_insert_id,
status,
warnings,
info,
})
}
}
#[cfg(test)]
mod tests {
use super::{Capabilities, Decode, OkPacket, Status};
const OK_HANDSHAKE: &[u8] = b"\x00\x00\x00\x02@\x00\x00";
#[test]
fn it_decodes_ok_handshake() {
let mut p = OkPacket::decode(OK_HANDSHAKE).unwrap();
assert_eq!(p.affected_rows, 0);
assert_eq!(p.last_insert_id, 0);
assert_eq!(p.warnings, 0);
assert!(p.status.contains(Status::SERVER_STATUS_AUTOCOMMIT));
assert!(p.status.contains(Status::SERVER_SESSION_STATE_CHANGED));
assert!(p.info.is_empty());
}
}
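
`get_uint_lenenc` here (and `get_lenenc_size` in the row decoder further down) follow MySQL's length-encoded integer scheme. A minimal standalone decoder for the integer form, returning None for the 0xFB NULL marker to mirror the `unwrap_or(0)` calls above; this is an editor's sketch, not the sqlx `BufExt` implementation:

use std::convert::TryInto;

// Returns the decoded value (None for 0xFB) and the number of bytes consumed.
fn get_uint_lenenc(buf: &[u8]) -> (Option<u64>, usize) {
    match buf[0] {
        0xFB => (None, 1),
        0xFC => (Some(u16::from_le_bytes(buf[1..3].try_into().unwrap()) as u64), 3),
        0xFD => {
            let mut b = [0u8; 4];
            b[..3].copy_from_slice(&buf[1..4]);
            (Some(u32::from_le_bytes(b) as u64), 4)
        }
        0xFE => (Some(u64::from_le_bytes(buf[1..9].try_into().unwrap())), 9),
        v => (Some(v as u64), 1),
    }
}

fn main() {
    assert_eq!(get_uint_lenenc(&[0x05]), (Some(5), 1));
    assert_eq!(get_uint_lenenc(&[0xFB]), (None, 1));
    assert_eq!(get_uint_lenenc(&[0xFC, 0x01, 0x01]), (Some(0x0101), 3));
}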

View File

@ -1,43 +0,0 @@
use crate::mysql::io::BufExt;
use byteorder::LittleEndian;
use std::io;
// The column packet is the first packet of a result set.
// Inside of it it contains the number of columns in the result set
// encoded as an int<lenenc>.
// https://mariadb.com/kb/en/library/resultset/#column-count-packet
#[derive(Debug)]
pub struct ColumnCountPacket {
pub columns: u64,
}
impl ColumnCountPacket {
pub(crate) fn decode(mut buf: &[u8]) -> io::Result<Self> {
let columns = buf.get_uint_lenenc::<LittleEndian>()?.unwrap_or(0);
Ok(Self { columns })
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::__bytes_builder;
#[test]
fn it_decodes_column_packet_0x_fb() -> io::Result<()> {
#[rustfmt::skip]
let buf = __bytes_builder!(
// int<lenenc> tag code: Some(3 bytes)
0xFD_u8,
// value: 3 bytes
0x01_u8, 0x01_u8, 0x01_u8
);
let message = ColumnCountPacket::decode(&buf)?;
assert_eq!(message.columns, 0x010101);
Ok(())
}
}

View File

@ -1,123 +0,0 @@
use crate::{
io::Buf,
mysql::{
io::BufExt,
protocol::{FieldDetailFlag, FieldType},
},
};
use byteorder::LittleEndian;
use std::io;
#[derive(Debug)]
// ColumnDefinitionPacket doesn't have a packet header because
// it's nested inside a result set packet
pub struct ColumnDefinitionPacket {
pub schema: Option<String>,
pub table_alias: Option<String>,
pub table: Option<String>,
pub column_alias: Option<String>,
pub column: Option<String>,
pub char_set: u16,
pub max_columns: i32,
pub field_type: FieldType,
pub field_details: FieldDetailFlag,
pub decimals: u8,
}
impl ColumnDefinitionPacket {
pub(crate) fn decode(mut buf: &[u8]) -> io::Result<Self> {
// string<lenenc> catalog (always 'def')
let _catalog = buf.get_str_lenenc::<LittleEndian>()?;
// TODO: Assert that this is always DEF
// string<lenenc> schema
let schema = buf.get_str_lenenc::<LittleEndian>()?.map(ToOwned::to_owned);
// string<lenenc> table alias
let table_alias = buf.get_str_lenenc::<LittleEndian>()?.map(ToOwned::to_owned);
// string<lenenc> table
let table = buf.get_str_lenenc::<LittleEndian>()?.map(ToOwned::to_owned);
// string<lenenc> column alias
let column_alias = buf.get_str_lenenc::<LittleEndian>()?.map(ToOwned::to_owned);
// string<lenenc> column
let column = buf.get_str_lenenc::<LittleEndian>()?.map(ToOwned::to_owned);
// int<lenenc> length of fixed fields (=0xC)
let _length_of_fixed_fields = buf.get_uint_lenenc::<LittleEndian>()?;
// TODO: Assert that this is always 0xC
// int<2> character set number
let char_set = buf.get_u16::<LittleEndian>()?;
// int<4> max. column size
let max_columns = buf.get_i32::<LittleEndian>()?;
// int<1> Field types
let field_type = FieldType(buf.get_u8()?);
// int<2> Field detail flag
let field_details = FieldDetailFlag::from_bits_truncate(buf.get_u16::<LittleEndian>()?);
// int<1> decimals
let decimals = buf.get_u8()?;
// int<2> - unused -
buf.advance(2);
Ok(Self {
schema,
table_alias,
table,
column_alias,
column,
char_set,
max_columns,
field_type,
field_details,
decimals,
})
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::__bytes_builder;
#[test]
fn it_decodes_column_def_packet() -> io::Result<()> {
#[rustfmt::skip]
let buf = __bytes_builder!(
// string<lenenc> catalog (always 'def')
1u8, b'a',
// string<lenenc> schema
1u8, b'b',
// string<lenenc> table alias
1u8, b'c',
// string<lenenc> table
1u8, b'd',
// string<lenenc> column alias
1u8, b'e',
// string<lenenc> column
1u8, b'f',
// int<lenenc> length of fixed fields (=0xC)
0xFC_u8, 1u8, 1u8,
// int<2> character set number
1u8, 1u8,
// int<4> max. column size
1u8, 1u8, 1u8, 1u8,
// int<1> Field types
1u8,
// int<2> Field detail flag
1u8, 0u8,
// int<1> decimals
1u8,
// int<2> - unused -
0u8, 0u8
);
let message = ColumnDefinitionPacket::decode(&buf)?;
assert_eq!(message.schema, Some("b".into()));
assert_eq!(message.table_alias, Some("c".into()));
assert_eq!(message.table, Some("d".into()));
assert_eq!(message.column_alias, Some("e".into()));
assert_eq!(message.column, Some("f".into()));
Ok(())
}
}

View File

@ -1,58 +0,0 @@
use crate::{
io::Buf,
mysql::{
io::BufExt,
protocol::{ErrorCode, ServerStatusFlag},
},
};
use byteorder::LittleEndian;
use std::io;
#[derive(Debug)]
pub struct EofPacket {
pub warning_count: u16,
pub status: ServerStatusFlag,
}
impl EofPacket {
pub(crate) fn decode(mut buf: &[u8]) -> crate::Result<Self> {
let header = buf.get_u8()?;
if header != 0xFE {
return Err(protocol_err!("expected 0xFE; received {}", header))?;
}
let warning_count = buf.get_u16::<LittleEndian>()?;
let status = ServerStatusFlag::from_bits_truncate(buf.get_u16::<LittleEndian>()?);
Ok(Self {
warning_count,
status,
})
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::__bytes_builder;
use std::io;
#[test]
fn it_decodes_eof_packet() -> crate::Result<()> {
#[rustfmt::skip]
let buf = __bytes_builder!(
// int<1> 0xfe : EOF header
0xFE_u8,
// int<2> warning count
0u8, 0u8,
// int<2> server status
1u8, 1u8
);
let _message = EofPacket::decode(&buf)?;
// TODO: Assert fields?
Ok(())
}
}

View File

@ -1,116 +0,0 @@
use crate::{
io::Buf,
mysql::{error::Error, io::BufExt, protocol::ErrorCode},
};
use byteorder::LittleEndian;
use std::io;
#[derive(Debug)]
pub enum ErrPacket {
Progress {
stage: u8,
max_stage: u8,
progress: u32,
info: Box<str>,
},
Error {
code: ErrorCode,
sql_state: Option<Box<str>>,
message: Box<str>,
},
}
impl ErrPacket {
pub fn decode(mut buf: &[u8]) -> io::Result<Self> {
let header = buf.get_u8()?;
debug_assert_eq!(header, 0xFF);
// error code : int<2>
let code = buf.get_u16::<LittleEndian>()?;
// if (errorcode == 0xFFFF) /* progress reporting */
if code == 0xFF_FF {
let stage = buf.get_u8()?;
let max_stage = buf.get_u8()?;
let progress = buf.get_u24::<LittleEndian>()?;
let info = buf
.get_str_lenenc::<LittleEndian>()?
.unwrap_or_default()
.into();
Ok(Self::Progress {
stage,
max_stage,
progress,
info,
})
} else {
// if (next byte = '#')
let sql_state = if buf[0] == b'#' {
// '#' : string<1>
buf.advance(1);
// sql state : string<5>
Some(buf.get_str(5)?.into())
} else {
None
};
let message = buf.get_str_eof()?.into();
Ok(Self::Error {
code: ErrorCode(code),
sql_state,
message,
})
}
}
pub fn expect_error<T>(self) -> crate::Result<T> {
match self {
ErrPacket::Progress { .. } => {
Err(protocol_err!("expected ErrPacket::Err, got {:?}", self).into())
}
ErrPacket::Error { code, message, .. } => Err(Error { code, message }.into()),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::__bytes_builder;
#[test]
fn it_decodes_err_packet() -> io::Result<()> {
#[rustfmt::skip]
let buf = __bytes_builder!(
// int<1> 0xfe : EOF header
0xFF_u8,
// int<2> error code
0x84_u8, 0x04_u8,
// if (errorcode == 0xFFFF) /* progress reporting */ {
// int<1> stage
// int<1> max_stage
// int<3> progress
// string<lenenc> progress_info
// } else {
// if (next byte = '#') {
// string<1> sql state marker '#'
b"#",
// string<5>sql state
b"08S01",
// string<EOF> error message
b"Got packets out of order"
// } else {
// string<EOF> error message
// }
// }
);
let _message = ErrPacket::decode(&buf)?;
Ok(())
}
}

View File

@ -1,13 +0,0 @@
mod column_count;
mod column_def;
mod eof;
mod err;
mod ok;
mod row;
pub use column_count::ColumnCountPacket;
pub use column_def::ColumnDefinitionPacket;
pub use eof::EofPacket;
pub use err::ErrPacket;
pub use ok::OkPacket;
pub use row::ResultRow;

View File

@ -1,109 +0,0 @@
use crate::{
io::Buf,
mysql::{
io::BufExt,
protocol::{Capabilities, ServerStatusFlag},
},
};
use byteorder::LittleEndian;
use std::io;
// https://mariadb.com/kb/en/library/ok_packet/
#[derive(Debug)]
pub struct OkPacket {
pub affected_rows: u64,
pub last_insert_id: u64,
pub server_status: ServerStatusFlag,
pub warning_count: u16,
pub info: Box<str>,
pub session_state_info: Option<Box<[u8]>>,
pub value_of_variable: Option<Box<str>>,
}
impl OkPacket {
pub fn decode(mut buf: &[u8], capabilities: Capabilities) -> crate::Result<Self> {
let header = buf.get_u8()?;
if header != 0 && header != 0xFE {
return Err(protocol_err!(
"expected 0x00 or 0xFE; received 0x{:X}",
header
))?;
}
let affected_rows = buf.get_uint_lenenc::<LittleEndian>()?.unwrap_or(0);
let last_insert_id = buf.get_uint_lenenc::<LittleEndian>()?.unwrap_or(0);
let server_status = ServerStatusFlag::from_bits_truncate(buf.get_u16::<LittleEndian>()?);
let warning_count = buf.get_u16::<LittleEndian>()?;
let info;
let mut session_state_info = None;
let mut value_of_variable = None;
if capabilities.contains(Capabilities::CLIENT_SESSION_TRACK) {
info = buf
.get_str_lenenc::<LittleEndian>()?
.unwrap_or_default()
.to_owned()
.into();
session_state_info = buf.get_bytes_lenenc::<LittleEndian>()?.map(Into::into);
value_of_variable = buf.get_str_lenenc::<LittleEndian>()?.map(Into::into);
} else {
info = buf.get_str_eof()?.to_owned().into();
}
Ok(Self {
affected_rows,
last_insert_id,
server_status,
warning_count,
info,
session_state_info,
value_of_variable,
})
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::__bytes_builder;
#[test]
fn it_decodes_ok_packet() -> crate::Result<()> {
#[rustfmt::skip]
let buf = __bytes_builder!(
// 0x00 : OK_Packet header or (0xFE if CLIENT_DEPRECATE_EOF is set)
0u8,
// int<lenenc> affected rows
0xFB_u8,
// int<lenenc> last insert id
0xFB_u8,
// int<2> server status
1u8, 1u8,
// int<2> warning count
0u8, 0u8,
// if session_tracking_supported (see CLIENT_SESSION_TRACK) {
// string<lenenc> info
// if (status flags & SERVER_SESSION_STATE_CHANGED) {
// string<lenenc> session state info
// string<lenenc> value of variable
// }
// } else {
// string<EOF> info
b"info"
// }
);
let message = OkPacket::decode(&buf, Capabilities::empty())?;
assert_eq!(message.affected_rows, 0);
assert_eq!(message.last_insert_id, 0);
assert!(message
.server_status
.contains(ServerStatusFlag::SERVER_STATUS_IN_TRANS));
assert_eq!(message.warning_count, 0);
assert_eq!(message.info, "info".into());
Ok(())
}
}

View File

@ -1,87 +0,0 @@
use crate::{
io::Buf,
mysql::{
io::BufExt,
protocol::{ColumnDefinitionPacket, FieldType},
},
};
use byteorder::LittleEndian;
use std::{io, pin::Pin, ptr::NonNull};
/// A resultset row represents a database resultset unit, which is usually generated by
/// executing a statement that queries the database.
#[derive(Debug)]
pub struct ResultRow {
#[used]
buffer: Pin<Box<[u8]>>,
pub values: Box<[Option<NonNull<[u8]>>]>,
}
// SAFE: Raw pointers point to pinned memory inside the struct
unsafe impl Send for ResultRow {}
unsafe impl Sync for ResultRow {}
impl ResultRow {
pub fn decode(mut buf: &[u8], columns: &[ColumnDefinitionPacket]) -> crate::Result<Self> {
// 0x00 header : byte<1>
let header = buf.get_u8()?;
if header != 0 {
return Err(protocol_err!("expected header 0x00, got: {:#04X}", header).into());
}
// NULL-Bitmap : byte<(number_of_columns + 9) / 8>
let null_len = (columns.len() + 9) / 8;
let null = &buf[..];
buf.advance(null_len);
let buffer: Pin<Box<[u8]>> = Pin::new(buf.into());
let mut buf = &*buffer;
let mut values = Vec::with_capacity(columns.len());
for column_idx in 0..columns.len() {
if null[column_idx / 8] & (1 << (column_idx % 8) as u8) != 0 {
values.push(None);
} else {
match columns[column_idx].field_type {
FieldType::MYSQL_TYPE_TINY => {
values.push(Some(buf.get_bytes(1)?.into()));
}
FieldType::MYSQL_TYPE_SHORT => {
values.push(Some(buf.get_bytes(2)?.into()));
}
FieldType::MYSQL_TYPE_LONG => {
values.push(Some(buf.get_bytes(4)?.into()));
}
FieldType::MYSQL_TYPE_LONGLONG => {
values.push(Some(buf.get_bytes(8)?.into()));
}
FieldType::MYSQL_TYPE_TINY_BLOB
| FieldType::MYSQL_TYPE_MEDIUM_BLOB
| FieldType::MYSQL_TYPE_LONG_BLOB
| FieldType::MYSQL_TYPE_BLOB
| FieldType::MYSQL_TYPE_GEOMETRY
| FieldType::MYSQL_TYPE_STRING
| FieldType::MYSQL_TYPE_VARCHAR
| FieldType::MYSQL_TYPE_VAR_STRING => {
values.push(buf.get_bytes_lenenc::<LittleEndian>()?.map(Into::into));
}
type_ => {
unimplemented!("encountered unknown field type: {:?}", type_);
}
}
}
}
Ok(Self {
buffer,
values: values.into_boxed_slice(),
})
}
}

View File

@ -0,0 +1,129 @@
use std::ops::Range;
use byteorder::{ByteOrder, LittleEndian};
use crate::io::Buf;
use crate::mysql::io::BufExt;
use crate::mysql::protocol::{Decode, Type};
pub struct Row {
buffer: Box<[u8]>,
values: Box<[Option<Range<usize>>]>,
binary: bool,
}
impl Row {
pub fn len(&self) -> usize {
self.values.len()
}
pub fn get(&self, index: usize) -> Option<&[u8]> {
let range = self.values[index].as_ref()?;
Some(&self.buffer[(range.start as usize)..(range.end as usize)])
}
}
fn get_lenenc_size(buf: &[u8]) -> usize {
match buf[0] {
0xFB => 1,
0xFC => {
let len_size = 1 + 2;
let len = LittleEndian::read_u16(&buf[1..]);
len_size + (len as usize)
}
0xFD => {
let len_size = 1 + 3;
let len = LittleEndian::read_u24(&buf[1..]);
len_size + (len as usize)
}
0xFE => {
let len_size = 1 + 8;
let len = LittleEndian::read_u64(&buf[1..]);
len_size + (len as usize)
}
value => 1 + (value as usize),
}
}
impl Row {
pub fn decode(mut buf: &[u8], columns: &[Type], binary: bool) -> crate::Result<Self> {
if !binary {
let buffer: Box<[u8]> = buf.into();
let mut values = Vec::with_capacity(columns.len());
let mut index = 0;
for column_idx in 0..columns.len() {
let size = get_lenenc_size(&buf[index..]);
values.push(Some(index..(index + size)));
index += size;
buf.advance(size);
}
return Ok(Self {
buffer,
values: values.into_boxed_slice(),
binary,
});
}
// 0x00 header : byte<1>
let header = buf.get_u8()?;
if header != 0 {
return Err(protocol_err!("expected ROW (0x00), got: {:#04X}", header).into());
}
// NULL-Bitmap : byte<(number_of_columns + 9) / 8>
let null_len = (columns.len() + 9) / 8;
let null_bitmap = &buf[..];
buf.advance(null_len);
let buffer: Box<[u8]> = buf.into();
let mut values = Vec::with_capacity(columns.len());
let mut index = 0;
for column_idx in 0..columns.len() {
if null_bitmap[(column_idx + 2) / 8] & (1 << ((column_idx + 2) % 8) as u8) != 0 {
values.push(None);
} else {
let size = match columns[column_idx] {
Type::TINY => 1,
Type::SHORT => 2,
Type::LONG => 4,
Type::LONGLONG => 8,
Type::TINY_BLOB
| Type::MEDIUM_BLOB
| Type::LONG_BLOB
| Type::BLOB
| Type::GEOMETRY
| Type::STRING
| Type::VARCHAR
| Type::VAR_STRING => get_lenenc_size(&buffer[index..]),
r#type => {
unimplemented!("encountered unknown field type: {:?}", r#type);
}
};
values.push(Some(index..(index + size)));
index += size;
}
}
Ok(Self {
buffer,
values: values.into_boxed_slice(),
binary,
})
}
}
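
Per the MariaDB/MySQL references above, the binary-row NULL bitmap is `(column_count + 9) / 8` bytes long and its first two bits are reserved, so column `i` is NULL when bit `i + 2` is set. A standalone sketch of just that bit test (editor's example; `is_null` is a hypothetical helper):

// Sketch of the binary-row NULL-bitmap convention (offset = 2), not the decoder itself.
fn is_null(null_bitmap: &[u8], column_idx: usize) -> bool {
    let bit = column_idx + 2; // the first two bits are reserved
    null_bitmap[bit / 8] & (1 << (bit % 8)) != 0
}

fn main() {
    // 4 columns -> (4 + 9) / 8 = 1 bitmap byte; mark column 1 as NULL (bit 3).
    let bitmap = [0b0000_1000u8];
    assert!(!is_null(&bitmap, 0));
    assert!(is_null(&bitmap, 1));
}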

View File

@ -1,45 +0,0 @@
// https://mariadb.com/kb/en/library/mariadb-connectorc-types-and-definitions/#server-status
bitflags::bitflags! {
pub struct ServerStatusFlag: u16 {
// A transaction is currently active
const SERVER_STATUS_IN_TRANS = 1;
// Autocommit mode is set
const SERVER_STATUS_AUTOCOMMIT = 2;
// more results exists (more packet follow)
const SERVER_MORE_RESULTS_EXISTS = 8;
const SERVER_QUERY_NO_GOOD_INDEX_USED = 16;
const SERVER_QUERY_NO_INDEX_USED = 32;
// when using COM_STMT_FETCH, indicate that current cursor still has result
const SERVER_STATUS_CURSOR_EXISTS = 64;
// when using COM_STMT_FETCH, indicate that current cursor has finished to send results
const SERVER_STATUS_LAST_ROW_SENT = 128;
// database has been dropped
const SERVER_STATUS_DB_DROPPED = 1 << 8;
// current escape mode is "no backslash escape"
const SERVER_STATUS_NO_BACKSLASH_ESAPES = 1 << 9;
// A DDL change did have an impact on an existing PREPARE (an
// automatic reprepare has been executed)
const SERVER_STATUS_METADATA_CHANGED = 1 << 10;
// Last statement took more than the time value specified in
// server variable long_query_time.
const SERVER_QUERY_WAS_SLOW = 1 << 11;
// this resultset contain stored procedure output parameter
const SERVER_PS_OUT_PARAMS = 1 << 12;
// current transaction is a read-only transaction
const SERVER_STATUS_IN_TRANS_READONLY = 1 << 13;
// session state change. see Session change type for more information
const SERVER_SESSION_STATE_CHANGED = 1 << 14;
}
}

View File

@ -0,0 +1,49 @@
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/mysql__com_8h.html#a1d854e841086925be1883e4d7b4e8cad
// https://mariadb.com/kb/en/library/mariadb-connectorc-types-and-definitions/#server-status
bitflags::bitflags! {
pub struct Status: u16 {
// Is raised when a multi-statement transaction has been started, either explicitly,
// by means of BEGIN or COMMIT AND CHAIN, or implicitly, by the first
// transactional statement, when autocommit=off.
const SERVER_STATUS_IN_TRANS = 1;
// Autocommit mode is set
const SERVER_STATUS_AUTOCOMMIT = 2;
// Multi query - next query exists.
const SERVER_MORE_RESULTS_EXISTS = 8;
const SERVER_QUERY_NO_GOOD_INDEX_USED = 16;
const SERVER_QUERY_NO_INDEX_USED = 32;
// When using COM_STMT_FETCH, indicate that current cursor still has result
const SERVER_STATUS_CURSOR_EXISTS = 64;
// When using COM_STMT_FETCH, indicate that current cursor has finished to send results
const SERVER_STATUS_LAST_ROW_SENT = 128;
// Database has been dropped
const SERVER_STATUS_DB_DROPPED = (1 << 8);
// Current escape mode is "no backslash escape"
const SERVER_STATUS_NO_BACKSLASH_ESCAPES = (1 << 9);
// A DDL change did have an impact on an existing PREPARE (an automatic
// re-prepare has been executed)
const SERVER_STATUS_METADATA_CHANGED = (1 << 10);
// Last statement took more than the time value specified
// in server variable long_query_time.
const SERVER_QUERY_WAS_SLOW = (1 << 11);
// This result-set contain stored procedure output parameter.
const SERVER_PS_OUT_PARAMS = (1 << 12);
// Current transaction is a read-only transaction.
const SERVER_STATUS_IN_TRANS_READONLY = (1 << 13);
// This status flag, when on, implies that one of the state information has changed
// on the server because of the execution of the last statement.
const SERVER_SESSION_STATE_CHANGED = (1 << 14);
}
}
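
The OK-packet test earlier reads the status bytes `0x02, 0x40` as little-endian and expects AUTOCOMMIT and SERVER_SESSION_STATE_CHANGED; a standalone check of that read against the bit values above (editor's sketch with plain u16 masks):

const SERVER_STATUS_AUTOCOMMIT: u16 = 2;
const SERVER_SESSION_STATE_CHANGED: u16 = 1 << 14;

fn main() {
    // Status bytes as they appear on the wire in the OK handshake test.
    let status = u16::from_le_bytes([0x02, 0x40]);
    assert_eq!(status, 0x4002);
    assert_ne!(status & SERVER_STATUS_AUTOCOMMIT, 0);
    assert_ne!(status & SERVER_SESSION_STATE_CHANGED, 0);
}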

View File

@ -1,28 +0,0 @@
use super::TextProtocol;
use crate::{
io::BufMut,
mysql::protocol::{Capabilities, Encode},
};
#[derive(Debug)]
pub struct ComDebug;
impl Encode for ComDebug {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_DEBUG Header (0xOD) : int<1>
buf.put_u8(TextProtocol::ComDebug as u8);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_debug() {
let mut buf = Vec::new();
ComDebug.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x0D");
}
}

View File

@ -1,36 +0,0 @@
use super::TextProtocol;
use crate::{
io::BufMut,
mysql::protocol::{Capabilities, Encode},
};
pub struct ComInitDb<'a> {
pub schema_name: &'a str,
}
impl Encode for ComInitDb<'_> {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_INIT_DB Header : int<1>
buf.put_u8(TextProtocol::ComInitDb as u8);
// schema name : string<NUL>
buf.put_str_nul(self.schema_name);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_init_db() {
let mut buf = Vec::new();
ComInitDb {
schema_name: "portal",
}
.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x02portal\0");
}
}

View File

@ -1,28 +0,0 @@
use super::TextProtocol;
use crate::{
io::BufMut,
mysql::protocol::{Capabilities, Encode},
};
#[derive(Debug)]
pub struct ComPing;
impl Encode for ComPing {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_PING Header : int<1>
buf.put_u8(TextProtocol::ComPing as u8);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_ping() {
let mut buf = Vec::new();
ComPing.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x0E");
}
}

View File

@ -1,35 +0,0 @@
use super::TextProtocol;
use crate::{
io::BufMut,
mysql::protocol::{Capabilities, Encode},
};
use byteorder::LittleEndian;
/// Forces the server to terminate a specified connection.
pub struct ComProcessKill {
pub process_id: u32,
}
impl Encode for ComProcessKill {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_PROCESS_KILL : int<1>
buf.put_u8(TextProtocol::ComProcessKill as u8);
// process id : int<4>
buf.put_u32::<LittleEndian>(self.process_id);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_process_kill() {
let mut buf = Vec::new();
ComProcessKill { process_id: 1 }.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x0C\x01\0\0\0");
}
}

View File

@ -1,36 +0,0 @@
use crate::{
io::BufMut,
mysql::{
io::BufMutExt,
protocol::{Capabilities, Encode},
},
};
/// Sends the server an SQL statement to be executed immediately.
pub struct ComQuery<'a> {
pub sql_statement: &'a str,
}
impl<'a> Encode for ComQuery<'a> {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
buf.put_u8(super::TextProtocol::ComQuery as u8);
buf.put_str(&self.sql_statement);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_query() {
let mut buf = Vec::new();
ComQuery {
sql_statement: "SELECT * FROM users",
}
.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf, b"\x03SELECT * FROM users");
}
}

View File

@ -1,29 +0,0 @@
use super::TextProtocol;
use crate::{
io::BufMut,
mysql::protocol::{Capabilities, Encode},
};
pub struct ComQuit;
impl Encode for ComQuit {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
buf.put_u8(TextProtocol::ComQuit as u8);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_quit() -> std::io::Result<()> {
let mut buf = Vec::new();
ComQuit.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x01");
Ok(())
}
}

View File

@ -1,30 +0,0 @@
use super::TextProtocol;
use crate::{
io::BufMut,
mysql::protocol::{Capabilities, Encode},
};
/// Resets a connection without re-authentication.
#[derive(Debug)]
pub struct ComResetConnection;
impl Encode for ComResetConnection {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_RESET_CONNECTION Header : int<1>
buf.put_u8(TextProtocol::ComResetConnection as u8);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_reset_conn() {
let mut buf = Vec::new();
ComResetConnection.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x1F");
}
}

View File

@ -1,45 +0,0 @@
use crate::{
io::BufMut,
mysql::protocol::{text::TextProtocol, Capabilities, Encode},
};
use byteorder::LittleEndian;
#[derive(Debug, Copy, Clone)]
#[repr(u16)]
pub enum SetOptionOptions {
MySqlOptionMultiStatementsOn = 0x00,
MySqlOptionMultiStatementsOff = 0x01,
}
/// Enables or disables server option.
#[derive(Debug)]
pub struct ComSetOption {
pub option: SetOptionOptions,
}
impl Encode for ComSetOption {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_SET_OPTION : int<1>
buf.put_u8(TextProtocol::ComSetOption as u8);
// option : int<2>
buf.put_u16::<LittleEndian>(self.option as u16);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_set_option() {
let mut buf = Vec::new();
ComSetOption {
option: SetOptionOptions::MySqlOptionMultiStatementsOff,
}
.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x1B\x01\0");
}
}

View File

@ -1,27 +0,0 @@
use crate::{
io::BufMut,
mysql::protocol::{text::TextProtocol, Capabilities, Encode},
};
pub struct ComSleep;
impl Encode for ComSleep {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_SLEEP : int<1>
buf.put_u8(TextProtocol::ComSleep as u8);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_sleep() {
let mut buf = Vec::new();
ComSleep.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x00");
}
}

View File

@ -1,28 +0,0 @@
use crate::{
io::BufMut,
mysql::protocol::{text::TextProtocol, Capabilities, Encode},
};
#[derive(Debug)]
pub struct ComStatistics;
impl Encode for ComStatistics {
fn encode(&self, buf: &mut Vec<u8>, _: Capabilities) {
// COM_STATISTICS : int<1>
buf.put_u8(TextProtocol::ComStatistics as u8);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_encodes_com_statistics() {
let mut buf = Vec::new();
ComStatistics.encode(&mut buf, Capabilities::empty());
assert_eq!(&buf[..], b"\x09");
}
}

View File

@ -1,41 +0,0 @@
mod com_debug;
mod com_init_db;
mod com_ping;
mod com_process_kill;
mod com_query;
mod com_quit;
mod com_reset_conn;
mod com_set_option;
mod com_sleep;
mod com_statistics;
pub use com_debug::ComDebug;
pub use com_init_db::ComInitDb;
pub use com_ping::ComPing;
pub use com_process_kill::ComProcessKill;
pub use com_query::ComQuery;
pub use com_quit::ComQuit;
pub use com_reset_conn::ComResetConnection;
pub use com_set_option::{ComSetOption, SetOptionOptions};
pub use com_sleep::ComSleep;
pub use com_statistics::ComStatistics;
// This is an enum of text protocol packet tags.
// Tags are the 5th byte of the packet (1st byte of packet body)
// and are used to determine which type of query was sent.
// The name of the enum variant represents the type of query, and
// the value is the byte value required by the server.
enum TextProtocol {
ComChangeUser = 0x11,
ComDebug = 0x0D,
ComInitDb = 0x02,
ComPing = 0x0e,
ComProcessKill = 0x0C,
ComQuery = 0x03,
ComQuit = 0x01,
ComResetConnection = 0x1F,
ComSetOption = 0x1B,
ComShutdown = 0x0A,
ComSleep = 0x00,
ComStatistics = 0x09,
}

View File

@ -0,0 +1,39 @@
// https://dev.mysql.com/doc/dev/mysql-server/8.0.12/binary__log__types_8h.html
// https://mariadb.com/kb/en/library/resultset/#field-types
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Type(pub u8);
impl Type {
pub const BIT: Type = Type(16);
pub const BLOB: Type = Type(252);
pub const DATE: Type = Type(10);
pub const DATETIME: Type = Type(12);
pub const DECIMAL: Type = Type(0);
pub const DOUBLE: Type = Type(5);
pub const ENUM: Type = Type(247);
pub const FLOAT: Type = Type(4);
pub const GEOMETRY: Type = Type(255);
pub const INT24: Type = Type(9);
pub const JSON: Type = Type(245); // MySQL Only
pub const LONG: Type = Type(3);
pub const LONGLONG: Type = Type(8);
pub const LONG_BLOB: Type = Type(251);
pub const MEDIUM_BLOB: Type = Type(250);
pub const NULL: Type = Type(6);
pub const SET: Type = Type(248);
pub const SHORT: Type = Type(2);
pub const STRING: Type = Type(254);
pub const TIME: Type = Type(11);
pub const TIMESTAMP: Type = Type(7);
pub const TINY: Type = Type(1);
pub const TINY_BLOB: Type = Type(249);
pub const VARCHAR: Type = Type(15);
pub const VAR_STRING: Type = Type(253);
pub const YEAR: Type = Type(13);
}
impl Default for Type {
fn default() -> Type {
Type::NULL
}
}
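
For context on how these tags are consumed by the binary row decoder above, a standalone sketch of the fixed-width-versus-length-encoded split it performs (editor's example; `fixed_binary_width` is a hypothetical helper and only a handful of tags are covered):

// Raw tag values copied from the constants above.
const TINY: u8 = 1;
const SHORT: u8 = 2;
const LONG: u8 = 3;
const LONGLONG: u8 = 8;
const BLOB: u8 = 252;
const VAR_STRING: u8 = 253;
const STRING: u8 = 254;

// Some(width) for fixed-width binary values, None for length-encoded ones.
fn fixed_binary_width(tag: u8) -> Option<usize> {
    match tag {
        TINY => Some(1),
        SHORT => Some(2),
        LONG => Some(4),
        LONGLONG => Some(8),
        BLOB | VAR_STRING | STRING => None,
        other => panic!("tag not covered by this sketch: {}", other),
    }
}

fn main() {
    assert_eq!(fixed_binary_width(LONG), Some(4));
    assert_eq!(fixed_binary_width(VAR_STRING), None);
}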

View File

@ -1,8 +0,0 @@
pub enum SessionChangeType {
SessionTrackSystemVariables = 0,
SessionTrackSchema = 1,
SessionTrackStateChange = 2,
SessionTrackGTIDS = 3,
SessionTrackTransactionCharacteristics = 4,
SessionTrackTransactionState = 5,
}

View File

@ -1,41 +0,0 @@
use super::Connection;
use crate::{encode::{Encode, IsNull}, mysql::types::MySqlTypeMetadata, params::QueryParameters, types::HasSqlType, MySql};
#[derive(Default)]
pub struct MySqlDbParameters {
pub(crate) param_types: Vec<MySqlTypeMetadata>,
pub(crate) params: Vec<u8>,
pub(crate) null_bitmap: Vec<u8>,
}
impl QueryParameters for MySqlDbParameters {
type Backend = MySql;
fn reserve(&mut self, binds: usize, bytes: usize) {
self.param_types.reserve(binds);
self.params.reserve(bytes);
// ensure we have enough bytes in the bitmap to hold at least `binds` extra bits
// the second `& 7` gives us 0 spare bits when param_types.len() is a multiple of 8
let spare_bits = (8 - (self.param_types.len()) & 7) & 7;
// ensure that if there are no spare bits left, `binds = 1` reserves another byte
self.null_bitmap.reserve( (binds + 7 - spare_bits) / 8);
}
fn bind<T>(&mut self, value: T)
where
Self: Sized,
Self::Backend: HasSqlType<T>,
T: Encode<Self::Backend>,
{
let metadata = <MySql as HasSqlType<T>>::metadata();
let index = self.param_types.len();
self.param_types.push(metadata);
self.null_bitmap.resize((index / 8) + 1, 0);
if let IsNull::Yes = value.encode(&mut self.params) {
self.null_bitmap[index / 8] &= (1 << index % 8) as u8;
}
}
}

View File

@ -1,17 +1,58 @@
use crate::{mysql::{protocol::ResultRow, Connection}, row::Row, MySql};
use std::collections::HashMap;
use std::sync::Arc;
impl Row for ResultRow {
type Backend = MySql;
use crate::decode::Decode;
use crate::mysql::protocol;
use crate::mysql::MySql;
use crate::row::{Row, RowIndex};
use crate::types::HasSqlType;
pub struct MySqlRow {
pub(super) row: protocol::Row,
pub(super) columns: Arc<HashMap<Box<str>, usize>>,
}
impl Row for MySqlRow {
type Database = MySql;
#[inline]
fn len(&self) -> usize {
self.values.len()
self.row.len()
}
#[inline]
fn get_raw(&self, index: usize) -> Option<&[u8]> {
self.values[index]
.as_ref()
.map(|value| unsafe { value.as_ref() })
fn get<T, I>(&self, index: I) -> T
where
Self::Database: HasSqlType<T>,
I: RowIndex<Self>,
T: Decode<Self::Database>,
{
index.try_get(self).unwrap()
}
}
impl RowIndex<MySqlRow> for usize {
fn try_get<T>(&self, row: &MySqlRow) -> crate::Result<T>
where
<MySqlRow as Row>::Database: HasSqlType<T>,
T: Decode<<MySqlRow as Row>::Database>,
{
Ok(Decode::decode_nullable(row.row.get(*self))?)
}
}
impl RowIndex<MySqlRow> for &'_ str {
fn try_get<T>(&self, row: &MySqlRow) -> crate::Result<T>
where
<MySqlRow as Row>::Database: HasSqlType<T>,
T: Decode<<MySqlRow as Row>::Database>,
{
let index = row
.columns
.get(*self)
.ok_or_else(|| crate::Error::ColumnNotFound((*self).into()))?;
let value = Decode::decode_nullable(row.row.get(*index))?;
Ok(value)
}
}
impl_from_row_for_row!(MySqlRow);
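
The `&str` index path above boils down to a name-to-ordinal lookup through the shared `columns` map. A standalone sketch of that lookup with plain std types and hypothetical column names (no sqlx traits involved):

use std::collections::HashMap;
use std::sync::Arc;

fn main() {
    // Shape mirrors MySqlRow::columns: a shared map of column name -> ordinal.
    let columns: Arc<HashMap<Box<str>, usize>> = Arc::new(
        vec![("id".into(), 0), ("name".into(), 1)].into_iter().collect(),
    );

    // Ordinal lookup by name; a missing column surfaces as an error.
    let index = columns.get("name").copied().ok_or("column not found").unwrap();
    assert_eq!(index, 1);
    assert!(columns.get("missing").is_none());
}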

View File

@ -1,42 +0,0 @@
use crate::{
encode::IsNull,
mysql::{
protocol::{FieldType, ParameterFlag},
types::MySqlTypeMetadata,
},
Decode, Encode, HasSqlType, MySql,
};
impl HasSqlType<[u8]> for MySql {
fn metadata() -> MySqlTypeMetadata {
MySqlTypeMetadata {
field_type: FieldType::MYSQL_TYPE_BLOB,
param_flag: ParameterFlag::empty(),
}
}
}
impl HasSqlType<Vec<u8>> for MySql {
fn metadata() -> MySqlTypeMetadata {
<Self as HasSqlType<[u8]>>::metadata()
}
}
impl Encode<MySql> for [u8] {
fn encode(&self, buf: &mut Vec<u8>) -> IsNull {
buf.extend_from_slice(self);
IsNull::No
}
}
impl Encode<MySql> for Vec<u8> {
fn encode(&self, buf: &mut Vec<u8>) -> IsNull {
<[u8] as Encode<MySql>>::encode(self, buf)
}
}
impl Decode<MySql> for Vec<u8> {
fn decode(raw: Option<&[u8]>) -> Self {
raw.unwrap().into()
}
}

View File

@ -0,0 +1,25 @@
use crate::decode::{Decode, DecodeError};
use crate::encode::Encode;
use crate::mysql::protocol::Type;
use crate::mysql::types::MySqlTypeMetadata;
use crate::mysql::MySql;
use crate::types::HasSqlType;
impl HasSqlType<bool> for MySql {
fn metadata() -> MySqlTypeMetadata {
MySqlTypeMetadata::new(Type::TINY)
}
}
impl Encode<MySql> for bool {
fn encode(&self, buf: &mut Vec<u8>) {
buf.push(*self as u8);
}
}
impl Decode<MySql> for bool {
fn decode(buf: &[u8]) -> Result<Self, DecodeError> {
// FIXME: Return an error if the buffer size is not (at least) 1
Ok(buf[0] != 0)
}
}

View File

@ -1,34 +0,0 @@
use super::{MySql, MySqlTypeMetadata};
use crate::{
decode::Decode,
encode::{Encode, IsNull},
mysql::protocol::{FieldType, ParameterFlag},
types::HasSqlType,
};
impl HasSqlType<bool> for MySql {
fn metadata() -> MySqlTypeMetadata {
MySqlTypeMetadata {
// MYSQL_TYPE_TINY
field_type: FieldType::MYSQL_TYPE_TINY,
param_flag: ParameterFlag::empty(),
}
}
}
impl Encode<MySql> for bool {
#[inline]
fn encode(&self, buf: &mut Vec<u8>) -> IsNull {
buf.push(*self as u8);
IsNull::No
}
}
impl Decode<MySql> for bool {
#[inline]
fn decode(buf: Option<&[u8]>) -> Self {
// TODO: Handle optionals
buf.unwrap()[0] != 0
}
}

View File

@ -1,60 +0,0 @@
use super::{MySql, MySqlTypeMetadata};
use crate::{
decode::Decode,
encode::{Encode, IsNull},
mysql::protocol::{FieldType, ParameterFlag},
types::HasSqlType,
};
use std::str;
use crate::mysql::io::BufMutExt;
use byteorder::LittleEndian;
impl HasSqlType<str> for MySql {
#[inline]
fn metadata() -> MySqlTypeMetadata {
MySqlTypeMetadata {
// MYSQL_TYPE_VAR_STRING
field_type: FieldType::MYSQL_TYPE_VAR_STRING,
param_flag: ParameterFlag::empty(),
}
}
}
impl Encode<MySql> for str {
#[inline]
fn encode(&self, buf: &mut Vec<u8>) -> IsNull {
buf.put_str_lenenc::<LittleEndian>(self);
IsNull::No
}
}
impl HasSqlType<String> for MySql {
#[inline]
fn metadata() -> MySqlTypeMetadata {
<MySql as HasSqlType<&str>>::metadata()
}
}
impl Encode<MySql> for String {
#[inline]
fn encode(&self, buf: &mut Vec<u8>) -> IsNull {
<str as Encode<MySql>>::encode(self.as_str(), buf)
}
}
impl Decode<MySql> for String {
#[inline]
fn decode(buf: Option<&[u8]>) -> Self {
// TODO: Handle nulls
let s = if cfg!(debug_assertions) {
str::from_utf8(buf.unwrap()).expect("mysql returned non UTF-8 data for VAR_STRING")
} else {
// TODO: Determine how to treat string if different collation
unsafe { str::from_utf8_unchecked(buf.unwrap()) }
};
s.to_owned()
}
}

View File

@ -1,48 +1,21 @@
use crate::{HasSqlType, MySql, HasTypeMetadata, Encode, Decode};
use chrono::{NaiveDateTime, Datelike, Timelike, NaiveTime, NaiveDate};
use crate::mysql::types::MySqlTypeMetadata;
use crate::mysql::protocol::{FieldType, ParameterFlag};
use crate::encode::IsNull;
use chrono::{NaiveDateTime, Timelike};
use crate::io::Buf;
use std::convert::{TryFrom, TryInto};
use byteorder::{LittleEndian, ByteOrder};
use chrono::format::Item::Literal;
impl HasSqlType<NaiveDateTime> for MySql {
fn metadata() -> Self::TypeMetadata {
MySqlTypeMetadata {
field_type: FieldType::MYSQL_TYPE_DATETIME,
param_flag: ParameterFlag::empty()
}
}
}
use crate::decode::{Decode, DecodeError};
use crate::encode::Encode;
use crate::mysql::MySql;
impl Encode<MySql> for NaiveDateTime {
fn encode(&self, buf: &mut Vec<u8>) -> IsNull {
// subtract the length byte
let length = Encode::<MySql>::size_hint(self) - 1;
buf.push(length as u8);
encode_date(self.date(), buf);
if length >= 7 {
buf.push(self.hour() as u8);
buf.push(self.minute() as u8);
buf.push(self.second() as u8);
}
if length == 11 {
buf.extend_from_slice(&self.timestamp_subsec_micros().to_le_bytes());
}
IsNull::No
fn encode(&self, buf: &mut Vec<u8>) {
unimplemented!()
}
fn size_hint(&self) -> usize {
match (self.hour(), self.minute(), self.second(), self.timestamp_subsec_micros()) {
match (
self.hour(),
self.minute(),
self.second(),
self.timestamp_subsec_micros(),
) {
// include the length byte
(0, 0, 0, 0) => 5,
(_, _, _, 0) => 8,
@ -52,126 +25,7 @@ impl Encode<MySql> for NaiveDateTime {
}
impl Decode<MySql> for NaiveDateTime {
fn decode(raw: Option<&[u8]>) -> Self {
let raw = raw.unwrap();
let len = raw[0];
assert_ne!(len, 0, "MySQL zero-dates are not supported");
let date = decode_date(&raw[1..]);
if len >= 7 {
date.and_hms_micro(
raw[5] as u32,
raw[6] as u32,
raw[7] as u32,
if len == 11 {
LittleEndian::read_u32(&raw[8..])
} else {
0
}
)
} else {
date.and_hms(0, 0, 0)
}
fn decode(raw: &[u8]) -> Result<Self, DecodeError> {
unimplemented!()
}
}
impl HasSqlType<NaiveDate> for MySql {
fn metadata() -> Self::TypeMetadata {
MySqlTypeMetadata {
field_type: FieldType::MYSQL_TYPE_DATE,
param_flag: ParameterFlag::empty()
}
}
}
impl Encode<MySql> for NaiveDate {
fn encode(&self, buf: &mut Vec<u8>) -> IsNull {
buf.push(4);
encode_date(*self, buf);
IsNull::No
}
fn size_hint(&self) -> usize {
5
}
}
impl Decode<MySql> for NaiveDate {
fn decode(raw: Option<&[u8]>) -> Self {
let raw = raw.unwrap();
assert_eq!(raw[0], 4, "expected only 4 bytes");
decode_date(&raw[1..])
}
}
fn encode_date(date: NaiveDate, buf: &mut Vec<u8>) {
// MySQL supports years from 1000 - 9999
let year = u16::try_from(date.year())
.unwrap_or_else(|_| panic!("NaiveDateTime out of range for Mysql: {}", date));
buf.extend_from_slice(&year.to_le_bytes());
buf.push(date.month() as u8);
buf.push(date.day() as u8);
}
fn decode_date(raw: &[u8]) -> NaiveDate {
NaiveDate::from_ymd(
LittleEndian::read_u16(raw) as i32,
raw[2] as u32,
raw[3] as u32
)
}
#[test]
fn test_encode_date_time() {
let mut buf = Vec::new();
// test values from https://dev.mysql.com/doc/internals/en/binary-protocol-value.html
let date1: NaiveDateTime = "2010-10-17T19:27:30.000001".parse().unwrap();
Encode::<MySql>::encode(&date1, &mut buf);
assert_eq!(*buf, [11, 218, 7, 10, 17, 19, 27, 30, 1, 0, 0, 0]);
buf.clear();
let date2: NaiveDateTime = "2010-10-17T19:27:30".parse().unwrap();
Encode::<MySql>::encode(&date2, &mut buf);
assert_eq!(*buf, [7, 218, 7, 10, 17, 19, 27, 30]);
buf.clear();
let date3: NaiveDateTime = "2010-10-17T00:00:00".parse().unwrap();
Encode::<MySql>::encode(&date3, &mut buf);
assert_eq!(*buf, [4, 218, 7, 10, 17]);
}
#[test]
fn test_decode_date_time() {
// test values from https://dev.mysql.com/doc/internals/en/binary-protocol-value.html
let buf = [11, 218, 7, 10, 17, 19, 27, 30, 1, 0, 0, 0];
let date1 = <NaiveDateTime as Decode<MySql>>::decode(Some(&buf));
assert_eq!(date1.to_string(), "2010-10-17 19:27:30.000001");
let buf = [7, 218, 7, 10, 17, 19, 27, 30];
let date2 = <NaiveDateTime as Decode<MySql>>::decode(Some(&buf));
assert_eq!(date2.to_string(), "2010-10-17 19:27:30");
let buf = [4, 218, 7, 10, 17];
let date3 = <NaiveDateTime as Decode<MySql>>::decode(Some(&buf));
assert_eq!(date3.to_string(), "2010-10-17 00:00:00");
}
#[test]
fn test_encode_date() {
let mut buf = Vec::new();
let date: NaiveDate = "2010-10-17".parse().unwrap();
Encode::<MySql>::encode(&date, &mut buf);
assert_eq!(*buf, [4, 218, 7, 10, 17]);
}
#[test]
fn test_decode_date() {
let buf = [4, 218, 7, 10, 17];
let date = <NaiveDate as Decode<MySql>>::decode(Some(&buf));
assert_eq!(date.to_string(), "2010-10-17");
}
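
The removed test vectors above use the binary-protocol date layout `[length, year lo, year hi, month, day, ...]`; as a standalone sanity check of the year bytes appearing in those vectors (editor's note):

fn main() {
    // 2010 in little-endian is [218, 7], matching [.., 218, 7, 10, 17, ..] above.
    assert_eq!(2010u16.to_le_bytes(), [218, 7]);
    assert_eq!(u16::from_le_bytes([218, 7]), 2010);
}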

View File

@ -0,0 +1,47 @@
use byteorder::LittleEndian;
use crate::decode::{Decode, DecodeError};
use crate::encode::Encode;
use crate::io::{Buf, BufMut};
use crate::mysql::protocol::Type;
use crate::mysql::types::MySqlTypeMetadata;
use crate::mysql::MySql;
use crate::types::HasSqlType;
impl HasSqlType<f32> for MySql {
#[inline]
fn metadata() -> MySqlTypeMetadata {
MySqlTypeMetadata::new(Type::FLOAT)
}
}
impl Encode<MySql> for f32 {
fn encode(&self, buf: &mut Vec<u8>) {
<i32 as Encode<MySql>>::encode(&(self.to_bits() as i32), buf);
}
}
impl Decode<MySql> for f32 {
fn decode(mut buf: &[u8]) -> Result<Self, DecodeError> {
Ok(f32::from_bits(<i32 as Decode<MySql>>::decode(buf)? as u32))
}
}
impl HasSqlType<f64> for MySql {
#[inline]
fn metadata() -> MySqlTypeMetadata {
MySqlTypeMetadata::new(Type::DOUBLE)
}
}
impl Encode<MySql> for f64 {
fn encode(&self, buf: &mut Vec<u8>) {
<i64 as Encode<MySql>>::encode(&(self.to_bits() as i64), buf);
}
}
impl Decode<MySql> for f64 {
fn decode(mut buf: &[u8]) -> Result<Self, DecodeError> {
Ok(f64::from_bits(<i64 as Decode<MySql>>::decode(buf)? as u64))
}
}
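
The float codecs above route through the integer codecs via `to_bits`/`from_bits`; a quick standalone check that the cast chain round-trips (editor's sketch):

fn main() {
    let x: f32 = 1.25;
    let bits = x.to_bits() as i32;          // what Encode hands to the i32 codec
    assert_eq!(f32::from_bits(bits as u32), x);

    let y: f64 = -2.5;
    assert_eq!(f64::from_bits((y.to_bits() as i64) as u64), y);
}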

Some files were not shown because too many files have changed in this diff.