mirror of
https://github.com/launchbadge/sqlx.git
synced 2025-12-30 13:20:59 +00:00
* refactor: introduce `SqlSafeStr` API * rebase main * Add SqlStr + remove Statement lifetime * Update the definition of Executor and AnyConnectionBackend + update Postgres driver * Update MySql driver * Update Sqlite driver * remove debug clone count * Reduce the amount of SqlStr clones * improve QueryBuilder error message * cargo fmt * fix clippy warnings * fix doc test * Avoid panic in `QueryBuilder::reset` * Use `QueryBuilder` when removing all test db's * Add comment to `SqlStr` Co-authored-by: Austin Bonander <austin.bonander@gmail.com> * Update sqlx-core/src/query_builder.rs Co-authored-by: Austin Bonander <austin.bonander@gmail.com> * Add `Clone` as supertrait to `Statement` * Move `Connection`, `AnyConnectionBackend` and `TransactionManager` to `SqlStr` * Replace `sql_cloned` with `sql` in `Statement` * Update `Executor` trait * Update unit tests + QueryBuilder changes * Remove code in comments * Update comment in `QueryBuilder` * Fix clippy warnings * Update `Migrate` comment * Small changes * Move `Migration` to `SqlStr` --------- Co-authored-by: Austin Bonander <austin.bonander@gmail.com>
208 lines
7.0 KiB
Rust
208 lines
7.0 KiB
Rust
use std::future::Future;
|
|
use std::ops::Deref;
|
|
use std::str::FromStr;
|
|
use std::sync::OnceLock;
|
|
use std::time::Duration;
|
|
|
|
use sqlx_core::connection::Connection;
|
|
use sqlx_core::query_builder::QueryBuilder;
|
|
use sqlx_core::query_scalar::query_scalar;
|
|
use sqlx_core::sql_str::AssertSqlSafe;
|
|
|
|
use crate::error::Error;
|
|
use crate::executor::Executor;
|
|
use crate::pool::{Pool, PoolOptions};
|
|
use crate::query::query;
|
|
use crate::{PgConnectOptions, PgConnection, Postgres};
|
|
|
|
pub(crate) use sqlx_core::testing::*;
|
|
|
|
// Using a blocking `OnceLock` here because the critical sections are short.
//
// Process-wide pool of connections to the *master* database (the one named in
// `DATABASE_URL`), shared by all tests in the binary. Initialized on first use
// by `test_context()`; `cleanup_test()` panics if accessed before then.
static MASTER_POOL: OnceLock<Pool<Postgres>> = OnceLock::new();
// Automatically delete any databases created before the start of the test binary.
// NOTE(review): this comment appears detached — the sweep it describes lives in
// `cleanup_test_dbs()` below; confirm it wasn't meant to annotate removed code.
|
impl TestSupport for Postgres {
    /// Builds the per-test context (master pool handle, per-test pool options,
    /// and a freshly provisioned database name).
    ///
    /// Delegates to the free function `test_context` below.
    fn test_context(
        args: &TestArgs,
    ) -> impl Future<Output = Result<TestContext<Self>, Error>> + Send + '_ {
        test_context(args)
    }

    /// Drops a single test database and removes its bookkeeping row.
    ///
    /// # Panics
    /// Panics if `MASTER_POOL` has not been initialized, i.e. when invoked
    /// outside `#[sqlx::test]`.
    async fn cleanup_test(db_name: &str) -> Result<(), Error> {
        let mut conn = MASTER_POOL
            .get()
            .expect("cleanup_test() invoked outside `#[sqlx::test]`")
            .acquire()
            .await?;

        do_cleanup(&mut conn, db_name).await
    }

    /// Best-effort sweep of every test database registered in
    /// `_sqlx_test.databases`.
    ///
    /// Returns `Ok(None)` when no databases are registered; otherwise
    /// `Ok(Some(n))` where `n` is the number of databases *listed* — see the
    /// review note at the return site.
    async fn cleanup_test_dbs() -> Result<Option<usize>, Error> {
        let url = dotenvy::var("DATABASE_URL").expect("DATABASE_URL must be set");

        let mut conn = PgConnection::connect(&url).await?;

        // Every database name the test harness has ever registered.
        let delete_db_names: Vec<String> = query_scalar("select db_name from _sqlx_test.databases")
            .fetch_all(&mut conn)
            .await?;

        if delete_db_names.is_empty() {
            return Ok(None);
        }

        let mut deleted_db_names = Vec::with_capacity(delete_db_names.len());

        // The builder is reused across iterations: push the name, execute,
        // then `reset()` back to the initial "drop database if exists " prefix.
        let mut builder = QueryBuilder::new("drop database if exists ");

        for db_name in &delete_db_names {
            // NOTE(review): the identifier is pushed unquoted here, while
            // `do_cleanup()` quotes it via `{db_name:?}`. Fine as long as
            // generated names never need quoting — confirm.
            builder.push(db_name);

            match builder.build().execute(&mut conn).await {
                Ok(_deleted) => {
                    // Only successfully dropped databases are unregistered below.
                    deleted_db_names.push(db_name);
                }
                // Assume a database error just means the DB is still in use.
                Err(Error::Database(dbe)) => {
                    eprintln!("could not clean test database {db_name:?}: {dbe}")
                }
                // Bubble up other errors
                Err(e) => return Err(e),
            }

            builder.reset();
        }

        // Remove bookkeeping rows only for databases that were actually dropped.
        query("delete from _sqlx_test.databases where db_name = any($1::text[])")
            .bind(&deleted_db_names)
            .execute(&mut conn)
            .await?;

        let _ = conn.close().await;

        // NOTE(review): this reports the number of databases *listed*
        // (`delete_db_names`), not the number actually dropped
        // (`deleted_db_names`) — skipped in-use databases are counted.
        // Confirm that is the intended meaning of the return value.
        Ok(Some(delete_db_names.len()))
    }

    async fn snapshot(_conn: &mut Self::Connection) -> Result<FixtureSnapshot<Self>, Error> {
        // TODO: I want to get the testing feature out the door so this will have to wait,
        // but I'm keeping the code around for now because I plan to come back to it.
        todo!()
    }
}
|
|
|
|
/// Installs (or reuses) the process-wide master pool, creates the
/// `_sqlx_test` bookkeeping schema under an advisory lock, drops any leftover
/// database from a previous run of the same test, and creates a fresh one.
///
/// # Panics
/// Panics if `DATABASE_URL` is unset or unparseable, or if it changed
/// (host/database) between calls within the same process.
async fn test_context(args: &TestArgs) -> Result<TestContext<Postgres>, Error> {
    let url = dotenvy::var("DATABASE_URL").expect("DATABASE_URL must be set");

    let master_opts = PgConnectOptions::from_str(&url).expect("failed to parse DATABASE_URL");

    let pool = PoolOptions::new()
        // Postgres' normal connection limit is 100 plus 3 superuser connections
        // We don't want to use the whole cap and there may be fuzziness here due to
        // concurrently running tests anyway.
        .max_connections(20)
        // Immediately close master connections. Tokio's I/O streams don't like hopping runtimes.
        .after_release(|_conn, _| Box::pin(async move { Ok(false) }))
        .connect_lazy_with(master_opts);

    // First caller installs `pool`; later callers get the existing pool back
    // along with the rejected one, and verify the URL didn't change under us.
    let master_pool = match once_lock_try_insert_polyfill(&MASTER_POOL, pool) {
        Ok(inserted) => inserted,
        Err((existing, pool)) => {
            // Sanity checks.
            assert_eq!(
                existing.connect_options().host,
                pool.connect_options().host,
                "DATABASE_URL changed at runtime, host differs"
            );

            assert_eq!(
                existing.connect_options().database,
                pool.connect_options().database,
                "DATABASE_URL changed at runtime, database differs"
            );

            existing
        }
    };

    let mut conn = master_pool.acquire().await?;

    // language=PostgreSQL
    conn.execute(
        // Explicit lock avoids this latent bug: https://stackoverflow.com/a/29908840
        // I couldn't find a bug on the mailing list for `CREATE SCHEMA` specifically,
        // but a clearly related bug with `CREATE TABLE` has been known since 2007:
        // https://www.postgresql.org/message-id/200710222037.l9MKbCJZ098744%40wwwmaster.postgresql.org
        // magic constant 8318549251334697844 is just 8 ascii bytes 'sqlxtest'.
        r#"
        select pg_advisory_xact_lock(8318549251334697844);

        create schema if not exists _sqlx_test;

        create table if not exists _sqlx_test.databases (
            db_name text primary key,
            test_path text not null,
            created_at timestamptz not null default now()
        );

        create index if not exists databases_created_at
            on _sqlx_test.databases(created_at);

        create sequence if not exists _sqlx_test.database_ids;
    "#,
    )
    .await?;

    let db_name = Postgres::db_name(args);
    // Drop any stale database left over from a previous run of this test.
    do_cleanup(&mut conn, &db_name).await?;

    // Register the database *before* creating it so a crash between the two
    // statements still leaves a row for the next cleanup sweep to find.
    query(
        r#"
        insert into _sqlx_test.databases(db_name, test_path) values ($1, $2)
    "#,
    )
    .bind(&db_name)
    .bind(args.test_path)
    .execute(&mut *conn)
    .await?;

    // `{db_name:?}` double-quotes the identifier; the debug_assert pins that
    // quoting behavior so the raw DDL below stays injection-safe.
    let create_command = format!("create database {db_name:?}");
    debug_assert!(create_command.starts_with("create database \""));
    conn.execute(AssertSqlSafe(create_command)).await?;

    Ok(TestContext {
        pool_opts: PoolOptions::new()
            // Don't allow a single test to take all the connections.
            // Most tests shouldn't require more than 5 connections concurrently,
            // or else they're likely doing too much in one test.
            .max_connections(5)
            // Close connections ASAP if left in the idle queue.
            .idle_timeout(Some(Duration::from_secs(1)))
            .parent(master_pool.clone()),
        connect_opts: master_pool
            .connect_options()
            .deref()
            .clone()
            .database(&db_name),
        db_name,
    })
}
|
|
|
|
async fn do_cleanup(conn: &mut PgConnection, db_name: &str) -> Result<(), Error> {
|
|
let delete_db_command = format!("drop database if exists {db_name:?};");
|
|
conn.execute(AssertSqlSafe(delete_db_command)).await?;
|
|
query("delete from _sqlx_test.databases where db_name = $1::text")
|
|
.bind(db_name)
|
|
.execute(&mut *conn)
|
|
.await?;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// Polyfill for the unstable `OnceLock::try_insert`.
///
/// Attempts to store `value` in `this`. Returns `Ok(&stored)` if our value
/// was the one installed, or `Err((&existing, value))` handing the rejected
/// value back when the cell was already initialized.
fn once_lock_try_insert_polyfill<T>(this: &OnceLock<T>, value: T) -> Result<&T, (&T, T)> {
    let mut holder = Some(value);
    // The closure runs at most once; it consumes `holder` only if the cell
    // was empty, so whatever remains tells us which case we're in.
    let stored = this.get_or_init(|| holder.take().expect("init closure runs at most once"));
    if let Some(rejected) = holder {
        Err((stored, rejected))
    } else {
        Ok(stored)
    }
}
|