feat: create sqlx.toml format (#3383)

* feat: create `sqlx.toml` format

* feat: add support for ignored_chars config to sqlx_core::migrate

* chore: test ignored_chars with `U+FEFF` (ZWNBSP/BOM)

https://en.wikipedia.org/wiki/Byte_order_mark

* refactor: make `Config` always compiled

simplifies usage while still making parsing optional for less generated code

* refactor: add origin information to `Column`

* feat(macros): implement `type_override` and `column_override` from `sqlx.toml`

* refactor(sqlx.toml): make all keys kebab-case, create `macros.preferred-crates`

* feat: make macros aware of `macros.preferred-crates`

* feat: make `sqlx-cli` aware of `database-url-var`

* feat: teach macros about `migrate.table-name`, `migrations-dir`

* feat: teach macros about `migrate.ignored-chars`

* chore: delete unused source file `sqlx-cli/src/migration.rs`

* feat: teach `sqlx-cli` about `migrate.defaults`

* feat: teach `sqlx-cli` about `migrate.migrations-dir`

* feat: teach `sqlx-cli` about `migrate.table-name`

* feat: introduce `migrate.create-schemas`

* WIP feat: create multi-tenant database example

* fix(postgres): don't fetch `ColumnOrigin` for transparently-prepared statements

* feat: progress on axum-multi-tenant example

* feat(config): better errors for mislabeled fields

* WIP feat: filling out axum-multi-tenant example

* feat: multi-tenant example

No longer Axum-based because filling out the request routes would have distracted from the purpose of the example.

* chore(ci): test multi-tenant example

* fixup after merge

* fix(ci): enable `sqlx-toml` in CLI build for examples

* fix: CI, README for `multi-tenant`

* fix: clippy warnings

* fix: multi-tenant README

* fix: sequential versioning inference for migrations

* fix: migration versioning with explicit overrides

* fix: only warn on ambiguous crates if the invocation relies on it

* fix: remove unused imports

* fix: doctest

* fix: `sqlx mig add` behavior and tests

* fix: restore original type-checking order

* fix: deprecation warning in `tests/postgres/macros.rs`

* feat: create postgres/multi-database example

* fix: examples/postgres/multi-database

* fix: cargo fmt

* chore: add tests for config `migrate.defaults`

* fix: sqlx-cli/tests/add.rs

* feat(cli): add `--config` override to all relevant commands

* chore: run `sqlx mig add` test with `RUST_BACKTRACE=1`

* fix: properly canonicalize config path for `sqlx mig add` test

* fix: get `sqlx mig add` test passing

* fix(cli): test `migrate.ignored-chars`, fix bugs

* feat: create `macros.preferred-crates` example

* fix(examples): use workspace `sqlx`

* fix: examples

* fix(sqlite): unexpected feature flags in `type_checking.rs`

* fix: run `cargo fmt`

* fix: more example fixes

* fix(ci): preferred-crates setup

* fix(examples): enable default-features for workspace `sqlx`

* fix(examples): issues in `preferred-crates`

* chore: adjust error message for missing param type in `query!()`

* doc: mention new `sqlx.toml` configuration

* chore: add `CHANGELOG` entry

Normally I generate these when cutting the release, but I wanted to take time to editorialize this one.

* doc: fix new example titles

* refactor: make `sqlx-toml` feature non-default, improve errors

* refactor: eliminate panics in `Config` read path

* chore: remove unused `axum` dependency from new examples

* fix(config): restore fallback to default config for macros

* chore(config): remove use of `once_cell` (to match `main`)
This commit is contained in:
Austin Bonander
2025-06-30 16:34:46 -07:00
committed by GitHub
parent 764ae2f702
commit 25cbeedab4
127 changed files with 6443 additions and 1138 deletions

View File

@@ -28,11 +28,6 @@ path = "src/bin/cargo-sqlx.rs"
[dependencies]
dotenvy = "0.15.0"
tokio = { version = "1.15.0", features = ["macros", "rt", "rt-multi-thread", "signal"] }
sqlx = { workspace = true, default-features = false, features = [
"runtime-tokio",
"migrate",
"any",
] }
futures-util = { version = "0.3.19", features = ["alloc"] }
clap = { version = "4.3.10", features = ["derive", "env", "wrap_help"] }
clap_complete = { version = "4.3.1", optional = true }
@@ -48,8 +43,18 @@ filetime = "0.2"
backoff = { version = "0.4.0", features = ["futures", "tokio"] }
[dependencies.sqlx]
workspace = true
default-features = false
features = [
"runtime-tokio",
"migrate",
"any",
]
[features]
default = ["postgres", "sqlite", "mysql", "native-tls", "completions"]
default = ["postgres", "sqlite", "mysql", "native-tls", "completions", "sqlx-toml"]
rustls = ["sqlx/tls-rustls"]
native-tls = ["sqlx/tls-native-tls"]
@@ -64,6 +69,8 @@ openssl-vendored = ["openssl/vendored"]
completions = ["dep:clap_complete"]
sqlx-toml = ["sqlx/sqlx-toml"]
# Conditional compilation only
_sqlite = []

View File

@@ -1,5 +1,5 @@
use crate::migrate;
use crate::opt::ConnectOpts;
use crate::opt::{ConnectOpts, MigrationSourceOpt};
use crate::{migrate, Config};
use console::{style, Term};
use dialoguer::Confirm;
use sqlx::any::Any;
@@ -19,14 +19,14 @@ pub async fn create(connect_opts: &ConnectOpts) -> anyhow::Result<()> {
std::sync::atomic::Ordering::Release,
);
Any::create_database(connect_opts.required_db_url()?).await?;
Any::create_database(connect_opts.expect_db_url()?).await?;
}
Ok(())
}
pub async fn drop(connect_opts: &ConnectOpts, confirm: bool, force: bool) -> anyhow::Result<()> {
if confirm && !ask_to_continue_drop(connect_opts.required_db_url()?.to_owned()).await {
if confirm && !ask_to_continue_drop(connect_opts.expect_db_url()?.to_owned()).await {
return Ok(());
}
@@ -36,9 +36,9 @@ pub async fn drop(connect_opts: &ConnectOpts, confirm: bool, force: bool) -> any
if exists {
if force {
Any::force_drop_database(connect_opts.required_db_url()?).await?;
Any::force_drop_database(connect_opts.expect_db_url()?).await?;
} else {
Any::drop_database(connect_opts.required_db_url()?).await?;
Any::drop_database(connect_opts.expect_db_url()?).await?;
}
}
@@ -46,18 +46,23 @@ pub async fn drop(connect_opts: &ConnectOpts, confirm: bool, force: bool) -> any
}
pub async fn reset(
migration_source: &str,
config: &Config,
migration_source: &MigrationSourceOpt,
connect_opts: &ConnectOpts,
confirm: bool,
force: bool,
) -> anyhow::Result<()> {
drop(connect_opts, confirm, force).await?;
setup(migration_source, connect_opts).await
setup(config, migration_source, connect_opts).await
}
pub async fn setup(migration_source: &str, connect_opts: &ConnectOpts) -> anyhow::Result<()> {
pub async fn setup(
config: &Config,
migration_source: &MigrationSourceOpt,
connect_opts: &ConnectOpts,
) -> anyhow::Result<()> {
create(connect_opts).await?;
migrate::run(migration_source, connect_opts, false, false, None).await
migrate::run(config, migration_source, connect_opts, false, false, None).await
}
async fn ask_to_continue_drop(db_url: String) -> bool {

View File

@@ -2,7 +2,6 @@ use std::future::Future;
use std::io;
use std::time::Duration;
use anyhow::Result;
use futures_util::TryFutureExt;
use sqlx::{AnyConnection, Connection};
@@ -22,6 +21,8 @@ mod prepare;
pub use crate::opt::Opt;
pub use sqlx::_unstable::config::{self, Config};
/// Check arguments for `--no-dotenv` _before_ Clap parsing, and apply `.env` if not set.
pub fn maybe_apply_dotenv() {
if std::env::args().any(|arg| arg == "--no-dotenv") {
@@ -31,7 +32,7 @@ pub fn maybe_apply_dotenv() {
dotenvy::dotenv().ok();
}
pub async fn run(opt: Opt) -> Result<()> {
pub async fn run(opt: Opt) -> anyhow::Result<()> {
// This `select!` is here so that when the process receives a `SIGINT` (CTRL + C),
// the futures currently running on this task get dropped before the program exits.
// This is currently necessary for the consumers of the `dialoguer` crate to restore
@@ -51,24 +52,24 @@ pub async fn run(opt: Opt) -> Result<()> {
}
}
async fn do_run(opt: Opt) -> Result<()> {
async fn do_run(opt: Opt) -> anyhow::Result<()> {
match opt.command {
Command::Migrate(migrate) => match migrate.command {
MigrateCommand::Add {
source,
description,
reversible,
sequential,
timestamp,
} => migrate::add(&source, &description, reversible, sequential, timestamp).await?,
MigrateCommand::Add(opts) => migrate::add(opts).await?,
MigrateCommand::Run {
source,
config,
dry_run,
ignore_missing,
connect_opts,
mut connect_opts,
target_version,
} => {
let config = config.load_config().await?;
connect_opts.populate_db_url(&config)?;
migrate::run(
&config,
&source,
&connect_opts,
dry_run,
@@ -79,12 +80,18 @@ async fn do_run(opt: Opt) -> Result<()> {
}
MigrateCommand::Revert {
source,
config,
dry_run,
ignore_missing,
connect_opts,
mut connect_opts,
target_version,
} => {
let config = config.load_config().await?;
connect_opts.populate_db_url(&config)?;
migrate::revert(
&config,
&source,
&connect_opts,
dry_run,
@@ -95,37 +102,83 @@ async fn do_run(opt: Opt) -> Result<()> {
}
MigrateCommand::Info {
source,
connect_opts,
} => migrate::info(&source, &connect_opts).await?,
MigrateCommand::BuildScript { source, force } => migrate::build_script(&source, force)?,
config,
mut connect_opts,
} => {
let config = config.load_config().await?;
connect_opts.populate_db_url(&config)?;
migrate::info(&config, &source, &connect_opts).await?
}
MigrateCommand::BuildScript {
source,
config,
force,
} => {
let config = config.load_config().await?;
migrate::build_script(&config, &source, force)?
}
},
Command::Database(database) => match database.command {
DatabaseCommand::Create { connect_opts } => database::create(&connect_opts).await?,
DatabaseCommand::Create {
config,
mut connect_opts,
} => {
let config = config.load_config().await?;
connect_opts.populate_db_url(&config)?;
database::create(&connect_opts).await?
}
DatabaseCommand::Drop {
confirmation,
connect_opts,
config,
mut connect_opts,
force,
} => database::drop(&connect_opts, !confirmation.yes, force).await?,
} => {
let config = config.load_config().await?;
connect_opts.populate_db_url(&config)?;
database::drop(&connect_opts, !confirmation.yes, force).await?
}
DatabaseCommand::Reset {
confirmation,
source,
connect_opts,
config,
mut connect_opts,
force,
} => database::reset(&source, &connect_opts, !confirmation.yes, force).await?,
} => {
let config = config.load_config().await?;
connect_opts.populate_db_url(&config)?;
database::reset(&config, &source, &connect_opts, !confirmation.yes, force).await?
}
DatabaseCommand::Setup {
source,
connect_opts,
} => database::setup(&source, &connect_opts).await?,
config,
mut connect_opts,
} => {
let config = config.load_config().await?;
connect_opts.populate_db_url(&config)?;
database::setup(&config, &source, &connect_opts).await?
}
},
Command::Prepare {
check,
all,
workspace,
connect_opts,
mut connect_opts,
args,
} => prepare::run(check, all, workspace, connect_opts, args).await?,
config,
} => {
let config = config.load_config().await?;
connect_opts.populate_db_url(&config)?;
prepare::run(check, all, workspace, connect_opts, args).await?
}
#[cfg(feature = "completions")]
Command::Completions { shell } => completions::run(shell),
@@ -153,7 +206,7 @@ where
{
sqlx::any::install_default_drivers();
let db_url = opts.required_db_url()?;
let db_url = opts.expect_db_url()?;
backoff::future::retry(
backoff::ExponentialBackoffBuilder::new()

View File

@@ -1,6 +1,6 @@
use crate::opt::ConnectOpts;
use crate::config::Config;
use crate::opt::{AddMigrationOpts, ConnectOpts, MigrationSourceOpt};
use anyhow::{bail, Context};
use chrono::Utc;
use console::style;
use sqlx::migrate::{AppliedMigration, Migrate, MigrateError, MigrationType, Migrator};
use sqlx::Connection;
@@ -11,142 +11,47 @@ use std::fs::{self, File};
use std::path::Path;
use std::time::Duration;
fn create_file(
migration_source: &str,
file_prefix: &str,
description: &str,
migration_type: MigrationType,
) -> anyhow::Result<()> {
use std::path::PathBuf;
pub async fn add(opts: AddMigrationOpts) -> anyhow::Result<()> {
let config = opts.config.load_config().await?;
let mut file_name = file_prefix.to_string();
file_name.push('_');
file_name.push_str(&description.replace(' ', "_"));
file_name.push_str(migration_type.suffix());
let source = opts.source.resolve_path(&config);
let mut path = PathBuf::new();
path.push(migration_source);
path.push(&file_name);
fs::create_dir_all(source).context("Unable to create migrations directory")?;
println!("Creating {}", style(path.display()).cyan());
let migrator = opts.source.resolve(&config).await?;
let mut file = File::create(&path).context("Failed to create migration file")?;
let version_prefix = opts.version_prefix(&config, &migrator);
std::io::Write::write_all(&mut file, migration_type.file_content().as_bytes())?;
Ok(())
}
enum MigrationOrdering {
Timestamp(String),
Sequential(String),
}
impl MigrationOrdering {
fn timestamp() -> MigrationOrdering {
Self::Timestamp(Utc::now().format("%Y%m%d%H%M%S").to_string())
}
fn sequential(version: i64) -> MigrationOrdering {
Self::Sequential(format!("{version:04}"))
}
fn file_prefix(&self) -> &str {
match self {
MigrationOrdering::Timestamp(prefix) => prefix,
MigrationOrdering::Sequential(prefix) => prefix,
}
}
fn infer(sequential: bool, timestamp: bool, migrator: &Migrator) -> Self {
match (timestamp, sequential) {
(true, true) => panic!("Impossible to specify both timestamp and sequential mode"),
(true, false) => MigrationOrdering::timestamp(),
(false, true) => MigrationOrdering::sequential(
migrator
.iter()
.last()
.map_or(1, |last_migration| last_migration.version + 1),
),
(false, false) => {
// inferring the naming scheme
let migrations = migrator
.iter()
.filter(|migration| migration.migration_type.is_up_migration())
.rev()
.take(2)
.collect::<Vec<_>>();
if let [last, pre_last] = &migrations[..] {
// there are at least two migrations, compare the last two
if last.version - pre_last.version == 1 {
// their version numbers differ by 1, infer sequential
MigrationOrdering::sequential(last.version + 1)
} else {
MigrationOrdering::timestamp()
}
} else if let [last] = &migrations[..] {
// there is only one existing migration
if last.version == 0 || last.version == 1 {
// infer sequential if the version number is 0 or 1
MigrationOrdering::sequential(last.version + 1)
} else {
MigrationOrdering::timestamp()
}
} else {
MigrationOrdering::timestamp()
}
}
}
}
}
pub async fn add(
migration_source: &str,
description: &str,
reversible: bool,
sequential: bool,
timestamp: bool,
) -> anyhow::Result<()> {
fs::create_dir_all(migration_source).context("Unable to create migrations directory")?;
let migrator = Migrator::new(Path::new(migration_source)).await?;
// Type of newly created migration will be the same as the first one
// or reversible flag if this is the first migration
let migration_type = MigrationType::infer(&migrator, reversible);
let ordering = MigrationOrdering::infer(sequential, timestamp, &migrator);
let file_prefix = ordering.file_prefix();
if migration_type.is_reversible() {
if opts.reversible(&config, &migrator) {
create_file(
migration_source,
file_prefix,
description,
source,
&version_prefix,
&opts.description,
MigrationType::ReversibleUp,
)?;
create_file(
migration_source,
file_prefix,
description,
source,
&version_prefix,
&opts.description,
MigrationType::ReversibleDown,
)?;
} else {
create_file(
migration_source,
file_prefix,
description,
source,
&version_prefix,
&opts.description,
MigrationType::Simple,
)?;
}
// if the migrations directory is empty
let has_existing_migrations = fs::read_dir(migration_source)
let has_existing_migrations = fs::read_dir(source)
.map(|mut dir| dir.next().is_some())
.unwrap_or(false);
if !has_existing_migrations {
let quoted_source = if migration_source != "migrations" {
format!("{migration_source:?}")
let quoted_source = if opts.source.source.is_some() {
format!("{source:?}")
} else {
"".to_string()
};
@@ -184,6 +89,32 @@ See: https://docs.rs/sqlx/{version}/sqlx/macro.migrate.html
Ok(())
}
fn create_file(
migration_source: &str,
file_prefix: &str,
description: &str,
migration_type: MigrationType,
) -> anyhow::Result<()> {
use std::path::PathBuf;
let mut file_name = file_prefix.to_string();
file_name.push('_');
file_name.push_str(&description.replace(' ', "_"));
file_name.push_str(migration_type.suffix());
let mut path = PathBuf::new();
path.push(migration_source);
path.push(&file_name);
println!("Creating {}", style(path.display()).cyan());
let mut file = File::create(&path).context("Failed to create migration file")?;
std::io::Write::write_all(&mut file, migration_type.file_content().as_bytes())?;
Ok(())
}
fn short_checksum(checksum: &[u8]) -> String {
let mut s = String::with_capacity(checksum.len() * 2);
for b in checksum {
@@ -192,14 +123,25 @@ fn short_checksum(checksum: &[u8]) -> String {
s
}
pub async fn info(migration_source: &str, connect_opts: &ConnectOpts) -> anyhow::Result<()> {
let migrator = Migrator::new(Path::new(migration_source)).await?;
pub async fn info(
config: &Config,
migration_source: &MigrationSourceOpt,
connect_opts: &ConnectOpts,
) -> anyhow::Result<()> {
let migrator = migration_source.resolve(config).await?;
let mut conn = crate::connect(connect_opts).await?;
conn.ensure_migrations_table().await?;
// FIXME: we shouldn't actually be creating anything here
for schema_name in &config.migrate.create_schemas {
conn.create_schema_if_not_exists(schema_name).await?;
}
conn.ensure_migrations_table(config.migrate.table_name())
.await?;
let applied_migrations: HashMap<_, _> = conn
.list_applied_migrations()
.list_applied_migrations(config.migrate.table_name())
.await?
.into_iter()
.map(|m| (m.version, m))
@@ -272,13 +214,15 @@ fn validate_applied_migrations(
}
pub async fn run(
migration_source: &str,
config: &Config,
migration_source: &MigrationSourceOpt,
connect_opts: &ConnectOpts,
dry_run: bool,
ignore_missing: bool,
target_version: Option<i64>,
) -> anyhow::Result<()> {
let migrator = Migrator::new(Path::new(migration_source)).await?;
let migrator = migration_source.resolve(config).await?;
if let Some(target_version) = target_version {
if !migrator.version_exists(target_version) {
bail!(MigrateError::VersionNotPresent(target_version));
@@ -287,14 +231,21 @@ pub async fn run(
let mut conn = crate::connect(connect_opts).await?;
conn.ensure_migrations_table().await?;
for schema_name in &config.migrate.create_schemas {
conn.create_schema_if_not_exists(schema_name).await?;
}
let version = conn.dirty_version().await?;
conn.ensure_migrations_table(config.migrate.table_name())
.await?;
let version = conn.dirty_version(config.migrate.table_name()).await?;
if let Some(version) = version {
bail!(MigrateError::Dirty(version));
}
let applied_migrations = conn.list_applied_migrations().await?;
let applied_migrations = conn
.list_applied_migrations(config.migrate.table_name())
.await?;
validate_applied_migrations(&applied_migrations, &migrator, ignore_missing)?;
let latest_version = applied_migrations
@@ -332,7 +283,7 @@ pub async fn run(
let elapsed = if dry_run || skip {
Duration::new(0, 0)
} else {
conn.apply(migration).await?
conn.apply(config.migrate.table_name(), migration).await?
};
let text = if skip {
"Skipped"
@@ -365,13 +316,15 @@ pub async fn run(
}
pub async fn revert(
migration_source: &str,
config: &Config,
migration_source: &MigrationSourceOpt,
connect_opts: &ConnectOpts,
dry_run: bool,
ignore_missing: bool,
target_version: Option<i64>,
) -> anyhow::Result<()> {
let migrator = Migrator::new(Path::new(migration_source)).await?;
let migrator = migration_source.resolve(config).await?;
if let Some(target_version) = target_version {
if target_version != 0 && !migrator.version_exists(target_version) {
bail!(MigrateError::VersionNotPresent(target_version));
@@ -380,14 +333,22 @@ pub async fn revert(
let mut conn = crate::connect(connect_opts).await?;
conn.ensure_migrations_table().await?;
// FIXME: we should not be creating anything here if it doesn't exist
for schema_name in &config.migrate.create_schemas {
conn.create_schema_if_not_exists(schema_name).await?;
}
let version = conn.dirty_version().await?;
conn.ensure_migrations_table(config.migrate.table_name())
.await?;
let version = conn.dirty_version(config.migrate.table_name()).await?;
if let Some(version) = version {
bail!(MigrateError::Dirty(version));
}
let applied_migrations = conn.list_applied_migrations().await?;
let applied_migrations = conn
.list_applied_migrations(config.migrate.table_name())
.await?;
validate_applied_migrations(&applied_migrations, &migrator, ignore_missing)?;
let latest_version = applied_migrations
@@ -421,7 +382,7 @@ pub async fn revert(
let elapsed = if dry_run || skip {
Duration::new(0, 0)
} else {
conn.revert(migration).await?
conn.revert(config.migrate.table_name(), migration).await?
};
let text = if skip {
"Skipped"
@@ -458,7 +419,13 @@ pub async fn revert(
Ok(())
}
pub fn build_script(migration_source: &str, force: bool) -> anyhow::Result<()> {
pub fn build_script(
config: &Config,
migration_source: &MigrationSourceOpt,
force: bool,
) -> anyhow::Result<()> {
let source = migration_source.resolve_path(config);
anyhow::ensure!(
Path::new("Cargo.toml").exists(),
"must be run in a Cargo project root"
@@ -473,7 +440,7 @@ pub fn build_script(migration_source: &str, force: bool) -> anyhow::Result<()> {
r#"// generated by `sqlx migrate build-script`
fn main() {{
// trigger recompilation when a new migration is added
println!("cargo:rerun-if-changed={migration_source}");
println!("cargo:rerun-if-changed={source}");
}}
"#,
);

View File

@@ -1,187 +0,0 @@
use anyhow::{bail, Context};
use console::style;
use std::fs::{self, File};
use std::io::{Read, Write};
const MIGRATION_FOLDER: &str = "migrations";
pub struct Migration {
pub name: String,
pub sql: String,
}
pub fn add_file(name: &str) -> anyhow::Result<()> {
use chrono::prelude::*;
use std::path::PathBuf;
fs::create_dir_all(MIGRATION_FOLDER).context("Unable to create migrations directory")?;
let dt = Utc::now();
let mut file_name = dt.format("%Y-%m-%d_%H-%M-%S").to_string();
file_name.push_str("_");
file_name.push_str(name);
file_name.push_str(".sql");
let mut path = PathBuf::new();
path.push(MIGRATION_FOLDER);
path.push(&file_name);
let mut file = File::create(path).context("Failed to create file")?;
file.write_all(b"-- Add migration script here")
.context("Could not write to file")?;
println!("Created migration: '{file_name}'");
Ok(())
}
pub async fn run() -> anyhow::Result<()> {
let migrator = crate::migrator::get()?;
if !migrator.can_migrate_database() {
bail!(
"Database migrations not supported for {}",
migrator.database_type()
);
}
migrator.create_migration_table().await?;
let migrations = load_migrations()?;
for mig in migrations.iter() {
let mut tx = migrator.begin_migration().await?;
if tx.check_if_applied(&mig.name).await? {
println!("Already applied migration: '{}'", mig.name);
continue;
}
println!("Applying migration: '{}'", mig.name);
tx.execute_migration(&mig.sql)
.await
.with_context(|| format!("Failed to run migration {:?}", &mig.name))?;
tx.save_applied_migration(&mig.name)
.await
.context("Failed to insert migration")?;
tx.commit().await.context("Failed")?;
}
Ok(())
}
pub async fn list() -> anyhow::Result<()> {
let migrator = crate::migrator::get()?;
if !migrator.can_migrate_database() {
bail!(
"Database migrations not supported for {}",
migrator.database_type()
);
}
let file_migrations = load_migrations()?;
if migrator
.check_if_database_exists(&migrator.get_database_name()?)
.await?
{
let applied_migrations = migrator.get_migrations().await.unwrap_or_else(|_| {
println!("Could not retrieve data from migration table");
Vec::new()
});
let mut width = 0;
for mig in file_migrations.iter() {
width = std::cmp::max(width, mig.name.len());
}
for mig in file_migrations.iter() {
let status = if applied_migrations
.iter()
.find(|&m| mig.name == *m)
.is_some()
{
style("Applied").green()
} else {
style("Not Applied").yellow()
};
println!("{:width$}\t{}", mig.name, status, width = width);
}
let orphans = check_for_orphans(file_migrations, applied_migrations);
if let Some(orphans) = orphans {
println!("\nFound migrations applied in the database that does not have a corresponding migration file:");
for name in orphans {
println!("{:width$}\t{}", name, style("Orphan").red(), width = width);
}
}
} else {
println!("No database found, listing migrations");
for mig in file_migrations {
println!("{}", mig.name);
}
}
Ok(())
}
fn load_migrations() -> anyhow::Result<Vec<Migration>> {
let entries = fs::read_dir(&MIGRATION_FOLDER).context("Could not find 'migrations' dir")?;
let mut migrations = Vec::new();
for e in entries {
if let Ok(e) = e {
if let Ok(meta) = e.metadata() {
if !meta.is_file() {
continue;
}
if let Some(ext) = e.path().extension() {
if ext != "sql" {
println!("Wrong ext: {ext:?}");
continue;
}
} else {
continue;
}
let mut file = File::open(e.path())
.with_context(|| format!("Failed to open: '{:?}'", e.file_name()))?;
let mut contents = String::new();
file.read_to_string(&mut contents)
.with_context(|| format!("Failed to read: '{:?}'", e.file_name()))?;
migrations.push(Migration {
name: e.file_name().to_str().unwrap().to_string(),
sql: contents,
});
}
}
}
migrations.sort_by(|a, b| a.name.partial_cmp(&b.name).unwrap());
Ok(migrations)
}
fn check_for_orphans(
file_migrations: Vec<Migration>,
applied_migrations: Vec<String>,
) -> Option<Vec<String>> {
let orphans: Vec<String> = applied_migrations
.iter()
.filter(|m| !file_migrations.iter().any(|fm| fm.name == **m))
.cloned()
.collect();
if orphans.len() > 0 {
Some(orphans)
} else {
None
}
}

View File

@@ -1,11 +1,17 @@
use std::ops::{Deref, Not};
use crate::config::migrate::{DefaultMigrationType, DefaultVersioning};
use crate::config::Config;
use anyhow::Context;
use chrono::Utc;
use clap::{
builder::{styling::AnsiColor, Styles},
Args, Parser,
};
#[cfg(feature = "completions")]
use clap_complete::Shell;
use sqlx::migrate::{MigrateError, Migrator, ResolveWith};
use std::env;
use std::ops::{Deref, Not};
use std::path::PathBuf;
const HELP_STYLES: Styles = Styles::styled()
.header(AnsiColor::Blue.on_default().bold())
@@ -62,6 +68,9 @@ pub enum Command {
#[clap(flatten)]
connect_opts: ConnectOpts,
#[clap(flatten)]
config: ConfigOpt,
},
#[clap(alias = "mig")]
@@ -85,6 +94,9 @@ pub enum DatabaseCommand {
Create {
#[clap(flatten)]
connect_opts: ConnectOpts,
#[clap(flatten)]
config: ConfigOpt,
},
/// Drops the database specified in your DATABASE_URL.
@@ -92,6 +104,9 @@ pub enum DatabaseCommand {
#[clap(flatten)]
confirmation: Confirmation,
#[clap(flatten)]
config: ConfigOpt,
#[clap(flatten)]
connect_opts: ConnectOpts,
@@ -106,7 +121,10 @@ pub enum DatabaseCommand {
confirmation: Confirmation,
#[clap(flatten)]
source: Source,
source: MigrationSourceOpt,
#[clap(flatten)]
config: ConfigOpt,
#[clap(flatten)]
connect_opts: ConnectOpts,
@@ -119,7 +137,10 @@ pub enum DatabaseCommand {
/// Creates the database specified in your DATABASE_URL and runs any pending migrations.
Setup {
#[clap(flatten)]
source: Source,
source: MigrationSourceOpt,
#[clap(flatten)]
config: ConfigOpt,
#[clap(flatten)]
connect_opts: ConnectOpts,
@@ -137,8 +158,55 @@ pub struct MigrateOpt {
pub enum MigrateCommand {
/// Create a new migration with the given description.
///
/// --------------------------------
///
/// Migrations may either be simple, or reversible.
///
/// Reversible migrations can be reverted with `sqlx migrate revert`, simple migrations cannot.
///
/// Reversible migrations are created as a pair of two files with the same filename but
/// extensions `.up.sql` and `.down.sql` for the up-migration and down-migration, respectively.
///
/// The up-migration should contain the commands to be used when applying the migration,
/// while the down-migration should contain the commands to reverse the changes made by the
/// up-migration.
///
/// When writing down-migrations, care should be taken to ensure that they
/// do not leave the database in an inconsistent state.
///
/// Simple migrations have just `.sql` for their extension and represent an up-migration only.
///
/// Note that reverting a migration is **destructive** and will likely result in data loss.
/// Reverting a migration will not restore any data discarded by commands in the up-migration.
///
/// It is recommended to always back up the database before running migrations.
///
/// --------------------------------
///
/// For convenience, this command attempts to detect if reversible migrations are in-use.
///
/// If the latest existing migration is reversible, the new migration will also be reversible.
///
/// Otherwise, a simple migration is created.
///
/// This behavior can be overridden by `--simple` or `--reversible`, respectively.
///
/// The default type to use can also be set in `sqlx.toml`.
///
/// --------------------------------
///
/// A version number will be automatically assigned to the migration.
///
/// Migrations are applied in ascending order by version number.
/// Version numbers do not need to be strictly consecutive.
///
/// The migration process will abort if SQLx encounters a migration with a version number
/// less than _any_ previously applied migration.
///
/// Migrations should only be created with increasing version number.
///
/// --------------------------------
///
/// For convenience, this command will attempt to detect if sequential versioning is in use,
/// and if so, continue the sequence.
///
@@ -148,33 +216,20 @@ pub enum MigrateCommand {
///
/// * only one migration exists and its version number is either 0 or 1.
///
/// Otherwise timestamp versioning is assumed.
/// Otherwise, timestamp versioning (`YYYYMMDDHHMMSS`) is assumed.
///
/// This behavior can be overridden by `--sequential` or `--timestamp`, respectively.
Add {
description: String,
#[clap(flatten)]
source: Source,
/// If true, creates a pair of up and down migration files with same version
/// else creates a single sql file
#[clap(short)]
reversible: bool,
/// If set, use timestamp versioning for the new migration. Conflicts with `--sequential`.
#[clap(short, long)]
timestamp: bool,
/// If set, use sequential versioning for the new migration. Conflicts with `--timestamp`.
#[clap(short, long, conflicts_with = "timestamp")]
sequential: bool,
},
/// This behavior can be overridden by `--timestamp` or `--sequential`, respectively.
///
/// The default versioning to use can also be set in `sqlx.toml`.
Add(AddMigrationOpts),
/// Run all pending migrations.
Run {
#[clap(flatten)]
source: Source,
source: MigrationSourceOpt,
#[clap(flatten)]
config: ConfigOpt,
/// List all the migrations to be run without applying
#[clap(long)]
@@ -195,7 +250,10 @@ pub enum MigrateCommand {
/// Revert the latest migration with a down file.
Revert {
#[clap(flatten)]
source: Source,
source: MigrationSourceOpt,
#[clap(flatten)]
config: ConfigOpt,
/// List the migration to be reverted without applying
#[clap(long)]
@@ -217,7 +275,10 @@ pub enum MigrateCommand {
/// List all available migrations.
Info {
#[clap(flatten)]
source: Source,
source: MigrationSourceOpt,
#[clap(flatten)]
config: ConfigOpt,
#[clap(flatten)]
connect_opts: ConnectOpts,
@@ -228,7 +289,10 @@ pub enum MigrateCommand {
/// Must be run in a Cargo project root.
BuildScript {
#[clap(flatten)]
source: Source,
source: MigrationSourceOpt,
#[clap(flatten)]
config: ConfigOpt,
/// Overwrite the build script if it already exists.
#[clap(long)]
@@ -236,19 +300,62 @@ pub enum MigrateCommand {
},
}
/// Argument for the migration scripts source.
#[derive(Args, Debug)]
pub struct Source {
/// Path to folder containing migrations.
#[clap(long, default_value = "migrations")]
source: String,
// Arguments for `sqlx migrate add`.
//
// `--simple`/`--reversible` and `--timestamp`/`--sequential` are mutually
// exclusive pairs (enforced via `conflicts_with`); when neither flag of a
// pair is set, behavior falls back to `[migrate.defaults]` in `sqlx.toml`
// or is inferred from existing migrations (see `impl AddMigrationOpts`).
pub struct AddMigrationOpts {
    // Positional argument: human-readable description, embedded in the
    // generated migration file's name.
    pub description: String,

    #[clap(flatten)]
    pub source: MigrationSourceOpt,

    #[clap(flatten)]
    pub config: ConfigOpt,

    /// If set, create an up-migration only. Conflicts with `--reversible`.
    #[clap(long, conflicts_with = "reversible")]
    simple: bool,

    /// If set, create a pair of up and down migration files with same version.
    ///
    /// Conflicts with `--simple`.
    #[clap(short, long, conflicts_with = "simple")]
    reversible: bool,

    /// If set, use timestamp versioning for the new migration. Conflicts with `--sequential`.
    ///
    /// Timestamp format: `YYYYMMDDHHMMSS`
    #[clap(short, long, conflicts_with = "sequential")]
    timestamp: bool,

    /// If set, use sequential versioning for the new migration. Conflicts with `--timestamp`.
    #[clap(short, long, conflicts_with = "timestamp")]
    sequential: bool,
}
impl Deref for Source {
type Target = String;
/// Argument for the migration scripts source.
#[derive(Args, Debug)]
pub struct MigrationSourceOpt {
    /// Path to folder containing migrations.
    ///
    /// Defaults to `migrations/` if not specified, but a different default may be set by `sqlx.toml`.
    #[clap(long)]
    // `None` means "use the configured default"; resolved in `MigrationSourceOpt::resolve_path()`.
    pub source: Option<String>,
}
fn deref(&self) -> &Self::Target {
&self.source
impl MigrationSourceOpt {
    /// The migrations directory to use: the `--source` override when given,
    /// otherwise the path configured (or defaulted) by `sqlx.toml`.
    pub fn resolve_path<'a>(&'a self, config: &'a Config) -> &'a str {
        match &self.source {
            Some(source) => source,
            None => config.migrate.migrations_dir(),
        }
    }

    /// Build a [`Migrator`] from the resolved path, applying resolve-time
    /// settings from the config (e.g. `migrate.ignored-chars`).
    pub async fn resolve(&self, config: &Config) -> Result<Migrator, MigrateError> {
        let resolver = ResolveWith(self.resolve_path(config), config.migrate.to_resolve_config());
        Migrator::new(resolver).await
    }
}
@@ -259,7 +366,7 @@ pub struct ConnectOpts {
pub no_dotenv: NoDotenvOpt,
/// Location of the DB, by default will be read from the DATABASE_URL env var or `.env` files.
#[clap(long, short = 'D', env)]
#[clap(long, short = 'D')]
pub database_url: Option<String>,
/// The maximum time, in seconds, to try connecting to the database server before
@@ -290,15 +397,85 @@ pub struct NoDotenvOpt {
pub no_dotenv: bool,
}
#[derive(Args, Debug)]
pub struct ConfigOpt {
    /// Override the path to the config file.
    ///
    /// Defaults to `sqlx.toml` in the current directory, if it exists.
    ///
    /// Configuration file loading may be bypassed with `--config=/dev/null` on Linux,
    /// or `--config=NUL` on Windows.
    ///
    /// Config file loading is enabled by the `sqlx-toml` feature.
    #[clap(long)]
    // Consumed by `ConfigOpt::load_config()`; `None` falls back to `./sqlx.toml` if present.
    pub config: Option<PathBuf>,
}
impl ConnectOpts {
/// Require a database URL to be provided, otherwise
/// return an error.
pub fn required_db_url(&self) -> anyhow::Result<&str> {
self.database_url.as_deref().ok_or_else(
|| anyhow::anyhow!(
"the `--database-url` option or the `DATABASE_URL` environment variable must be provided"
)
)
pub fn expect_db_url(&self) -> anyhow::Result<&str> {
self.database_url
.as_deref()
.context("BUG: database_url not populated")
}
/// Populate `database_url` from the environment, if not set.
pub fn populate_db_url(&mut self, config: &Config) -> anyhow::Result<()> {
if self.database_url.is_some() {
return Ok(());
}
let var = config.common.database_url_var();
let context = if var != "DATABASE_URL" {
" (`common.database-url-var` in `sqlx.toml`)"
} else {
""
};
match env::var(var) {
Ok(url) => {
if !context.is_empty() {
eprintln!("Read database url from `{var}`{context}");
}
self.database_url = Some(url)
}
Err(env::VarError::NotPresent) => {
anyhow::bail!("`--database-url` or `{var}`{context} must be set")
}
Err(env::VarError::NotUnicode(_)) => {
anyhow::bail!("`{var}`{context} is not valid UTF-8");
}
}
Ok(())
}
}
impl ConfigOpt {
    /// Load the sqlx configuration: from the explicit `--config` path if one
    /// was given, otherwise from `sqlx.toml` in the current directory if it
    /// exists, otherwise the built-in defaults.
    pub async fn load_config(&self) -> anyhow::Result<Config> {
        let explicit_path = self.config.clone();

        // Tokio does file I/O on a background task anyway
        tokio::task::spawn_blocking(|| match explicit_path {
            Some(path) => {
                let err_str = format!("error reading config from {path:?}");
                Config::try_from_path(path).context(err_str)
            }
            None => {
                let default_path = PathBuf::from("sqlx.toml");

                if !default_path.exists() {
                    return Ok(Config::default());
                }

                eprintln!("Found `sqlx.toml` in current directory; reading...");
                Ok(Config::try_from_path(default_path)?)
            }
        })
        .await
        .context("unexpected error loading config")?
    }
}
@@ -334,3 +511,67 @@ impl Not for IgnoreMissing {
!self.ignore_missing
}
}
impl AddMigrationOpts {
    // Whether the new migration should be a reversible (.up.sql/.down.sql) pair.
    //
    // Precedence: explicit `--reversible`/`--simple` flags first, then
    // `migrate.defaults.migration-type` from `sqlx.toml`; `inferred` copies
    // the style of the most recent existing migration (simple if none exist).
    pub fn reversible(&self, config: &Config, migrator: &Migrator) -> bool {
        if self.reversible {
            return true;
        }
        if self.simple {
            return false;
        }
        match config.migrate.defaults.migration_type {
            DefaultMigrationType::Inferred => migrator
                .iter()
                .last()
                .is_some_and(|m| m.migration_type.is_reversible()),
            DefaultMigrationType::Simple => false,
            DefaultMigrationType::Reversible => true,
        }
    }

    // Version prefix for the new migration's file name.
    //
    // Precedence mirrors `reversible()`: explicit `--timestamp`/`--sequential`
    // flags first, then `migrate.defaults.migration-versioning`. With `inferred`,
    // sequential numbering continues only when history already looks sequential;
    // otherwise a fresh timestamp is generated.
    pub fn version_prefix(&self, config: &Config, migrator: &Migrator) -> String {
        let default_versioning = &config.migrate.defaults.migration_versioning;
        match (self.timestamp, self.sequential, default_versioning) {
            (true, false, _) | (false, false, DefaultVersioning::Timestamp) => next_timestamp(),
            // Forced sequential: next version is last + 1 (starts at 1 if no migrations exist).
            (false, true, _) | (false, false, DefaultVersioning::Sequential) => fmt_sequential(
                migrator
                    .migrations
                    .last()
                    .map_or(1, |migration| migration.version + 1),
            ),
            (false, false, DefaultVersioning::Inferred) => {
                migrator
                    .migrations
                    // Look at the last one or two migrations to guess the scheme.
                    .rchunks(2)
                    .next()
                    .and_then(|migrations| {
                        match migrations {
                            [previous, latest] => {
                                // If the latest two versions differ by 1, infer sequential.
                                (latest.version - previous.version == 1)
                                    .then_some(latest.version + 1)
                            }
                            [latest] => {
                                // If only one migration exists and its version is 0 or 1, infer sequential
                                matches!(latest.version, 0 | 1).then_some(latest.version + 1)
                            }
                            // `rchunks(2)` only yields chunks of length 1 or 2.
                            _ => unreachable!(),
                        }
                    })
                    // No migrations, or history doesn't look sequential: fall back to timestamp.
                    .map_or_else(next_timestamp, fmt_sequential)
            }
            (true, true, _) => unreachable!("BUG: Clap should have rejected this case"),
        }
    }
}
// Current UTC time rendered as a 14-digit `YYYYMMDDHHMMSS` migration version.
fn next_timestamp() -> String {
    Utc::now().format("%Y%m%d%H%M%S").to_string()
}
/// Render a sequential migration version, zero-padded to at least four digits
/// (e.g. `1` -> `"0001"`; larger versions print in full).
fn fmt_sequential(version: i64) -> String {
    format!("{:04}", version)
}

View File

@@ -1,20 +1,11 @@
use anyhow::Context;
use assert_cmd::Command;
use std::cmp::Ordering;
use std::fs::read_dir;
use std::ops::Index;
use std::path::{Path, PathBuf};
use tempfile::TempDir;
#[test]
fn add_migration_ambiguous() -> anyhow::Result<()> {
for reversible in [true, false] {
let files = AddMigrations::new()?
.run("hello world", reversible, true, true, false)?
.fs_output()?;
assert_eq!(files.0, Vec::<FileName>::new());
}
Ok(())
}
#[derive(Debug, PartialEq, Eq)]
struct FileName {
id: u64,
@@ -34,11 +25,6 @@ impl PartialOrd<Self> for FileName {
impl FileName {
fn assert_is_timestamp(&self) {
//if the library is still used in 2050, this will need bumping ^^
assert!(
self.id < 20500101000000,
"{self:?} is too high for a timestamp"
);
assert!(
self.id > 20200101000000,
"{self:?} is too low for a timestamp"
@@ -59,6 +45,154 @@ impl From<PathBuf> for FileName {
}
}
}
// Files created by one or more `sqlx migrate add` runs, parsed into
// (id, description, suffix) triples relative to the temp directory root.
struct AddMigrationsResult(Vec<FileName>);

impl AddMigrationsResult {
    fn len(&self) -> usize {
        self.0.len()
    }
    // Assert every created file belongs to an up/down pair (reversible style),
    // with balanced counts of `.up.sql` and `.down.sql` files.
    fn assert_is_reversible(&self) {
        let mut up_cnt = 0;
        let mut down_cnt = 0;
        for file in self.0.iter() {
            if file.suffix == "down.sql" {
                down_cnt += 1;
            } else if file.suffix == "up.sql" {
                up_cnt += 1;
            } else {
                panic!("unknown suffix for {file:?}");
            }
            assert!(file.description.starts_with("hello_world"));
        }
        assert_eq!(up_cnt, down_cnt);
    }
    // Assert every created file is a plain `.sql` migration (simple style).
    fn assert_is_not_reversible(&self) {
        for file in self.0.iter() {
            assert_eq!(file.suffix, "sql");
            assert!(file.description.starts_with("hello_world"));
        }
    }
}

impl Index<usize> for AddMigrationsResult {
    type Output = FileName;
    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index]
    }
}
// Test harness: invokes `cargo sqlx migrate add` inside an isolated temp directory.
struct AddMigrations {
    tempdir: TempDir,
    // Pre-formatted `--config=<path>` argument set by `with_config()`;
    // forwarded to every subsequent `run()`.
    config_arg: Option<String>,
}
impl AddMigrations {
    /// Create a harness backed by a fresh temporary directory.
    fn new() -> anyhow::Result<Self> {
        anyhow::Ok(Self {
            tempdir: TempDir::new()?,
            config_arg: None,
        })
    }

    /// Use the config file `tests/assets/<filename>` for all subsequent runs.
    ///
    /// The path is canonicalized because the command runs with the temp
    /// directory as its working directory.
    fn with_config(mut self, filename: &str) -> anyhow::Result<Self> {
        // Bug fix: this previously ignored `filename` entirely, so every
        // config-based test would attempt to load the same (wrong) path.
        let path = format!("./tests/assets/{filename}");

        let path = std::fs::canonicalize(&path)
            .with_context(|| format!("error canonicalizing path {path:?}"))?;

        let path = path
            .to_str()
            .with_context(|| format!("canonicalized version of path {path:?} is not UTF-8"))?;

        self.config_arg = Some(format!("--config={path}"));

        Ok(self)
    }

    /// Run `sqlx migrate add <description>` with the given flags and assert
    /// that the command succeeds or fails as expected.
    fn run(
        &self,
        description: &str,
        reversible: bool,
        timestamp: bool,
        sequential: bool,
        expect_success: bool,
    ) -> anyhow::Result<&'_ Self> {
        let mut args = vec!["sqlx", "migrate", "add", description];

        if let Some(config_arg) = self.config_arg.as_deref() {
            args.push(config_arg);
        }
        if reversible {
            args.push("-r");
        }
        if timestamp {
            args.push("--timestamp");
        }
        if sequential {
            args.push("--sequential");
        }

        let cmd_result = Command::cargo_bin("cargo-sqlx")?
            .current_dir(&self.tempdir)
            .args(args)
            .env("RUST_BACKTRACE", "1")
            .assert();

        if expect_success {
            cmd_result.success();
        } else {
            cmd_result.failure();
        }

        anyhow::Ok(self)
    }

    /// Collect every file created under the temp directory, as paths relative
    /// to its root, parsed into `FileName`s.
    fn fs_output(&self) -> anyhow::Result<AddMigrationsResult> {
        let files = recurse_files(&self.tempdir)?;

        let mut fs_paths = Vec::with_capacity(files.len());

        for path in files {
            let relative_path = path.strip_prefix(self.tempdir.path())?.to_path_buf();
            fs_paths.push(FileName::from(relative_path));
        }

        Ok(AddMigrationsResult(fs_paths))
    }
}
/// Recursively collect every regular file under `path`, sorted by path.
fn recurse_files(path: impl AsRef<Path>) -> anyhow::Result<Vec<PathBuf>> {
    let mut collected = Vec::new();

    for entry in read_dir(path)? {
        let entry = entry?;
        let meta = entry.metadata()?;

        if meta.is_dir() {
            // Descend and merge the subdirectory's (already sorted) files.
            collected.extend(recurse_files(entry.path())?);
        } else if meta.is_file() {
            collected.push(entry.path());
        }
    }

    collected.sort();
    Ok(collected)
}
#[test]
fn add_migration_error_ambiguous() -> anyhow::Result<()> {
    // `--timestamp` and `--sequential` conflict; the command must fail
    // regardless of whether `-r` is also passed.
    for reversible in [true, false] {
        let files = AddMigrations::new()?
            // Passing both `--timestamp` and `--sequential` should result in an error.
            .run("hello world", reversible, true, true, false)?
            .fs_output()?;
        // Assert that no files are created
        assert_eq!(files.0, []);
    }
    Ok(())
}
#[test]
fn add_migration_sequential() -> anyhow::Result<()> {
{
@@ -74,10 +208,12 @@ fn add_migration_sequential() -> anyhow::Result<()> {
.run("hello world1", false, false, true, true)?
.run("hello world2", true, false, true, true)?
.fs_output()?;
assert_eq!(files.len(), 2);
files.assert_is_not_reversible();
assert_eq!(files.len(), 3);
assert_eq!(files.0[0].id, 1);
assert_eq!(files.0[1].id, 2);
assert_eq!(files.0[1].suffix, "down.sql");
assert_eq!(files.0[2].id, 2);
assert_eq!(files.0[2].suffix, "up.sql");
}
Ok(())
}
@@ -126,146 +262,145 @@ fn add_migration_timestamp() -> anyhow::Result<()> {
.run("hello world1", false, true, false, true)?
.run("hello world2", true, false, true, true)?
.fs_output()?;
assert_eq!(files.len(), 2);
files.assert_is_not_reversible();
assert_eq!(files.len(), 3);
files.0[0].assert_is_timestamp();
// sequential -> timestamp is one way
files.0[1].assert_is_timestamp();
files.0[2].assert_is_timestamp();
}
Ok(())
}
#[test]
fn add_migration_timestamp_reversible() -> anyhow::Result<()> {
{
let files = AddMigrations::new()?
.run("hello world", true, false, false, true)?
.fs_output()?;
assert_eq!(files.len(), 2);
files.assert_is_reversible();
files.0[0].assert_is_timestamp();
files.0[1].assert_is_timestamp();
// .up.sql and .down.sql
files[0].assert_is_timestamp();
assert_eq!(files[1].id, files[0].id);
}
{
let files = AddMigrations::new()?
.run("hello world", true, true, false, true)?
.fs_output()?;
assert_eq!(files.len(), 2);
files.assert_is_reversible();
files.0[0].assert_is_timestamp();
files.0[1].assert_is_timestamp();
// .up.sql and .down.sql
files[0].assert_is_timestamp();
assert_eq!(files[1].id, files[0].id);
}
{
let files = AddMigrations::new()?
.run("hello world1", true, true, false, true)?
.run("hello world2", true, false, true, true)?
// Reversible should be inferred, but sequential should be forced
.run("hello world2", false, false, true, true)?
.fs_output()?;
assert_eq!(files.len(), 4);
files.assert_is_reversible();
files.0[0].assert_is_timestamp();
files.0[1].assert_is_timestamp();
files.0[2].assert_is_timestamp();
files.0[3].assert_is_timestamp();
// First pair: .up.sql and .down.sql
files[0].assert_is_timestamp();
assert_eq!(files[1].id, files[0].id);
// Second pair; we set `--sequential` so this version should be one higher
assert_eq!(files[2].id, files[1].id + 1);
assert_eq!(files[3].id, files[1].id + 1);
}
Ok(())
}
struct AddMigrationsResult(Vec<FileName>);
impl AddMigrationsResult {
fn len(&self) -> usize {
self.0.len()
}
fn assert_is_reversible(&self) {
let mut up_cnt = 0;
let mut down_cnt = 0;
for file in self.0.iter() {
if file.suffix == "down.sql" {
down_cnt += 1;
} else if file.suffix == "up.sql" {
up_cnt += 1;
} else {
panic!("unknown suffix for {file:?}");
}
assert!(file.description.starts_with("hello_world"));
}
assert_eq!(up_cnt, down_cnt);
}
fn assert_is_not_reversible(&self) {
for file in self.0.iter() {
assert_eq!(file.suffix, "sql");
assert!(file.description.starts_with("hello_world"));
}
}
}
struct AddMigrations(TempDir);
#[test]
fn add_migration_config_default_type_reversible() -> anyhow::Result<()> {
let files = AddMigrations::new()?
.with_config("config_default_type_reversible.toml")?
// Type should default to reversible without any flags
.run("hello world", false, false, false, true)?
.run("hello world2", false, false, false, true)?
.run("hello world3", false, false, false, true)?
.fs_output()?;
impl AddMigrations {
fn new() -> anyhow::Result<Self> {
anyhow::Ok(Self(TempDir::new()?))
}
fn run(
self,
description: &str,
revesible: bool,
timestamp: bool,
sequential: bool,
expect_success: bool,
) -> anyhow::Result<Self> {
let cmd_result = Command::cargo_bin("cargo-sqlx")?
.current_dir(&self.0)
.args(
[
vec!["sqlx", "migrate", "add", description],
match revesible {
true => vec!["-r"],
false => vec![],
},
match timestamp {
true => vec!["--timestamp"],
false => vec![],
},
match sequential {
true => vec!["--sequential"],
false => vec![],
},
]
.concat(),
)
.assert();
if expect_success {
cmd_result.success();
} else {
cmd_result.failure();
}
anyhow::Ok(self)
}
fn fs_output(&self) -> anyhow::Result<AddMigrationsResult> {
let files = recurse_files(&self.0)?;
let mut fs_paths = Vec::with_capacity(files.len());
for path in files {
let relative_path = path.strip_prefix(self.0.path())?.to_path_buf();
fs_paths.push(FileName::from(relative_path));
}
Ok(AddMigrationsResult(fs_paths))
}
assert_eq!(files.len(), 6);
files.assert_is_reversible();
files[0].assert_is_timestamp();
assert_eq!(files[1].id, files[0].id);
files[2].assert_is_timestamp();
assert_eq!(files[3].id, files[2].id);
files[4].assert_is_timestamp();
assert_eq!(files[5].id, files[4].id);
Ok(())
}
fn recurse_files(path: impl AsRef<Path>) -> anyhow::Result<Vec<PathBuf>> {
let mut buf = vec![];
let entries = read_dir(path)?;
#[test]
fn add_migration_config_default_versioning_sequential() -> anyhow::Result<()> {
let files = AddMigrations::new()?
.with_config("config_default_versioning_sequential.toml")?
// Versioning should default to sequential without any flags
.run("hello world", false, false, false, true)?
.run("hello world2", false, false, false, true)?
.run("hello world3", false, false, false, true)?
.fs_output()?;
for entry in entries {
let entry = entry?;
let meta = entry.metadata()?;
assert_eq!(files.len(), 3);
files.assert_is_not_reversible();
if meta.is_dir() {
let mut subdir = recurse_files(entry.path())?;
buf.append(&mut subdir);
}
assert_eq!(files[0].id, 1);
assert_eq!(files[1].id, 2);
assert_eq!(files[2].id, 3);
if meta.is_file() {
buf.push(entry.path());
}
}
buf.sort();
Ok(buf)
Ok(())
}
#[test]
fn add_migration_config_default_versioning_timestamp() -> anyhow::Result<()> {
    let migrations = AddMigrations::new()?;

    // Start without a config: first migration forces `--sequential`.
    migrations
        .run("hello world", false, false, true, true)?
        // Default config should infer sequential even without passing `--sequential`
        .run("hello world2", false, false, false, true)?
        .run("hello world3", false, false, false, true)?;

    let files = migrations.fs_output()?;
    assert_eq!(files.len(), 3);
    files.assert_is_not_reversible();
    assert_eq!(files[0].id, 1);
    assert_eq!(files[1].id, 2);
    assert_eq!(files[2].id, 3);

    // Now set a config that uses `default-versioning = "timestamp"`
    let migrations = migrations.with_config("config_default_versioning_timestamp.toml")?;

    // Now the default should be a timestamp
    migrations
        .run("hello world4", false, false, false, true)?
        .run("hello world5", false, false, false, true)?;

    let files = migrations.fs_output()?;
    assert_eq!(files.len(), 5);
    files.assert_is_not_reversible();
    // The earlier sequential files are untouched; the new ones get timestamps.
    assert_eq!(files[0].id, 1);
    assert_eq!(files[1].id, 2);
    assert_eq!(files[2].id, 3);
    files[3].assert_is_timestamp();
    files[4].assert_is_timestamp();

    Ok(())
}

View File

@@ -0,0 +1,2 @@
[migrate.defaults]
migration-type = "reversible"

View File

@@ -0,0 +1,2 @@
[migrate.defaults]
migration-versioning = "sequential"

View File

@@ -0,0 +1,2 @@
[migrate.defaults]
migration-versioning = "timestamp"

View File

@@ -1,25 +1,41 @@
use assert_cmd::{assert::Assert, Command};
use sqlx::_unstable::config::Config;
use sqlx::{migrate::Migrate, Connection, SqliteConnection};
use std::{
env::temp_dir,
fs::remove_file,
env, fs,
path::{Path, PathBuf},
};
pub struct TestDatabase {
file_path: PathBuf,
migrations: String,
migrations_path: PathBuf,
pub config_path: Option<PathBuf>,
}
impl TestDatabase {
pub fn new(name: &str, migrations: &str) -> Self {
let migrations_path = Path::new("tests").join(migrations);
let file_path = Path::new(&temp_dir()).join(format!("test-{}.db", name));
let ret = Self {
// Note: only set when _building_
let temp_dir = option_env!("CARGO_TARGET_TMPDIR").map_or_else(env::temp_dir, PathBuf::from);
let test_dir = temp_dir.join("migrate");
fs::create_dir_all(&test_dir)
.unwrap_or_else(|e| panic!("error creating directory: {test_dir:?}: {e}"));
let file_path = test_dir.join(format!("test-{name}.db"));
if file_path.exists() {
fs::remove_file(&file_path)
.unwrap_or_else(|e| panic!("error deleting test database {file_path:?}: {e}"));
}
let this = Self {
file_path,
migrations: String::from(migrations_path.to_str().unwrap()),
migrations_path: Path::new("tests").join(migrations),
config_path: None,
};
Command::cargo_bin("cargo-sqlx")
.unwrap()
.args([
@@ -27,11 +43,15 @@ impl TestDatabase {
"database",
"create",
"--database-url",
&ret.connection_string(),
&this.connection_string(),
])
.assert()
.success();
ret
this
}
// Point subsequent commands at a different migrations directory under `tests/`.
pub fn set_migrations(&mut self, migrations: &str) {
    self.migrations_path = Path::new("tests").join(migrations);
}
pub fn connection_string(&self) -> String {
@@ -39,55 +59,77 @@ impl TestDatabase {
}
pub fn run_migration(&self, revert: bool, version: Option<i64>, dry_run: bool) -> Assert {
let ver = match version {
Some(v) => v.to_string(),
None => String::from(""),
};
Command::cargo_bin("cargo-sqlx")
.unwrap()
.args(
[
vec![
"sqlx",
"migrate",
match revert {
true => "revert",
false => "run",
},
"--database-url",
&self.connection_string(),
"--source",
&self.migrations,
],
match version {
Some(_) => vec!["--target-version", &ver],
None => vec![],
},
match dry_run {
true => vec!["--dry-run"],
false => vec![],
},
]
.concat(),
)
.assert()
let mut command = Command::cargo_bin("sqlx").unwrap();
command
.args([
"migrate",
match revert {
true => "revert",
false => "run",
},
"--database-url",
&self.connection_string(),
"--source",
])
.arg(&self.migrations_path);
if let Some(config_path) = &self.config_path {
command.arg("--config").arg(config_path);
}
if let Some(version) = version {
command.arg("--target-version").arg(version.to_string());
}
if dry_run {
command.arg("--dry-run");
}
command.assert()
}
pub async fn applied_migrations(&self) -> Vec<i64> {
let mut conn = SqliteConnection::connect(&self.connection_string())
.await
.unwrap();
conn.list_applied_migrations()
let config = Config::default();
conn.list_applied_migrations(config.migrate.table_name())
.await
.unwrap()
.iter()
.map(|m| m.version)
.collect()
}
// Run `sqlx migrate info` against this test database and return the
// assertion handle so callers can check exit status and stdout.
pub fn migrate_info(&self) -> Assert {
    let mut command = Command::cargo_bin("sqlx").unwrap();
    command
        .args([
            "migrate",
            "info",
            "--database-url",
            &self.connection_string(),
            "--source",
        ])
        .arg(&self.migrations_path);
    // Forward `--config` when the test supplies a custom `sqlx.toml`.
    if let Some(config_path) = &self.config_path {
        command.arg("--config").arg(config_path);
    }
    command.assert()
}
}
impl Drop for TestDatabase {
fn drop(&mut self) {
remove_file(&self.file_path).unwrap();
// Only remove the database if there isn't a failure.
if !std::thread::panicking() {
fs::remove_file(&self.file_path).unwrap_or_else(|e| {
panic!("error deleting test database {:?}: {e}", self.file_path)
});
}
}
}

View File

@@ -0,0 +1 @@
*.sql text eol=lf

View File

@@ -0,0 +1,6 @@
create table user
(
-- integer primary keys are the most efficient in SQLite
user_id integer primary key,
username text unique not null
);

View File

@@ -0,0 +1,10 @@
create table post
(
post_id integer primary key,
user_id integer not null references user (user_id),
content text not null,
-- Defaults have to be wrapped in parenthesis
created_at datetime default (datetime('now'))
);
create index post_created_at on post (created_at desc);

View File

@@ -0,0 +1,10 @@
create table comment
(
comment_id integer primary key,
post_id integer not null references post (post_id),
user_id integer not null references "user" (user_id),
content text not null,
created_at datetime default (datetime('now'))
);
create index comment_created_at on comment (created_at desc);

View File

@@ -0,0 +1 @@
*.sql text eol=crlf

View File

@@ -0,0 +1,6 @@
create table user
(
-- integer primary keys are the most efficient in SQLite
user_id integer primary key,
username text unique not null
);

View File

@@ -0,0 +1,10 @@
create table post
(
post_id integer primary key,
user_id integer not null references user (user_id),
content text not null,
-- Defaults have to be wrapped in parenthesis
created_at datetime default (datetime('now'))
);
create index post_created_at on post (created_at desc);

View File

@@ -0,0 +1,10 @@
create table comment
(
comment_id integer primary key,
post_id integer not null references post (post_id),
user_id integer not null references "user" (user_id),
content text not null,
created_at datetime default (datetime('now'))
);
create index comment_created_at on comment (created_at desc);

View File

@@ -0,0 +1 @@
*.sql text eol=lf

View File

@@ -0,0 +1,6 @@
create table user
(
-- integer primary keys are the most efficient in SQLite
user_id integer primary key,
username text unique not null
);

View File

@@ -0,0 +1,10 @@
create table post
(
post_id integer primary key,
user_id integer not null references user (user_id),
content text not null,
-- Defaults have to be wrapped in parenthesis
created_at datetime default (datetime('now'))
);
create index post_created_at on post (created_at desc);

View File

@@ -0,0 +1,10 @@
create table comment
(
comment_id integer primary key,
post_id integer not null references post (post_id),
user_id integer not null references "user" (user_id),
content text not null,
created_at datetime default (datetime('now'))
);
create index comment_created_at on comment (created_at desc);

View File

@@ -0,0 +1 @@
*.sql text eol=lf

View File

@@ -0,0 +1,6 @@
create table user
(
-- integer primary keys are the most efficient in SQLite
user_id integer primary key,
username text unique not null
);

View File

@@ -0,0 +1,10 @@
create table post
(
post_id integer primary key,
user_id integer not null references user (user_id),
content text not null,
-- Defaults have to be wrapped in parenthesis
created_at datetime default (datetime('now'))
);
create index post_created_at on post (created_at desc);

View File

@@ -0,0 +1,10 @@
create table comment
(
comment_id integer primary key,
post_id integer not null references post (post_id),
user_id integer not null references "user" (user_id),
content text not null,
created_at datetime default (datetime('now'))
);
create index comment_created_at on comment (created_at desc);

View File

@@ -0,0 +1,7 @@
[migrate]
# Ignore common whitespace characters (beware syntactically significant whitespace!)
# Space, tab, CR, LF, zero-width non-breaking space (U+FEFF)
#
# U+FEFF is added by some editors as a magic number at the beginning of a text file indicating it is UTF-8 encoded,
# where it is known as a byte-order mark (BOM): https://en.wikipedia.org/wiki/Byte_order_mark
ignored-chars = [" ", "\t", "\r", "\n", "\uFEFF"]

View File

@@ -13,16 +13,13 @@ async fn run_reversible_migrations() {
];
// Without --target-version specified.
{
let db = TestDatabase::new("migrate_run_reversible_latest", "migrations_reversible");
let db = TestDatabase::new("run_reversible_latest", "migrations_reversible");
db.run_migration(false, None, false).success();
assert_eq!(db.applied_migrations().await, all_migrations);
}
// With --target-version specified.
{
let db = TestDatabase::new(
"migrate_run_reversible_latest_explicit",
"migrations_reversible",
);
let db = TestDatabase::new("run_reversible_latest_explicit", "migrations_reversible");
// Move to latest, explicitly specified.
db.run_migration(false, Some(20230501000000), false)
@@ -41,10 +38,7 @@ async fn run_reversible_migrations() {
}
// With --target-version, incrementally upgrade.
{
let db = TestDatabase::new(
"migrate_run_reversible_incremental",
"migrations_reversible",
);
let db = TestDatabase::new("run_reversible_incremental", "migrations_reversible");
// First version
db.run_migration(false, Some(20230101000000), false)
@@ -92,7 +86,7 @@ async fn revert_migrations() {
// Without --target-version
{
let db = TestDatabase::new("migrate_revert_incremental", "migrations_reversible");
let db = TestDatabase::new("revert_incremental", "migrations_reversible");
db.run_migration(false, None, false).success();
// Dry-run
@@ -109,7 +103,7 @@ async fn revert_migrations() {
}
// With --target-version
{
let db = TestDatabase::new("migrate_revert_incremental", "migrations_reversible");
let db = TestDatabase::new("revert_incremental", "migrations_reversible");
db.run_migration(false, None, false).success();
// Dry-run downgrade to version 3.
@@ -142,6 +136,32 @@ async fn revert_migrations() {
// Downgrade to zero.
db.run_migration(true, Some(0), false).success();
assert_eq!(db.applied_migrations().await, vec![] as Vec<i64>);
assert_eq!(db.applied_migrations().await, Vec::<i64>::new());
}
}
#[tokio::test]
async fn ignored_chars() {
    // Apply migrations from the LF-line-ending variant first; the test config's
    // `migrate.ignored-chars` strips spaces, tabs, CR/LF, and U+FEFF (BOM)
    // before checksumming.
    let mut db = TestDatabase::new("ignored-chars", "ignored-chars/LF");
    db.config_path = Some("tests/ignored-chars/sqlx.toml".into());
    db.run_migration(false, None, false).success();
    // Same migrations, CRLF line endings: checksums must still match.
    db.set_migrations("ignored-chars/CRLF");
    let expected_info = "1/installed user\n2/installed post\n3/installed comment\n";
    // `ignored-chars` should produce the same migration checksum here
    db.migrate_info().success().stdout(expected_info);
    // Running migration should be a no-op
    db.run_migration(false, None, false).success().stdout("");
    // Variant with a UTF-8 byte-order mark prepended to each file.
    db.set_migrations("ignored-chars/BOM");
    db.migrate_info().success().stdout(expected_info);
    db.run_migration(false, None, false).success().stdout("");
    // "oops-all-tabs": presumably whitespace replaced with tabs — directory
    // contents not shown here; TODO confirm against the fixture files.
    db.set_migrations("ignored-chars/oops-all-tabs");
    db.migrate_info().success().stdout(expected_info);
    db.run_migration(false, None, false).success().stdout("");
}