feat: create sqlx.toml format (#3383)

* feat: create `sqlx.toml` format

* feat: add support for ignored_chars config to sqlx_core::migrate

* chore: test ignored_chars with `U+FEFF` (ZWNBSP/BOM)

https://en.wikipedia.org/wiki/Byte_order_mark

* refactor: make `Config` always compiled

simplifies usage while still making parsing optional for less generated code

* refactor: add origin information to `Column`

* feat(macros): implement `type_override` and `column_override` from `sqlx.toml`

* refactor(sqlx.toml): make all keys kebab-case, create `macros.preferred-crates`

* feat: make macros aware of `macros.preferred-crates`

* feat: make `sqlx-cli` aware of `database-url-var`

* feat: teach macros about `migrate.table-name`, `migrations-dir`

* feat: teach macros about `migrate.ignored-chars`

* chore: delete unused source file `sqlx-cli/src/migration.rs`

* feat: teach `sqlx-cli` about `migrate.defaults`

* feat: teach `sqlx-cli` about `migrate.migrations-dir`

* feat: teach `sqlx-cli` about `migrate.table-name`

* feat: introduce `migrate.create-schemas`

* WIP feat: create multi-tenant database example

* fix(postgres): don't fetch `ColumnOrigin` for transparently-prepared statements

* feat: progress on axum-multi-tenant example

* feat(config): better errors for mislabeled fields

* WIP feat: filling out axum-multi-tenant example

* feat: multi-tenant example

No longer Axum-based because filling out the request routes would have distracted from the purpose of the example.

* chore(ci): test multi-tenant example

* fixup after merge

* fix(ci): enable `sqlx-toml` in CLI build for examples

* fix: CI, README for `multi-tenant`

* fix: clippy warnings

* fix: multi-tenant README

* fix: sequential versioning inference for migrations

* fix: migration versioning with explicit overrides

* fix: only warn on ambiguous crates if the invocation relies on it

* fix: remove unused imports

* fix: doctest

* fix: `sqlx mig add` behavior and tests

* fix: restore original type-checking order

* fix: deprecation warning in `tests/postgres/macros.rs`

* feat: create postgres/multi-database example

* fix: examples/postgres/multi-database

* fix: cargo fmt

* chore: add tests for config `migrate.defaults`

* fix: sqlx-cli/tests/add.rs

* feat(cli): add `--config` override to all relevant commands

* chore: run `sqlx mig add` test with `RUST_BACKTRACE=1`

* fix: properly canonicalize config path for `sqlx mig add` test

* fix: get `sqlx mig add` test passing

* fix(cli): test `migrate.ignored-chars`, fix bugs

* feat: create `macros.preferred-crates` example

* fix(examples): use workspace `sqlx`

* fix: examples

* fix(sqlite): unexpected feature flags in `type_checking.rs`

* fix: run `cargo fmt`

* fix: more example fixes

* fix(ci): preferred-crates setup

* fix(examples): enable default-features for workspace `sqlx`

* fix(examples): issues in `preferred-crates`

* chore: adjust error message for missing param type in `query!()`

* doc: mention new `sqlx.toml` configuration

* chore: add `CHANGELOG` entry

Normally I generate these when cutting the release, but I wanted to take time to editorialize this one.

* doc: fix new example titles

* refactor: make `sqlx-toml` feature non-default, improve errors

* refactor: eliminate panics in `Config` read path

* chore: remove unused `axum` dependency from new examples

* fix(config): restore fallback to default config for macros

* chore(config): remove use of `once_cell` (to match `main`)
This commit is contained in:
Austin Bonander
2025-06-30 16:34:46 -07:00
committed by GitHub
parent 764ae2f702
commit 25cbeedab4
127 changed files with 6443 additions and 1138 deletions

View File

@@ -1,20 +1,11 @@
use anyhow::Context;
use assert_cmd::Command;
use std::cmp::Ordering;
use std::fs::read_dir;
use std::ops::Index;
use std::path::{Path, PathBuf};
use tempfile::TempDir;
#[test]
fn add_migration_ambiguous() -> anyhow::Result<()> {
    for reversible in [true, false] {
        // Passing both `--timestamp` and `--sequential` is ambiguous, so the
        // CLI invocation is expected to fail (`expect_success = false`).
        let files = AddMigrations::new()?
            .run("hello world", reversible, true, true, false)?
            .fs_output()?;

        // A failed invocation must not leave any migration files behind.
        assert_eq!(files.0, Vec::<FileName>::new());
    }
    Ok(())
}
#[derive(Debug, PartialEq, Eq)]
struct FileName {
id: u64,
@@ -34,11 +25,6 @@ impl PartialOrd<Self> for FileName {
impl FileName {
fn assert_is_timestamp(&self) {
//if the library is still used in 2050, this will need bumping ^^
assert!(
self.id < 20500101000000,
"{self:?} is too high for a timestamp"
);
assert!(
self.id > 20200101000000,
"{self:?} is too low for a timestamp"
@@ -59,6 +45,154 @@ impl From<PathBuf> for FileName {
}
}
}
/// The set of migration files found on disk after one or more CLI invocations.
struct AddMigrationsResult(Vec<FileName>);

impl AddMigrationsResult {
    /// Number of migration files found.
    fn len(&self) -> usize {
        self.0.len()
    }

    /// Asserts every file belongs to an `up.sql`/`down.sql` pair with the
    /// expected description, and that up- and down-files are balanced.
    fn assert_is_reversible(&self) {
        let mut up_cnt = 0;
        let mut down_cnt = 0;

        for file in &self.0 {
            if file.suffix == "up.sql" {
                up_cnt += 1;
            } else if file.suffix == "down.sql" {
                down_cnt += 1;
            } else {
                panic!("unknown suffix for {file:?}");
            }

            assert!(file.description.starts_with("hello_world"));
        }

        assert_eq!(up_cnt, down_cnt);
    }

    /// Asserts every file is a plain `.sql` migration with the expected description.
    fn assert_is_not_reversible(&self) {
        for file in &self.0 {
            assert_eq!(file.suffix, "sql");
            assert!(file.description.starts_with("hello_world"));
        }
    }
}

impl Index<usize> for AddMigrationsResult {
    type Output = FileName;

    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index]
    }
}
/// Test harness state for `sqlx migrate add` invocations.
struct AddMigrations {
    // Temporary working directory the CLI runs in; migration files are created here.
    tempdir: TempDir,
    // Pre-formatted `--config=<path>` argument, if a config override was requested.
    config_arg: Option<String>,
}
/// Harness for `sqlx migrate add` tests: runs the CLI in a fresh temporary
/// directory and inspects the migration files it creates.
impl AddMigrations {
    /// Creates a harness backed by a new temporary directory with no config override.
    fn new() -> anyhow::Result<Self> {
        anyhow::Ok(Self {
            tempdir: TempDir::new()?,
            config_arg: None,
        })
    }

    /// Points the harness at a config file under `tests/assets/`, passed to the
    /// CLI via `--config`. The path is canonicalized so it stays valid after the
    /// working directory is changed to the temp dir.
    fn with_config(mut self, filename: &str) -> anyhow::Result<Self> {
        // Fix: interpolate the requested filename (the parameter was previously unused).
        let path = format!("./tests/assets/{filename}");

        let path = std::fs::canonicalize(&path)
            .with_context(|| format!("error canonicalizing path {path:?}"))?;

        let path = path
            .to_str()
            .with_context(|| format!("canonicalized version of path {path:?} is not UTF-8"))?;

        self.config_arg = Some(format!("--config={path}"));

        Ok(self)
    }

    /// Invokes `cargo sqlx migrate add <description>` with the given flag set
    /// and asserts on the process exit status per `expect_success`.
    fn run(
        &self,
        description: &str,
        reversible: bool,
        timestamp: bool,
        sequential: bool,
        expect_success: bool,
    ) -> anyhow::Result<&'_ Self> {
        let cmd_result = Command::cargo_bin("cargo-sqlx")?
            .current_dir(&self.tempdir)
            .args(
                [
                    vec!["sqlx", "migrate", "add", description],
                    // Forward `--config=<path>` only if `with_config` was called.
                    self.config_arg.as_deref().map_or(vec![], |arg| vec![arg]),
                    match reversible {
                        true => vec!["-r"],
                        false => vec![],
                    },
                    match timestamp {
                        true => vec!["--timestamp"],
                        false => vec![],
                    },
                    match sequential {
                        true => vec!["--sequential"],
                        false => vec![],
                    },
                ]
                .concat(),
            )
            // Backtraces make failures in the spawned CLI easier to diagnose in CI.
            .env("RUST_BACKTRACE", "1")
            .assert();

        if expect_success {
            cmd_result.success();
        } else {
            cmd_result.failure();
        }

        anyhow::Ok(self)
    }

    /// Collects all files created under the temp dir, sorted, as `FileName`s
    /// relative to the temp dir root.
    fn fs_output(&self) -> anyhow::Result<AddMigrationsResult> {
        let files = recurse_files(&self.tempdir)?;

        let mut fs_paths = Vec::with_capacity(files.len());

        for path in files {
            let relative_path = path.strip_prefix(self.tempdir.path())?.to_path_buf();

            fs_paths.push(FileName::from(relative_path));
        }

        Ok(AddMigrationsResult(fs_paths))
    }
}
/// Recursively collects every regular file under `path`, returned sorted.
fn recurse_files(path: impl AsRef<Path>) -> anyhow::Result<Vec<PathBuf>> {
    let mut found = Vec::new();

    for entry in read_dir(path)? {
        let entry = entry?;
        let metadata = entry.metadata()?;

        if metadata.is_file() {
            found.push(entry.path());
        }

        if metadata.is_dir() {
            // Recurse into subdirectories; the final sort below normalizes
            // whatever order the entries were appended in.
            found.extend(recurse_files(entry.path())?);
        }
    }

    found.sort();

    Ok(found)
}
#[test]
fn add_migration_error_ambiguous() -> anyhow::Result<()> {
    for reversible in [true, false] {
        // Passing both `--timestamp` and `--sequential` is contradictory and
        // must make the CLI invocation fail.
        let harness = AddMigrations::new()?;

        let files = harness
            .run("hello world", reversible, true, true, false)?
            .fs_output()?;

        // A failed invocation must not leave any files behind.
        assert_eq!(files.0, []);
    }

    Ok(())
}
#[test]
fn add_migration_sequential() -> anyhow::Result<()> {
{
@@ -74,10 +208,12 @@ fn add_migration_sequential() -> anyhow::Result<()> {
.run("hello world1", false, false, true, true)?
.run("hello world2", true, false, true, true)?
.fs_output()?;
assert_eq!(files.len(), 2);
files.assert_is_not_reversible();
assert_eq!(files.len(), 3);
assert_eq!(files.0[0].id, 1);
assert_eq!(files.0[1].id, 2);
assert_eq!(files.0[1].suffix, "down.sql");
assert_eq!(files.0[2].id, 2);
assert_eq!(files.0[2].suffix, "up.sql");
}
Ok(())
}
@@ -126,146 +262,145 @@ fn add_migration_timestamp() -> anyhow::Result<()> {
.run("hello world1", false, true, false, true)?
.run("hello world2", true, false, true, true)?
.fs_output()?;
assert_eq!(files.len(), 2);
files.assert_is_not_reversible();
assert_eq!(files.len(), 3);
files.0[0].assert_is_timestamp();
// sequential -> timestamp is one way
files.0[1].assert_is_timestamp();
files.0[2].assert_is_timestamp();
}
Ok(())
}
#[test]
fn add_migration_timestamp_reversible() -> anyhow::Result<()> {
{
let files = AddMigrations::new()?
.run("hello world", true, false, false, true)?
.fs_output()?;
assert_eq!(files.len(), 2);
files.assert_is_reversible();
files.0[0].assert_is_timestamp();
files.0[1].assert_is_timestamp();
// .up.sql and .down.sql
files[0].assert_is_timestamp();
assert_eq!(files[1].id, files[0].id);
}
{
let files = AddMigrations::new()?
.run("hello world", true, true, false, true)?
.fs_output()?;
assert_eq!(files.len(), 2);
files.assert_is_reversible();
files.0[0].assert_is_timestamp();
files.0[1].assert_is_timestamp();
// .up.sql and .down.sql
files[0].assert_is_timestamp();
assert_eq!(files[1].id, files[0].id);
}
{
let files = AddMigrations::new()?
.run("hello world1", true, true, false, true)?
.run("hello world2", true, false, true, true)?
// Reversible should be inferred, but sequential should be forced
.run("hello world2", false, false, true, true)?
.fs_output()?;
assert_eq!(files.len(), 4);
files.assert_is_reversible();
files.0[0].assert_is_timestamp();
files.0[1].assert_is_timestamp();
files.0[2].assert_is_timestamp();
files.0[3].assert_is_timestamp();
// First pair: .up.sql and .down.sql
files[0].assert_is_timestamp();
assert_eq!(files[1].id, files[0].id);
// Second pair; we set `--sequential` so this version should be one higher
assert_eq!(files[2].id, files[1].id + 1);
assert_eq!(files[3].id, files[1].id + 1);
}
Ok(())
}
struct AddMigrationsResult(Vec<FileName>);
impl AddMigrationsResult {
fn len(&self) -> usize {
self.0.len()
}
fn assert_is_reversible(&self) {
let mut up_cnt = 0;
let mut down_cnt = 0;
for file in self.0.iter() {
if file.suffix == "down.sql" {
down_cnt += 1;
} else if file.suffix == "up.sql" {
up_cnt += 1;
} else {
panic!("unknown suffix for {file:?}");
}
assert!(file.description.starts_with("hello_world"));
}
assert_eq!(up_cnt, down_cnt);
}
fn assert_is_not_reversible(&self) {
for file in self.0.iter() {
assert_eq!(file.suffix, "sql");
assert!(file.description.starts_with("hello_world"));
}
}
}
struct AddMigrations(TempDir);
#[test]
fn add_migration_config_default_type_reversible() -> anyhow::Result<()> {
let files = AddMigrations::new()?
.with_config("config_default_type_reversible.toml")?
// Type should default to reversible without any flags
.run("hello world", false, false, false, true)?
.run("hello world2", false, false, false, true)?
.run("hello world3", false, false, false, true)?
.fs_output()?;
impl AddMigrations {
fn new() -> anyhow::Result<Self> {
anyhow::Ok(Self(TempDir::new()?))
}
fn run(
self,
description: &str,
revesible: bool,
timestamp: bool,
sequential: bool,
expect_success: bool,
) -> anyhow::Result<Self> {
let cmd_result = Command::cargo_bin("cargo-sqlx")?
.current_dir(&self.0)
.args(
[
vec!["sqlx", "migrate", "add", description],
match revesible {
true => vec!["-r"],
false => vec![],
},
match timestamp {
true => vec!["--timestamp"],
false => vec![],
},
match sequential {
true => vec!["--sequential"],
false => vec![],
},
]
.concat(),
)
.assert();
if expect_success {
cmd_result.success();
} else {
cmd_result.failure();
}
anyhow::Ok(self)
}
fn fs_output(&self) -> anyhow::Result<AddMigrationsResult> {
let files = recurse_files(&self.0)?;
let mut fs_paths = Vec::with_capacity(files.len());
for path in files {
let relative_path = path.strip_prefix(self.0.path())?.to_path_buf();
fs_paths.push(FileName::from(relative_path));
}
Ok(AddMigrationsResult(fs_paths))
}
assert_eq!(files.len(), 6);
files.assert_is_reversible();
files[0].assert_is_timestamp();
assert_eq!(files[1].id, files[0].id);
files[2].assert_is_timestamp();
assert_eq!(files[3].id, files[2].id);
files[4].assert_is_timestamp();
assert_eq!(files[5].id, files[4].id);
Ok(())
}
fn recurse_files(path: impl AsRef<Path>) -> anyhow::Result<Vec<PathBuf>> {
let mut buf = vec![];
let entries = read_dir(path)?;
#[test]
fn add_migration_config_default_versioning_sequential() -> anyhow::Result<()> {
let files = AddMigrations::new()?
.with_config("config_default_versioning_sequential.toml")?
// Versioning should default to timestamp without any flags
.run("hello world", false, false, false, true)?
.run("hello world2", false, false, false, true)?
.run("hello world3", false, false, false, true)?
.fs_output()?;
for entry in entries {
let entry = entry?;
let meta = entry.metadata()?;
assert_eq!(files.len(), 3);
files.assert_is_not_reversible();
if meta.is_dir() {
let mut subdir = recurse_files(entry.path())?;
buf.append(&mut subdir);
}
assert_eq!(files[0].id, 1);
assert_eq!(files[1].id, 2);
assert_eq!(files[2].id, 3);
if meta.is_file() {
buf.push(entry.path());
}
}
buf.sort();
Ok(buf)
Ok(())
}
#[test]
fn add_migration_config_default_versioning_timestamp() -> anyhow::Result<()> {
    let migrations = AddMigrations::new()?;

    // First migration is explicitly `--sequential`; the next two pass no flags.
    migrations
        .run("hello world", false, false, true, true)?
        // Default config should infer sequential even without passing `--sequential`
        .run("hello world2", false, false, false, true)?
        .run("hello world3", false, false, false, true)?;

    let files = migrations.fs_output()?;
    assert_eq!(files.len(), 3);
    files.assert_is_not_reversible();

    assert_eq!(files[0].id, 1);
    assert_eq!(files[1].id, 2);
    assert_eq!(files[2].id, 3);

    // Now set a config that uses `default-versioning = "timestamp"`
    let migrations = migrations.with_config("config_default_versioning_timestamp.toml")?;

    // Now the default should be a timestamp
    migrations
        .run("hello world4", false, false, false, true)?
        .run("hello world5", false, false, false, true)?;

    let files = migrations.fs_output()?;
    assert_eq!(files.len(), 5);
    files.assert_is_not_reversible();

    // The previously created sequential migrations are still present...
    assert_eq!(files[0].id, 1);
    assert_eq!(files[1].id, 2);
    assert_eq!(files[2].id, 3);

    // ...and the new ones use timestamp versions despite the existing sequential ones.
    files[3].assert_is_timestamp();
    files[4].assert_is_timestamp();

    Ok(())
}

View File

@@ -0,0 +1,2 @@
[migrate.defaults]
migration-type = "reversible"

View File

@@ -0,0 +1,2 @@
[migrate.defaults]
migration-versioning = "sequential"

View File

@@ -0,0 +1,2 @@
[migrate.defaults]
migration-versioning = "timestamp"

View File

@@ -1,25 +1,41 @@
use assert_cmd::{assert::Assert, Command};
use sqlx::_unstable::config::Config;
use sqlx::{migrate::Migrate, Connection, SqliteConnection};
use std::{
env::temp_dir,
fs::remove_file,
env, fs,
path::{Path, PathBuf},
};
pub struct TestDatabase {
file_path: PathBuf,
migrations: String,
migrations_path: PathBuf,
pub config_path: Option<PathBuf>,
}
impl TestDatabase {
pub fn new(name: &str, migrations: &str) -> Self {
let migrations_path = Path::new("tests").join(migrations);
let file_path = Path::new(&temp_dir()).join(format!("test-{}.db", name));
let ret = Self {
// Note: only set when _building_
let temp_dir = option_env!("CARGO_TARGET_TMPDIR").map_or_else(env::temp_dir, PathBuf::from);
let test_dir = temp_dir.join("migrate");
fs::create_dir_all(&test_dir)
.unwrap_or_else(|e| panic!("error creating directory: {test_dir:?}: {e}"));
let file_path = test_dir.join(format!("test-{name}.db"));
if file_path.exists() {
fs::remove_file(&file_path)
.unwrap_or_else(|e| panic!("error deleting test database {file_path:?}: {e}"));
}
let this = Self {
file_path,
migrations: String::from(migrations_path.to_str().unwrap()),
migrations_path: Path::new("tests").join(migrations),
config_path: None,
};
Command::cargo_bin("cargo-sqlx")
.unwrap()
.args([
@@ -27,11 +43,15 @@ impl TestDatabase {
"database",
"create",
"--database-url",
&ret.connection_string(),
&this.connection_string(),
])
.assert()
.success();
ret
this
}
pub fn set_migrations(&mut self, migrations: &str) {
self.migrations_path = Path::new("tests").join(migrations);
}
pub fn connection_string(&self) -> String {
@@ -39,55 +59,77 @@ impl TestDatabase {
}
pub fn run_migration(&self, revert: bool, version: Option<i64>, dry_run: bool) -> Assert {
let ver = match version {
Some(v) => v.to_string(),
None => String::from(""),
};
Command::cargo_bin("cargo-sqlx")
.unwrap()
.args(
[
vec![
"sqlx",
"migrate",
match revert {
true => "revert",
false => "run",
},
"--database-url",
&self.connection_string(),
"--source",
&self.migrations,
],
match version {
Some(_) => vec!["--target-version", &ver],
None => vec![],
},
match dry_run {
true => vec!["--dry-run"],
false => vec![],
},
]
.concat(),
)
.assert()
let mut command = Command::cargo_bin("sqlx").unwrap();
command
.args([
"migrate",
match revert {
true => "revert",
false => "run",
},
"--database-url",
&self.connection_string(),
"--source",
])
.arg(&self.migrations_path);
if let Some(config_path) = &self.config_path {
command.arg("--config").arg(config_path);
}
if let Some(version) = version {
command.arg("--target-version").arg(version.to_string());
}
if dry_run {
command.arg("--dry-run");
}
command.assert()
}
pub async fn applied_migrations(&self) -> Vec<i64> {
let mut conn = SqliteConnection::connect(&self.connection_string())
.await
.unwrap();
conn.list_applied_migrations()
let config = Config::default();
conn.list_applied_migrations(config.migrate.table_name())
.await
.unwrap()
.iter()
.map(|m| m.version)
.collect()
}
/// Runs `sqlx migrate info` against this test database and returns the
/// command assertion for the caller to inspect.
pub fn migrate_info(&self) -> Assert {
    let mut command = Command::cargo_bin("sqlx").unwrap();

    command
        .args([
            "migrate",
            "info",
            "--database-url",
            &self.connection_string(),
            "--source",
        ])
        // `--source` takes the migrations directory path as its value.
        .arg(&self.migrations_path);

    // Forward the config override, if one was set on this test database.
    if let Some(config_path) = &self.config_path {
        command.arg("--config").arg(config_path);
    }

    command.assert()
}
}
impl Drop for TestDatabase {
fn drop(&mut self) {
remove_file(&self.file_path).unwrap();
// Only remove the database if there isn't a failure.
if !std::thread::panicking() {
fs::remove_file(&self.file_path).unwrap_or_else(|e| {
panic!("error deleting test database {:?}: {e}", self.file_path)
});
}
}
}

View File

@@ -0,0 +1 @@
*.sql text eol=lf

View File

@@ -0,0 +1,6 @@
create table user
(
-- integer primary keys are the most efficient in SQLite
user_id integer primary key,
username text unique not null
);

View File

@@ -0,0 +1,10 @@
create table post
(
post_id integer primary key,
user_id integer not null references user (user_id),
content text not null,
-- Defaults have to be wrapped in parenthesis
created_at datetime default (datetime('now'))
);
create index post_created_at on post (created_at desc);

View File

@@ -0,0 +1,10 @@
create table comment
(
comment_id integer primary key,
post_id integer not null references post (post_id),
user_id integer not null references "user" (user_id),
content text not null,
created_at datetime default (datetime('now'))
);
create index comment_created_at on comment (created_at desc);

View File

@@ -0,0 +1 @@
*.sql text eol=crlf

View File

@@ -0,0 +1,6 @@
create table user
(
-- integer primary keys are the most efficient in SQLite
user_id integer primary key,
username text unique not null
);

View File

@@ -0,0 +1,10 @@
create table post
(
post_id integer primary key,
user_id integer not null references user (user_id),
content text not null,
-- Defaults have to be wrapped in parenthesis
created_at datetime default (datetime('now'))
);
create index post_created_at on post (created_at desc);

View File

@@ -0,0 +1,10 @@
create table comment
(
comment_id integer primary key,
post_id integer not null references post (post_id),
user_id integer not null references "user" (user_id),
content text not null,
created_at datetime default (datetime('now'))
);
create index comment_created_at on comment (created_at desc);

View File

@@ -0,0 +1 @@
*.sql text eol=lf

View File

@@ -0,0 +1,6 @@
create table user
(
-- integer primary keys are the most efficient in SQLite
user_id integer primary key,
username text unique not null
);

View File

@@ -0,0 +1,10 @@
create table post
(
post_id integer primary key,
user_id integer not null references user (user_id),
content text not null,
-- Defaults have to be wrapped in parenthesis
created_at datetime default (datetime('now'))
);
create index post_created_at on post (created_at desc);

View File

@@ -0,0 +1,10 @@
create table comment
(
comment_id integer primary key,
post_id integer not null references post (post_id),
user_id integer not null references "user" (user_id),
content text not null,
created_at datetime default (datetime('now'))
);
create index comment_created_at on comment (created_at desc);

View File

@@ -0,0 +1 @@
*.sql text eol=lf

View File

@@ -0,0 +1,6 @@
create table user
(
-- integer primary keys are the most efficient in SQLite
user_id integer primary key,
username text unique not null
);

View File

@@ -0,0 +1,10 @@
create table post
(
post_id integer primary key,
user_id integer not null references user (user_id),
content text not null,
-- Defaults have to be wrapped in parenthesis
created_at datetime default (datetime('now'))
);
create index post_created_at on post (created_at desc);

View File

@@ -0,0 +1,10 @@
create table comment
(
comment_id integer primary key,
post_id integer not null references post (post_id),
user_id integer not null references "user" (user_id),
content text not null,
created_at datetime default (datetime('now'))
);
create index comment_created_at on comment (created_at desc);

View File

@@ -0,0 +1,7 @@
[migrate]
# Ignore common whitespace characters (beware syntactically significant whitespace!)
# Space, tab, CR, LF, zero-width non-breaking space (U+FEFF)
#
# U+FEFF is added by some editors as a magic number at the beginning of a text file indicating it is UTF-8 encoded,
# where it is known as a byte-order mark (BOM): https://en.wikipedia.org/wiki/Byte_order_mark
ignored-chars = [" ", "\t", "\r", "\n", "\uFEFF"]

View File

@@ -13,16 +13,13 @@ async fn run_reversible_migrations() {
];
// Without --target-version specified.
{
let db = TestDatabase::new("migrate_run_reversible_latest", "migrations_reversible");
let db = TestDatabase::new("run_reversible_latest", "migrations_reversible");
db.run_migration(false, None, false).success();
assert_eq!(db.applied_migrations().await, all_migrations);
}
// With --target-version specified.
{
let db = TestDatabase::new(
"migrate_run_reversible_latest_explicit",
"migrations_reversible",
);
let db = TestDatabase::new("run_reversible_latest_explicit", "migrations_reversible");
// Move to latest, explicitly specified.
db.run_migration(false, Some(20230501000000), false)
@@ -41,10 +38,7 @@ async fn run_reversible_migrations() {
}
// With --target-version, incrementally upgrade.
{
let db = TestDatabase::new(
"migrate_run_reversible_incremental",
"migrations_reversible",
);
let db = TestDatabase::new("run_reversible_incremental", "migrations_reversible");
// First version
db.run_migration(false, Some(20230101000000), false)
@@ -92,7 +86,7 @@ async fn revert_migrations() {
// Without --target-version
{
let db = TestDatabase::new("migrate_revert_incremental", "migrations_reversible");
let db = TestDatabase::new("revert_incremental", "migrations_reversible");
db.run_migration(false, None, false).success();
// Dry-run
@@ -109,7 +103,7 @@ async fn revert_migrations() {
}
// With --target-version
{
let db = TestDatabase::new("migrate_revert_incremental", "migrations_reversible");
let db = TestDatabase::new("revert_incremental", "migrations_reversible");
db.run_migration(false, None, false).success();
// Dry-run downgrade to version 3.
@@ -142,6 +136,32 @@ async fn revert_migrations() {
// Downgrade to zero.
db.run_migration(true, Some(0), false).success();
assert_eq!(db.applied_migrations().await, vec![] as Vec<i64>);
assert_eq!(db.applied_migrations().await, Vec::<i64>::new());
}
}
#[tokio::test]
async fn ignored_chars() {
    // Apply the LF-normalized copies of the migrations first, establishing
    // the checksums recorded in the database.
    let mut db = TestDatabase::new("ignored-chars", "ignored-chars/LF");
    db.config_path = Some("tests/ignored-chars/sqlx.toml".into());

    db.run_migration(false, None, false).success();

    db.set_migrations("ignored-chars/CRLF");

    let expected_info = "1/installed user\n2/installed post\n3/installed comment\n";

    // `ignored-chars` should produce the same migration checksum here
    db.migrate_info().success().stdout(expected_info);

    // Running migration should be a no-op
    db.run_migration(false, None, false).success().stdout("");

    // Same expectations for the BOM-prefixed copies...
    db.set_migrations("ignored-chars/BOM");

    db.migrate_info().success().stdout(expected_info);
    db.run_migration(false, None, false).success().stdout("");

    // ...and for the `oops-all-tabs` variant (presumably whitespace-substituted
    // copies of the same migrations — not shown in this view).
    db.set_migrations("ignored-chars/oops-all-tabs");

    db.migrate_info().success().stdout(expected_info);
    db.run_migration(false, None, false).success().stdout("");
}