feat: create sqlx.toml format (#3383)

* feat: create `sqlx.toml` format

* feat: add support for ignored_chars config to sqlx_core::migrate

* chore: test ignored_chars with `U+FEFF` (ZWNBSP/BOM)

https://en.wikipedia.org/wiki/Byte_order_mark

* refactor: make `Config` always compiled

simplifies usage while still making parsing optional for less generated code

* refactor: add origin information to `Column`

* feat(macros): implement `type_override` and `column_override` from `sqlx.toml`

* refactor(sqlx.toml): make all keys kebab-case, create `macros.preferred-crates`

* feat: make macros aware of `macros.preferred-crates`

* feat: make `sqlx-cli` aware of `database-url-var`

* feat: teach macros about `migrate.table-name`, `migrations-dir`

* feat: teach macros about `migrate.ignored-chars`

* chore: delete unused source file `sqlx-cli/src/migration.rs`

* feat: teach `sqlx-cli` about `migrate.defaults`

* feat: teach `sqlx-cli` about `migrate.migrations-dir`

* feat: teach `sqlx-cli` about `migrate.table-name`

* feat: introduce `migrate.create-schemas`

* WIP feat: create multi-tenant database example

* fix(postgres): don't fetch `ColumnOrigin` for transparently-prepared statements

* feat: progress on axum-multi-tenant example

* feat(config): better errors for mislabeled fields

* WIP feat: filling out axum-multi-tenant example

* feat: multi-tenant example

No longer Axum-based because filling out the request routes would have distracted from the purpose of the example.

* chore(ci): test multi-tenant example

* fixup after merge

* fix(ci): enable `sqlx-toml` in CLI build for examples

* fix: CI, README for `multi-tenant`

* fix: clippy warnings

* fix: multi-tenant README

* fix: sequential versioning inference for migrations

* fix: migration versioning with explicit overrides

* fix: only warn on ambiguous crates if the invocation relies on it

* fix: remove unused imports

* fix: doctest

* fix: `sqlx mig add` behavior and tests

* fix: restore original type-checking order

* fix: deprecation warning in `tests/postgres/macros.rs`

* feat: create postgres/multi-database example

* fix: examples/postgres/multi-database

* fix: cargo fmt

* chore: add tests for config `migrate.defaults`

* fix: sqlx-cli/tests/add.rs

* feat(cli): add `--config` override to all relevant commands

* chore: run `sqlx mig add` test with `RUST_BACKTRACE=1`

* fix: properly canonicalize config path for `sqlx mig add` test

* fix: get `sqlx mig add` test passing

* fix(cli): test `migrate.ignored-chars`, fix bugs

* feat: create `macros.preferred-crates` example

* fix(examples): use workspace `sqlx`

* fix: examples

* fix(sqlite): unexpected feature flags in `type_checking.rs`

* fix: run `cargo fmt`

* fix: more example fixes

* fix(ci): preferred-crates setup

* fix(examples): enable default-features for workspace `sqlx`

* fix(examples): issues in `preferred-crates`

* chore: adjust error message for missing param type in `query!()`

* doc: mention new `sqlx.toml` configuration

* chore: add `CHANGELOG` entry

Normally I generate these when cutting the release, but I wanted to take time to editorialize this one.

* doc: fix new example titles

* refactor: make `sqlx-toml` feature non-default, improve errors

* refactor: eliminate panics in `Config` read path

* chore: remove unused `axum` dependency from new examples

* fix(config): restore fallback to default config for macros

* chore(config): remove use of `once_cell` (to match `main`)
This commit is contained in:
Austin Bonander
2025-06-30 16:34:46 -07:00
committed by GitHub
parent 764ae2f702
commit 25cbeedab4
127 changed files with 6443 additions and 1138 deletions

View File

@@ -1,6 +1,7 @@
use crate::ext::ustr::UStr;
use crate::{PgTypeInfo, Postgres};
use sqlx_core::column::ColumnOrigin;
pub(crate) use sqlx_core::column::{Column, ColumnIndex};
#[derive(Debug, Clone)]
@@ -9,6 +10,10 @@ pub struct PgColumn {
pub(crate) ordinal: usize,
pub(crate) name: UStr,
pub(crate) type_info: PgTypeInfo,
#[cfg_attr(feature = "offline", serde(default))]
pub(crate) origin: ColumnOrigin,
#[cfg_attr(feature = "offline", serde(skip))]
pub(crate) relation_id: Option<crate::types::Oid>,
#[cfg_attr(feature = "offline", serde(skip))]
@@ -51,4 +56,8 @@ impl Column for PgColumn {
fn type_info(&self) -> &PgTypeInfo {
&self.type_info
}
fn origin(&self) -> ColumnOrigin {
self.origin.clone()
}
}

View File

@@ -1,3 +1,4 @@
use crate::connection::TableColumns;
use crate::error::Error;
use crate::ext::ustr::UStr;
use crate::io::StatementId;
@@ -11,6 +12,7 @@ use crate::types::Oid;
use crate::HashMap;
use crate::{PgColumn, PgConnection, PgTypeInfo};
use smallvec::SmallVec;
use sqlx_core::column::{ColumnOrigin, TableColumn};
use sqlx_core::query_builder::QueryBuilder;
use std::sync::Arc;
@@ -100,7 +102,8 @@ impl PgConnection {
pub(super) async fn handle_row_description(
&mut self,
desc: Option<RowDescription>,
should_fetch: bool,
fetch_type_info: bool,
fetch_column_description: bool,
) -> Result<(Vec<PgColumn>, HashMap<UStr, usize>), Error> {
let mut columns = Vec::new();
let mut column_names = HashMap::new();
@@ -119,15 +122,25 @@ impl PgConnection {
let name = UStr::from(field.name);
let type_info = self
.maybe_fetch_type_info_by_oid(field.data_type_id, should_fetch)
.maybe_fetch_type_info_by_oid(field.data_type_id, fetch_type_info)
.await?;
let origin = if let (Some(relation_oid), Some(attribute_no)) =
(field.relation_id, field.relation_attribute_no)
{
self.maybe_fetch_column_origin(relation_oid, attribute_no, fetch_column_description)
.await?
} else {
ColumnOrigin::Expression
};
let column = PgColumn {
ordinal: index,
name: name.clone(),
type_info,
relation_id: field.relation_id,
relation_attribute_no: field.relation_attribute_no,
origin,
};
columns.push(column);
@@ -190,6 +203,69 @@ impl PgConnection {
}
}
async fn maybe_fetch_column_origin(
&mut self,
relation_id: Oid,
attribute_no: i16,
should_fetch: bool,
) -> Result<ColumnOrigin, Error> {
if let Some(origin) = self
.inner
.cache_table_to_column_names
.get(&relation_id)
.and_then(|table_columns| {
let column_name = table_columns.columns.get(&attribute_no).cloned()?;
Some(ColumnOrigin::Table(TableColumn {
table: table_columns.table_name.clone(),
name: column_name,
}))
})
{
return Ok(origin);
}
if !should_fetch {
return Ok(ColumnOrigin::Unknown);
}
// Looking up the table name _may_ end up being redundant,
// but the round-trip to the server is by far the most expensive part anyway.
let Some((table_name, column_name)): Option<(String, String)> = query_as(
// language=PostgreSQL
"SELECT $1::oid::regclass::text, attname \
FROM pg_catalog.pg_attribute \
WHERE attrelid = $1 AND attnum = $2",
)
.bind(relation_id)
.bind(attribute_no)
.fetch_optional(&mut *self)
.await?
else {
// The column/table doesn't exist anymore for whatever reason.
return Ok(ColumnOrigin::Unknown);
};
let table_columns = self
.inner
.cache_table_to_column_names
.entry(relation_id)
.or_insert_with(|| TableColumns {
table_name: table_name.into(),
columns: Default::default(),
});
let column_name = table_columns
.columns
.entry(attribute_no)
.or_insert(column_name.into());
Ok(ColumnOrigin::Table(TableColumn {
table: table_columns.table_name.clone(),
name: Arc::clone(column_name),
}))
}
async fn fetch_type_by_oid(&mut self, oid: Oid) -> Result<PgTypeInfo, Error> {
let (name, typ_type, category, relation_id, element, base_type): (
String,

View File

@@ -148,6 +148,7 @@ impl PgConnection {
cache_type_oid: HashMap::new(),
cache_type_info: HashMap::new(),
cache_elem_type_to_array: HashMap::new(),
cache_table_to_column_names: HashMap::new(),
log_settings: options.log_settings.clone(),
}),
})

View File

@@ -26,6 +26,7 @@ async fn prepare(
parameters: &[PgTypeInfo],
metadata: Option<Arc<PgStatementMetadata>>,
persistent: bool,
fetch_column_origin: bool,
) -> Result<(StatementId, Arc<PgStatementMetadata>), Error> {
let id = if persistent {
let id = conn.inner.next_statement_id;
@@ -85,7 +86,9 @@ async fn prepare(
let parameters = conn.handle_parameter_description(parameters).await?;
let (columns, column_names) = conn.handle_row_description(rows, true).await?;
let (columns, column_names) = conn
.handle_row_description(rows, true, fetch_column_origin)
.await?;
// ensure that if we did fetch custom data, we wait until we are fully ready before
// continuing
@@ -173,12 +176,21 @@ impl PgConnection {
// optional metadata that was provided by the user, this means they are reusing
// a statement object
metadata: Option<Arc<PgStatementMetadata>>,
fetch_column_origin: bool,
) -> Result<(StatementId, Arc<PgStatementMetadata>), Error> {
if let Some(statement) = self.inner.cache_statement.get_mut(sql) {
return Ok((*statement).clone());
}
let statement = prepare(self, sql, parameters, metadata, persistent).await?;
let statement = prepare(
self,
sql,
parameters,
metadata,
persistent,
fetch_column_origin,
)
.await?;
if persistent && self.inner.cache_statement.is_enabled() {
if let Some((id, _)) = self.inner.cache_statement.insert(sql, statement.clone()) {
@@ -226,7 +238,7 @@ impl PgConnection {
// prepare the statement if this our first time executing it
// always return the statement ID here
let (statement, metadata_) = self
.get_or_prepare(query, &arguments.types, persistent, metadata_opt)
.get_or_prepare(query, &arguments.types, persistent, metadata_opt, false)
.await?;
metadata = metadata_;
@@ -333,7 +345,7 @@ impl PgConnection {
BackendMessageFormat::RowDescription => {
// indicates that a *new* set of rows are about to be returned
let (columns, column_names) = self
.handle_row_description(Some(message.decode()?), false)
.handle_row_description(Some(message.decode()?), false, false)
.await?;
metadata = Arc::new(PgStatementMetadata {
@@ -453,7 +465,9 @@ impl<'c> Executor<'c> for &'c mut PgConnection {
Box::pin(async move {
self.wait_until_ready().await?;
let (_, metadata) = self.get_or_prepare(sql, parameters, true, None).await?;
let (_, metadata) = self
.get_or_prepare(sql, parameters, true, None, true)
.await?;
Ok(PgStatement {
sql: Cow::Borrowed(sql),
@@ -472,7 +486,7 @@ impl<'c> Executor<'c> for &'c mut PgConnection {
Box::pin(async move {
self.wait_until_ready().await?;
let (stmt_id, metadata) = self.get_or_prepare(sql, &[], true, None).await?;
let (stmt_id, metadata) = self.get_or_prepare(sql, &[], true, None, true).await?;
let nullable = self.get_nullable_for_columns(stmt_id, &metadata).await?;

View File

@@ -1,4 +1,5 @@
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::fmt::{self, Debug, Formatter};
use std::sync::Arc;
@@ -64,6 +65,7 @@ pub struct PgConnectionInner {
cache_type_info: HashMap<Oid, PgTypeInfo>,
cache_type_oid: HashMap<UStr, Oid>,
cache_elem_type_to_array: HashMap<Oid, Oid>,
cache_table_to_column_names: HashMap<Oid, TableColumns>,
// number of ReadyForQuery messages that we are currently expecting
pub(crate) pending_ready_for_query_count: usize,
@@ -75,6 +77,12 @@ pub struct PgConnectionInner {
log_settings: LogSettings,
}
pub(crate) struct TableColumns {
table_name: Arc<str>,
/// Attribute number -> name.
columns: BTreeMap<i16, Arc<str>>,
}
impl PgConnection {
/// the version number of the server in `libpq` format
pub fn server_version_num(&self) -> Option<u32> {

View File

@@ -111,12 +111,28 @@ impl MigrateDatabase for Postgres {
}
impl Migrate for PgConnection {
fn ensure_migrations_table(&mut self) -> BoxFuture<'_, Result<(), MigrateError>> {
fn create_schema_if_not_exists<'e>(
&'e mut self,
schema_name: &'e str,
) -> BoxFuture<'e, Result<(), MigrateError>> {
Box::pin(async move {
// language=SQL
self.execute(
self.execute(&*format!(r#"CREATE SCHEMA IF NOT EXISTS {schema_name};"#))
.await?;
Ok(())
})
}
fn ensure_migrations_table<'e>(
&'e mut self,
table_name: &'e str,
) -> BoxFuture<'e, Result<(), MigrateError>> {
Box::pin(async move {
// language=SQL
self.execute(&*format!(
r#"
CREATE TABLE IF NOT EXISTS _sqlx_migrations (
CREATE TABLE IF NOT EXISTS {table_name} (
version BIGINT PRIMARY KEY,
description TEXT NOT NULL,
installed_on TIMESTAMPTZ NOT NULL DEFAULT now(),
@@ -124,20 +140,23 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
checksum BYTEA NOT NULL,
execution_time BIGINT NOT NULL
);
"#,
)
"#
))
.await?;
Ok(())
})
}
fn dirty_version(&mut self) -> BoxFuture<'_, Result<Option<i64>, MigrateError>> {
fn dirty_version<'e>(
&'e mut self,
table_name: &'e str,
) -> BoxFuture<'e, Result<Option<i64>, MigrateError>> {
Box::pin(async move {
// language=SQL
let row: Option<(i64,)> = query_as(
"SELECT version FROM _sqlx_migrations WHERE success = false ORDER BY version LIMIT 1",
)
let row: Option<(i64,)> = query_as(&format!(
"SELECT version FROM {table_name} WHERE success = false ORDER BY version LIMIT 1"
))
.fetch_optional(self)
.await?;
@@ -145,15 +164,17 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
})
}
fn list_applied_migrations(
&mut self,
) -> BoxFuture<'_, Result<Vec<AppliedMigration>, MigrateError>> {
fn list_applied_migrations<'e>(
&'e mut self,
table_name: &'e str,
) -> BoxFuture<'e, Result<Vec<AppliedMigration>, MigrateError>> {
Box::pin(async move {
// language=SQL
let rows: Vec<(i64, Vec<u8>)> =
query_as("SELECT version, checksum FROM _sqlx_migrations ORDER BY version")
.fetch_all(self)
.await?;
let rows: Vec<(i64, Vec<u8>)> = query_as(&format!(
"SELECT version, checksum FROM {table_name} ORDER BY version"
))
.fetch_all(self)
.await?;
let migrations = rows
.into_iter()
@@ -203,16 +224,17 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
})
}
fn apply<'e: 'm, 'm>(
fn apply<'e>(
&'e mut self,
migration: &'m Migration,
) -> BoxFuture<'m, Result<Duration, MigrateError>> {
table_name: &'e str,
migration: &'e Migration,
) -> BoxFuture<'e, Result<Duration, MigrateError>> {
Box::pin(async move {
let start = Instant::now();
// execute migration queries
if migration.no_tx {
execute_migration(self, migration).await?;
execute_migration(self, table_name, migration).await?;
} else {
// Use a single transaction for the actual migration script and the essential bookkeeping so we never
// execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966.
@@ -220,7 +242,7 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
// data lineage and debugging reasons, so it is not super important if it is lost. So we initialize it to -1
// and update it once the actual transaction completed.
let mut tx = self.begin().await?;
execute_migration(&mut tx, migration).await?;
execute_migration(&mut tx, table_name, migration).await?;
tx.commit().await?;
}
@@ -231,13 +253,13 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
// language=SQL
#[allow(clippy::cast_possible_truncation)]
let _ = query(
let _ = query(&format!(
r#"
UPDATE _sqlx_migrations
UPDATE {table_name}
SET execution_time = $1
WHERE version = $2
"#,
)
"#
))
.bind(elapsed.as_nanos() as i64)
.bind(migration.version)
.execute(self)
@@ -247,21 +269,22 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
})
}
fn revert<'e: 'm, 'm>(
fn revert<'e>(
&'e mut self,
migration: &'m Migration,
) -> BoxFuture<'m, Result<Duration, MigrateError>> {
table_name: &'e str,
migration: &'e Migration,
) -> BoxFuture<'e, Result<Duration, MigrateError>> {
Box::pin(async move {
let start = Instant::now();
// execute migration queries
if migration.no_tx {
revert_migration(self, migration).await?;
revert_migration(self, table_name, migration).await?;
} else {
// Use a single transaction for the actual migration script and the essential bookkeeping so we never
// execute migrations twice. See https://github.com/launchbadge/sqlx/issues/1966.
let mut tx = self.begin().await?;
revert_migration(&mut tx, migration).await?;
revert_migration(&mut tx, table_name, migration).await?;
tx.commit().await?;
}
@@ -274,6 +297,7 @@ CREATE TABLE IF NOT EXISTS _sqlx_migrations (
async fn execute_migration(
conn: &mut PgConnection,
table_name: &str,
migration: &Migration,
) -> Result<(), MigrateError> {
let _ = conn
@@ -282,12 +306,12 @@ async fn execute_migration(
.map_err(|e| MigrateError::ExecuteMigration(e, migration.version))?;
// language=SQL
let _ = query(
let _ = query(&format!(
r#"
INSERT INTO _sqlx_migrations ( version, description, success, checksum, execution_time )
INSERT INTO {table_name} ( version, description, success, checksum, execution_time )
VALUES ( $1, $2, TRUE, $3, -1 )
"#,
)
"#
))
.bind(migration.version)
.bind(&*migration.description)
.bind(&*migration.checksum)
@@ -299,6 +323,7 @@ async fn execute_migration(
async fn revert_migration(
conn: &mut PgConnection,
table_name: &str,
migration: &Migration,
) -> Result<(), MigrateError> {
let _ = conn
@@ -307,7 +332,7 @@ async fn revert_migration(
.map_err(|e| MigrateError::ExecuteMigration(e, migration.version))?;
// language=SQL
let _ = query(r#"DELETE FROM _sqlx_migrations WHERE version = $1"#)
let _ = query(&format!(r#"DELETE FROM {table_name} WHERE version = $1"#))
.bind(migration.version)
.execute(conn)
.await?;

View File

@@ -49,42 +49,6 @@ impl_type_checking!(
#[cfg(feature = "uuid")]
sqlx::types::Uuid,
#[cfg(all(feature = "chrono", not(feature = "time")))]
sqlx::types::chrono::NaiveTime,
#[cfg(all(feature = "chrono", not(feature = "time")))]
sqlx::types::chrono::NaiveDate,
#[cfg(all(feature = "chrono", not(feature = "time")))]
sqlx::types::chrono::NaiveDateTime,
#[cfg(all(feature = "chrono", not(feature = "time")))]
sqlx::types::chrono::DateTime<sqlx::types::chrono::Utc> | sqlx::types::chrono::DateTime<_>,
#[cfg(all(feature = "chrono", not(feature = "time")))]
sqlx::postgres::types::PgTimeTz<sqlx::types::chrono::NaiveTime, sqlx::types::chrono::FixedOffset>,
#[cfg(feature = "time")]
sqlx::types::time::Time,
#[cfg(feature = "time")]
sqlx::types::time::Date,
#[cfg(feature = "time")]
sqlx::types::time::PrimitiveDateTime,
#[cfg(feature = "time")]
sqlx::types::time::OffsetDateTime,
#[cfg(feature = "time")]
sqlx::postgres::types::PgTimeTz<sqlx::types::time::Time, sqlx::types::time::UtcOffset>,
#[cfg(feature = "bigdecimal")]
sqlx::types::BigDecimal,
#[cfg(feature = "rust_decimal")]
sqlx::types::Decimal,
#[cfg(feature = "ipnetwork")]
sqlx::types::ipnetwork::IpNetwork,
@@ -119,36 +83,6 @@ impl_type_checking!(
#[cfg(feature = "uuid")]
Vec<sqlx::types::Uuid> | &[sqlx::types::Uuid],
#[cfg(all(feature = "chrono", not(feature = "time")))]
Vec<sqlx::types::chrono::NaiveTime> | &[sqlx::types::chrono::NaiveTime],
#[cfg(all(feature = "chrono", not(feature = "time")))]
Vec<sqlx::types::chrono::NaiveDate> | &[sqlx::types::chrono::NaiveDate],
#[cfg(all(feature = "chrono", not(feature = "time")))]
Vec<sqlx::types::chrono::NaiveDateTime> | &[sqlx::types::chrono::NaiveDateTime],
#[cfg(all(feature = "chrono", not(feature = "time")))]
Vec<sqlx::types::chrono::DateTime<sqlx::types::chrono::Utc>> | &[sqlx::types::chrono::DateTime<_>],
#[cfg(feature = "time")]
Vec<sqlx::types::time::Time> | &[sqlx::types::time::Time],
#[cfg(feature = "time")]
Vec<sqlx::types::time::Date> | &[sqlx::types::time::Date],
#[cfg(feature = "time")]
Vec<sqlx::types::time::PrimitiveDateTime> | &[sqlx::types::time::PrimitiveDateTime],
#[cfg(feature = "time")]
Vec<sqlx::types::time::OffsetDateTime> | &[sqlx::types::time::OffsetDateTime],
#[cfg(feature = "bigdecimal")]
Vec<sqlx::types::BigDecimal> | &[sqlx::types::BigDecimal],
#[cfg(feature = "rust_decimal")]
Vec<sqlx::types::Decimal> | &[sqlx::types::Decimal],
#[cfg(feature = "ipnetwork")]
Vec<sqlx::types::ipnetwork::IpNetwork> | &[sqlx::types::ipnetwork::IpNetwork],
@@ -168,72 +102,114 @@ impl_type_checking!(
sqlx::postgres::types::PgRange<i32>,
sqlx::postgres::types::PgRange<i64>,
#[cfg(feature = "bigdecimal")]
sqlx::postgres::types::PgRange<sqlx::types::BigDecimal>,
#[cfg(feature = "rust_decimal")]
sqlx::postgres::types::PgRange<sqlx::types::Decimal>,
#[cfg(all(feature = "chrono", not(feature = "time")))]
sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDate>,
#[cfg(all(feature = "chrono", not(feature = "time")))]
sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDateTime>,
#[cfg(all(feature = "chrono", not(feature = "time")))]
sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<sqlx::types::chrono::Utc>> |
sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<_>>,
#[cfg(feature = "time")]
sqlx::postgres::types::PgRange<sqlx::types::time::Date>,
#[cfg(feature = "time")]
sqlx::postgres::types::PgRange<sqlx::types::time::PrimitiveDateTime>,
#[cfg(feature = "time")]
sqlx::postgres::types::PgRange<sqlx::types::time::OffsetDateTime>,
// Range arrays
Vec<sqlx::postgres::types::PgRange<i32>> | &[sqlx::postgres::types::PgRange<i32>],
Vec<sqlx::postgres::types::PgRange<i64>> | &[sqlx::postgres::types::PgRange<i64>],
#[cfg(feature = "bigdecimal")]
Vec<sqlx::postgres::types::PgRange<sqlx::types::BigDecimal>> |
&[sqlx::postgres::types::PgRange<sqlx::types::BigDecimal>],
#[cfg(feature = "rust_decimal")]
Vec<sqlx::postgres::types::PgRange<sqlx::types::Decimal>> |
&[sqlx::postgres::types::PgRange<sqlx::types::Decimal>],
#[cfg(all(feature = "chrono", not(feature = "time")))]
Vec<sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDate>> |
&[sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDate>],
#[cfg(all(feature = "chrono", not(feature = "time")))]
Vec<sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDateTime>> |
&[sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDateTime>],
#[cfg(all(feature = "chrono", not(feature = "time")))]
Vec<sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<sqlx::types::chrono::Utc>>> |
&[sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<_>>],
#[cfg(all(feature = "chrono", not(feature = "time")))]
Vec<sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<sqlx::types::chrono::Utc>>> |
&[sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<_>>],
#[cfg(feature = "time")]
Vec<sqlx::postgres::types::PgRange<sqlx::types::time::Date>> |
&[sqlx::postgres::types::PgRange<sqlx::types::time::Date>],
#[cfg(feature = "time")]
Vec<sqlx::postgres::types::PgRange<sqlx::types::time::PrimitiveDateTime>> |
&[sqlx::postgres::types::PgRange<sqlx::types::time::PrimitiveDateTime>],
#[cfg(feature = "time")]
Vec<sqlx::postgres::types::PgRange<sqlx::types::time::OffsetDateTime>> |
&[sqlx::postgres::types::PgRange<sqlx::types::time::OffsetDateTime>],
},
ParamChecking::Strong,
feature-types: info => info.__type_feature_gate(),
// The expansion of the macro automatically applies the correct feature name
// and checks `[macros.preferred-crates]`
datetime-types: {
chrono: {
// Scalar types
sqlx::types::chrono::NaiveTime,
sqlx::types::chrono::NaiveDate,
sqlx::types::chrono::NaiveDateTime,
sqlx::types::chrono::DateTime<sqlx::types::chrono::Utc> | sqlx::types::chrono::DateTime<_>,
sqlx::postgres::types::PgTimeTz<sqlx::types::chrono::NaiveTime, sqlx::types::chrono::FixedOffset>,
// Array types
Vec<sqlx::types::chrono::NaiveTime> | &[sqlx::types::chrono::NaiveTime],
Vec<sqlx::types::chrono::NaiveDate> | &[sqlx::types::chrono::NaiveDate],
Vec<sqlx::types::chrono::NaiveDateTime> | &[sqlx::types::chrono::NaiveDateTime],
Vec<sqlx::types::chrono::DateTime<sqlx::types::chrono::Utc>> | &[sqlx::types::chrono::DateTime<_>],
// Range types
sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDate>,
sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDateTime>,
sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<sqlx::types::chrono::Utc>> |
sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<_>>,
// Arrays of ranges
Vec<sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDate>> |
&[sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDate>],
Vec<sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDateTime>> |
&[sqlx::postgres::types::PgRange<sqlx::types::chrono::NaiveDateTime>],
Vec<sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<sqlx::types::chrono::Utc>>> |
&[sqlx::postgres::types::PgRange<sqlx::types::chrono::DateTime<_>>],
},
time: {
// Scalar types
sqlx::types::time::Time,
sqlx::types::time::Date,
sqlx::types::time::PrimitiveDateTime,
sqlx::types::time::OffsetDateTime,
sqlx::postgres::types::PgTimeTz<sqlx::types::time::Time, sqlx::types::time::UtcOffset>,
// Array types
Vec<sqlx::types::time::Time> | &[sqlx::types::time::Time],
Vec<sqlx::types::time::Date> | &[sqlx::types::time::Date],
Vec<sqlx::types::time::PrimitiveDateTime> | &[sqlx::types::time::PrimitiveDateTime],
Vec<sqlx::types::time::OffsetDateTime> | &[sqlx::types::time::OffsetDateTime],
// Range types
sqlx::postgres::types::PgRange<sqlx::types::time::Date>,
sqlx::postgres::types::PgRange<sqlx::types::time::PrimitiveDateTime>,
sqlx::postgres::types::PgRange<sqlx::types::time::OffsetDateTime>,
// Arrays of ranges
Vec<sqlx::postgres::types::PgRange<sqlx::types::time::Date>> |
&[sqlx::postgres::types::PgRange<sqlx::types::time::Date>],
Vec<sqlx::postgres::types::PgRange<sqlx::types::time::PrimitiveDateTime>> |
&[sqlx::postgres::types::PgRange<sqlx::types::time::PrimitiveDateTime>],
Vec<sqlx::postgres::types::PgRange<sqlx::types::time::OffsetDateTime>> |
&[sqlx::postgres::types::PgRange<sqlx::types::time::OffsetDateTime>],
},
},
numeric-types: {
bigdecimal: {
sqlx::types::BigDecimal,
Vec<sqlx::types::BigDecimal> | &[sqlx::types::BigDecimal],
sqlx::postgres::types::PgRange<sqlx::types::BigDecimal>,
Vec<sqlx::postgres::types::PgRange<sqlx::types::BigDecimal>> |
&[sqlx::postgres::types::PgRange<sqlx::types::BigDecimal>],
},
rust_decimal: {
sqlx::types::Decimal,
Vec<sqlx::types::Decimal> | &[sqlx::types::Decimal],
sqlx::postgres::types::PgRange<sqlx::types::Decimal>,
Vec<sqlx::postgres::types::PgRange<sqlx::types::Decimal>> |
&[sqlx::postgres::types::PgRange<sqlx::types::Decimal>],
},
},
);