breaking: make offline optional to allow building without serde (#4077)

* refactor(postgres): split describe into resolve and explain

* refactor(postgres): remove duplicated code from resolve and explain

* breaking: make `offline` optional to allow building without `serde`
Cathal authored 2026-02-06 01:06:30 +00:00; committed by GitHub
parent 9645a9ad8d
commit 7248f64244
27 changed files with 602 additions and 546 deletions
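The change follows one pattern across all the drivers: everything that exists only to support offline/compile-time query metadata (the `describe` entry points, the `Describe` struct, and its serde impls) moves behind the `offline` Cargo feature, so builds without that feature never pull in `serde`. A minimal, self-contained sketch of the pattern, using stand-in types rather than the real sqlx ones:

/// Stand-in for `sqlx_core::describe::Describe`: only compiled, and only
/// serde-capable, when the `offline` feature is enabled.
#[cfg(feature = "offline")]
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct Describe {
    pub columns: Vec<String>,
}

pub trait Backend {
    /// Always available; executing queries needs no serde support.
    fn execute(&mut self, sql: &str);

    /// Only part of the trait when `offline` is on; the query macros that
    /// call it are gated behind the same feature.
    #[cfg(feature = "offline")]
    fn describe(&mut self, sql: &str) -> Describe;
}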

Cargo.lock (generated, 6 lines changed)

@@ -1421,9 +1421,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
version = "1.2.1"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf"
dependencies = [
"percent-encoding",
]
@@ -3986,6 +3986,7 @@ dependencies = [
"atoi",
"chrono",
"flume",
"form_urlencoded",
"futures-channel",
"futures-core",
"futures-executor",
@@ -3996,7 +3997,6 @@ dependencies = [
"percent-encoding",
"regex",
"serde",
"serde_urlencoded",
"sqlx",
"sqlx-core",
"thiserror 2.0.17",


@@ -63,7 +63,7 @@ rustdoc-args = ["--cfg", "docsrs"]
default = ["any", "macros", "migrate", "json"]
derive = ["sqlx-macros/derive"]
macros = ["derive", "sqlx-macros/macros"]
macros = ["derive", "sqlx-macros/macros", "sqlx-core/offline", "sqlx-mysql?/offline", "sqlx-postgres?/offline", "sqlx-sqlite?/offline"]
migrate = ["sqlx-core/migrate", "sqlx-macros?/migrate", "sqlx-mysql?/migrate", "sqlx-postgres?/migrate", "sqlx-sqlite?/migrate"]
# Enable parsing of `sqlx.toml` for configuring macros and migrations.
@@ -211,7 +211,7 @@ features = ["time", "net", "sync", "fs", "io-util", "rt"]
default-features = false
[dependencies]
sqlx-core = { workspace = true, features = ["offline", "migrate"] }
sqlx-core = { workspace = true, features = ["migrate"] }
sqlx-macros = { workspace = true, optional = true }
sqlx-mysql = { workspace = true, optional = true }


@@ -1,5 +1,4 @@
use crate::any::{Any, AnyArguments, AnyQueryResult, AnyRow, AnyStatement, AnyTypeInfo};
use crate::describe::Describe;
use crate::any::{AnyArguments, AnyQueryResult, AnyRow, AnyStatement, AnyTypeInfo};
use crate::sql_str::SqlStr;
use either::Either;
use futures_core::future::BoxFuture;
@@ -114,5 +113,9 @@ pub trait AnyConnectionBackend: std::any::Any + Debug + Send + 'static {
parameters: &[AnyTypeInfo],
) -> BoxFuture<'c, crate::Result<AnyStatement>>;
fn describe(&mut self, sql: SqlStr) -> BoxFuture<'_, crate::Result<Describe<Any>>>;
#[cfg(feature = "offline")]
fn describe(
&mut self,
sql: SqlStr,
) -> BoxFuture<'_, crate::Result<crate::describe::Describe<crate::any::Any>>>;
}


@@ -1,5 +1,4 @@
use crate::any::{Any, AnyConnection, AnyQueryResult, AnyRow, AnyStatement, AnyTypeInfo};
use crate::describe::Describe;
use crate::error::Error;
use crate::executor::{Execute, Executor};
use crate::sql_str::SqlStr;
@@ -56,7 +55,11 @@ impl<'c> Executor<'c> for &'c mut AnyConnection {
self.backend.prepare_with(sql, parameters)
}
fn describe<'e>(self, sql: SqlStr) -> BoxFuture<'e, Result<Describe<Self::Database>, Error>>
#[cfg(feature = "offline")]
fn describe<'e>(
self,
sql: SqlStr,
) -> BoxFuture<'e, Result<crate::describe::Describe<Self::Database>, Error>>
where
'c: 'e,
{


@@ -1,5 +1,4 @@
use crate::database::Database;
use crate::describe::Describe;
use crate::error::{BoxDynError, Error};
use crate::sql_str::{SqlSafeStr, SqlStr};
@@ -178,7 +177,11 @@ pub trait Executor<'c>: Send + Debug + Sized {
/// This is used by compile-time verification in the query macros to
/// power their type inference.
#[doc(hidden)]
fn describe<'e>(self, sql: SqlStr) -> BoxFuture<'e, Result<Describe<Self::Database>, Error>>
#[cfg(feature = "offline")]
fn describe<'e>(
self,
sql: SqlStr,
) -> BoxFuture<'e, Result<crate::describe::Describe<Self::Database>, Error>>
where
'c: 'e;
}
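For context, `describe` is the hook the `query!` macros drive at compile time. A hedged usage sketch; the table and column names are assumptions, and it only compiles against a live `DATABASE_URL` or prepared `.sqlx` offline data:

async fn first_id(pool: &sqlx::PgPool) -> Result<i64, sqlx::Error> {
    // At expansion time, `query!` calls `Executor::describe` on this SQL to
    // verify it and to infer `rec.id: i64` (assuming `id BIGINT NOT NULL`).
    let rec = sqlx::query!("SELECT id FROM users LIMIT 1")
        .fetch_one(pool)
        .await?;
    Ok(rec.id)
}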


@@ -4,7 +4,6 @@ use futures_core::stream::BoxStream;
use futures_util::TryStreamExt;
use crate::database::Database;
use crate::describe::Describe;
use crate::error::Error;
use crate::executor::{Execute, Executor};
use crate::pool::Pool;
@@ -63,7 +62,11 @@
}
#[doc(hidden)]
fn describe<'e>(self, sql: SqlStr) -> BoxFuture<'e, Result<Describe<Self::Database>, Error>> {
#[cfg(feature = "offline")]
fn describe<'e>(
self,
sql: SqlStr,
) -> BoxFuture<'e, Result<crate::describe::Describe<Self::Database>, Error>> {
let pool = self.clone();
Box::pin(async move { pool.acquire().await?.describe(sql).await })
@@ -127,6 +130,7 @@ where
// }
//
// #[doc(hidden)]
// #[cfg(feature = "offline")]
// #[inline]
// fn describe<'e, 'q: 'e>(
// self,


@@ -189,6 +189,7 @@ where
// }
//
// #[doc(hidden)]
// #[cfg(feature = "offline")]
// fn describe<'e, 'q: 'e>(
// self,
// query: &'q str,


@@ -12,7 +12,7 @@ rust-version.workspace = true
[features]
json = ["sqlx-core/json", "serde"]
any = ["sqlx-core/any"]
offline = ["sqlx-core/offline", "serde/derive"]
offline = ["sqlx-core/offline", "serde/derive", "bitflags/serde"]
migrate = ["sqlx-core/migrate"]
# Type Integration features
@@ -52,7 +52,7 @@ uuid = { workspace = true, optional = true }
# Misc
atoi = "2.0"
base64 = { version = "0.22.0", default-features = false, features = ["std"] }
bitflags = { version = "2", default-features = false, features = ["serde"] }
bitflags = { version = "2", default-features = false }
byteorder = { version = "1.4.3", default-features = false, features = ["std"] }
bytes = "1.1.0"
either = "1.6.1"


@@ -8,12 +8,11 @@ use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;
use futures_util::{stream, FutureExt, StreamExt, TryFutureExt, TryStreamExt};
use sqlx_core::any::{
Any, AnyArguments, AnyColumn, AnyConnectOptions, AnyConnectionBackend, AnyQueryResult, AnyRow,
AnyArguments, AnyColumn, AnyConnectOptions, AnyConnectionBackend, AnyQueryResult, AnyRow,
AnyStatement, AnyTypeInfo, AnyTypeInfoKind,
};
use sqlx_core::connection::Connection;
use sqlx_core::database::Database;
use sqlx_core::describe::Describe;
use sqlx_core::executor::Executor;
use sqlx_core::sql_str::SqlStr;
use sqlx_core::transaction::TransactionManager;
@@ -141,7 +140,11 @@ impl AnyConnectionBackend for MySqlConnection {
})
}
fn describe(&mut self, sql: SqlStr) -> BoxFuture<'_, sqlx_core::Result<Describe<Any>>> {
#[cfg(feature = "offline")]
fn describe(
&mut self,
sql: SqlStr,
) -> BoxFuture<'_, sqlx_core::Result<sqlx_core::describe::Describe<sqlx_core::any::Any>>> {
Box::pin(async move {
let describe = Executor::describe(self, sql).await?;
describe.try_into_any()


@@ -13,6 +13,7 @@ pub struct MySqlColumn {
#[cfg_attr(feature = "offline", serde(default))]
pub(crate) origin: ColumnOrigin,
#[allow(unused)]
#[cfg_attr(feature = "offline", serde(skip))]
pub(crate) flags: Option<ColumnFlags>,
}
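The `cfg_attr` technique above keeps the serde attributes inert unless `offline` is enabled, so serde never becomes a mandatory dependency of the driver. A self-contained sketch of the same pattern with illustrative field names:

#[derive(Debug)]
#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))]
struct Column {
    name: String,
    // May be missing from older offline snapshots; default it on read.
    #[cfg_attr(feature = "offline", serde(default))]
    ordinal: usize,
    // Runtime-only detail; never written into offline query data.
    #[cfg_attr(feature = "offline", serde(skip))]
    flags: Option<u16>,
}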


@@ -1,6 +1,5 @@
use super::MySqlStream;
use crate::connection::stream::Waiting;
use crate::describe::Describe;
use crate::error::Error;
use crate::executor::{Execute, Executor};
use crate::ext::ustr::UStr;
@@ -10,7 +9,7 @@ use crate::protocol::response::Status;
use crate::protocol::statement::{
BinaryRow, Execute as StatementExecute, Prepare, PrepareOk, StmtClose,
};
use crate::protocol::text::{ColumnDefinition, ColumnFlags, Query, TextRow};
use crate::protocol::text::{ColumnDefinition, Query, TextRow};
use crate::statement::{MySqlStatement, MySqlStatementMetadata};
use crate::HashMap;
use crate::{
@@ -359,7 +358,11 @@ impl<'c> Executor<'c> for &'c mut MySqlConnection {
}
#[doc(hidden)]
fn describe<'e>(self, sql: SqlStr) -> BoxFuture<'e, Result<Describe<MySql>, Error>>
#[cfg(feature = "offline")]
fn describe<'e>(
self,
sql: SqlStr,
) -> BoxFuture<'e, Result<crate::describe::Describe<MySql>, Error>>
where
'c: 'e,
{
@@ -379,11 +382,11 @@ impl<'c> Executor<'c> for &'c mut MySqlConnection {
.iter()
.map(|col| {
col.flags
.map(|flags| !flags.contains(ColumnFlags::NOT_NULL))
.map(|flags| !flags.contains(crate::protocol::text::ColumnFlags::NOT_NULL))
})
.collect();
Ok(Describe {
Ok(crate::describe::Describe {
parameters: Some(Either::Right(metadata.parameters)),
columns,
nullable,


@@ -11,9 +11,9 @@ rust-version.workspace = true
[features]
any = ["sqlx-core/any"]
json = ["sqlx-core/json"]
json = ["dep:serde", "dep:serde_json", "sqlx-core/json"]
migrate = ["sqlx-core/migrate"]
offline = ["sqlx-core/offline"]
offline = ["json", "sqlx-core/offline", "smallvec/serde"]
# Type Integration features
bigdecimal = ["dep:bigdecimal", "dep:num-bigint", "sqlx-core/bigdecimal"]
@@ -62,7 +62,7 @@ itoa = "1.0.1"
log = "0.4.18"
memchr = { version = "2.4.1", default-features = false }
num-bigint = { version = "0.4.3", optional = true }
smallvec = { version = "1.7.0", features = ["serde"] }
smallvec = { version = "1.7.0" }
stringprep = "0.1.2"
tracing = { version = "0.1.37", features = ["log"] }
whoami = { version = "2.0.2", default-features = false }
@@ -70,13 +70,11 @@ whoami = { version = "2.0.2", default-features = false }
dotenvy.workspace = true
thiserror.workspace = true
serde = { version = "1.0.144", features = ["derive"] }
serde_json = { version = "1.0.85", features = ["raw_value"] }
serde = { version = "1.0.144", optional = true, features = ["derive"] }
serde_json = { version = "1.0.85", optional = true, features = ["raw_value"] }
[dependencies.sqlx-core]
workspace = true
# We use JSON in the driver implementation itself so there's no reason not to enable it here.
features = ["json"]
[dev-dependencies.sqlx]
# FIXME: https://github.com/rust-lang/cargo/issues/15622


@@ -9,14 +9,13 @@ use sqlx_core::sql_str::SqlStr;
use std::{future, pin::pin};
use sqlx_core::any::{
Any, AnyArguments, AnyColumn, AnyConnectOptions, AnyConnectionBackend, AnyQueryResult, AnyRow,
AnyArguments, AnyColumn, AnyConnectOptions, AnyConnectionBackend, AnyQueryResult, AnyRow,
AnyStatement, AnyTypeInfo, AnyTypeInfoKind,
};
use crate::type_info::PgType;
use sqlx_core::connection::Connection;
use sqlx_core::database::Database;
use sqlx_core::describe::Describe;
use sqlx_core::executor::Executor;
use sqlx_core::ext::ustr::UStr;
use sqlx_core::transaction::TransactionManager;
@@ -141,7 +140,11 @@ impl AnyConnectionBackend for PgConnection {
})
}
fn describe<'c>(&mut self, sql: SqlStr) -> BoxFuture<'_, sqlx_core::Result<Describe<Any>>> {
#[cfg(feature = "offline")]
fn describe<'c>(
&mut self,
sql: SqlStr,
) -> BoxFuture<'_, sqlx_core::Result<sqlx_core::describe::Describe<sqlx_core::any::Any>>> {
Box::pin(async move {
let describe = Executor::describe(self, sql).await?;
@@ -172,7 +175,7 @@ impl AnyConnectionBackend for PgConnection {
None => None,
};
Ok(Describe {
Ok(sqlx_core::describe::Describe {
columns,
parameters,
nullable: describe.nullable,


@@ -1,494 +1,14 @@
use crate::connection::TableColumns;
use crate::error::Error;
use crate::ext::ustr::UStr;
use crate::io::StatementId;
use crate::message::{ParameterDescription, RowDescription};
use crate::query_as::query_as;
use crate::query_scalar::query_scalar;
use crate::statement::PgStatementMetadata;
use crate::type_info::{PgArrayOf, PgCustomType, PgType, PgTypeKind};
use crate::types::Json;
use crate::types::Oid;
use crate::HashMap;
use crate::{PgColumn, PgConnection, PgTypeInfo};
use crate::PgConnection;
use smallvec::SmallVec;
use sqlx_core::column::{ColumnOrigin, TableColumn};
use sqlx_core::query_builder::QueryBuilder;
use sqlx_core::sql_str::AssertSqlSafe;
use std::sync::Arc;
/// Describes the type of the `pg_type.typtype` column
///
/// See <https://www.postgresql.org/docs/13/catalog-pg-type.html>
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum TypType {
Base,
Composite,
Domain,
Enum,
Pseudo,
Range,
}
impl TryFrom<i8> for TypType {
type Error = ();
fn try_from(t: i8) -> Result<Self, Self::Error> {
let t = u8::try_from(t).or(Err(()))?;
let t = match t {
b'b' => Self::Base,
b'c' => Self::Composite,
b'd' => Self::Domain,
b'e' => Self::Enum,
b'p' => Self::Pseudo,
b'r' => Self::Range,
_ => return Err(()),
};
Ok(t)
}
}
/// Describes the type of the `pg_type.typcategory` column
///
/// See <https://www.postgresql.org/docs/13/catalog-pg-type.html#CATALOG-TYPCATEGORY-TABLE>
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum TypCategory {
Array,
Boolean,
Composite,
DateTime,
Enum,
Geometric,
Network,
Numeric,
Pseudo,
Range,
String,
Timespan,
User,
BitString,
Unknown,
}
impl TryFrom<i8> for TypCategory {
type Error = ();
fn try_from(c: i8) -> Result<Self, Self::Error> {
let c = u8::try_from(c).or(Err(()))?;
let c = match c {
b'A' => Self::Array,
b'B' => Self::Boolean,
b'C' => Self::Composite,
b'D' => Self::DateTime,
b'E' => Self::Enum,
b'G' => Self::Geometric,
b'I' => Self::Network,
b'N' => Self::Numeric,
b'P' => Self::Pseudo,
b'R' => Self::Range,
b'S' => Self::String,
b'T' => Self::Timespan,
b'U' => Self::User,
b'V' => Self::BitString,
b'X' => Self::Unknown,
_ => return Err(()),
};
Ok(c)
}
}
impl PgConnection {
pub(super) async fn handle_row_description(
&mut self,
desc: Option<RowDescription>,
fetch_type_info: bool,
fetch_column_description: bool,
) -> Result<(Vec<PgColumn>, HashMap<UStr, usize>), Error> {
let mut columns = Vec::new();
let mut column_names = HashMap::new();
let desc = if let Some(desc) = desc {
desc
} else {
// no rows
return Ok((columns, column_names));
};
columns.reserve(desc.fields.len());
column_names.reserve(desc.fields.len());
for (index, field) in desc.fields.into_iter().enumerate() {
let name = UStr::from(field.name);
let type_info = self
.maybe_fetch_type_info_by_oid(field.data_type_id, fetch_type_info)
.await?;
let origin = if let (Some(relation_oid), Some(attribute_no)) =
(field.relation_id, field.relation_attribute_no)
{
self.maybe_fetch_column_origin(relation_oid, attribute_no, fetch_column_description)
.await?
} else {
ColumnOrigin::Expression
};
let column = PgColumn {
ordinal: index,
name: name.clone(),
type_info,
relation_id: field.relation_id,
relation_attribute_no: field.relation_attribute_no,
origin,
};
columns.push(column);
column_names.insert(name, index);
}
Ok((columns, column_names))
}
pub(super) async fn handle_parameter_description(
&mut self,
desc: ParameterDescription,
) -> Result<Vec<PgTypeInfo>, Error> {
let mut params = Vec::with_capacity(desc.types.len());
for ty in desc.types {
params.push(self.maybe_fetch_type_info_by_oid(ty, true).await?);
}
Ok(params)
}
async fn maybe_fetch_type_info_by_oid(
&mut self,
oid: Oid,
should_fetch: bool,
) -> Result<PgTypeInfo, Error> {
// first we check if this is a built-in type
// in the average application, the vast majority of checks should flow through this
if let Some(info) = PgTypeInfo::try_from_oid(oid) {
return Ok(info);
}
// next we check a local cache for user-defined type names <-> object id
if let Some(info) = self.inner.cache_type_info.get(&oid) {
return Ok(info.clone());
}
// fallback to asking the database directly for a type name
if should_fetch {
// we're boxing this future here so we can use async recursion
let info = Box::pin(async { self.fetch_type_by_oid(oid).await }).await?;
// cache the type name <-> oid relationship in a paired hashmap
// so we don't come down this road again
self.inner.cache_type_info.insert(oid, info.clone());
self.inner
.cache_type_oid
.insert(info.0.name().to_string().into(), oid);
Ok(info)
} else {
// we are not in a place that *can* run a query
// this generally means we are in the middle of another query
// this _should_ only happen for complex types sent through the TEXT protocol
// we're open to ideas to correct this.. but it'd probably be more efficient to figure
// out a way to "prime" the type cache for connections rather than make this
// fallback work correctly for complex user-defined types for the TEXT protocol
Ok(PgTypeInfo(PgType::DeclareWithOid(oid)))
}
}
async fn maybe_fetch_column_origin(
&mut self,
relation_id: Oid,
attribute_no: i16,
should_fetch: bool,
) -> Result<ColumnOrigin, Error> {
if let Some(origin) = self
.inner
.cache_table_to_column_names
.get(&relation_id)
.and_then(|table_columns| {
let column_name = table_columns.columns.get(&attribute_no).cloned()?;
Some(ColumnOrigin::Table(TableColumn {
table: table_columns.table_name.clone(),
name: column_name,
}))
})
{
return Ok(origin);
}
if !should_fetch {
return Ok(ColumnOrigin::Unknown);
}
// Looking up the table name _may_ end up being redundant,
// but the round-trip to the server is by far the most expensive part anyway.
let Some((table_name, column_name)): Option<(String, String)> = query_as(
// language=PostgreSQL
"SELECT $1::oid::regclass::text, attname \
FROM pg_catalog.pg_attribute \
WHERE attrelid = $1 AND attnum = $2",
)
.bind(relation_id)
.bind(attribute_no)
.fetch_optional(&mut *self)
.await?
else {
// The column/table doesn't exist anymore for whatever reason.
return Ok(ColumnOrigin::Unknown);
};
let table_columns = self
.inner
.cache_table_to_column_names
.entry(relation_id)
.or_insert_with(|| TableColumns {
table_name: table_name.into(),
columns: Default::default(),
});
let column_name = table_columns
.columns
.entry(attribute_no)
.or_insert(column_name.into());
Ok(ColumnOrigin::Table(TableColumn {
table: table_columns.table_name.clone(),
name: Arc::clone(column_name),
}))
}
async fn fetch_type_by_oid(&mut self, oid: Oid) -> Result<PgTypeInfo, Error> {
let (name, typ_type, category, relation_id, element, base_type): (
String,
i8,
i8,
Oid,
Oid,
Oid,
) = query_as(
// Converting the OID to `regtype` and then `text` will give us the name that
// the type will need to be found at by search_path.
"SELECT oid::regtype::text, \
typtype, \
typcategory, \
typrelid, \
typelem, \
typbasetype \
FROM pg_catalog.pg_type \
WHERE oid = $1",
)
.bind(oid)
.fetch_one(&mut *self)
.await?;
let typ_type = TypType::try_from(typ_type);
let category = TypCategory::try_from(category);
match (typ_type, category) {
(Ok(TypType::Domain), _) => self.fetch_domain_by_oid(oid, base_type, name).await,
(Ok(TypType::Base), Ok(TypCategory::Array)) => {
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
kind: PgTypeKind::Array(
self.maybe_fetch_type_info_by_oid(element, true).await?,
),
name: name.into(),
oid,
}))))
}
(Ok(TypType::Pseudo), Ok(TypCategory::Pseudo)) => {
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
kind: PgTypeKind::Pseudo,
name: name.into(),
oid,
}))))
}
(Ok(TypType::Range), Ok(TypCategory::Range)) => {
self.fetch_range_by_oid(oid, name).await
}
(Ok(TypType::Enum), Ok(TypCategory::Enum)) => self.fetch_enum_by_oid(oid, name).await,
(Ok(TypType::Composite), Ok(TypCategory::Composite)) => {
self.fetch_composite_by_oid(oid, relation_id, name).await
}
_ => Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
kind: PgTypeKind::Simple,
name: name.into(),
oid,
})))),
}
}
async fn fetch_enum_by_oid(&mut self, oid: Oid, name: String) -> Result<PgTypeInfo, Error> {
let variants: Vec<String> = query_scalar(
r#"
SELECT enumlabel
FROM pg_catalog.pg_enum
WHERE enumtypid = $1
ORDER BY enumsortorder
"#,
)
.bind(oid)
.fetch_all(self)
.await?;
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
oid,
name: name.into(),
kind: PgTypeKind::Enum(Arc::from(variants)),
}))))
}
async fn fetch_composite_by_oid(
&mut self,
oid: Oid,
relation_id: Oid,
name: String,
) -> Result<PgTypeInfo, Error> {
let raw_fields: Vec<(String, Oid)> = query_as(
r#"
SELECT attname, atttypid
FROM pg_catalog.pg_attribute
WHERE attrelid = $1
AND NOT attisdropped
AND attnum > 0
ORDER BY attnum
"#,
)
.bind(relation_id)
.fetch_all(&mut *self)
.await?;
let mut fields = Vec::new();
for (field_name, field_oid) in raw_fields.into_iter() {
let field_type = self.maybe_fetch_type_info_by_oid(field_oid, true).await?;
fields.push((field_name, field_type));
}
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
oid,
name: name.into(),
kind: PgTypeKind::Composite(Arc::from(fields)),
}))))
}
async fn fetch_domain_by_oid(
&mut self,
oid: Oid,
base_type: Oid,
name: String,
) -> Result<PgTypeInfo, Error> {
let base_type = self.maybe_fetch_type_info_by_oid(base_type, true).await?;
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
oid,
name: name.into(),
kind: PgTypeKind::Domain(base_type),
}))))
}
async fn fetch_range_by_oid(&mut self, oid: Oid, name: String) -> Result<PgTypeInfo, Error> {
let element_oid: Oid = query_scalar(
r#"
SELECT rngsubtype
FROM pg_catalog.pg_range
WHERE rngtypid = $1
"#,
)
.bind(oid)
.fetch_one(&mut *self)
.await?;
let element = self.maybe_fetch_type_info_by_oid(element_oid, true).await?;
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
kind: PgTypeKind::Range(element),
name: name.into(),
oid,
}))))
}
pub(crate) async fn resolve_type_id(&mut self, ty: &PgType) -> Result<Oid, Error> {
if let Some(oid) = ty.try_oid() {
return Ok(oid);
}
match ty {
PgType::DeclareWithName(name) => self.fetch_type_id_by_name(name).await,
PgType::DeclareArrayOf(array) => self.fetch_array_type_id(array).await,
// `.try_oid()` should return `Some()` or it should be covered here
_ => unreachable!("(bug) OID should be resolvable for type {ty:?}"),
}
}
pub(crate) async fn fetch_type_id_by_name(&mut self, name: &str) -> Result<Oid, Error> {
if let Some(oid) = self.inner.cache_type_oid.get(name) {
return Ok(*oid);
}
// language=SQL
let (oid,): (Oid,) = query_as("SELECT $1::regtype::oid")
.bind(name)
.fetch_optional(&mut *self)
.await?
.ok_or_else(|| Error::TypeNotFound {
type_name: name.into(),
})?;
self.inner
.cache_type_oid
.insert(name.to_string().into(), oid);
Ok(oid)
}
pub(crate) async fn fetch_array_type_id(&mut self, array: &PgArrayOf) -> Result<Oid, Error> {
if let Some(oid) = self
.inner
.cache_type_oid
.get(&array.elem_name)
.and_then(|elem_oid| self.inner.cache_elem_type_to_array.get(elem_oid))
{
return Ok(*oid);
}
// language=SQL
let (elem_oid, array_oid): (Oid, Oid) =
query_as("SELECT oid, typarray FROM pg_catalog.pg_type WHERE oid = $1::regtype::oid")
.bind(&*array.elem_name)
.fetch_optional(&mut *self)
.await?
.ok_or_else(|| Error::TypeNotFound {
type_name: array.name.to_string(),
})?;
// Avoids copying `elem_name` until necessary
self.inner
.cache_type_oid
.entry_ref(&array.elem_name)
.insert(elem_oid);
self.inner
.cache_elem_type_to_array
.insert(elem_oid, array_oid);
Ok(array_oid)
}
/// Check whether EXPLAIN statements are supported by the current connection
fn is_explain_available(&self) -> bool {
let parameter_statuses = &self.inner.stream.parameter_statuses;


@@ -1,4 +1,3 @@
use crate::describe::Describe;
use crate::error::Error;
use crate::executor::{Execute, Executor};
use crate::io::{PortalId, StatementId};
@@ -475,7 +474,11 @@ impl<'c> Executor<'c> for &'c mut PgConnection {
})
}
fn describe<'e>(self, sql: SqlStr) -> BoxFuture<'e, Result<Describe<Self::Database>, Error>>
#[cfg(feature = "offline")]
fn describe<'e>(
self,
sql: SqlStr,
) -> BoxFuture<'e, Result<crate::describe::Describe<Self::Database>, Error>>
where
'c: 'e,
{
@@ -488,7 +491,7 @@ impl<'c> Executor<'c> for &'c mut PgConnection {
let nullable = self.get_nullable_for_columns(stmt_id, &metadata).await?;
Ok(Describe {
Ok(crate::describe::Describe {
columns: metadata.columns.clone(),
nullable,
parameters: Some(Either::Left(metadata.parameters.clone())),


@@ -23,9 +23,11 @@ use sqlx_core::sql_str::SqlSafeStr;
pub use self::stream::PgStream;
pub(crate) mod describe;
#[cfg(feature = "offline")]
mod describe;
mod establish;
mod executor;
mod resolve;
mod sasl;
mod stream;
mod tls;


@@ -0,0 +1,485 @@
use crate::connection::TableColumns;
use crate::error::Error;
use crate::ext::ustr::UStr;
use crate::message::{ParameterDescription, RowDescription};
use crate::query_as::query_as;
use crate::query_scalar::query_scalar;
use crate::type_info::{PgArrayOf, PgCustomType, PgType, PgTypeKind};
use crate::types::Oid;
use crate::HashMap;
use crate::{PgColumn, PgConnection, PgTypeInfo};
use sqlx_core::column::{ColumnOrigin, TableColumn};
use std::sync::Arc;
/// Describes the type of the `pg_type.typtype` column
///
/// See <https://www.postgresql.org/docs/13/catalog-pg-type.html>
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum TypType {
Base,
Composite,
Domain,
Enum,
Pseudo,
Range,
}
impl TryFrom<i8> for TypType {
type Error = ();
fn try_from(t: i8) -> Result<Self, Self::Error> {
let t = u8::try_from(t).or(Err(()))?;
let t = match t {
b'b' => Self::Base,
b'c' => Self::Composite,
b'd' => Self::Domain,
b'e' => Self::Enum,
b'p' => Self::Pseudo,
b'r' => Self::Range,
_ => return Err(()),
};
Ok(t)
}
}
/// Describes the type of the `pg_type.typcategory` column
///
/// See <https://www.postgresql.org/docs/13/catalog-pg-type.html#CATALOG-TYPCATEGORY-TABLE>
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum TypCategory {
Array,
Boolean,
Composite,
DateTime,
Enum,
Geometric,
Network,
Numeric,
Pseudo,
Range,
String,
Timespan,
User,
BitString,
Unknown,
}
impl TryFrom<i8> for TypCategory {
type Error = ();
fn try_from(c: i8) -> Result<Self, Self::Error> {
let c = u8::try_from(c).or(Err(()))?;
let c = match c {
b'A' => Self::Array,
b'B' => Self::Boolean,
b'C' => Self::Composite,
b'D' => Self::DateTime,
b'E' => Self::Enum,
b'G' => Self::Geometric,
b'I' => Self::Network,
b'N' => Self::Numeric,
b'P' => Self::Pseudo,
b'R' => Self::Range,
b'S' => Self::String,
b'T' => Self::Timespan,
b'U' => Self::User,
b'V' => Self::BitString,
b'X' => Self::Unknown,
_ => return Err(()),
};
Ok(c)
}
}
impl PgConnection {
pub(super) async fn handle_row_description(
&mut self,
desc: Option<RowDescription>,
fetch_type_info: bool,
fetch_column_description: bool,
) -> Result<(Vec<PgColumn>, HashMap<UStr, usize>), Error> {
let mut columns = Vec::new();
let mut column_names = HashMap::new();
let desc = if let Some(desc) = desc {
desc
} else {
// no rows
return Ok((columns, column_names));
};
columns.reserve(desc.fields.len());
column_names.reserve(desc.fields.len());
for (index, field) in desc.fields.into_iter().enumerate() {
let name = UStr::from(field.name);
let type_info = self
.maybe_fetch_type_info_by_oid(field.data_type_id, fetch_type_info)
.await?;
let origin = if let (Some(relation_oid), Some(attribute_no)) =
(field.relation_id, field.relation_attribute_no)
{
self.maybe_fetch_column_origin(relation_oid, attribute_no, fetch_column_description)
.await?
} else {
ColumnOrigin::Expression
};
let column = PgColumn {
ordinal: index,
name: name.clone(),
type_info,
relation_id: field.relation_id,
relation_attribute_no: field.relation_attribute_no,
origin,
};
columns.push(column);
column_names.insert(name, index);
}
Ok((columns, column_names))
}
pub(super) async fn handle_parameter_description(
&mut self,
desc: ParameterDescription,
) -> Result<Vec<PgTypeInfo>, Error> {
let mut params = Vec::with_capacity(desc.types.len());
for ty in desc.types {
params.push(self.maybe_fetch_type_info_by_oid(ty, true).await?);
}
Ok(params)
}
async fn maybe_fetch_type_info_by_oid(
&mut self,
oid: Oid,
should_fetch: bool,
) -> Result<PgTypeInfo, Error> {
// first we check if this is a built-in type
// in the average application, the vast majority of checks should flow through this
if let Some(info) = PgTypeInfo::try_from_oid(oid) {
return Ok(info);
}
// next we check a local cache for user-defined type names <-> object id
if let Some(info) = self.inner.cache_type_info.get(&oid) {
return Ok(info.clone());
}
// fallback to asking the database directly for a type name
if should_fetch {
// we're boxing this future here so we can use async recursion
let info = Box::pin(async { self.fetch_type_by_oid(oid).await }).await?;
// cache the type name <-> oid relationship in a paired hashmap
// so we don't come down this road again
self.inner.cache_type_info.insert(oid, info.clone());
self.inner
.cache_type_oid
.insert(info.0.name().to_string().into(), oid);
Ok(info)
} else {
// we are not in a place that *can* run a query
// this generally means we are in the middle of another query
// this _should_ only happen for complex types sent through the TEXT protocol
// we're open to ideas to correct this.. but it'd probably be more efficient to figure
// out a way to "prime" the type cache for connections rather than make this
// fallback work correctly for complex user-defined types for the TEXT protocol
Ok(PgTypeInfo(PgType::DeclareWithOid(oid)))
}
}
async fn maybe_fetch_column_origin(
&mut self,
relation_id: Oid,
attribute_no: i16,
should_fetch: bool,
) -> Result<ColumnOrigin, Error> {
if let Some(origin) = self
.inner
.cache_table_to_column_names
.get(&relation_id)
.and_then(|table_columns| {
let column_name = table_columns.columns.get(&attribute_no).cloned()?;
Some(ColumnOrigin::Table(TableColumn {
table: table_columns.table_name.clone(),
name: column_name,
}))
})
{
return Ok(origin);
}
if !should_fetch {
return Ok(ColumnOrigin::Unknown);
}
// Looking up the table name _may_ end up being redundant,
// but the round-trip to the server is by far the most expensive part anyway.
let Some((table_name, column_name)): Option<(String, String)> = query_as(
// language=PostgreSQL
"SELECT $1::oid::regclass::text, attname \
FROM pg_catalog.pg_attribute \
WHERE attrelid = $1 AND attnum = $2",
)
.bind(relation_id)
.bind(attribute_no)
.fetch_optional(&mut *self)
.await?
else {
// The column/table doesn't exist anymore for whatever reason.
return Ok(ColumnOrigin::Unknown);
};
let table_columns = self
.inner
.cache_table_to_column_names
.entry(relation_id)
.or_insert_with(|| TableColumns {
table_name: table_name.into(),
columns: Default::default(),
});
let column_name = table_columns
.columns
.entry(attribute_no)
.or_insert(column_name.into());
Ok(ColumnOrigin::Table(TableColumn {
table: table_columns.table_name.clone(),
name: Arc::clone(column_name),
}))
}
async fn fetch_type_by_oid(&mut self, oid: Oid) -> Result<PgTypeInfo, Error> {
let (name, typ_type, category, relation_id, element, base_type): (
String,
i8,
i8,
Oid,
Oid,
Oid,
) = query_as(
// Converting the OID to `regtype` and then `text` will give us the name that
// the type will need to be found at by search_path.
"SELECT oid::regtype::text, \
typtype, \
typcategory, \
typrelid, \
typelem, \
typbasetype \
FROM pg_catalog.pg_type \
WHERE oid = $1",
)
.bind(oid)
.fetch_one(&mut *self)
.await?;
let typ_type = TypType::try_from(typ_type);
let category = TypCategory::try_from(category);
match (typ_type, category) {
(Ok(TypType::Domain), _) => self.fetch_domain_by_oid(oid, base_type, name).await,
(Ok(TypType::Base), Ok(TypCategory::Array)) => {
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
kind: PgTypeKind::Array(
self.maybe_fetch_type_info_by_oid(element, true).await?,
),
name: name.into(),
oid,
}))))
}
(Ok(TypType::Pseudo), Ok(TypCategory::Pseudo)) => {
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
kind: PgTypeKind::Pseudo,
name: name.into(),
oid,
}))))
}
(Ok(TypType::Range), Ok(TypCategory::Range)) => {
self.fetch_range_by_oid(oid, name).await
}
(Ok(TypType::Enum), Ok(TypCategory::Enum)) => self.fetch_enum_by_oid(oid, name).await,
(Ok(TypType::Composite), Ok(TypCategory::Composite)) => {
self.fetch_composite_by_oid(oid, relation_id, name).await
}
_ => Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
kind: PgTypeKind::Simple,
name: name.into(),
oid,
})))),
}
}
async fn fetch_enum_by_oid(&mut self, oid: Oid, name: String) -> Result<PgTypeInfo, Error> {
let variants: Vec<String> = query_scalar(
r#"
SELECT enumlabel
FROM pg_catalog.pg_enum
WHERE enumtypid = $1
ORDER BY enumsortorder
"#,
)
.bind(oid)
.fetch_all(self)
.await?;
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
oid,
name: name.into(),
kind: PgTypeKind::Enum(Arc::from(variants)),
}))))
}
async fn fetch_composite_by_oid(
&mut self,
oid: Oid,
relation_id: Oid,
name: String,
) -> Result<PgTypeInfo, Error> {
let raw_fields: Vec<(String, Oid)> = query_as(
r#"
SELECT attname, atttypid
FROM pg_catalog.pg_attribute
WHERE attrelid = $1
AND NOT attisdropped
AND attnum > 0
ORDER BY attnum
"#,
)
.bind(relation_id)
.fetch_all(&mut *self)
.await?;
let mut fields = Vec::new();
for (field_name, field_oid) in raw_fields.into_iter() {
let field_type = self.maybe_fetch_type_info_by_oid(field_oid, true).await?;
fields.push((field_name, field_type));
}
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
oid,
name: name.into(),
kind: PgTypeKind::Composite(Arc::from(fields)),
}))))
}
async fn fetch_domain_by_oid(
&mut self,
oid: Oid,
base_type: Oid,
name: String,
) -> Result<PgTypeInfo, Error> {
let base_type = self.maybe_fetch_type_info_by_oid(base_type, true).await?;
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
oid,
name: name.into(),
kind: PgTypeKind::Domain(base_type),
}))))
}
async fn fetch_range_by_oid(&mut self, oid: Oid, name: String) -> Result<PgTypeInfo, Error> {
let element_oid: Oid = query_scalar(
r#"
SELECT rngsubtype
FROM pg_catalog.pg_range
WHERE rngtypid = $1
"#,
)
.bind(oid)
.fetch_one(&mut *self)
.await?;
let element = self.maybe_fetch_type_info_by_oid(element_oid, true).await?;
Ok(PgTypeInfo(PgType::Custom(Arc::new(PgCustomType {
kind: PgTypeKind::Range(element),
name: name.into(),
oid,
}))))
}
pub(crate) async fn resolve_type_id(&mut self, ty: &PgType) -> Result<Oid, Error> {
if let Some(oid) = ty.try_oid() {
return Ok(oid);
}
match ty {
PgType::DeclareWithName(name) => self.fetch_type_id_by_name(name).await,
PgType::DeclareArrayOf(array) => self.fetch_array_type_id(array).await,
// `.try_oid()` should return `Some()` or it should be covered here
_ => unreachable!("(bug) OID should be resolvable for type {ty:?}"),
}
}
pub(crate) async fn fetch_type_id_by_name(&mut self, name: &str) -> Result<Oid, Error> {
if let Some(oid) = self.inner.cache_type_oid.get(name) {
return Ok(*oid);
}
// language=SQL
let (oid,): (Oid,) = query_as("SELECT $1::regtype::oid")
.bind(name)
.fetch_optional(&mut *self)
.await?
.ok_or_else(|| Error::TypeNotFound {
type_name: name.into(),
})?;
self.inner
.cache_type_oid
.insert(name.to_string().into(), oid);
Ok(oid)
}
pub(crate) async fn fetch_array_type_id(&mut self, array: &PgArrayOf) -> Result<Oid, Error> {
if let Some(oid) = self
.inner
.cache_type_oid
.get(&array.elem_name)
.and_then(|elem_oid| self.inner.cache_elem_type_to_array.get(elem_oid))
{
return Ok(*oid);
}
// language=SQL
let (elem_oid, array_oid): (Oid, Oid) =
query_as("SELECT oid, typarray FROM pg_catalog.pg_type WHERE oid = $1::regtype::oid")
.bind(&*array.elem_name)
.fetch_optional(&mut *self)
.await?
.ok_or_else(|| Error::TypeNotFound {
type_name: array.name.to_string(),
})?;
// Avoids copying `elem_name` until necessary
self.inner
.cache_type_oid
.entry_ref(&array.elem_name)
.insert(elem_oid);
self.inner
.cache_elem_type_to_array
.insert(elem_oid, array_oid);
Ok(array_oid)
}
}
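This new `resolve` module is the connection-time half of the old `describe.rs`: type resolution stays available in every build, while the `describe` module with its EXPLAIN-based nullability analysis is what remains behind `offline`. If a regression guard for the byte mappings were ever wanted, a test in this module might look like the following sketch:

#[cfg(test)]
mod tests {
    use super::{TypCategory, TypType};

    #[test]
    fn pg_type_bytes_map_as_documented() {
        // Byte values per the PostgreSQL pg_type catalog documentation.
        assert_eq!(TypType::try_from(b'e' as i8), Ok(TypType::Enum));
        assert_eq!(TypCategory::try_from(b'A' as i8), Ok(TypCategory::Array));
        assert!(TypType::try_from(b'z' as i8).is_err());
    }
}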


@@ -16,6 +16,7 @@ pub(crate) struct PortalId(IdInner);
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct IdInner(Option<NonZeroU32>);
#[allow(unused)]
pub(crate) struct DisplayId {
prefix: &'static str,
id: NonZeroU32,
@@ -43,6 +44,7 @@ impl StatementId {
/// Get a type to format this statement ID with [`Display`].
///
/// Returns `None` if this is the unnamed statement.
#[allow(unused)]
#[inline(always)]
pub fn display(&self) -> Option<DisplayId> {
self.0.display(Self::NAME_PREFIX)
@@ -104,6 +106,7 @@ impl IdInner {
)
}
#[allow(unused)]
#[inline(always)]
fn display(&self, prefix: &'static str) -> Option<DisplayId> {
self.0.map(|id| DisplayId { prefix, id })


@@ -12,7 +12,6 @@ use sqlx_core::transaction::Transaction;
use sqlx_core::Either;
use tracing::Instrument;
use crate::describe::Describe;
use crate::error::Error;
use crate::executor::{Execute, Executor};
use crate::message::{BackendMessageFormat, Notification};
@@ -439,7 +438,11 @@ impl<'c> Executor<'c> for &'c mut PgListener {
}
#[doc(hidden)]
fn describe<'e>(self, query: SqlStr) -> BoxFuture<'e, Result<Describe<Self::Database>, Error>>
#[cfg(feature = "offline")]
fn describe<'e>(
self,
query: SqlStr,
) -> BoxFuture<'e, Result<crate::describe::Describe<Self::Database>, Error>>
where
'c: 'e,
{


@@ -12,7 +12,6 @@ use crate::{
types::Type,
PgArgumentBuffer, PgHasArrayType, PgTypeInfo, PgValueRef, Postgres,
};
use serde::{Deserialize, Serialize};
use sqlx_core::bytes::Buf;
/// Key-value support (`hstore`) for Postgres.
@@ -88,7 +87,8 @@ use sqlx_core::bytes::Buf;
/// }
/// ```
///
#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)]
#[derive(Clone, Debug, Default, Eq, PartialEq)]
#[cfg_attr(feature = "offline", derive(serde::Serialize, serde::Deserialize))]
pub struct PgHstore(pub BTreeMap<String, Option<String>>);
impl Deref for PgHstore {


@@ -211,7 +211,9 @@
use crate::type_info::PgTypeKind;
use crate::{PgTypeInfo, Postgres};
pub(crate) use sqlx_core::types::{Json, Type};
#[cfg(feature = "json")]
pub(crate) use sqlx_core::types::Json;
pub(crate) use sqlx_core::types::Type;
mod array;
mod bool;
@@ -221,10 +223,10 @@ mod float;
mod hstore;
mod int;
mod interval;
#[cfg(feature = "json")]
mod json;
mod lquery;
mod ltree;
// Not behind a Cargo feature because we require JSON in the driver implementation.
mod json;
mod money;
mod oid;
mod range;


@@ -1,5 +1,4 @@
use byteorder::{BigEndian, ByteOrder};
use serde::{de::Deserializer, ser::Serializer, Deserialize, Serialize};
use crate::decode::Decode;
use crate::encode::{Encode, IsNull};
@@ -46,19 +45,21 @@ impl Decode<'_, Postgres> for Oid {
}
}
impl Serialize for Oid {
#[cfg(feature = "offline")]
impl serde::Serialize for Oid {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
S: serde::Serializer,
{
self.0.serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Oid {
#[cfg(feature = "offline")]
impl<'de> serde::Deserialize<'de> for Oid {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
D: serde::Deserializer<'de>,
{
u32::deserialize(deserializer).map(Self)
}


@@ -77,7 +77,7 @@ uuid = { workspace = true, optional = true }
url = { version = "2.2.2" }
percent-encoding = "2.1.0"
serde_urlencoded = "0.7"
form_urlencoded = "1.2.2"
flume = { version = "0.11.0", default-features = false, features = ["async"] }


@@ -7,7 +7,7 @@ use futures_core::stream::BoxStream;
use futures_util::{FutureExt, StreamExt, TryFutureExt, TryStreamExt};
use sqlx_core::any::{
Any, AnyArguments, AnyColumn, AnyConnectOptions, AnyConnectionBackend, AnyQueryResult, AnyRow,
AnyArguments, AnyColumn, AnyConnectOptions, AnyConnectionBackend, AnyQueryResult, AnyRow,
AnyStatement, AnyTypeInfo, AnyTypeInfoKind, AnyValueKind,
};
use sqlx_core::sql_str::SqlStr;
@@ -16,7 +16,6 @@ use crate::arguments::SqliteArgumentsBuffer;
use crate::type_info::DataType;
use sqlx_core::connection::{ConnectOptions, Connection};
use sqlx_core::database::Database;
use sqlx_core::describe::Describe;
use sqlx_core::executor::Executor;
use sqlx_core::transaction::TransactionManager;
use std::pin::pin;
@@ -140,7 +139,11 @@ impl AnyConnectionBackend for SqliteConnection {
})
}
fn describe(&mut self, sql: SqlStr) -> BoxFuture<'_, sqlx_core::Result<Describe<Any>>> {
#[cfg(feature = "offline")]
fn describe(
&mut self,
sql: SqlStr,
) -> BoxFuture<'_, sqlx_core::Result<sqlx_core::describe::Describe<sqlx_core::any::Any>>> {
Box::pin(async move { Executor::describe(self, sql).await?.try_into_any() })
}
}


@@ -93,10 +93,15 @@ impl EstablishParams {
if !query_params.is_empty() {
filename = format!(
"file:{}?{}",
"file:{}?",
percent_encoding::percent_encode(filename.as_bytes(), NON_ALPHANUMERIC),
serde_urlencoded::to_string(&query_params).unwrap()
);
// Suffix serializer automatically handles `&` separators for us.
let filename_len = filename.len();
filename = form_urlencoded::Serializer::for_suffix(filename, filename_len)
.extend_pairs(query_params)
.finish();
}
let filename = CString::new(filename).map_err(|_| {
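The swap above replaces `serde_urlencoded::to_string` (which required serde) with `form_urlencoded`'s suffix serializer, which appends `key=value` pairs and the `&` separators directly onto an existing string. A small sketch of that API, assuming the `form_urlencoded` crate added by this PR and using illustrative values:

fn main() {
    let filename = String::from("file:db.sqlite?");
    let start = filename.len();
    let query_params = [("mode", "rwc"), ("cache", "shared")];

    // `for_suffix` serializes pairs after byte offset `start`, inserting the
    // `&` separators itself; `finish` hands the built string back.
    let uri = form_urlencoded::Serializer::for_suffix(filename, start)
        .extend_pairs(query_params)
        .finish();

    assert_eq!(uri, "file:db.sqlite?mode=rwc&cache=shared");
}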


@@ -4,7 +4,6 @@ use crate::{
use futures_core::future::BoxFuture;
use futures_core::stream::BoxStream;
use futures_util::{stream, FutureExt, StreamExt, TryFutureExt, TryStreamExt};
use sqlx_core::describe::Describe;
use sqlx_core::error::Error;
use sqlx_core::executor::{Execute, Executor};
use sqlx_core::sql_str::SqlStr;
@@ -89,7 +88,11 @@ impl<'c> Executor<'c> for &'c mut SqliteConnection {
}
#[doc(hidden)]
fn describe<'e>(self, sql: SqlStr) -> BoxFuture<'e, Result<Describe<Sqlite>, Error>>
#[cfg(feature = "offline")]
fn describe<'e>(
self,
sql: SqlStr,
) -> BoxFuture<'e, Result<sqlx_core::describe::Describe<Sqlite>, Error>>
where
'c: 'e,
{


@@ -8,18 +8,16 @@ use futures_intrusive::sync::{Mutex, MutexGuard};
use sqlx_core::sql_str::SqlStr;
use tracing::span::Span;
use sqlx_core::describe::Describe;
use sqlx_core::error::Error;
use sqlx_core::transaction::{
begin_ansi_transaction_sql, commit_ansi_transaction_sql, rollback_ansi_transaction_sql,
};
use sqlx_core::Either;
use crate::connection::describe::describe;
use crate::connection::establish::EstablishParams;
use crate::connection::execute;
use crate::connection::ConnectionState;
use crate::{Sqlite, SqliteArguments, SqliteQueryResult, SqliteRow, SqliteStatement};
use crate::{SqliteArguments, SqliteQueryResult, SqliteRow, SqliteStatement};
#[cfg(feature = "deserialize")]
use crate::connection::deserialize::{deserialize, serialize, SchemaName, SqliteOwnedBuf};
@@ -57,9 +55,10 @@ enum Command {
query: SqlStr,
tx: oneshot::Sender<Result<SqliteStatement, Error>>,
},
#[cfg(feature = "offline")]
Describe {
query: SqlStr,
tx: oneshot::Sender<Result<Describe<Sqlite>, Error>>,
tx: oneshot::Sender<Result<sqlx_core::describe::Describe<crate::Sqlite>, Error>>,
},
Execute {
query: SqlStr,
@@ -157,8 +156,9 @@ impl ConnectionWorker {
&shared.cached_statements_size,
);
}
#[cfg(feature = "offline")]
Command::Describe { query, tx } => {
tx.send(describe(&mut conn, query)).ok();
tx.send(crate::connection::describe::describe(&mut conn, query)).ok();
}
Command::Execute {
query,
@@ -352,7 +352,11 @@ impl ConnectionWorker {
.await?
}
pub(crate) async fn describe(&mut self, query: SqlStr) -> Result<Describe<Sqlite>, Error> {
#[cfg(feature = "offline")]
pub(crate) async fn describe(
&mut self,
query: SqlStr,
) -> Result<sqlx_core::describe::Describe<crate::Sqlite>, Error> {
self.oneshot_cmd(|tx| Command::Describe { query, tx })
.await?
}
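The worker keeps its command-channel shape: with `offline` off, the `Describe` variant, its handler arm, and the async wrapper all disappear together, so nothing serde-related is compiled. A runnable, simplified sketch of that round-trip, using std threads and channels in place of sqlx's flume and oneshot machinery (the `offline` feature must be declared in Cargo.toml for the gated arm to compile in):

use std::sync::mpsc;
use std::thread;

enum Command {
    Execute { query: String },
    // The variant only exists when the feature is on; so does its match arm.
    #[cfg(feature = "offline")]
    Describe { query: String, tx: mpsc::Sender<String> },
}

fn main() {
    let (cmd_tx, cmd_rx) = mpsc::channel::<Command>();

    // Stand-in for sqlx's connection worker thread draining its queue.
    let worker = thread::spawn(move || {
        for cmd in cmd_rx {
            match cmd {
                Command::Execute { query } => println!("execute: {query}"),
                #[cfg(feature = "offline")]
                Command::Describe { query, tx } => {
                    tx.send(format!("shape of: {query}")).ok();
                }
            }
        }
    });

    cmd_tx.send(Command::Execute { query: "SELECT 1".into() }).unwrap();
    drop(cmd_tx); // close the channel so the worker loop ends
    worker.join().unwrap();
}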