fix(macros): smarter .env loading, caching, and invalidation (#4053)

* fix(macros): smarter `.env` loading, caching, and invalidation

* feat(mysql): test `.env` loading in CI

* feat(postgres): test `.env` loading in CI

* feat(macros): allow `DATABASE_URL` to be empty

* fix(examples/postgres): make `cargo-sqlx` executable

* fix(examples/postgres): `cargo sqlx` invocation

* feat(examples/postgres): check offline prepare on more examples

* fix(examples/postgres): the name of this step

* fix(cli): don't suppress error from `dotenv()`

* fix(ci/examples/postgres): don't use heredoc in this step

* fix(ci/examples/postgres): multi-tenant

* fix(ci/examples/sqlite): test `.env` loading

* chore: add CHANGELOG entry
This commit is contained in:
Austin Bonander
2025-10-14 17:31:12 -07:00
committed by GitHub
parent 064d649abd
commit 388c424f48
18 changed files with 622 additions and 297 deletions

View File

@@ -26,7 +26,7 @@ _sqlite = []
# SQLx features
derive = []
macros = []
macros = ["thiserror"]
migrate = ["sqlx-core/migrate"]
sqlx-toml = ["sqlx-core/sqlx-toml", "sqlx-sqlite?/sqlx-toml"]
@@ -66,6 +66,7 @@ tokio = { workspace = true, optional = true }
cfg-if = { workspace = true}
dotenvy = { workspace = true }
thiserror = { workspace = true, optional = true }
hex = { version = "0.4.3" }
heck = { version = "0.5" }

View File

@@ -0,0 +1,3 @@
# Forbid direct reads of the process environment in this crate; all env lookups
# must go through the wrapper so reads can be tracked for macro re-expansion.
[[disallowed-methods]]
path = "std::env::var"
reason = "use `crate::env()` instead, which optionally calls `proc_macro::tracked_env::var()`"

View File

@@ -1,5 +1,4 @@
use proc_macro2::Span;
use std::env;
use std::path::{Path, PathBuf};
pub(crate) fn resolve_path(path: impl AsRef<Path>, err_span: Span) -> syn::Result<PathBuf> {
@@ -25,13 +24,9 @@ pub(crate) fn resolve_path(path: impl AsRef<Path>, err_span: Span) -> syn::Resul
));
}
let base_dir = env::var("CARGO_MANIFEST_DIR").map_err(|_| {
syn::Error::new(
err_span,
"CARGO_MANIFEST_DIR is not set; please use Cargo to build",
)
})?;
let base_dir_path = Path::new(&base_dir);
let mut out_path = crate::manifest_dir().map_err(|e| syn::Error::new(err_span, e))?;
Ok(base_dir_path.join(path))
out_path.push(path);
Ok(out_path)
}

View File

@@ -20,13 +20,14 @@
)]
use cfg_if::cfg_if;
use std::path::PathBuf;
#[cfg(feature = "macros")]
use crate::query::QueryDriver;
pub type Error = Box<dyn std::error::Error>;
pub type Result<T> = std::result::Result<T, Error>;
pub type Result<T, E = Error> = std::result::Result<T, E>;
mod common;
pub mod database;
@@ -84,3 +85,29 @@ where
}
}
}
/// Look up the environment variable `var`, erroring if it is not set.
///
/// Delegates to [`env_opt`] so the tracked-env wrapper is used where available.
pub fn env(var: &str) -> Result<String> {
    match env_opt(var)? {
        Some(value) => Ok(value),
        None => Err(format!("env var {var:?} must be set to use the query macros").into()),
    }
}
/// Look up the environment variable `var`, returning `Ok(None)` if it is unset.
///
/// On builds with `sqlx_macros_unstable` or `procmacro2_semver_exempt`, reads go
/// through `proc_macro::tracked_env::var()` (see the `clippy.toml` rationale);
/// otherwise this falls back to `std::env::var`.
///
/// # Errors
/// Returns an error if the variable is set but its value is not valid UTF-8.
// This is the one place allowed to call `std::env::var` (see clippy.toml).
#[allow(clippy::disallowed_methods)]
pub fn env_opt(var: &str) -> Result<Option<String>> {
    use std::env::VarError;
    // Exactly one of these two bindings is compiled, selected by cfg.
    #[cfg(any(sqlx_macros_unstable, procmacro2_semver_exempt))]
    let res: Result<String, VarError> = proc_macro::tracked_env::var(var);
    #[cfg(not(any(sqlx_macros_unstable, procmacro2_semver_exempt)))]
    let res: Result<String, VarError> = std::env::var(var);
    match res {
        Ok(val) => Ok(Some(val)),
        // An unset variable is not an error; callers decide how to handle it.
        Err(VarError::NotPresent) => Ok(None),
        Err(VarError::NotUnicode(_)) => Err(format!("env var {var:?} is not valid UTF-8").into()),
    }
}
/// The value of `CARGO_MANIFEST_DIR` as a path; errors if Cargo did not set it.
pub fn manifest_dir() -> Result<PathBuf> {
    let dir = env("CARGO_MANIFEST_DIR")?;
    Ok(PathBuf::from(dir))
}

View File

@@ -0,0 +1,97 @@
use std::path::{Path, PathBuf};
use std::sync::Mutex;
use std::time::SystemTime;
/// A cached value derived from one or more files, which is automatically invalidated
/// if the modified-time of any watched file changes.
pub struct MtimeCache<T> {
    // `None` until first initialized; reset to `None` if the lock is poisoned.
    inner: Mutex<Option<MtimeCacheInner<T>>>,
}
/// Records the set of files a cached value depends on, with the mtime of each
/// as observed at `add_path` time (`None` if the file could not be stat'ed).
pub struct MtimeCacheBuilder {
    file_mtimes: Vec<(PathBuf, Option<SystemTime>)>,
}
/// A cached value paired with the file-mtime snapshot it was built against.
struct MtimeCacheInner<T> {
    // Snapshot of watched files taken while computing `cached`.
    builder: MtimeCacheBuilder,
    cached: T,
}
impl<T: Clone> MtimeCache<T> {
    /// Create an empty cache with no value and no watched files.
    pub fn new() -> Self {
        Self {
            inner: Mutex::new(None),
        }
    }

    /// Get the cached value, or (re)initialize it if it does not exist or a file's mtime has changed.
    pub fn get_or_try_init<E>(
        &self,
        init: impl FnOnce(&mut MtimeCacheBuilder) -> Result<T, E>,
    ) -> Result<T, E> {
        let mut guard = match self.inner.lock() {
            Ok(guard) => guard,
            // If a previous call panicked mid-update, discard the possibly
            // inconsistent entry and rebuild from scratch.
            Err(poisoned) => {
                let mut guard = poisoned.into_inner();
                *guard = None;
                guard
            }
        };

        // Fast path: a value is cached and none of its watched files changed.
        if let Some(entry) = guard.as_ref() {
            if !entry.builder.any_modified() {
                return Ok(entry.cached.clone());
            }
        }

        // Slow path: (re)build the value, recording the files it depends on.
        let mut builder = MtimeCacheBuilder::new();
        let cached = init(&mut builder)?;
        *guard = Some(MtimeCacheInner {
            builder,
            cached: cached.clone(),
        });
        Ok(cached)
    }
}
impl MtimeCacheBuilder {
    /// Start an empty snapshot; files are added via [`Self::add_path`].
    fn new() -> Self {
        Self {
            file_mtimes: Vec::new(),
        }
    }

    /// Add a file path to watch.
    ///
    /// The cached value will be automatically invalidated if the modified-time of the file changes,
    /// or if the file does not exist but is created sometime after this call.
    pub fn add_path(&mut self, path: PathBuf) {
        let mtime = get_mtime(&path);
        // On unstable builds, also tell the compiler to re-expand when this path changes.
        #[cfg(any(sqlx_macros_unstable, procmacro2_semver_exempt))]
        {
            proc_macro::tracked_path::path(&path);
        }
        self.file_mtimes.push((path, mtime));
    }

    /// `true` if any watched file's current mtime differs from the snapshot
    /// taken in [`Self::add_path`] (including appearing or disappearing).
    fn any_modified(&self) -> bool {
        self.file_mtimes
            .iter()
            .any(|(path, expected_mtime)| get_mtime(path) != *expected_mtime)
    }
}
/// Best-effort lookup of a file's last-modified time.
///
/// Returns `None` if the file cannot be stat'ed (e.g. it does not exist) or the
/// platform does not report modification times.
fn get_mtime(path: &Path) -> Option<SystemTime> {
    let metadata = std::fs::metadata(path).ok()?;
    metadata.modified().ok()
}

View File

@@ -1,17 +1,18 @@
use std::collections::HashMap;
use std::fmt::{Debug, Display, Formatter};
use std::fs;
use std::io::Write as _;
use std::marker::PhantomData;
use std::path::{Path, PathBuf};
use std::sync::{LazyLock, Mutex};
use std::sync::{Arc, LazyLock, Mutex};
use serde::{Serialize, Serializer};
use sqlx_core::database::Database;
use sqlx_core::describe::Describe;
use sqlx_core::HashMap;
use crate::database::DatabaseExt;
use crate::query::cache::MtimeCache;
#[derive(serde::Serialize)]
#[serde(bound(serialize = "Describe<DB>: serde::Serialize"))]
@@ -64,7 +65,7 @@ impl<DB: Database> Serialize for SerializeDbName<DB> {
}
}
static OFFLINE_DATA_CACHE: LazyLock<Mutex<HashMap<PathBuf, DynQueryData>>> =
static OFFLINE_DATA_CACHE: LazyLock<Mutex<HashMap<PathBuf, Arc<MtimeCache<DynQueryData>>>>> =
LazyLock::new(Default::default);
/// Offline query data
@@ -79,47 +80,33 @@ pub struct DynQueryData {
impl DynQueryData {
/// Loads a query given the path to its "query-<hash>.json" file. Subsequent calls for the same
/// path are retrieved from an in-memory cache.
pub fn from_data_file(path: impl AsRef<Path>, query: &str) -> crate::Result<Self> {
let path = path.as_ref();
let mut cache = OFFLINE_DATA_CACHE
pub fn from_data_file(path: &Path, query: &str) -> crate::Result<Self> {
let cache = OFFLINE_DATA_CACHE
.lock()
// Just reset the cache on error
.unwrap_or_else(|poison_err| {
let mut guard = poison_err.into_inner();
*guard = Default::default();
guard
});
if let Some(cached) = cache.get(path).cloned() {
if query != cached.query {
})
.entry_ref(path)
.or_insert_with(|| Arc::new(MtimeCache::new()))
.clone();
cache.get_or_try_init(|builder| {
builder.add_path(path.into());
let offline_data_contents = fs::read_to_string(path).map_err(|e| {
format!("failed to read saved query path {}: {}", path.display(), e)
})?;
let dyn_data: DynQueryData = serde_json::from_str(&offline_data_contents)?;
if query != dyn_data.query {
return Err("hash collision for saved query data".into());
}
return Ok(cached);
}
#[cfg(procmacro2_semver_exempt)]
{
let path = path.as_ref().canonicalize()?;
let path = path.to_str().ok_or_else(|| {
format!(
"query-<hash>.json path cannot be represented as a string: {:?}",
path
)
})?;
proc_macro::tracked_path::path(path);
}
let offline_data_contents = fs::read_to_string(path)
.map_err(|e| format!("failed to read saved query path {}: {}", path.display(), e))?;
let dyn_data: DynQueryData = serde_json::from_str(&offline_data_contents)?;
if query != dyn_data.query {
return Err("hash collision for saved query data".into());
}
let _ = cache.insert(path.to_owned(), dyn_data.clone());
Ok(dyn_data)
Ok(dyn_data)
})
}
}
@@ -149,41 +136,71 @@ where
}
}
pub(super) fn save_in(&self, dir: impl AsRef<Path>) -> crate::Result<()> {
pub(super) fn save_in(&self, dir: &Path) -> crate::Result<()> {
use std::io::ErrorKind;
let path = dir.as_ref().join(format!("query-{}.json", self.hash));
match std::fs::remove_file(&path) {
Ok(()) => {}
Err(err)
if matches!(
err.kind(),
ErrorKind::NotFound | ErrorKind::PermissionDenied,
) => {}
Err(err) => return Err(format!("failed to delete {path:?}: {err:?}").into()),
let path = dir.join(format!("query-{}.json", self.hash));
if let Err(err) = fs::remove_file(&path) {
match err.kind() {
ErrorKind::NotFound | ErrorKind::PermissionDenied => (),
ErrorKind::NotADirectory => {
return Err(format!(
"sqlx offline path exists, but is not a directory: {dir:?}"
)
.into());
}
_ => return Err(format!("failed to delete {path:?}: {err:?}").into()),
}
}
let mut file = match std::fs::OpenOptions::new()
// Prevent tearing from concurrent invocations possibly trying to write the same file
// by using the existence of the file itself as a mutex.
//
// By deleting the file first and then using `.create_new(true)`,
// we guarantee that this only succeeds if another invocation hasn't concurrently
// re-created the file.
let mut file = match fs::OpenOptions::new()
.write(true)
.create_new(true)
.open(&path)
{
Ok(file) => file,
// We overlapped with a concurrent invocation and the other one succeeded.
Err(err) if matches!(err.kind(), ErrorKind::AlreadyExists) => return Ok(()),
Err(err) => {
return Err(format!("failed to exclusively create {path:?}: {err:?}").into())
return match err.kind() {
// We overlapped with a concurrent invocation and the other one succeeded.
ErrorKind::AlreadyExists => Ok(()),
ErrorKind::NotFound => {
Err(format!("sqlx offline path does not exist: {dir:?}").into())
}
ErrorKind::NotADirectory => Err(format!(
"sqlx offline path exists, but is not a directory: {dir:?}"
)
.into()),
_ => Err(format!("failed to exclusively create {path:?}: {err:?}").into()),
};
}
};
let data = serde_json::to_string_pretty(self)
.map_err(|err| format!("failed to serialize query data: {err:?}"))?;
file.write_all(data.as_bytes())
.map_err(|err| format!("failed to write query data to file: {err:?}"))?;
// From a quick survey of the files generated by `examples/postgres/axum-social-with-tests`,
// which are generally in the 1-2 KiB range, this seems like a safe bet to avoid
// lots of reallocations without using too much memory.
//
// As of writing, `serde_json::to_vec_pretty()` only allocates 128 bytes up-front.
let mut data = Vec::with_capacity(4096);
serde_json::to_writer_pretty(&mut data, self).expect("BUG: failed to serialize query data");
// Ensure there is a newline at the end of the JSON file to avoid
// accidental modification by IDE and make github diff tool happier.
file.write_all(b"\n")
.map_err(|err| format!("failed to append a newline to file: {err:?}"))?;
data.push(b'\n');
// This ideally writes the data in as few syscalls as possible.
file.write_all(&data)
.map_err(|err| format!("failed to write query data to file {path:?}: {err:?}"))?;
// We don't really need to call `.sync_data()` since it's trivial to re-run the macro
// in the event a power loss results in incomplete flushing of the data to disk.
Ok(())
}

View File

@@ -0,0 +1,162 @@
use sqlx_core::config::Config;
use std::hash::{BuildHasherDefault, DefaultHasher};
use std::io;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use crate::query::cache::{MtimeCache, MtimeCacheBuilder};
use sqlx_core::HashMap;
/// Per-crate state shared by the query macros: the manifest dir, the parsed
/// config, a cached env-var snapshot, and the lazily-fetched workspace root.
pub struct Metadata {
    pub manifest_dir: PathBuf,
    pub config: Config,
    // Cached env snapshot, invalidated when any watched `.env` file changes.
    env: MtimeCache<Arc<MacrosEnv>>,
    // Memoized result of `cargo metadata`; populated on first use.
    workspace_root: Arc<Mutex<Option<PathBuf>>>,
}
/// Snapshot of the environment variables the macros care about, merged from
/// the process environment and any discovered `.env` files.
pub struct MacrosEnv {
    // Value of the variable named by `config.common.database_url_var()`.
    pub database_url: Option<String>,
    // `SQLX_OFFLINE_DIR`, if set.
    pub offline_dir: Option<PathBuf>,
    // `SQLX_OFFLINE`, parsed as a truthy bool, if set.
    pub offline: Option<bool>,
}
impl Metadata {
    /// Get the env-var snapshot, (re)loading it if any watched `.env` file changed.
    pub fn env(&self) -> crate::Result<Arc<MacrosEnv>> {
        self.env
            .get_or_try_init(|builder| load_env(&self.manifest_dir, &self.config, builder))
    }

    /// The workspace root for this crate, looked up via `cargo metadata` on
    /// first use and memoized thereafter.
    ///
    /// Needed because in a workspace, `CARGO_MANIFEST_DIR` points at the member
    /// crate, not the workspace dir: https://github.com/rust-lang/cargo/issues/3946
    ///
    /// # Panics
    /// Panics if `cargo metadata` cannot be run, fails, or produces invalid output.
    pub fn workspace_root(&self) -> PathBuf {
        let mut root = self.workspace_root.lock().unwrap();
        if root.is_none() {
            use serde::Deserialize;
            use std::process::Command;
            let cargo = crate::env("CARGO").unwrap();
            let output = Command::new(cargo)
                .args(["metadata", "--format-version=1", "--no-deps"])
                .current_dir(&self.manifest_dir)
                .env_remove("__CARGO_FIX_PLZ")
                .output()
                .expect("Could not fetch metadata");
            // Surface a failed `cargo metadata` invocation directly instead of
            // letting it manifest as an opaque JSON parse error below.
            if !output.status.success() {
                panic!(
                    "`cargo metadata` exited with {}: {}",
                    output.status,
                    String::from_utf8_lossy(&output.stderr)
                );
            }
            #[derive(Deserialize)]
            struct CargoMetadata {
                workspace_root: PathBuf,
            }
            let metadata: CargoMetadata =
                serde_json::from_slice(&output.stdout).expect("Invalid `cargo metadata` output");
            *root = Some(metadata.workspace_root);
        }
        root.clone().unwrap()
    }
}
/// Get (or build) the [`Metadata`] for the crate currently being expanded,
/// keyed by its `CARGO_MANIFEST_DIR` and cached process-wide.
pub fn try_for_crate() -> crate::Result<Arc<Metadata>> {
    /// The `MtimeCache` in this type covers the config itself,
    /// any changes to which will indirectly invalidate the loaded env vars as well.
    #[expect(clippy::type_complexity)]
    static METADATA: Mutex<
        HashMap<String, Arc<MtimeCache<Arc<Metadata>>>, BuildHasherDefault<DefaultHasher>>,
    > = Mutex::new(HashMap::with_hasher(BuildHasherDefault::new()));
    let manifest_dir = crate::env("CARGO_MANIFEST_DIR")?;
    // `entry_ref` (hashbrown API on `sqlx_core::HashMap`) only clones the key
    // string when an insert actually happens.
    let cache = METADATA
        .lock()
        .expect("BUG: we shouldn't panic while holding this lock")
        .entry_ref(&manifest_dir)
        .or_insert_with(|| Arc::new(MtimeCache::new()))
        .clone();
    cache.get_or_try_init(|builder| {
        let manifest_dir = PathBuf::from(manifest_dir);
        let config_path = manifest_dir.join("sqlx.toml");
        // Watch `sqlx.toml` so edits (or its creation) rebuild this entry.
        builder.add_path(config_path.clone());
        let config = Config::try_from_path_or_default(config_path)?;
        Ok(Arc::new(Metadata {
            manifest_dir,
            config,
            env: MtimeCache::new(),
            workspace_root: Default::default(),
        }))
    })
}
/// Load `DATABASE_URL`/`SQLX_OFFLINE`/`SQLX_OFFLINE_DIR` from `.env` files in
/// `manifest_dir` and its ancestors, then overlay the actual process env vars,
/// registering every consulted path with `builder` for cache invalidation.
fn load_env(
    manifest_dir: &Path,
    config: &Config,
    builder: &mut MtimeCacheBuilder,
) -> crate::Result<Arc<MacrosEnv>> {
    #[derive(thiserror::Error, Debug)]
    #[error("error reading dotenv file {path:?}")]
    struct DotenvError {
        path: PathBuf,
        #[source]
        error: dotenvy::Error,
    }
    let mut from_dotenv = MacrosEnv {
        database_url: None,
        offline_dir: None,
        offline: None,
    };
    // Walk from the manifest dir outward toward the filesystem root.
    // NOTE(review): values are assigned unconditionally below, so an outer
    // (ancestor) `.env` overwrites an inner one — confirm this precedence is
    // intended; dotenv convention is usually nearest-wins.
    for dir in manifest_dir.ancestors() {
        let path = dir.join(".env");
        let dotenv = match dotenvy::from_path_iter(&path) {
            Ok(iter) => {
                builder.add_path(path.clone());
                iter
            }
            Err(dotenvy::Error::Io(e)) if e.kind() == io::ErrorKind::NotFound => {
                // No `.env` here: watch the directory itself so the cache is
                // invalidated if one is created later.
                builder.add_path(dir.to_path_buf());
                continue;
            }
            Err(e) => {
                builder.add_path(path.clone());
                return Err(DotenvError { path, error: e }.into());
            }
        };
        for res in dotenv {
            let (name, val) = res.map_err(|e| DotenvError {
                path: path.clone(),
                error: e,
            })?;
            match &*name {
                "SQLX_OFFLINE_DIR" => from_dotenv.offline_dir = Some(val.into()),
                "SQLX_OFFLINE" => from_dotenv.offline = Some(is_truthy_bool(&val)),
                // The database-URL variable name is configurable via `sqlx.toml`.
                _ if name == config.common.database_url_var() => {
                    from_dotenv.database_url = Some(val)
                }
                _ => continue,
            }
        }
    }
    Ok(Arc::new(MacrosEnv {
        // Variables set in the process environment take precedence over `.env` values.
        database_url: crate::env_opt(config.common.database_url_var())?
            .or(from_dotenv.database_url),
        offline_dir: crate::env_opt("SQLX_OFFLINE_DIR")?
            .map(PathBuf::from)
            .or(from_dotenv.offline_dir),
        offline: crate::env_opt("SQLX_OFFLINE")?
            .map(|val| is_truthy_bool(&val))
            .or(from_dotenv.offline),
    }))
}
/// Returns `true` if `val` is `"true"` (in any ASCII casing) or `"1"`.
fn is_truthy_bool(val: &str) -> bool {
    match val {
        "1" => true,
        other => other.eq_ignore_ascii_case("true"),
    }
}

View File

@@ -1,7 +1,4 @@
use std::collections::{hash_map, HashMap};
use std::path::{Path, PathBuf};
use std::sync::{Arc, LazyLock, Mutex};
use std::{fs, io};
use proc_macro2::TokenStream;
use syn::Type;
@@ -14,20 +11,25 @@ use sqlx_core::{column::Column, describe::Describe, type_info::TypeInfo};
use crate::database::DatabaseExt;
use crate::query::data::{hash_string, DynQueryData, QueryData};
use crate::query::input::RecordType;
use crate::query::metadata::MacrosEnv;
use either::Either;
use metadata::Metadata;
use sqlx_core::config::Config;
use url::Url;
mod args;
mod cache;
mod data;
mod input;
mod metadata;
mod output;
#[derive(Copy, Clone)]
pub struct QueryDriver {
db_name: &'static str,
url_schemes: &'static [&'static str],
expand: fn(&Config, QueryMacroInput, QueryDataSource) -> crate::Result<TokenStream>,
expand:
fn(&Config, QueryMacroInput, QueryDataSource, Option<&Path>) -> crate::Result<TokenStream>,
}
impl QueryDriver {
@@ -68,138 +70,64 @@ impl<'a> QueryDataSource<'a> {
}
}
}
struct Metadata {
#[allow(unused)]
manifest_dir: PathBuf,
offline: bool,
database_url: Option<String>,
offline_dir: Option<String>,
config: Config,
workspace_root: Arc<Mutex<Option<PathBuf>>>,
}
impl Metadata {
pub fn workspace_root(&self) -> PathBuf {
let mut root = self.workspace_root.lock().unwrap();
if root.is_none() {
use serde::Deserialize;
use std::process::Command;
let cargo = env("CARGO").expect("`CARGO` must be set");
let output = Command::new(cargo)
.args(["metadata", "--format-version=1", "--no-deps"])
.current_dir(&self.manifest_dir)
.env_remove("__CARGO_FIX_PLZ")
.output()
.expect("Could not fetch metadata");
#[derive(Deserialize)]
struct CargoMetadata {
workspace_root: PathBuf,
}
let metadata: CargoMetadata =
serde_json::from_slice(&output.stdout).expect("Invalid `cargo metadata` output");
*root = Some(metadata.workspace_root);
}
root.clone().unwrap()
}
}
static METADATA: LazyLock<Mutex<HashMap<String, Metadata>>> = LazyLock::new(Default::default);
// If we are in a workspace, lookup `workspace_root` since `CARGO_MANIFEST_DIR` won't
// reflect the workspace dir: https://github.com/rust-lang/cargo/issues/3946
fn init_metadata(manifest_dir: &String) -> crate::Result<Metadata> {
let manifest_dir: PathBuf = manifest_dir.into();
let (database_url, offline, offline_dir) = load_dot_env(&manifest_dir);
let offline = env("SQLX_OFFLINE")
.ok()
.or(offline)
.map(|s| s.eq_ignore_ascii_case("true") || s == "1")
.unwrap_or(false);
let offline_dir = env("SQLX_OFFLINE_DIR").ok().or(offline_dir);
let config = Config::try_from_crate_or_default()?;
let database_url = env(config.common.database_url_var()).ok().or(database_url);
Ok(Metadata {
manifest_dir,
offline,
database_url,
offline_dir,
config,
workspace_root: Arc::new(Mutex::new(None)),
})
}
pub fn expand_input<'a>(
input: QueryMacroInput,
drivers: impl IntoIterator<Item = &'a QueryDriver>,
) -> crate::Result<TokenStream> {
let manifest_dir = env("CARGO_MANIFEST_DIR").expect("`CARGO_MANIFEST_DIR` must be set");
let metadata = metadata::try_for_crate()?;
let mut metadata_lock = METADATA
.lock()
// Just reset the metadata on error
.unwrap_or_else(|poison_err| {
let mut guard = poison_err.into_inner();
*guard = Default::default();
guard
});
let metadata_env = metadata.env()?;
let metadata = match metadata_lock.entry(manifest_dir) {
hash_map::Entry::Occupied(occupied) => occupied.into_mut(),
hash_map::Entry::Vacant(vacant) => {
let metadata = init_metadata(vacant.key())?;
vacant.insert(metadata)
}
};
let data_source = match &metadata {
Metadata {
offline: false,
let data_source = match &*metadata_env {
MacrosEnv {
offline: None | Some(false),
database_url: Some(db_url),
..
} => QueryDataSource::live(db_url)?,
Metadata { offline, .. } => {
}
// Allow `DATABASE_URL=''`
if !db_url.is_empty() => QueryDataSource::live(db_url)?,
MacrosEnv {
offline,
offline_dir,
..
} => {
// Try load the cached query metadata file.
let filename = format!("query-{}.json", hash_string(&input.sql));
// Check SQLX_OFFLINE_DIR, then local .sqlx, then workspace .sqlx.
let dirs = [
|meta: &Metadata| meta.offline_dir.as_deref().map(PathBuf::from),
|meta: &Metadata| Some(meta.manifest_dir.join(".sqlx")),
|meta: &Metadata| Some(meta.workspace_root().join(".sqlx")),
|_: &Metadata, offline_dir: Option<&Path>| offline_dir.map(PathBuf::from),
|meta: &Metadata, _: Option<&Path>| Some(meta.manifest_dir.join(".sqlx")),
|meta: &Metadata, _: Option<&Path>| Some(meta.workspace_root().join(".sqlx")),
];
let Some(data_file_path) = dirs
.iter()
.filter_map(|path| path(metadata))
.filter_map(|path| path(&metadata, offline_dir.as_deref()))
.map(|path| path.join(&filename))
.find(|path| path.exists())
else {
return Err(
if *offline {
if offline.unwrap_or(false) {
"`SQLX_OFFLINE=true` but there is no cached data for this query, run `cargo sqlx prepare` to update the query cache or unset `SQLX_OFFLINE`"
} else {
"set `DATABASE_URL` to use query macros online, or run `cargo sqlx prepare` to update the query cache"
}.into()
);
};
QueryDataSource::Cached(DynQueryData::from_data_file(&data_file_path, &input.sql)?)
}
};
for driver in drivers {
if data_source.matches_driver(driver) {
return (driver.expand)(&metadata.config, input, data_source);
return (driver.expand)(
&metadata.config,
input,
data_source,
metadata_env.offline_dir.as_deref(),
);
}
}
@@ -224,19 +152,21 @@ fn expand_with<DB: DatabaseExt>(
config: &Config,
input: QueryMacroInput,
data_source: QueryDataSource,
offline_dir: Option<&Path>,
) -> crate::Result<TokenStream>
where
Describe<DB>: DescribeExt,
{
let (query_data, offline): (QueryData<DB>, bool) = match data_source {
QueryDataSource::Cached(dyn_data) => (QueryData::from_dyn_data(dyn_data)?, true),
let (query_data, save_dir): (QueryData<DB>, Option<&Path>) = match data_source {
// If the build is offline, the cache is our input so it's pointless to also write data for it.
QueryDataSource::Cached(dyn_data) => (QueryData::from_dyn_data(dyn_data)?, None),
QueryDataSource::Live { database_url, .. } => {
let describe = DB::describe_blocking(&input.sql, database_url, &config.drivers)?;
(QueryData::from_describe(&input.sql, describe), false)
(QueryData::from_describe(&input.sql, describe), offline_dir)
}
};
expand_with_data(config, input, query_data, offline)
expand_with_data(config, input, query_data, save_dir)
}
// marker trait for `Describe` that lets us conditionally require it to be `Serialize + Deserialize`
@@ -257,7 +187,7 @@ fn expand_with_data<DB: DatabaseExt>(
config: &Config,
input: QueryMacroInput,
data: QueryData<DB>,
offline: bool,
save_dir: Option<&Path>,
) -> crate::Result<TokenStream>
where
Describe<DB>: DescribeExt,
@@ -380,99 +310,9 @@ where
}
};
// Store query metadata only if offline support is enabled but the current build is online.
// If the build is offline, the cache is our input so it's pointless to also write data for it.
if !offline {
// Only save query metadata if SQLX_OFFLINE_DIR is set manually or by `cargo sqlx prepare`.
// Note: in a cargo workspace this path is relative to the root.
if let Ok(dir) = env("SQLX_OFFLINE_DIR") {
let path = PathBuf::from(&dir);
match fs::metadata(&path) {
Err(e) => {
if e.kind() != io::ErrorKind::NotFound {
// Can't obtain information about .sqlx
return Err(format!("{e}: {dir}").into());
}
// .sqlx doesn't exist.
return Err(format!("sqlx offline path does not exist: {dir}").into());
}
Ok(meta) => {
if !meta.is_dir() {
return Err(format!(
"sqlx offline path exists, but is not a directory: {dir}"
)
.into());
}
// .sqlx exists and is a directory, store data.
data.save_in(path)?;
}
}
}
if let Some(save_dir) = save_dir {
data.save_in(save_dir)?;
}
Ok(ret_tokens)
}
/// Get the value of an environment variable, telling the compiler about it if applicable.
fn env(name: &str) -> Result<String, std::env::VarError> {
#[cfg(procmacro2_semver_exempt)]
{
proc_macro::tracked_env::var(name)
}
#[cfg(not(procmacro2_semver_exempt))]
{
std::env::var(name)
}
}
/// Get `DATABASE_URL`, `SQLX_OFFLINE` and `SQLX_OFFLINE_DIR` from the `.env`.
fn load_dot_env(manifest_dir: &Path) -> (Option<String>, Option<String>, Option<String>) {
let mut env_path = manifest_dir.join(".env");
// If a .env file exists at CARGO_MANIFEST_DIR, load environment variables from this,
// otherwise fallback to default dotenv file.
#[cfg_attr(not(procmacro2_semver_exempt), allow(unused_variables))]
let env_file = if env_path.exists() {
let res = dotenvy::from_path_iter(&env_path);
match res {
Ok(iter) => Some(iter),
Err(e) => panic!("failed to load environment from {env_path:?}, {e}"),
}
} else {
#[allow(unused_assignments)]
{
env_path = PathBuf::from(".env");
}
dotenvy::dotenv_iter().ok()
};
let mut offline = None;
let mut database_url = None;
let mut offline_dir = None;
if let Some(env_file) = env_file {
// tell the compiler to watch the `.env` for changes.
#[cfg(procmacro2_semver_exempt)]
if let Some(env_path) = env_path.to_str() {
proc_macro::tracked_path::path(env_path);
}
for item in env_file {
let Ok((key, value)) = item else {
continue;
};
match key.as_str() {
"DATABASE_URL" => database_url = Some(value),
"SQLX_OFFLINE" => offline = Some(value),
"SQLX_OFFLINE_DIR" => offline_dir = Some(value),
_ => {}
};
}
}
(database_url, offline, offline_dir)
}