Compare commits

..

3 commits

Author SHA1 Message Date
Brent Schroeter
577cd76f2c bump version to v0.0.3 2026-01-11 20:43:23 +00:00
Brent Schroeter
b2d1bdcaac evaluate build() fns implicitly when executing 2026-01-11 20:42:42 +00:00
Brent Schroeter
4a76f13d58 improve documentation 2026-01-11 20:39:21 +00:00
12 changed files with 194 additions and 89 deletions

View file

@ -381,7 +381,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "ferrtable"
version = "0.0.1"
version = "0.0.3"
dependencies = [
"chrono",
"derive_builder",

View file

@ -32,7 +32,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
let client = Client::new_from_access_token(&settings.access_token)?;
println!("Testing Client::list_bases()...");
let mut bases = client.list_bases().build()?.stream_items();
let mut bases = client.list_bases().stream_items()?;
while let Some(res) = bases.next().await {
dbg!(res?);
}
@ -44,19 +44,17 @@ async fn main() -> Result<(), Box<dyn Error>> {
notes: Some("Just for fun, no other reason.".to_owned()),
status: Some(RecordStatus::InProgress),
}])
.with_base_id(settings.base_id.clone())
.with_table_id(settings.table_id.clone())
.build()?
.with_base_id(&settings.base_id)
.with_table_id(&settings.table_id)
.execute()
.await?;
println!("Testing Client::list_records()...");
let records = client
.list_records()
.with_base_id(settings.base_id.clone())
.with_table_id(settings.table_id.clone())
.build()?
.stream_items::<TestRecord>()
.with_base_id(&settings.base_id)
.with_table_id(&settings.table_id)
.stream_items::<TestRecord>()?
.collect::<Vec<_>>()
.await
.into_iter()
@ -67,20 +65,18 @@ async fn main() -> Result<(), Box<dyn Error>> {
let record = client
.get_record()
.with_base_id(settings.base_id.clone())
.with_table_id(settings.table_id.clone())
.with_record_id("does_not_exist".to_string())
.build()?
.with_base_id(&settings.base_id)
.with_table_id(&settings.table_id)
.with_record_id("does_not_exist")
.fetch_optional::<TestRecord>()
.await?;
assert!(record.is_none());
let record = client
.get_record()
.with_base_id(settings.base_id.clone())
.with_table_id(settings.table_id.clone())
.with_record_id(records.first().unwrap().id.clone())
.build()?
.with_base_id(&settings.base_id)
.with_table_id(&settings.table_id)
.with_record_id(&records.first().unwrap().id)
.fetch_optional::<TestRecord>()
.await?;
dbg!(record);

View file

@ -1,6 +1,6 @@
[package]
name = "ferrtable"
version = "0.0.2"
version = "0.0.3"
categories = ["api-bindings"]
description = "Ferris the crab's favorite Airtable library"
homepage = "https://forge.secondsystemtech.com/brent/ferrtable"

View file

@ -7,16 +7,16 @@ that implement the `Clone`, `serde::Deserialize`, and `serde::Serialize` traits.
This crate follows in the footsteps of the
[airtable-api](https://crates.io/crates/airtable-api) crate from Oxide Computer
Company, which appears to have been archived and unmaintained since 2022.
By comparison, Ferrtable aims to provide a more flexible and expressive client
Company, which appears to have been archived and unmaintained since 2022. By
comparison, Ferrtable aims to provide a more flexible and expressive client
interface as well as greater control over paginated responses with the help of
async [streams](https://doc.rust-lang.org/book/ch17-04-streams.html).
## Status: Work in Progress
Only a limited set of operations (e.g., creating and listing records) are
currently supported. The goal is to implement coverage for at least the full
set of non-enterprise API endpoints, but my initial emphasis is on getting a
currently supported. The goal is to implement coverage for at least the full set
of non-enterprise API endpoints, but my initial emphasis is on getting a
relatively small subset built and tested well.
## Usage
@ -68,19 +68,17 @@ async fn main() -> Result<(), Box<dyn Error>> {
status: Status::InProgress,
attachments: vec![],
}])
.with_base_id("***".to_owned())
.with_table_id("***".to_owned())
.build()?
.with_base_id("***")
.with_table_id("***")
.execute()
.await?;
let mut rec_stream = client
.list_records()
.with_base_id("***".to_owned())
.with_table_id("***".to_owned())
.with_filter("{status} = 'Todo' || {status} = 'In Progress'".to_owned())
.build()?
.stream_items::<MyRecord>();
.with_base_id("***")
.with_table_id("***")
.with_filter("{status} = 'Todo' || {status} = 'In Progress'")
.stream_items::<MyRecord>()?;
while let Some(result) = rec_stream.next().await {
dbg!(result?.fields);
@ -89,3 +87,57 @@ async fn main() -> Result<(), Box<dyn Error>> {
Ok(())
}
```
## Crate Release Process
Maintainers: Eventually the release process should be automated, but until CI
runners are available for the repo, follow this checklist.
1. Lint:
```sh
cd ferrtable
cargo fmt --check
cargo clippy
cd ../ferrtable-test
cargo fmt --check
cargo clippy
```
2. Run integration tests:
```sh
cd ferrtable-test
cat <<EOF > ferrtable-test.config.toml
access_token = "<AIRTABLE ACCESS TOKEN>"
base_id = "<BASE ID>"
table_id = "<TABLE ID>"
EOF
cargo run
```
3. Run unit tests:
```sh
cd ferrtable
# At the time of this writing, nextest doesn't actually have anything to run,
# but it may in the future as tests are added outside of doc comments.
cargo nextest run --all-features --no-tests=warn
cargo test --doc
```
4. Bump `version` in `Cargo.toml`.
5. Update main package version in `ferrtable-test` lockfile:
```sh
cd ferrtable-test
cargo build
```
6. Commit and push changes.
7. Publish:
```sh
cargo publish
```

View file

@ -57,9 +57,8 @@ impl Client {
/// ("status".to_owned(), "In progress".to_owned()),
/// ]),
/// ])
/// .with_base_id("***".to_owned())
/// .with_table_id("***".to_owned())
/// .build()?
/// .with_base_id("***")
/// .with_table_id("***")
/// .execute()
/// .await?;
/// # Ok(())
@ -94,10 +93,9 @@ impl Client {
/// # let client = Client::new_from_access_token("*****")?;
/// let result = client
/// .get_record()
/// .with_base_id("***".to_owned())
/// .with_table_id("***".to_owned())
/// .with_record_id("***".to_owned())
/// .build()?
/// .with_base_id("***")
/// .with_table_id("***")
/// .with_record_id("***")
/// .fetch_optional::<HashMap<String, String>>()
/// .await?;
/// dbg!(result);
@ -124,8 +122,7 @@ impl Client {
/// # let client = Client::new_from_access_token("*****")?;
/// let mut base_stream = client
/// .list_bases()
/// .build()?
/// .stream_items();
/// .stream_items()?;
///
/// while let Some(result) = base_stream.next().await {
/// dbg!(result?);
@ -155,10 +152,9 @@ impl Client {
/// # let client = Client::new_from_access_token("*****")?;
/// let mut rec_stream = client
/// .list_records()
/// .with_base_id("***".to_owned())
/// .with_table_id("***".to_owned())
/// .build()?
/// .stream_items::<HashMap<String, serde_json::Value>>();
/// .with_base_id("***")
/// .with_table_id("***")
/// .stream_items::<HashMap<String, serde_json::Value>>()?;
///
/// while let Some(result) = rec_stream.next().await {
/// dbg!(result?.fields);

View file

@ -4,14 +4,23 @@ use derive_builder::Builder;
use percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use crate::{client::Client, errors::ExecutionError, types::AirtableRecord};
use crate::{
client::Client,
errors::{ExecutionError, Result},
types::AirtableRecord,
};
#[derive(Builder, Clone, Debug)]
#[builder(pattern = "owned", setter(prefix = "with"))]
#[builder(
build_fn(error = "ExecutionError", private),
pattern = "owned",
setter(prefix = "with")
)]
pub struct CreateRecordsQuery<T>
where
T: Serialize,
{
#[builder(setter(into))]
base_id: String,
#[builder(vis = "pub(crate)")]
@ -20,6 +29,7 @@ where
#[builder(vis = "pub(crate)")]
records: Vec<T>,
#[builder(setter(into))]
table_id: String,
}
@ -44,7 +54,7 @@ pub struct CreateRecordsDetails {
pub reasons: Vec<String>,
}
impl<T> CreateRecordsQuery<T>
impl<T> CreateRecordsQueryBuilder<T>
where
T: Clone + Debug + DeserializeOwned + Serialize,
{
@ -54,7 +64,9 @@ where
/// underlying `reqwest::Error`. This may be improved in future releases
/// to better differentiate between network, serialization, deserialization,
/// and API errors.
pub async fn execute(self) -> Result<CreateRecordsResponse<T>, ExecutionError> {
pub async fn execute(self) -> Result<CreateRecordsResponse<T>> {
let query = self.build()?;
#[derive(Serialize)]
struct Record<RT: Serialize> {
fields: RT,
@ -63,13 +75,14 @@ where
struct RequestBody<RT: Serialize> {
records: Vec<Record<RT>>,
}
let base_id = utf8_percent_encode(&self.base_id, NON_ALPHANUMERIC).to_string();
let table_id = utf8_percent_encode(&self.table_id, NON_ALPHANUMERIC).to_string();
let http_resp = self
let base_id = utf8_percent_encode(&query.base_id, NON_ALPHANUMERIC).to_string();
let table_id = utf8_percent_encode(&query.table_id, NON_ALPHANUMERIC).to_string();
let http_resp = query
.client
.post_path(&format!("v0/{base_id}/{table_id}"))
.json(&RequestBody {
records: self
records: query
.records
.into_iter()
.map(|rec| Record { fields: rec })

View file

@ -1,9 +1,13 @@
use thiserror::Error;
/// Errors that may occur when making an Airtable API request.
#[derive(Debug, Error)]
pub enum ExecutionError {
#[error("error making http request to airtable api: {0}")]
Reqwest(reqwest::Error),
#[error("incomplete airtable api request information: {0}")]
Builder(derive_builder::UninitializedFieldError),
}
impl From<reqwest::Error> for ExecutionError {
@ -11,3 +15,16 @@ impl From<reqwest::Error> for ExecutionError {
Self::Reqwest(value)
}
}
// In addition to the self-evident purpose of type conversion, this allows our
// type to be specified as the error type for auto-generated `build()` methods,
// by annotating the relevant struct declaration with
// `#[builder(build_fn(error = "crate::errors::ExecutionError"))]`.
impl From<derive_builder::UninitializedFieldError> for ExecutionError {
fn from(value: derive_builder::UninitializedFieldError) -> Self {
Self::Builder(value)
}
}
// Custom `Result` type helps to make complex method signatures more concise.
pub type Result<T> = std::result::Result<T, ExecutionError>;

View file

@ -4,36 +4,50 @@ use derive_builder::Builder;
use percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};
use serde::{Serialize, de::DeserializeOwned};
use crate::{client::Client, errors::ExecutionError, types::AirtableRecord};
use crate::{
client::Client,
errors::{ExecutionError, Result},
types::AirtableRecord,
};
#[derive(Builder, Clone, Debug, Serialize)]
#[builder(pattern = "owned", setter(prefix = "with"))]
#[builder(
build_fn(error = "ExecutionError", private),
pattern = "owned",
setter(prefix = "with")
)]
pub struct GetRecordQuery {
#[builder(setter(into))]
#[serde(skip)]
base_id: String,
#[serde(skip)]
#[builder(vis = "pub(crate)")]
#[serde(skip)]
client: Client,
#[builder(setter(into))]
#[serde(skip)]
record_id: String,
#[builder(setter(into))]
#[serde(skip)]
table_id: String,
}
impl GetRecordQuery {
pub async fn fetch_optional<T>(self) -> Result<Option<AirtableRecord<T>>, ExecutionError>
impl GetRecordQueryBuilder {
pub async fn fetch_optional<T>(self) -> Result<Option<AirtableRecord<T>>>
where
T: Clone + Debug + DeserializeOwned,
{
let base_id = utf8_percent_encode(&self.base_id, NON_ALPHANUMERIC).to_string();
let table_id = utf8_percent_encode(&self.table_id, NON_ALPHANUMERIC).to_string();
let record_id = utf8_percent_encode(&self.record_id, NON_ALPHANUMERIC).to_string();
let http_resp = self
let query = self.build()?;
let http_resp = query
.client
.get_path(&format!("v0/{base_id}/{table_id}/{record_id}"))
.get_path(&format!(
"v0/{base_id}/{table_id}/{record_id}",
base_id = utf8_percent_encode(&query.base_id, NON_ALPHANUMERIC),
table_id = utf8_percent_encode(&query.table_id, NON_ALPHANUMERIC),
record_id = utf8_percent_encode(&query.record_id, NON_ALPHANUMERIC),
))
.send()
.await?;
match http_resp.error_for_status() {

View file

@ -55,19 +55,17 @@
//! status: Status::InProgress,
//! attachments: vec![],
//! }])
//! .with_base_id("***".to_owned())
//! .with_table_id("***".to_owned())
//! .build()?
//! .with_base_id("***")
//! .with_table_id("***")
//! .execute()
//! .await?;
//!
//! let mut rec_stream = client
//! .list_records()
//! .with_base_id("***".to_owned())
//! .with_table_id("***".to_owned())
//! .with_filter(Some("{status} = 'Todo' || {status} = 'In Progress'".to_owned()))
//! .build()?
//! .stream_items::<MyRecord>();
//! .with_base_id("***")
//! .with_table_id("***")
//! .with_filter("{status} = 'Todo' || {status} = 'In Progress'")
//! .stream_items::<MyRecord>()?;
//!
//! while let Some(result) = rec_stream.next().await {
//! dbg!(result?.fields);

View file

@ -6,12 +6,16 @@ use serde::{Deserialize, Serialize};
use crate::{
client::Client,
errors::ExecutionError,
errors::{ExecutionError, Result},
pagination::{PaginatedQuery, PaginatedResponse, execute_paginated},
};
#[derive(Builder, Clone, Debug, Serialize)]
#[builder(pattern = "owned", setter(prefix = "with"))]
#[builder(
build_fn(error = "ExecutionError", private),
pattern = "owned",
setter(prefix = "with")
)]
pub struct ListBasesQuery {
#[serde(skip)]
#[builder(vis = "pub(crate)")]
@ -39,9 +43,10 @@ impl PaginatedQuery<Base, ListBasesResponse> for ListBasesQuery {
}
}
impl ListBasesQuery {
pub fn stream_items(self) -> Pin<Box<impl Stream<Item = Result<Base, ExecutionError>>>> {
execute_paginated::<Base, ListBasesResponse>(self)
impl ListBasesQueryBuilder {
pub fn stream_items(self) -> Result<Pin<Box<impl Stream<Item = Result<Base>>>>> {
self.build()
.map(execute_paginated::<Base, ListBasesResponse>)
}
}

View file

@ -7,25 +7,30 @@ use serde::{Deserialize, Serialize, de::DeserializeOwned};
use crate::{
client::Client,
errors::ExecutionError,
errors::{ExecutionError, Result},
pagination::{PaginatedQuery, PaginatedResponse, execute_paginated},
types::AirtableRecord,
};
#[derive(Builder, Clone, Debug, Serialize)]
#[builder(pattern = "owned", setter(prefix = "with"))]
#[builder(
build_fn(error = "ExecutionError", private),
pattern = "owned",
setter(prefix = "with")
)]
pub struct ListRecordsQuery {
#[builder(setter(into))]
#[serde(skip)]
base_id: String,
#[serde(skip)]
#[builder(vis = "pub(crate)")]
#[serde(skip)]
client: Client,
/// Only data for fields whose names or IDs are in this list will be
/// included in the result. If you don't need every field, you can use this
/// parameter to reduce the amount of data transferred.
#[builder(default)]
#[builder(default, setter(into, strip_option))]
fields: Option<Vec<String>>,
/// A formula used to filter records. The formula will be evaluated for
@ -34,7 +39,7 @@ pub struct ListRecordsQuery {
///
/// If combined with the view parameter, only records in that view which
/// satisfy the formula will be returned.
#[builder(default)]
#[builder(default, setter(into, strip_option))]
// filterByFormula is renamed so that the builder method, that is,
// `.with_filter()`, reads more cleanly.
#[serde(rename = "filterByFormula")]
@ -43,10 +48,11 @@ pub struct ListRecordsQuery {
#[builder(default, private)]
offset: Option<String>,
#[builder(default, setter(into, strip_option))]
#[serde(rename = "pageSize")]
#[builder(default)]
page_size: Option<usize>,
#[builder(setter(into))]
#[serde(skip)]
table_id: String,
}
@ -72,14 +78,13 @@ where
}
}
impl ListRecordsQuery {
pub fn stream_items<T>(
self,
) -> Pin<Box<impl Stream<Item = Result<AirtableRecord<T>, ExecutionError>>>>
impl ListRecordsQueryBuilder {
pub fn stream_items<T>(self) -> Result<Pin<Box<impl Stream<Item = Result<AirtableRecord<T>>>>>>
where
T: Clone + Debug + DeserializeOwned,
{
execute_paginated::<AirtableRecord<T>, ListRecordsResponse<T>>(self)
self.build()
.map(execute_paginated::<AirtableRecord<T>, ListRecordsResponse<T>>)
}
}

View file

@ -35,8 +35,17 @@ where
Q: Clone,
T: Clone,
{
/// Records remaining from the most recently fetched page but not yet
yielded to the stream consumer. The next page should not be fetched
until this collection has been fully consumed.
buffered: VecDeque<T>,
/// Owned copy of the paginated query.
query: Q,
/// When `query.offset` is `None`, this flag indicates whether that is
/// because the first page still needs to be fetched (`started` = `false`)
/// or because there are no pages remaining (`started` = `true`).
started: bool,
}
@ -61,9 +70,9 @@ macro_rules! handle_stream_err {
};
}
// This could be brought into PaginatedQuery as a default implementation, but
// that forces that the traits in this module be exposed outside of the crate
// and additionally results in worse client ergonomics overall.
// This could be folded into the `PaginatedQuery` trait as a default
// implementation, but that forces that the traits in this module be exposed
// outside of the crate and results in worse caller ergonomics overall.
pub(crate) fn execute_paginated<T, R>(
query: impl PaginatedQuery<T, R>,
) -> Pin<Box<impl Stream<Item = Result<T, ExecutionError>>>>
@ -72,7 +81,7 @@ where
R: PaginatedResponse<T>,
{
// Stream has to be pinned to the heap so that the closure inside
// doesn't need to implement Unpin (which I don't think it can).
// doesn't need to implement Unpin.
Box::pin(futures::stream::unfold(
StreamState {
buffered: VecDeque::new(),