Compare commits

No commits in common. "577cd76f2cb9182bd5f157317ec9e71122a7da82" and "7c27482d0809d2802293d63f0003102802ff07e2" have entirely different histories.

12 changed files with 89 additions and 194 deletions

View file

@@ -381,7 +381,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
 [[package]]
 name = "ferrtable"
-version = "0.0.3"
+version = "0.0.1"
 dependencies = [
  "chrono",
  "derive_builder",

View file

@@ -32,7 +32,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new_from_access_token(&settings.access_token)?;

     println!("Testing Client::list_bases()...");
-    let mut bases = client.list_bases().stream_items()?;
+    let mut bases = client.list_bases().build()?.stream_items();
     while let Some(res) = bases.next().await {
         dbg!(res?);
     }
@@ -44,17 +44,19 @@ async fn main() -> Result<(), Box<dyn Error>> {
             notes: Some("Just for fun, no other reason.".to_owned()),
             status: Some(RecordStatus::InProgress),
         }])
-        .with_base_id(&settings.base_id)
-        .with_table_id(&settings.table_id)
+        .with_base_id(settings.base_id.clone())
+        .with_table_id(settings.table_id.clone())
+        .build()?
         .execute()
         .await?;

     println!("Testing Client::list_records()...");
     let records = client
         .list_records()
-        .with_base_id(&settings.base_id)
-        .with_table_id(&settings.table_id)
-        .stream_items::<TestRecord>()?
+        .with_base_id(settings.base_id.clone())
+        .with_table_id(settings.table_id.clone())
+        .build()?
+        .stream_items::<TestRecord>()
         .collect::<Vec<_>>()
         .await
         .into_iter()
@@ -65,18 +67,20 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let record = client
         .get_record()
-        .with_base_id(&settings.base_id)
-        .with_table_id(&settings.table_id)
-        .with_record_id("does_not_exist")
+        .with_base_id(settings.base_id.clone())
+        .with_table_id(settings.table_id.clone())
+        .with_record_id("does_not_exist".to_string())
+        .build()?
         .fetch_optional::<TestRecord>()
         .await?;
     assert!(record.is_none());

     let record = client
         .get_record()
-        .with_base_id(&settings.base_id)
-        .with_table_id(&settings.table_id)
-        .with_record_id(&records.first().unwrap().id)
+        .with_base_id(settings.base_id.clone())
+        .with_table_id(settings.table_id.clone())
+        .with_record_id(records.first().unwrap().id.clone())
+        .build()?
         .fetch_optional::<TestRecord>()
         .await?;
     dbg!(record);
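The hunks above replace builder methods that previously ended the chain (`.execute()`, `.stream_items()?`, `.fetch_optional()`) with an explicit `.build()?` step followed by the terminal call. A minimal sketch of the resulting call shape; the `Client`, `AirtableRecord`, `Settings`, and `TestRecord` types are assumed from ferrtable and the test crate shown in this diff, and are not defined here:

```rust
// Sketch only: assumes the types and builder setters visible on the
// right-hand side of the hunks above (owned-String setters plus build()?).
use futures::StreamExt;

async fn list_all(
    client: &Client,
    settings: &Settings,
) -> Result<Vec<AirtableRecord<TestRecord>>, Box<dyn std::error::Error>> {
    let records = client
        .list_records()
        .with_base_id(settings.base_id.clone())
        .with_table_id(settings.table_id.clone())
        .build()? // builder validation is surfaced once, here...
        .stream_items::<TestRecord>() // ...so constructing the stream itself cannot fail
        .collect::<Vec<_>>() // Vec<Result<AirtableRecord<TestRecord>, ExecutionError>>
        .await
        .into_iter()
        .collect::<Result<Vec<_>, _>>()?;
    Ok(records)
}
```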

View file

@@ -1,6 +1,6 @@
 [package]
 name = "ferrtable"
-version = "0.0.3"
+version = "0.0.2"
 categories = ["api-bindings"]
 description = "Ferris the crab's favorite Airtable library"
 homepage = "https://forge.secondsystemtech.com/brent/ferrtable"

View file

@@ -7,16 +7,16 @@ that implement the `Clone`, `serde::Deserialize`, and `serde::Serialize` traits.

 This crate follows in the footsteps of the
 [airtable-api](https://crates.io/crates/airtable-api) crate from Oxide Computer
-Company, which appears to have been archived and unmaintained since 2022. By
-comparison, Ferrtable aims to provide a more flexible and expressive client
+Company, which appears to have been archived and unmaintained since 2022.
+By comparison, Ferrtable aims to provide a more flexible and expressive client
 interface as well as greater control over paginated responses with the help of
 async [streams](https://doc.rust-lang.org/book/ch17-04-streams.html).

 ## Status: Work in Progress

 Only a limited set of operations (e.g., creating and listing records) are
-currently supported. The goal is to implement coverage for at least the full set
-of non-enterprise API endpoints, but my initial emphasis is on getting a
+currently supported. The goal is to implement coverage for at least the full
+set of non-enterprise API endpoints, but my initial emphasis is on getting a
 relatively small subset built and tested well.

 ## Usage
@@ -68,17 +68,19 @@ async fn main() -> Result<(), Box<dyn Error>> {
             status: Status::InProgress,
             attachments: vec![],
         }])
-        .with_base_id("***")
-        .with_table_id("***")
+        .with_base_id("***".to_owned())
+        .with_table_id("***".to_owned())
+        .build()?
         .execute()
         .await?;

     let mut rec_stream = client
         .list_records()
-        .with_base_id("***")
-        .with_table_id("***")
-        .with_filter("{status} = 'Todo' || {status} = 'In Progress'")
-        .stream_items::<MyRecord>()?;
+        .with_base_id("***".to_owned())
+        .with_table_id("***".to_owned())
+        .with_filter("{status} = 'Todo' || {status} = 'In Progress'".to_owned())
+        .build()?
+        .stream_items::<MyRecord>();

     while let Some(result) = rec_stream.next().await {
         dbg!(result?.fields);
@@ -87,57 +89,3 @@ async fn main() -> Result<(), Box<dyn Error>> {
     Ok(())
 }
 ```
-
-## Crate Release Process
-
-Maintainers: Eventually the release process should be automated, but until CI
-runners are available for the repo, follow this checklist.
-
-1. Lint:
-
-   ```sh
-   cd ferrtable
-   cargo fmt --check
-   cargo clippy
-   cd ../ferrtable-test
-   cargo fmt --check
-   cargo clippy
-   ```
-
-2. Run integration tests:
-
-   ```sh
-   cd ferrtable-test
-   cat <<EOF > ferrtable-test.config.toml
-   access_token = "<AIRTABLE ACCESS TOKEN>"
-   base_id = "<BASE ID>"
-   table_id = "<TABLE ID>"
-   EOF
-   cargo run
-   ```
-
-3. Run unit tests:
-
-   ```sh
-   cd ferrtable
-   # At the time of this writing, nextest doesn't actually have anything to run,
-   # but it may in the future as tests are added outside of doc comments.
-   cargo nextest run --all-features --no-tests=warn
-   cargo test --doc
-   ```
-
-4. Bump `version` in `Cargo.toml`.
-
-5. Update main package version in `ferrtable-test` lockfile:
-
-   ```sh
-   cd ferrtable-test
-   cargo build
-   ```
-
-6. Commit and push changes.
-
-7. Publish:
-
-   ```sh
-   cargo publish
-   ```

View file

@@ -57,8 +57,9 @@ impl Client {
     ///             ("status".to_owned(), "In progress".to_owned()),
     ///         ]),
     ///     ])
-    ///     .with_base_id("***")
-    ///     .with_table_id("***")
+    ///     .with_base_id("***".to_owned())
+    ///     .with_table_id("***".to_owned())
+    ///     .build()?
     ///     .execute()
     ///     .await?;
     /// # Ok(())
@@ -93,9 +94,10 @@ impl Client {
     /// # let client = Client::new_from_access_token("*****")?;
     /// let result = client
     ///     .get_record()
-    ///     .with_base_id("***")
-    ///     .with_table_id("***")
-    ///     .with_record_id("***")
+    ///     .with_base_id("***".to_owned())
+    ///     .with_table_id("***".to_owned())
+    ///     .with_record_id("***".to_owned())
+    ///     .build()?
     ///     .fetch_optional::<HashMap<String, String>>()
     ///     .await?;
     /// dbg!(result);
@@ -122,7 +124,8 @@ impl Client {
     /// # let client = Client::new_from_access_token("*****")?;
     /// let mut base_stream = client
     ///     .list_bases()
-    ///     .stream_items()?;
+    ///     .build()?
+    ///     .stream_items();
     ///
     /// while let Some(result) = base_stream.next().await {
     ///     dbg!(result?);
@@ -152,9 +155,10 @@ impl Client {
     /// # let client = Client::new_from_access_token("*****")?;
     /// let mut rec_stream = client
     ///     .list_records()
-    ///     .with_base_id("***")
-    ///     .with_table_id("***")
-    ///     .stream_items::<HashMap<String, serde_json::Value>>()?;
+    ///     .with_base_id("***".to_owned())
+    ///     .with_table_id("***".to_owned())
+    ///     .build()?
+    ///     .stream_items::<HashMap<String, serde_json::Value>>();
     ///
     /// while let Some(result) = rec_stream.next().await {
     ///     dbg!(result?.fields);

View file

@@ -4,23 +4,14 @@ use derive_builder::Builder;
 use percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};
 use serde::{Deserialize, Serialize, de::DeserializeOwned};

-use crate::{
-    client::Client,
-    errors::{ExecutionError, Result},
-    types::AirtableRecord,
-};
+use crate::{client::Client, errors::ExecutionError, types::AirtableRecord};

 #[derive(Builder, Clone, Debug)]
-#[builder(
-    build_fn(error = "ExecutionError", private),
-    pattern = "owned",
-    setter(prefix = "with")
-)]
+#[builder(pattern = "owned", setter(prefix = "with"))]
 pub struct CreateRecordsQuery<T>
 where
     T: Serialize,
 {
-    #[builder(setter(into))]
     base_id: String,

     #[builder(vis = "pub(crate)")]
@@ -29,7 +20,6 @@ where
     #[builder(vis = "pub(crate)")]
     records: Vec<T>,

-    #[builder(setter(into))]
     table_id: String,
 }
@@ -54,7 +44,7 @@ pub struct CreateRecordsDetails {
     pub reasons: Vec<String>,
 }

-impl<T> CreateRecordsQueryBuilder<T>
+impl<T> CreateRecordsQuery<T>
 where
     T: Clone + Debug + DeserializeOwned + Serialize,
 {
@@ -64,9 +54,7 @@ where
     /// underlying `reqwest::Error`. This may be improved in future releases
     /// to better differentiate between network, serialization, deserialization,
     /// and API errors.
-    pub async fn execute(self) -> Result<CreateRecordsResponse<T>> {
-        let query = self.build()?;
-
+    pub async fn execute(self) -> Result<CreateRecordsResponse<T>, ExecutionError> {
         #[derive(Serialize)]
         struct Record<RT: Serialize> {
             fields: RT,
@@ -75,14 +63,13 @@ where
         struct RequestBody<RT: Serialize> {
             records: Vec<Record<RT>>,
         }
-        let base_id = utf8_percent_encode(&query.base_id, NON_ALPHANUMERIC).to_string();
-        let table_id = utf8_percent_encode(&query.table_id, NON_ALPHANUMERIC).to_string();
-
-        let http_resp = query
+        let base_id = utf8_percent_encode(&self.base_id, NON_ALPHANUMERIC).to_string();
+        let table_id = utf8_percent_encode(&self.table_id, NON_ALPHANUMERIC).to_string();
+        let http_resp = self
             .client
             .post_path(&format!("v0/{base_id}/{table_id}"))
             .json(&RequestBody {
-                records: query
+                records: self
                     .records
                     .into_iter()
                     .map(|rec| Record { fields: rec })

View file

@@ -1,13 +1,9 @@
 use thiserror::Error;

-/// Errors that may occur when making an Airtable API request.
 #[derive(Debug, Error)]
 pub enum ExecutionError {
     #[error("error making http request to airtable api: {0}")]
     Reqwest(reqwest::Error),
-
-    #[error("incomplete airtable api request information: {0}")]
-    Builder(derive_builder::UninitializedFieldError),
 }

 impl From<reqwest::Error> for ExecutionError {
@@ -15,16 +11,3 @@ impl From<reqwest::Error> for ExecutionError {
         Self::Reqwest(value)
     }
 }
-
-// In addition to the self-evident purpose of type conversion, this allows our
-// type to be specified as the error type for auto-generated `build()` methods,
-// by annotating the relevant struct declaration with
-// `#[builder(build_fn(error = "crate::errors::ExecutionError"))]`.
-impl From<derive_builder::UninitializedFieldError> for ExecutionError {
-    fn from(value: derive_builder::UninitializedFieldError) -> Self {
-        Self::Builder(value)
-    }
-}
-
-// Custom `Result` type helps to make complex method signatures more concise.
-pub type Result<T> = std::result::Result<T, ExecutionError>;
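The removed comment above explains the mechanism being dropped: implementing `From<derive_builder::UninitializedFieldError>` is what lets a query struct name `ExecutionError` in `#[builder(build_fn(error = "..."))]`, so the generated `build()` returns the crate's own error type. A small, self-contained sketch of that pattern; the `Example` struct and its field names are illustrative only, not part of ferrtable:

```rust
use derive_builder::Builder;
use thiserror::Error;

#[derive(Debug, Error)]
pub enum ExecutionError {
    // Wraps derive_builder's "missing field" error so the generated build()
    // can return this error type directly (equivalent to a manual From impl).
    #[error("incomplete request information: {0}")]
    Builder(#[from] derive_builder::UninitializedFieldError),
}

// Because of build_fn(error = ...), ExampleBuilder::build() returns
// Result<Example, ExecutionError> instead of the default builder error.
#[derive(Builder, Clone, Debug)]
#[builder(build_fn(error = "ExecutionError"), pattern = "owned", setter(prefix = "with"))]
pub struct Example {
    base_id: String,
    table_id: String,
}

fn main() -> Result<(), ExecutionError> {
    // Omitting .with_table_id(...) here would surface as ExecutionError::Builder.
    let example = ExampleBuilder::default()
        .with_base_id("app123".to_owned())
        .with_table_id("tbl456".to_owned())
        .build()?;
    println!("{example:?}");
    Ok(())
}
```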

View file

@@ -4,50 +4,36 @@ use derive_builder::Builder;
 use percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};
 use serde::{Serialize, de::DeserializeOwned};

-use crate::{
-    client::Client,
-    errors::{ExecutionError, Result},
-    types::AirtableRecord,
-};
+use crate::{client::Client, errors::ExecutionError, types::AirtableRecord};

 #[derive(Builder, Clone, Debug, Serialize)]
-#[builder(
-    build_fn(error = "ExecutionError", private),
-    pattern = "owned",
-    setter(prefix = "with")
-)]
+#[builder(pattern = "owned", setter(prefix = "with"))]
 pub struct GetRecordQuery {
-    #[builder(setter(into))]
     #[serde(skip)]
     base_id: String,

-    #[builder(vis = "pub(crate)")]
     #[serde(skip)]
+    #[builder(vis = "pub(crate)")]
     client: Client,

-    #[builder(setter(into))]
     #[serde(skip)]
     record_id: String,

-    #[builder(setter(into))]
     #[serde(skip)]
     table_id: String,
 }

-impl GetRecordQueryBuilder {
-    pub async fn fetch_optional<T>(self) -> Result<Option<AirtableRecord<T>>>
+impl GetRecordQuery {
+    pub async fn fetch_optional<T>(self) -> Result<Option<AirtableRecord<T>>, ExecutionError>
     where
         T: Clone + Debug + DeserializeOwned,
     {
-        let query = self.build()?;
-        let http_resp = query
+        let base_id = utf8_percent_encode(&self.base_id, NON_ALPHANUMERIC).to_string();
+        let table_id = utf8_percent_encode(&self.table_id, NON_ALPHANUMERIC).to_string();
+        let record_id = utf8_percent_encode(&self.record_id, NON_ALPHANUMERIC).to_string();
+        let http_resp = self
             .client
-            .get_path(&format!(
-                "v0/{base_id}/{table_id}/{record_id}",
-                base_id = utf8_percent_encode(&query.base_id, NON_ALPHANUMERIC),
-                table_id = utf8_percent_encode(&query.table_id, NON_ALPHANUMERIC),
-                record_id = utf8_percent_encode(&query.record_id, NON_ALPHANUMERIC),
-            ))
+            .get_path(&format!("v0/{base_id}/{table_id}/{record_id}"))
             .send()
             .await?;
         match http_resp.error_for_status() {

View file

@@ -55,17 +55,19 @@
 //!             status: Status::InProgress,
 //!             attachments: vec![],
 //!         }])
-//!         .with_base_id("***")
-//!         .with_table_id("***")
+//!         .with_base_id("***".to_owned())
+//!         .with_table_id("***".to_owned())
+//!         .build()?
 //!         .execute()
 //!         .await?;
 //!
 //! let mut rec_stream = client
 //!     .list_records()
-//!     .with_base_id("***")
-//!     .with_table_id("***")
-//!     .with_filter("{status} = 'Todo' || {status} = 'In Progress'")
-//!     .stream_items::<MyRecord>()?;
+//!     .with_base_id("***".to_owned())
+//!     .with_table_id("***".to_owned())
+//!     .with_filter(Some("{status} = 'Todo' || {status} = 'In Progress'".to_owned()))
+//!     .build()?
+//!     .stream_items::<MyRecord>();
 //!
 //! while let Some(result) = rec_stream.next().await {
 //!     dbg!(result?.fields);

View file

@@ -6,16 +6,12 @@ use serde::{Deserialize, Serialize};

 use crate::{
     client::Client,
-    errors::{ExecutionError, Result},
+    errors::ExecutionError,
     pagination::{PaginatedQuery, PaginatedResponse, execute_paginated},
 };

 #[derive(Builder, Clone, Debug, Serialize)]
-#[builder(
-    build_fn(error = "ExecutionError", private),
-    pattern = "owned",
-    setter(prefix = "with")
-)]
+#[builder(pattern = "owned", setter(prefix = "with"))]
 pub struct ListBasesQuery {
     #[serde(skip)]
     #[builder(vis = "pub(crate)")]
@@ -43,10 +39,9 @@ impl PaginatedQuery<Base, ListBasesResponse> for ListBasesQuery {
     }
 }

-impl ListBasesQueryBuilder {
-    pub fn stream_items(self) -> Result<Pin<Box<impl Stream<Item = Result<Base>>>>> {
-        self.build()
-            .map(execute_paginated::<Base, ListBasesResponse>)
+impl ListBasesQuery {
+    pub fn stream_items(self) -> Pin<Box<impl Stream<Item = Result<Base, ExecutionError>>>> {
+        execute_paginated::<Base, ListBasesResponse>(self)
     }
 }

View file

@@ -7,30 +7,25 @@ use serde::{Deserialize, Serialize, de::DeserializeOwned};

 use crate::{
     client::Client,
-    errors::{ExecutionError, Result},
+    errors::ExecutionError,
     pagination::{PaginatedQuery, PaginatedResponse, execute_paginated},
     types::AirtableRecord,
 };

 #[derive(Builder, Clone, Debug, Serialize)]
-#[builder(
-    build_fn(error = "ExecutionError", private),
-    pattern = "owned",
-    setter(prefix = "with")
-)]
+#[builder(pattern = "owned", setter(prefix = "with"))]
 pub struct ListRecordsQuery {
-    #[builder(setter(into))]
     #[serde(skip)]
     base_id: String,

-    #[builder(vis = "pub(crate)")]
     #[serde(skip)]
+    #[builder(vis = "pub(crate)")]
     client: Client,

     /// Only data for fields whose names or IDs are in this list will be
     /// included in the result. If you don't need every field, you can use this
     /// parameter to reduce the amount of data transferred.
-    #[builder(default, setter(into, strip_option))]
+    #[builder(default)]
     fields: Option<Vec<String>>,

     /// A formula used to filter records. The formula will be evaluated for
@@ -39,7 +34,7 @@ pub struct ListRecordsQuery {
     ///
     /// If combined with the view parameter, only records in that view which
     /// satisfy the formula will be returned.
-    #[builder(default, setter(into, strip_option))]
+    #[builder(default)]
     // filterByFormula is renamed so that the builder method, that is,
     // `.with_filter()`, reads more cleanly.
     #[serde(rename = "filterByFormula")]
@@ -48,11 +43,10 @@ pub struct ListRecordsQuery {
     #[builder(default, private)]
     offset: Option<String>,

-    #[builder(default, setter(into, strip_option))]
     #[serde(rename = "pageSize")]
+    #[builder(default)]
     page_size: Option<usize>,

-    #[builder(setter(into))]
     #[serde(skip)]
     table_id: String,
 }
@@ -78,13 +72,14 @@ where
     }
 }

-impl ListRecordsQueryBuilder {
-    pub fn stream_items<T>(self) -> Result<Pin<Box<impl Stream<Item = Result<AirtableRecord<T>>>>>>
+impl ListRecordsQuery {
+    pub fn stream_items<T>(
+        self,
+    ) -> Pin<Box<impl Stream<Item = Result<AirtableRecord<T>, ExecutionError>>>>
     where
         T: Clone + Debug + DeserializeOwned,
     {
-        self.build()
-            .map(execute_paginated::<AirtableRecord<T>, ListRecordsResponse<T>>)
+        execute_paginated::<AirtableRecord<T>, ListRecordsResponse<T>>(self)
     }
 }

View file

@@ -35,17 +35,8 @@ where
     Q: Clone,
     T: Clone,
 {
-    /// Records remaining from the most recently fetched page but not yet
-    /// yielded to the stream consumer. The subsequent page should not be
-    /// fetched until this collection has been fully consumed.
     buffered: VecDeque<T>,
-
-    /// Owned copy of the paginated query.
     query: Q,
-
-    /// When `query.offset` is `None`, this flag indicates whether that is
-    /// because the first page still needs to be fetched (`started` = `false`)
-    /// or because there are no pages remaining (`started` = `true`).
     started: bool,
 }

@@ -70,9 +61,9 @@ macro_rules! handle_stream_err {
     };
 }

-// This could be folded into the `PaginatedQuery` trait as a default
-// implementation, but that forces that the traits in this module be exposed
-// outside of the crate and results in worse caller ergonomics overall.
+// This could be brought into PaginatedQuery as a default implementation, but
+// that forces that the traits in this module be exposed outside of the crate
+// and additionally results in worse client ergonomics overall.
 pub(crate) fn execute_paginated<T, R>(
     query: impl PaginatedQuery<T, R>,
 ) -> Pin<Box<impl Stream<Item = Result<T, ExecutionError>>>>
@@ -81,7 +72,7 @@ where
     R: PaginatedResponse<T>,
 {
     // Stream has to be pinned to the heap so that the closure inside
-    // doesn't need to implement Unpin.
+    // doesn't need to implement Unpin (which I don't think it can).
     Box::pin(futures::stream::unfold(
         StreamState {
             buffered: VecDeque::new(),
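For reference, the removed `StreamState` doc comments and the `unfold` call above describe a buffer-then-fetch state machine: drain previously fetched items first, fetch the next page only when the buffer is empty, and use a `started` flag to tell "nothing fetched yet" apart from "no pages left". A stripped-down, self-contained sketch of that shape using the `futures` crate; the `Page` and `fetch_page` names are stand-ins for the Airtable client, not ferrtable APIs:

```rust
use std::collections::VecDeque;

use futures::{Stream, StreamExt};

// Stand-in for one fetched page: some items plus an optional offset token.
struct Page {
    items: Vec<u32>,
    offset: Option<String>,
}

// Fake paginated source: three pages, then no further offset.
fn fetch_page(offset: Option<&str>) -> Page {
    match offset {
        None => Page { items: vec![1, 2], offset: Some("p2".into()) },
        Some("p2") => Page { items: vec![3, 4], offset: Some("p3".into()) },
        _ => Page { items: vec![5], offset: None },
    }
}

struct StreamState {
    buffered: VecDeque<u32>, // items fetched but not yet yielded
    offset: Option<String>,  // pagination token for the next request
    started: bool,           // distinguishes "not started" from "exhausted"
}

fn stream_items() -> impl Stream<Item = u32> {
    futures::stream::unfold(
        StreamState { buffered: VecDeque::new(), offset: None, started: false },
        |mut state| async move {
            loop {
                // Drain the buffer before fetching another page.
                if let Some(item) = state.buffered.pop_front() {
                    return Some((item, state));
                }
                // No offset after the first fetch means the source is exhausted.
                if state.started && state.offset.is_none() {
                    return None;
                }
                let page = fetch_page(state.offset.as_deref());
                state.started = true;
                state.offset = page.offset;
                state.buffered.extend(page.items);
            }
        },
    )
}

fn main() {
    let collected: Vec<u32> = futures::executor::block_on(stream_items().collect::<Vec<_>>());
    assert_eq!(collected, vec![1, 2, 3, 4, 5]);
}
```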