Compare commits
3 commits: 7c27482d08...577cd76f2c
| Author | SHA1 | Date |
|---|---|---|
| | 577cd76f2c | |
| | b2d1bdcaac | |
| | 4a76f13d58 | |
12 changed files with 194 additions and 89 deletions
ferrtable-test/Cargo.lock (generated, 2 changes)
@@ -381,7 +381,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
 [[package]]
 name = "ferrtable"
-version = "0.0.1"
+version = "0.0.3"
 dependencies = [
  "chrono",
  "derive_builder",
@@ -32,7 +32,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new_from_access_token(&settings.access_token)?;
 
     println!("Testing Client::list_bases()...");
-    let mut bases = client.list_bases().build()?.stream_items();
+    let mut bases = client.list_bases().stream_items()?;
     while let Some(res) = bases.next().await {
         dbg!(res?);
     }
@@ -44,19 +44,17 @@ async fn main() -> Result<(), Box<dyn Error>> {
             notes: Some("Just for fun, no other reason.".to_owned()),
             status: Some(RecordStatus::InProgress),
         }])
-        .with_base_id(settings.base_id.clone())
-        .with_table_id(settings.table_id.clone())
-        .build()?
+        .with_base_id(&settings.base_id)
+        .with_table_id(&settings.table_id)
         .execute()
         .await?;
 
     println!("Testing Client::list_records()...");
     let records = client
         .list_records()
-        .with_base_id(settings.base_id.clone())
-        .with_table_id(settings.table_id.clone())
-        .build()?
-        .stream_items::<TestRecord>()
+        .with_base_id(&settings.base_id)
+        .with_table_id(&settings.table_id)
+        .stream_items::<TestRecord>()?
         .collect::<Vec<_>>()
         .await
         .into_iter()
@@ -67,20 +65,18 @@ async fn main() -> Result<(), Box<dyn Error>> {
 
     let record = client
         .get_record()
-        .with_base_id(settings.base_id.clone())
-        .with_table_id(settings.table_id.clone())
-        .with_record_id("does_not_exist".to_string())
-        .build()?
+        .with_base_id(&settings.base_id)
+        .with_table_id(&settings.table_id)
+        .with_record_id("does_not_exist")
         .fetch_optional::<TestRecord>()
         .await?;
     assert!(record.is_none());
 
     let record = client
         .get_record()
-        .with_base_id(settings.base_id.clone())
-        .with_table_id(settings.table_id.clone())
-        .with_record_id(records.first().unwrap().id.clone())
-        .build()?
+        .with_base_id(&settings.base_id)
+        .with_table_id(&settings.table_id)
+        .with_record_id(&records.first().unwrap().id)
         .fetch_optional::<TestRecord>()
         .await?;
     dbg!(record);
@@ -1,6 +1,6 @@
 [package]
 name = "ferrtable"
-version = "0.0.2"
+version = "0.0.3"
 categories = ["api-bindings"]
 description = "Ferris the crab's favorite Airtable library"
 homepage = "https://forge.secondsystemtech.com/brent/ferrtable"
@@ -7,16 +7,16 @@ that implement the `Clone`, `serde::Deserialize`, and `serde::Serialize` traits.
 
 This crate follows in the footsteps of the
 [airtable-api](https://crates.io/crates/airtable-api) crate from Oxide Computer
-Company, which appears to have been archived and unmaintained since 2022.
-By comparison, Ferrtable aims to provide a more flexible and expressive client
+Company, which appears to have been archived and unmaintained since 2022. By
+comparison, Ferrtable aims to provide a more flexible and expressive client
 interface as well as greater control over paginated responses with the help of
 async [streams](https://doc.rust-lang.org/book/ch17-04-streams.html).
 
 ## Status: Work in Progress
 
 Only a limited set of operations (e.g., creating and listing records) are
-currently supported. The goal is to implement coverage for at least the full
-set of non-enterprise API endpoints, but my initial emphasis is on getting a
+currently supported. The goal is to implement coverage for at least the full set
+of non-enterprise API endpoints, but my initial emphasis is on getting a
 relatively small subset built and tested well.
 
 ## Usage
@@ -68,19 +68,17 @@ async fn main() -> Result<(), Box<dyn Error>> {
             status: Status::InProgress,
             attachments: vec![],
         }])
-        .with_base_id("***".to_owned())
-        .with_table_id("***".to_owned())
-        .build()?
+        .with_base_id("***")
+        .with_table_id("***")
         .execute()
         .await?;
 
     let mut rec_stream = client
         .list_records()
-        .with_base_id("***".to_owned())
-        .with_table_id("***".to_owned())
-        .with_filter("{status} = 'Todo' || {status} = 'In Progress'".to_owned())
-        .build()?
-        .stream_items::<MyRecord>();
+        .with_base_id("***")
+        .with_table_id("***")
+        .with_filter("{status} = 'Todo' || {status} = 'In Progress'")
+        .stream_items::<MyRecord>()?;
 
     while let Some(result) = rec_stream.next().await {
         dbg!(result?.fields);
@@ -89,3 +87,57 @@ async fn main() -> Result<(), Box<dyn Error>> {
     Ok(())
 }
 ```
+
+## Crate Release Process
+
+Maintainers: Eventually the release process should be automated, but until CI
+runners are available for the repo, follow this checklist.
+
+1. Lint:
+
+   ```sh
+   cd ferrtable
+   cargo fmt --check
+   cargo clippy
+
+   cd ../ferrtable-test
+   cargo fmt --check
+   cargo clippy
+   ```
+
+2. Run integration tests:
+
+   ```sh
+   cd ferrtable-test
+   cat <<EOF > ferrtable-test.config.toml
+   access_token = "<AIRTABLE ACCESS TOKEN>"
+   base_id = "<BASE ID>"
+   table_id = "<TABLE ID>"
+   EOF
+   cargo run
+   ```
+
+3. Run unit tests:
+
+   ```sh
+   cd ferrtable
+   # At the time of this writing, nextest doesn't actually have anything to run,
+   # but it may in the future as tests are added outside of doc comments.
+   cargo nextest run --all-features --no-tests=warn
+   cargo test --doc
+   ```
+
+4. Bump `version` in `Cargo.toml`.
+5. Update the main package version in the `ferrtable-test` lockfile:
+
+   ```sh
+   cd ferrtable-test
+   cargo build
+   ```
+
+6. Commit and push changes.
+7. Publish:
+
+   ```sh
+   cargo publish
+   ```
@@ -57,9 +57,8 @@ impl Client {
 ///             ("status".to_owned(), "In progress".to_owned()),
 ///         ]),
 ///     ])
-///     .with_base_id("***".to_owned())
-///     .with_table_id("***".to_owned())
-///     .build()?
+///     .with_base_id("***")
+///     .with_table_id("***")
 ///     .execute()
 ///     .await?;
 /// # Ok(())
@@ -94,10 +93,9 @@ impl Client {
 /// # let client = Client::new_from_access_token("*****")?;
 /// let result = client
 ///     .get_record()
-///     .with_base_id("***".to_owned())
-///     .with_table_id("***".to_owned())
-///     .with_record_id("***".to_owned())
-///     .build()?
+///     .with_base_id("***")
+///     .with_table_id("***")
+///     .with_record_id("***")
 ///     .fetch_optional::<HashMap<String, String>>()
 ///     .await?;
 /// dbg!(result);
@@ -124,8 +122,7 @@ impl Client {
 /// # let client = Client::new_from_access_token("*****")?;
 /// let mut base_stream = client
 ///     .list_bases()
-///     .build()?
-///     .stream_items();
+///     .stream_items()?;
 ///
 /// while let Some(result) = base_stream.next().await {
 ///     dbg!(result?);
@@ -155,10 +152,9 @@ impl Client {
 /// # let client = Client::new_from_access_token("*****")?;
 /// let mut rec_stream = client
 ///     .list_records()
-///     .with_base_id("***".to_owned())
-///     .with_table_id("***".to_owned())
-///     .build()?
-///     .stream_items::<HashMap<String, serde_json::Value>>();
+///     .with_base_id("***")
+///     .with_table_id("***")
+///     .stream_items::<HashMap<String, serde_json::Value>>()?;
 ///
 /// while let Some(result) = rec_stream.next().await {
 ///     dbg!(result?.fields);
@@ -4,14 +4,23 @@ use derive_builder::Builder;
 use percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};
 use serde::{Deserialize, Serialize, de::DeserializeOwned};
 
-use crate::{client::Client, errors::ExecutionError, types::AirtableRecord};
+use crate::{
+    client::Client,
+    errors::{ExecutionError, Result},
+    types::AirtableRecord,
+};
 
 #[derive(Builder, Clone, Debug)]
-#[builder(pattern = "owned", setter(prefix = "with"))]
+#[builder(
+    build_fn(error = "ExecutionError", private),
+    pattern = "owned",
+    setter(prefix = "with")
+)]
 pub struct CreateRecordsQuery<T>
 where
     T: Serialize,
 {
+    #[builder(setter(into))]
     base_id: String,
 
     #[builder(vis = "pub(crate)")]
@@ -20,6 +29,7 @@ where
     #[builder(vis = "pub(crate)")]
     records: Vec<T>,
 
+    #[builder(setter(into))]
     table_id: String,
 }
@@ -44,7 +54,7 @@ pub struct CreateRecordsDetails {
     pub reasons: Vec<String>,
 }
 
-impl<T> CreateRecordsQuery<T>
+impl<T> CreateRecordsQueryBuilder<T>
 where
     T: Clone + Debug + DeserializeOwned + Serialize,
 {
@@ -54,7 +64,9 @@ where
     /// underlying `reqwest::Error`. This may be improved in future releases
     /// to better differentiate between network, serialization, deserialization,
     /// and API errors.
-    pub async fn execute(self) -> Result<CreateRecordsResponse<T>, ExecutionError> {
+    pub async fn execute(self) -> Result<CreateRecordsResponse<T>> {
+        let query = self.build()?;
+
         #[derive(Serialize)]
         struct Record<RT: Serialize> {
             fields: RT,
@@ -63,13 +75,14 @@ where
         struct RequestBody<RT: Serialize> {
             records: Vec<Record<RT>>,
         }
-        let base_id = utf8_percent_encode(&self.base_id, NON_ALPHANUMERIC).to_string();
-        let table_id = utf8_percent_encode(&self.table_id, NON_ALPHANUMERIC).to_string();
-        let http_resp = self
+
+        let base_id = utf8_percent_encode(&query.base_id, NON_ALPHANUMERIC).to_string();
+        let table_id = utf8_percent_encode(&query.table_id, NON_ALPHANUMERIC).to_string();
+        let http_resp = query
             .client
             .post_path(&format!("v0/{base_id}/{table_id}"))
             .json(&RequestBody {
-                records: self
+                records: query
                     .records
                     .into_iter()
                     .map(|rec| Record { fields: rec })
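The hunks above move `execute()` from the query struct onto the generated `CreateRecordsQueryBuilder` and have it call `build()` internally, so callers no longer write `.build()?` themselves. A minimal standalone sketch of that pattern follows; the `PingQuery` type, its field, and the body of `execute()` are hypothetical stand-ins, not part of this change:

```rust
use derive_builder::Builder;

#[derive(Builder, Debug)]
#[builder(pattern = "owned", setter(prefix = "with", into))]
struct PingQuery {
    host: String,
}

impl PingQueryBuilder {
    // The terminal method consumes the builder, validates it via build(),
    // and only then performs the actual work.
    async fn execute(self) -> Result<String, PingQueryBuilderError> {
        let query = self.build()?;
        Ok(format!("pinged {}", query.host))
    }
}

async fn demo() -> Result<(), PingQueryBuilderError> {
    // No explicit `.build()?` at the call site.
    let reply = PingQueryBuilder::default()
        .with_host("example.com")
        .execute()
        .await?;
    println!("{reply}");
    Ok(())
}
```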
@@ -1,9 +1,13 @@
 use thiserror::Error;
 
+/// Errors that may occur when making an Airtable API request.
 #[derive(Debug, Error)]
 pub enum ExecutionError {
     #[error("error making http request to airtable api: {0}")]
     Reqwest(reqwest::Error),
+
+    #[error("incomplete airtable api request information: {0}")]
+    Builder(derive_builder::UninitializedFieldError),
 }
 
 impl From<reqwest::Error> for ExecutionError {
@@ -11,3 +15,16 @@ impl From<reqwest::Error> for ExecutionError {
         Self::Reqwest(value)
     }
 }
+
+// In addition to the self-evident purpose of type conversion, this allows our
+// type to be specified as the error type for auto-generated `build()` methods,
+// by annotating the relevant struct declaration with
+// `#[builder(build_fn(error = "crate::errors::ExecutionError"))]`.
+impl From<derive_builder::UninitializedFieldError> for ExecutionError {
+    fn from(value: derive_builder::UninitializedFieldError) -> Self {
+        Self::Builder(value)
+    }
+}
+
+// Custom `Result` type helps to make complex method signatures more concise.
+pub type Result<T> = std::result::Result<T, ExecutionError>;
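The comment added with the `From` impl refers to derive_builder's `build_fn(error = "...")` attribute: the custom error type must be convertible from `UninitializedFieldError` so the generated `build()` can report missing fields. A minimal sketch of how the pieces fit together, using a hypothetical `ExampleQuery` and a trimmed-down error enum rather than the crate's own types:

```rust
use derive_builder::Builder;

#[derive(Debug)]
enum ExecutionError {
    Builder(derive_builder::UninitializedFieldError),
}

// Required so the generated build() can convert a missing-field error
// into our error type.
impl From<derive_builder::UninitializedFieldError> for ExecutionError {
    fn from(value: derive_builder::UninitializedFieldError) -> Self {
        Self::Builder(value)
    }
}

#[derive(Builder, Debug)]
#[builder(build_fn(error = "ExecutionError"))]
struct ExampleQuery {
    base_id: String,
}

fn main() {
    // base_id was never set, so build() fails with ExecutionError::Builder
    // instead of the default ExampleQueryBuilderError.
    let err = ExampleQueryBuilder::default().build().unwrap_err();
    println!("{err:?}");
}
```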
@@ -4,36 +4,50 @@ use derive_builder::Builder;
 use percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};
 use serde::{Serialize, de::DeserializeOwned};
 
-use crate::{client::Client, errors::ExecutionError, types::AirtableRecord};
+use crate::{
+    client::Client,
+    errors::{ExecutionError, Result},
+    types::AirtableRecord,
+};
 
 #[derive(Builder, Clone, Debug, Serialize)]
-#[builder(pattern = "owned", setter(prefix = "with"))]
+#[builder(
+    build_fn(error = "ExecutionError", private),
+    pattern = "owned",
+    setter(prefix = "with")
+)]
 pub struct GetRecordQuery {
+    #[builder(setter(into))]
     #[serde(skip)]
     base_id: String,
 
-    #[serde(skip)]
     #[builder(vis = "pub(crate)")]
+    #[serde(skip)]
     client: Client,
 
+    #[builder(setter(into))]
     #[serde(skip)]
     record_id: String,
 
+    #[builder(setter(into))]
     #[serde(skip)]
     table_id: String,
 }
 
-impl GetRecordQuery {
-    pub async fn fetch_optional<T>(self) -> Result<Option<AirtableRecord<T>>, ExecutionError>
+impl GetRecordQueryBuilder {
+    pub async fn fetch_optional<T>(self) -> Result<Option<AirtableRecord<T>>>
     where
         T: Clone + Debug + DeserializeOwned,
     {
-        let base_id = utf8_percent_encode(&self.base_id, NON_ALPHANUMERIC).to_string();
-        let table_id = utf8_percent_encode(&self.table_id, NON_ALPHANUMERIC).to_string();
-        let record_id = utf8_percent_encode(&self.record_id, NON_ALPHANUMERIC).to_string();
-        let http_resp = self
+        let query = self.build()?;
+        let http_resp = query
             .client
-            .get_path(&format!("v0/{base_id}/{table_id}/{record_id}"))
+            .get_path(&format!(
+                "v0/{base_id}/{table_id}/{record_id}",
+                base_id = utf8_percent_encode(&query.base_id, NON_ALPHANUMERIC),
+                table_id = utf8_percent_encode(&query.table_id, NON_ALPHANUMERIC),
+                record_id = utf8_percent_encode(&query.record_id, NON_ALPHANUMERIC),
+            ))
             .send()
             .await?;
         match http_resp.error_for_status() {
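The rewritten `get_path` call interpolates `utf8_percent_encode` directly into `format!`, relying on the encoder's `Display` impl instead of intermediate `to_string()` calls. A small self-contained sketch of that behavior; the `record_path` helper and its inputs are illustrative only:

```rust
use percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};

fn record_path(base_id: &str, table_id: &str, record_id: &str) -> String {
    // Each encoder is rendered lazily by format! via its Display impl.
    format!(
        "v0/{base_id}/{table_id}/{record_id}",
        base_id = utf8_percent_encode(base_id, NON_ALPHANUMERIC),
        table_id = utf8_percent_encode(table_id, NON_ALPHANUMERIC),
        record_id = utf8_percent_encode(record_id, NON_ALPHANUMERIC),
    )
}

fn main() {
    // Spaces and slashes in IDs are escaped, keeping the path well formed.
    assert_eq!(record_path("app 1", "tbl/2", "rec3"), "v0/app%201/tbl%2F2/rec3");
}
```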
@@ -55,19 +55,17 @@
 //!             status: Status::InProgress,
 //!             attachments: vec![],
 //!         }])
-//!         .with_base_id("***".to_owned())
-//!         .with_table_id("***".to_owned())
-//!         .build()?
+//!         .with_base_id("***")
+//!         .with_table_id("***")
 //!         .execute()
 //!         .await?;
 //!
 //!     let mut rec_stream = client
 //!         .list_records()
-//!         .with_base_id("***".to_owned())
-//!         .with_table_id("***".to_owned())
-//!         .with_filter(Some("{status} = 'Todo' || {status} = 'In Progress'".to_owned()))
-//!         .build()?
-//!         .stream_items::<MyRecord>();
+//!         .with_base_id("***")
+//!         .with_table_id("***")
+//!         .with_filter("{status} = 'Todo' || {status} = 'In Progress'")
+//!         .stream_items::<MyRecord>()?;
 //!
 //!     while let Some(result) = rec_stream.next().await {
 //!         dbg!(result?.fields);
@@ -6,12 +6,16 @@ use serde::{Deserialize, Serialize};
 
 use crate::{
     client::Client,
-    errors::ExecutionError,
+    errors::{ExecutionError, Result},
     pagination::{PaginatedQuery, PaginatedResponse, execute_paginated},
 };
 
 #[derive(Builder, Clone, Debug, Serialize)]
-#[builder(pattern = "owned", setter(prefix = "with"))]
+#[builder(
+    build_fn(error = "ExecutionError", private),
+    pattern = "owned",
+    setter(prefix = "with")
+)]
 pub struct ListBasesQuery {
     #[serde(skip)]
     #[builder(vis = "pub(crate)")]
@@ -39,9 +43,10 @@ impl PaginatedQuery<Base, ListBasesResponse> for ListBasesQuery {
     }
 }
 
-impl ListBasesQuery {
-    pub fn stream_items(self) -> Pin<Box<impl Stream<Item = Result<Base, ExecutionError>>>> {
-        execute_paginated::<Base, ListBasesResponse>(self)
+impl ListBasesQueryBuilder {
+    pub fn stream_items(self) -> Result<Pin<Box<impl Stream<Item = Result<Base>>>>> {
+        self.build()
+            .map(execute_paginated::<Base, ListBasesResponse>)
     }
 }
@@ -7,25 +7,30 @@ use serde::{Deserialize, Serialize, de::DeserializeOwned};
 
 use crate::{
     client::Client,
-    errors::ExecutionError,
+    errors::{ExecutionError, Result},
     pagination::{PaginatedQuery, PaginatedResponse, execute_paginated},
     types::AirtableRecord,
 };
 
 #[derive(Builder, Clone, Debug, Serialize)]
-#[builder(pattern = "owned", setter(prefix = "with"))]
+#[builder(
+    build_fn(error = "ExecutionError", private),
+    pattern = "owned",
+    setter(prefix = "with")
+)]
 pub struct ListRecordsQuery {
+    #[builder(setter(into))]
     #[serde(skip)]
     base_id: String,
 
-    #[serde(skip)]
     #[builder(vis = "pub(crate)")]
+    #[serde(skip)]
     client: Client,
 
     /// Only data for fields whose names or IDs are in this list will be
     /// included in the result. If you don't need every field, you can use this
     /// parameter to reduce the amount of data transferred.
-    #[builder(default)]
+    #[builder(default, setter(into, strip_option))]
     fields: Option<Vec<String>>,
 
     /// A formula used to filter records. The formula will be evaluated for
@@ -34,7 +39,7 @@ pub struct ListRecordsQuery {
     ///
     /// If combined with the view parameter, only records in that view which
     /// satisfy the formula will be returned.
-    #[builder(default)]
+    #[builder(default, setter(into, strip_option))]
     // filterByFormula is renamed so that the builder method, that is,
     // `.with_filter()`, reads more cleanly.
     #[serde(rename = "filterByFormula")]
|
||||||
#[builder(default, private)]
|
#[builder(default, private)]
|
||||||
offset: Option<String>,
|
offset: Option<String>,
|
||||||
|
|
||||||
|
#[builder(default, setter(into, strip_option))]
|
||||||
#[serde(rename = "pageSize")]
|
#[serde(rename = "pageSize")]
|
||||||
#[builder(default)]
|
|
||||||
page_size: Option<usize>,
|
page_size: Option<usize>,
|
||||||
|
|
||||||
|
#[builder(setter(into))]
|
||||||
#[serde(skip)]
|
#[serde(skip)]
|
||||||
table_id: String,
|
table_id: String,
|
||||||
}
|
}
|
||||||
|
|
@@ -72,14 +78,13 @@ where
     }
 }
 
-impl ListRecordsQuery {
-    pub fn stream_items<T>(
-        self,
-    ) -> Pin<Box<impl Stream<Item = Result<AirtableRecord<T>, ExecutionError>>>>
+impl ListRecordsQueryBuilder {
+    pub fn stream_items<T>(self) -> Result<Pin<Box<impl Stream<Item = Result<AirtableRecord<T>>>>>>
     where
         T: Clone + Debug + DeserializeOwned,
     {
-        execute_paginated::<AirtableRecord<T>, ListRecordsResponse<T>>(self)
+        self.build()
+            .map(execute_paginated::<AirtableRecord<T>, ListRecordsResponse<T>>)
     }
 }
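Several fields above gain `setter(into, strip_option)`, which is what lets callers pass a plain `&str` (or any `Into` value) for an `Option` field, for example `.with_filter("...")` instead of `.with_filter(Some("...".to_owned()))`. A minimal sketch of that derive_builder behavior with a hypothetical `Query` type:

```rust
use derive_builder::Builder;

#[derive(Builder, Debug)]
#[builder(pattern = "owned", setter(prefix = "with"))]
struct Query {
    // strip_option wraps the value in Some(..); into converts &str -> String.
    #[builder(default, setter(into, strip_option))]
    filter: Option<String>,
}

fn main() -> Result<(), QueryBuilderError> {
    let q = QueryBuilder::default()
        .with_filter("{status} = 'Todo'")
        .build()?;
    assert_eq!(q.filter.as_deref(), Some("{status} = 'Todo'"));
    Ok(())
}
```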
@@ -35,8 +35,17 @@ where
     Q: Clone,
     T: Clone,
 {
+    /// Records remaining from the most recently fetched page but not yet
+    /// yielded to the stream consumer. Subsequent page should not be fetched
+    /// until this collection has been fully consumed.
     buffered: VecDeque<T>,
 
+    /// Owned copy of the paginated query.
     query: Q,
 
+    /// When `query.offset` is `None`, this flag indicates whether that is
+    /// because the first page still needs to be fetched (`started` = `false`)
+    /// or because there are no pages remaining (`started` = `true`).
     started: bool,
 }
@@ -61,9 +70,9 @@ macro_rules! handle_stream_err {
     };
 }
 
-// This could be brought into PaginatedQuery as a default implementation, but
-// that forces that the traits in this module be exposed outside of the crate
-// and additionally results in worse client ergonomics overall.
+// This could be folded into the `PaginatedQuery` trait as a default
+// implementation, but that forces that the traits in this module be exposed
+// outside of the crate and results in worse caller ergonomics overall.
 pub(crate) fn execute_paginated<T, R>(
     query: impl PaginatedQuery<T, R>,
 ) -> Pin<Box<impl Stream<Item = Result<T, ExecutionError>>>>

@@ -72,7 +81,7 @@ where
     R: PaginatedResponse<T>,
 {
     // Stream has to be pinned to the heap so that the closure inside
-    // doesn't need to implement Unpin (which I don't think it can).
+    // doesn't need to implement Unpin.
     Box::pin(futures::stream::unfold(
         StreamState {
             buffered: VecDeque::new(),
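The `StreamState` fields documented above drive a `futures::stream::unfold` loop: drain the buffered page first, and fetch the next page only when the buffer is empty and either the first fetch or a further offset remains. A simplified, self-contained sketch of that shape; `fetch_page`, the `u32` item type, and the offset values are stand-ins, not the crate's API:

```rust
use std::collections::VecDeque;

use futures::Stream;

// Stand-in for an HTTP call: returns one page of items plus the offset of
// the next page, or None when this was the last page.
async fn fetch_page(offset: Option<String>) -> (Vec<u32>, Option<String>) {
    match offset.as_deref() {
        None => ((0..3).collect(), Some("page2".to_owned())),
        Some(_) => ((3..5).collect(), None),
    }
}

fn stream_items() -> impl Stream<Item = u32> {
    struct State {
        buffered: VecDeque<u32>,
        offset: Option<String>,
        started: bool,
    }

    futures::stream::unfold(
        State { buffered: VecDeque::new(), offset: None, started: false },
        |mut state| async move {
            loop {
                // Drain the current page before fetching another.
                if let Some(item) = state.buffered.pop_front() {
                    return Some((item, state));
                }
                // No offset after the first fetch means no pages remain.
                if state.started && state.offset.is_none() {
                    return None;
                }
                let (items, next_offset) = fetch_page(state.offset.take()).await;
                state.buffered = items.into();
                state.offset = next_offset;
                state.started = true;
            }
        },
    )
}
```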