use std::collections::HashMap;
use axum::{
Json,
extract::{Path, State},
response::{IntoResponse as _, Response},
};
use phono_backends::{pg_acl::PgPrivilegeType, pg_attribute::PgAttribute, pg_class::PgClass};
use phono_models::{
accessors::{Accessor, Actor, portal::PortalAccessor},
expression::PgExpressionAny,
field::Field,
};
use phono_pestgros::{Datum, QueryFragment, escape_identifier};
use serde::{Deserialize, Serialize};
use sqlx::{
Postgres, QueryBuilder,
postgres::{PgRow, types::Oid},
};
use tracing::debug;
use uuid::Uuid;
use validator::Validate;
use crate::{
app::AppDbConn,
errors::AppError,
extractors::ValidatedForm,
field_info::TableFieldInfo,
user::CurrentUser,
workspace_pooler::{RoleAssignment, WorkspacePooler},
};
/// URL path segments identifying the portal being queried by [`get`].
#[derive(Clone, Debug, Deserialize)]
pub(super) struct PathParams {
    /// Portal id in the app database; verified against the workspace and
    /// relation before any data is returned.
    portal_id: Uuid,
    /// Postgres `pg_class` OID of the relation backing the portal.
    rel_oid: u32,
    /// Workspace whose database pool the query runs against.
    workspace_id: Uuid,
}
/// Validated form body accepted by [`get`].
#[derive(Debug, Deserialize, Validate)]
pub(super) struct FormBody {
    /// Optional JSON-encoded filter expression that is `&&`-ed with the
    /// portal's stored filter. Empty or unparseable values are silently
    /// ignored rather than rejected.
    subfilter: Option<String>,
}
/// Maximum number of data rows fetched and returned to the frontend in a
/// single response; applied as a SQL `limit` parameter.
const FRONTEND_ROW_LIMIT: i64 = 1000;
/// Helper type to make it easier to build and reason about multiple related SQL
/// queries.
///
/// Assemble the final `select` statement by converting into a
/// [`QueryFragment`] (or a `QueryBuilder`) via the `From` impls below; empty
/// optional clauses are omitted entirely.
#[derive(Clone, Debug)]
pub struct SelectQuery {
    /// Query fragment following (not including) "select ".
    pub selection: QueryFragment,
    /// Query fragment following (not including) "from ".
    pub source: QueryFragment,
    /// Query fragment following (not including) "where ", or empty if not
    /// applicable.
    pub filters: QueryFragment,
    /// Query fragment following (not including) "order by ", or empty if not
    /// applicable.
    pub order: QueryFragment,
    /// Query fragment following (not including) "limit ", or empty if not
    /// applicable.
    pub limit: QueryFragment,
}
impl From<SelectQuery> for QueryFragment {
fn from(value: SelectQuery) -> Self {
let mut result = QueryFragment::from_sql("select ");
result.push(value.selection);
result.push(QueryFragment::from_sql(" from "));
result.push(value.source);
if !value.filters.is_empty() {
result.push(QueryFragment::from_sql(" where "));
result.push(value.filters);
}
if !value.order.is_empty() {
result.push(QueryFragment::from_sql(" order by "));
result.push(value.order);
}
if !value.limit.is_empty() {
result.push(QueryFragment::from_sql(" limit "));
result.push(value.limit);
}
result
}
}
impl From<SelectQuery> for QueryBuilder<'_, Postgres> {
fn from(value: SelectQuery) -> Self {
QueryFragment::from(value).into()
}
}
/// HTTP GET handler for an API endpoint returning a JSON encoding of portal
2026-01-13 22:31:50 +00:00
/// data to display in a table or similar form. If the `subfilter` URL parameter
/// is specified, it is `&&`-ed with the portal's stored filter.
///
/// Only queries up to the first [`FRONTEND_ROW_LIMIT`] rows.
pub(super) async fn get(
State(mut workspace_pooler): State<WorkspacePooler>,
AppDbConn(mut app_db): AppDbConn,
CurrentUser(user): CurrentUser,
Path(PathParams {
portal_id,
rel_oid,
workspace_id,
}): Path<PathParams>,
2026-01-13 22:31:50 +00:00
ValidatedForm(form): ValidatedForm<FormBody>,
) -> Result<Response, AppError> {
let mut workspace_client = workspace_pooler
.acquire_for(workspace_id, RoleAssignment::User(user.id))
.await?;
let rel = PgClass::with_oid(Oid(rel_oid))
.fetch_one(&mut workspace_client)
.await?;
let portal = PortalAccessor::default()
.id(portal_id)
.as_actor(Actor::User(user.id))
.verify_workspace_id(workspace_id)
.verify_rel_oid(Oid(rel_oid))
.verify_rel_permissions([PgPrivilegeType::Select])
.using_rel(&rel)
.using_workspace_client(&mut workspace_client)
.using_app_db(&mut app_db)
.fetch_one()
.await?;
let attrs = PgAttribute::all_for_rel(portal.class_oid)
.fetch_all(&mut workspace_client)
.await?;
let pkey_attrs = PgAttribute::pkeys_for_rel(portal.class_oid)
.fetch_all(&mut workspace_client)
.await?;
2025-10-01 22:36:19 -07:00
let fields: Vec<TableFieldInfo> = {
let fields: Vec<Field> = Field::belonging_to_portal(portal.id)
.fetch_all(&mut app_db)
.await?;
2025-10-01 22:36:19 -07:00
let mut field_info: Vec<TableFieldInfo> = Vec::with_capacity(fields.len());
for field in fields {
if let Some(attr) = attrs.iter().find(|attr| attr.attname == field.name) {
2025-10-01 22:36:19 -07:00
field_info.push(TableFieldInfo {
field,
2025-10-01 22:36:19 -07:00
column_present: true,
has_default: attr.atthasdef,
not_null: attr.attnotnull.unwrap_or_default(),
});
}
}
field_info
};
let main_data_query = SelectQuery {
selection: QueryFragment::from_sql(
&pkey_attrs
2026-01-13 18:10:44 +00:00
.iter()
.chain(attrs.iter())
.map(|attr| {
format!(
"main.{ident_esc}",
ident_esc = escape_identifier(&attr.attname)
)
})
2026-01-13 18:10:44 +00:00
.collect::<Vec<_>>()
.join(", "),
),
source: QueryFragment::from_sql(&format!(
"{ident_esc} as main",
ident_esc = rel.get_identifier()
)),
filters: QueryFragment::join(
[
portal
.table_filter
.0
.map(|filter| filter.into_query_fragment()),
form.subfilter
.and_then(|value| {
if value.is_empty() {
None
} else {
serde_json::from_str::<Option<PgExpressionAny>>(&value)
// Ignore invalid input. A user likely pasted incorrectly
// or made a typo.
.inspect_err(|_| debug!("ignoring invalid subfilter expression"))
.ok()
.flatten()
}
})
.map(|filter| filter.into_query_fragment()),
]
.into_iter()
.flatten(),
QueryFragment::from_sql(" and "),
),
order: QueryFragment::from_sql("_id"),
limit: QueryFragment::from_param(Datum::Numeric(Some(FRONTEND_ROW_LIMIT.into()))),
};
let count_query = SelectQuery {
selection: QueryFragment::from_sql("count(*)"),
order: QueryFragment::empty(),
limit: QueryFragment::empty(),
..main_data_query.clone()
};
// TODO: Consider running queries in a transaction to improve consistency.
let rows: Vec<PgRow> = QueryBuilder::from(main_data_query)
.build()
.fetch_all(workspace_client.get_conn())
.await?;
let count: i64 = QueryBuilder::from(count_query)
.build_query_scalar()
.fetch_one(workspace_client.get_conn())
.await?;
2026-01-13 18:10:44 +00:00
#[derive(Serialize)]
struct DataRow {
pkey: String,
data: Vec<Datum>,
}
let mut data_rows: Vec<DataRow> = vec![];
let mut pkeys: Vec<String> = vec![];
for row in rows.iter() {
let mut pkey_values: HashMap<String, Datum> = HashMap::new();
for attr in pkey_attrs.clone() {
let field = Field::default_from_attr(&attr)
.ok_or(anyhow::anyhow!("unsupported primary key column type"))?;
pkey_values.insert(field.name.clone(), field.get_datum(row)?);
}
let pkey = serde_json::to_string(&pkey_values)?;
pkeys.push(pkey.clone());
let mut row_data: Vec<Datum> = vec![];
for field in fields.iter() {
row_data.push(field.field.get_datum(row)?);
}
data_rows.push(DataRow {
pkey,
data: row_data,
});
}
#[derive(Serialize)]
struct ResponseBody {
2025-10-01 22:36:19 -07:00
fields: Vec<TableFieldInfo>,
pkeys: Vec<String>,
rows: Vec<DataRow>,
count: i64,
}
Ok(Json(ResponseBody {
count,
rows: data_rows,
fields,
pkeys,
})
.into_response())
}