+ `;
+ }
+}
diff --git a/components/src/cell-uuid.ts b/components/src/cell-uuid.ts
new file mode 100644
index 0000000..9ec4d79
--- /dev/null
+++ b/components/src/cell-uuid.ts
@@ -0,0 +1,20 @@
+import { html, LitElement } from "lit";
+import { customElement, property } from "lit/decorators.js";
+
+@customElement("cell-uuid")
+export class CellUuid extends LitElement {
+ @property({ attribute: "is-null", type: Boolean, reflect: true })
+ isNull = false;
+
+ protected override render() {
+ if (this.isNull) {
+ return html`
+ NULL
+ `;
+ }
+
+ return html`
+      <slot></slot>
+ `;
+ }
+}
diff --git a/components/src/cells.ts b/components/src/cells.ts
new file mode 100644
index 0000000..550195d
--- /dev/null
+++ b/components/src/cells.ts
@@ -0,0 +1,2 @@
+export { CellText } from "./cell-text.ts";
+export { CellUuid } from "./cell-uuid.ts";
diff --git a/components/src/grid-size-context.ts b/components/src/grid-size-context.ts
new file mode 100644
index 0000000..dda935f
--- /dev/null
+++ b/components/src/grid-size-context.ts
@@ -0,0 +1,5 @@
+import { createContext } from "@lit/context";
+
+export const gridSizePxContext = createContext<number>(
+ Symbol("grid-size-px-context"),
+);
diff --git a/components/src/my-grid.ts b/components/src/my-grid.ts
new file mode 100644
index 0000000..389f0ba
--- /dev/null
+++ b/components/src/my-grid.ts
@@ -0,0 +1,44 @@
+import { provide } from "@lit/context";
+import { css, html, LitElement } from "lit";
+import { customElement, property } from "lit/decorators.js";
+
+import { gridSizePxContext } from "./grid-size-context.ts";
+
+@customElement("my-grid")
+export class MyGrid extends LitElement {
+ @provide({ context: gridSizePxContext })
+ @property({ attribute: "grid-size-px", type: Number })
+ gridSizePx = 32;
+
+ @property()
+ gridColor = "#ddd";
+
+ @property()
+ width = "100%";
+
+ @property()
+ height = "100%";
+
+ static override styles = css`
+ :host {
+ display: block;
+ position: relative;
+ }
+ `;
+
+ protected override render() {
+ return html`
+
+
+ `;
+ }
+}
diff --git a/components/tsconfig.json b/components/tsconfig.json
new file mode 100644
index 0000000..e8d5c89
--- /dev/null
+++ b/components/tsconfig.json
@@ -0,0 +1,7 @@
+{
+ "compilerOptions": {
+ "lib": ["deno.ns", "dom"],
+ "experimentalDecorators": true,
+ "useDefineForClassFields": false
+ }
+}
diff --git a/components/vite.config.mjs b/components/vite.config.mjs
new file mode 100644
index 0000000..8da3822
--- /dev/null
+++ b/components/vite.config.mjs
@@ -0,0 +1,18 @@
+import { defineConfig } from "vite";
+
+import "lit";
+
+// https://vitejs.dev/config/
+export default defineConfig({
+ build: {
+ lib: {
+ entry: ["src/cells.ts"],
+ formats: ["es"],
+ },
+ outDir: "../js_dist",
+ emptyOutDir: true,
+ rollupOptions: {
+ // external: /^lit/,
+ },
+ },
+});
diff --git a/diesel.toml b/diesel.toml
deleted file mode 100644
index 69e7f1d..0000000
--- a/diesel.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-# For documentation on how to configure this file,
-# see https://diesel.rs/guides/configuring-diesel-cli
-
-[print_schema]
-file = "do_not_use.txt"
-custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]
-
-[migrations_directory]
-dir = "migrations"
diff --git a/mdengine/Cargo.lock b/mdengine/Cargo.lock
deleted file mode 100644
index de1b0fa..0000000
--- a/mdengine/Cargo.lock
+++ /dev/null
@@ -1,199 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-version = 4
-
-[[package]]
-name = "bitflags"
-version = "2.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
-
-[[package]]
-name = "byteorder"
-version = "1.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
-
-[[package]]
-name = "darling"
-version = "0.20.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
-dependencies = [
- "darling_core",
- "darling_macro",
-]
-
-[[package]]
-name = "darling_core"
-version = "0.20.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
-dependencies = [
- "fnv",
- "ident_case",
- "proc-macro2",
- "quote",
- "strsim",
- "syn",
-]
-
-[[package]]
-name = "darling_macro"
-version = "0.20.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
-dependencies = [
- "darling_core",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "diesel"
-version = "2.2.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff3e1edb1f37b4953dd5176916347289ed43d7119cc2e6c7c3f7849ff44ea506"
-dependencies = [
- "bitflags",
- "byteorder",
- "diesel_derives",
- "itoa",
- "pq-sys",
-]
-
-[[package]]
-name = "diesel_derives"
-version = "2.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68d4216021b3ea446fd2047f5c8f8fe6e98af34508a254a01e4d6bc1e844f84d"
-dependencies = [
- "diesel_table_macro_syntax",
- "dsl_auto_type",
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "diesel_table_macro_syntax"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25"
-dependencies = [
- "syn",
-]
-
-[[package]]
-name = "dsl_auto_type"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "139ae9aca7527f85f26dd76483eb38533fd84bd571065da1739656ef71c5ff5b"
-dependencies = [
- "darling",
- "either",
- "heck",
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "either"
-version = "1.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
-
-[[package]]
-name = "fnv"
-version = "1.0.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
-
-[[package]]
-name = "heck"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
-
-[[package]]
-name = "ident_case"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
-
-[[package]]
-name = "info_schema"
-version = "0.1.0"
-dependencies = [
- "diesel",
-]
-
-[[package]]
-name = "itoa"
-version = "1.0.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
-
-[[package]]
-name = "libc"
-version = "0.2.172"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
-
-[[package]]
-name = "pq-sys"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41c852911b98f5981956037b2ca976660612e548986c30af075e753107bc3400"
-dependencies = [
- "libc",
- "vcpkg",
-]
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.95"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
-dependencies = [
- "unicode-ident",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.40"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "strsim"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
-
-[[package]]
-name = "syn"
-version = "2.0.101"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "unicode-ident"
-version = "1.0.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
-
-[[package]]
-name = "vcpkg"
-version = "0.2.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
diff --git a/mdengine/Cargo.toml b/mdengine/Cargo.toml
deleted file mode 100644
index 3548bd4..0000000
--- a/mdengine/Cargo.toml
+++ /dev/null
@@ -1,8 +0,0 @@
-[package]
-name = "mdengine"
-version = "0.1.0"
-edition = "2024"
-
-[dependencies]
-chrono = "0.4.41"
-diesel = { version = "2.2.10", features = ["64-column-tables", "chrono", "postgres"], default-features = false }
diff --git a/mdengine/Makefile b/mdengine/Makefile
deleted file mode 100644
index f6a75ad..0000000
--- a/mdengine/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-.PHONY: run-postgres
-
-run-postgres:
- docker run --rm -it -e POSTGRES_PASSWORD=guest -p 127.0.0.1:5432:5432 postgres:17
diff --git a/mdengine/README.md b/mdengine/README.md
deleted file mode 100644
index 966d832..0000000
--- a/mdengine/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Interim Metadata Engine (mdengine)
-
-This crate is responsible for navigating the PostgreSQL `information_schema`
-and catalogs tables to extract rich understanding of database structure and
-permissions.
-
-It does not fetch data directly from any user defined tables.
diff --git a/mdengine/diesel.toml b/mdengine/diesel.toml
deleted file mode 100644
index 06f5b1c..0000000
--- a/mdengine/diesel.toml
+++ /dev/null
@@ -1,11 +0,0 @@
-# For documentation on how to configure this file,
-# see https://diesel.rs/guides/configuring-diesel-cli
-
-[print_schema]
-file = "src/schema.rs"
-custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]
-schema = "information_schema"
-filter = { except_tables = ["sql_features", "sql_implementation_info", "sql_parts", "sql_sizing"] }
-
-[migrations_directory]
-dir = "migrations"
diff --git a/mdengine/src/lib.rs b/mdengine/src/lib.rs
deleted file mode 100644
index def35db..0000000
--- a/mdengine/src/lib.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use crate::{pg_class::PgClass, table_privileges::TablePrivilege};
-use diesel::{
- dsl::{AsSelect, auto_type},
- pg::Pg,
- prelude::*,
-};
-
-pub mod pg_attribute;
-pub mod pg_class;
-pub mod pg_namespace;
-pub mod pg_roles;
-mod schema;
-pub mod table_privileges;
-
-// Still waiting for Postgres to gain class consciousness
-#[derive(Clone, Queryable, Selectable)]
-pub struct PgClassPrivilege {
- #[diesel(embed)]
- pub class: PgClass,
- #[diesel(embed)]
- pub privilege: TablePrivilege,
-}
-
-/// Query for the list of tables any of the provided roles has access to. A Vec
-/// of grantees is accepted in case you wish to query for multiple roles of
-/// which a user is a member.
-#[auto_type(no_type_alias)]
-pub fn class_privileges_for_grantees(grantees: Vec<String>) -> _ {
-    let select: AsSelect<PgClassPrivilege, Pg> = PgClassPrivilege::as_select();
- pg_class::table
- .inner_join(pg_namespace::table.on(pg_namespace::dsl::oid.eq(pg_class::dsl::relnamespace)))
- .inner_join(
- table_privileges::table.on(table_privileges::dsl::table_schema
- .eq(pg_namespace::dsl::nspname)
- .and(table_privileges::dsl::table_name.eq(pg_class::dsl::relname))),
- )
-    // Exclude indexes, sequences, etc.
- .filter(pg_class::dsl::relkind.eq(b'r'))
- .filter(table_privileges::dsl::grantee.eq_any(grantees))
- .select(select)
-}
diff --git a/mdengine/src/pg_attribute.rs b/mdengine/src/pg_attribute.rs
deleted file mode 100644
index bad182f..0000000
--- a/mdengine/src/pg_attribute.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-use diesel::{
- dsl::{AsSelect, auto_type},
- pg::Pg,
- prelude::*,
-};
-
-use crate::schema::pg_attribute;
-
-pub use crate::schema::pg_attribute::{dsl, table};
-
-#[derive(Clone, Debug, Queryable, Selectable)]
-#[diesel(check_for_backend(Pg))]
-#[diesel(table_name = pg_attribute)]
-pub struct PgAttribute {
- /// The table this column belongs to
- pub attrelid: u32,
- /// The column name
- pub attname: String,
- /// The data type of this column (zero for a dropped column)
- pub atttypid: u32,
- /// A copy of pg_type.typlen of this column's type
- pub attlen: i16,
- /// The number of the column. Ordinary columns are numbered from 1 up. System columns, such as ctid, have (arbitrary) negative numbers.
- pub attnum: i16,
- /// Always -1 in storage, but when loaded into a row descriptor in memory this might be updated to cache the offset of the attribute within the row
- pub attcacheoff: i32,
- /// atttypmod records type-specific data supplied at table creation time (for example, the maximum length of a varchar column). It is passed to type-specific input functions and length coercion functions. The value will generally be -1 for types that do not need atttypmod.
- pub atttypmod: i32,
- /// Number of dimensions, if the column is an array type; otherwise 0. (Presently, the number of dimensions of an array is not enforced, so any nonzero value effectively means “it's an array”.)
- pub attndims: i16,
- /// A copy of pg_type.typbyval of this column's type
- pub attbyval: bool,
- /// A copy of pg_type.typalign of this column's type
- pub attalign: u8,
- /// Normally a copy of pg_type.typstorage of this column's type. For TOAST-able data types, this can be altered after column creation to control storage policy.
- pub attstorage: u8,
- /// The current compression method of the column. Typically this is '\0' to specify use of the current default setting (see default_toast_compression). Otherwise, 'p' selects pglz compression, while 'l' selects LZ4 compression. However, this field is ignored whenever attstorage does not allow compression.
-    pub attcompression: Option<u8>,
- /// This represents a not-null constraint.
- pub attnotnull: bool,
- /// This column has a default expression or generation expression, in which case there will be a corresponding entry in the pg_attrdef catalog that actually defines the expression. (Check attgenerated to determine whether this is a default or a generation expression.)
- pub atthasdef: bool,
- /// This column has a value which is used where the column is entirely missing from the row, as happens when a column is added with a non-volatile DEFAULT value after the row is created. The actual value used is stored in the attmissingval column.
- pub atthasmissing: bool,
- /// If a zero byte (''), then not an identity column. Otherwise, a = generated always, d = generated by default.
-    pub attidentity: Option<u8>,
- /// If a zero byte (''), then not a generated column. Otherwise, s = stored. (Other values might be added in the future.)
-    pub attgenerated: Option<u8>,
- /// This column has been dropped and is no longer valid. A dropped column is still physically present in the table, but is ignored by the parser and so cannot be accessed via SQL.
- pub attisdropped: bool,
- /// This column is defined locally in the relation. Note that a column can be locally defined and inherited simultaneously.
- pub attislocal: bool,
- /// The number of direct ancestors this column has. A column with a nonzero number of ancestors cannot be dropped nor renamed.
- pub attinhcount: i16,
- /// The defined collation of the column, or zero if the column is not of a collatable data type
- pub attcollation: u32,
- /// attstattarget controls the level of detail of statistics accumulated for this column by ANALYZE. A zero value indicates that no statistics should be collected. A null value says to use the system default statistics target. The exact meaning of positive values is data type-dependent. For scalar data types, attstattarget is both the target number of “most common values” to collect, and the target number of histogram bins to create.
-    pub attstattarget: Option<i16>,
- /// Column-level access privileges, if any have been granted specifically on this column
-    pub attacl: Option<Vec<String>>,
- /// Attribute-level options, as “keyword=value” strings
-    pub attoptions: Option<Vec<String>>,
- /// Attribute-level foreign data wrapper options, as “keyword=value” strings
-    pub attfdwoptions: Option<Vec<String>>,
-}
-
-#[auto_type(no_type_alias)]
-pub fn attributes_for_rel(oid: u32) -> _ {
-    let select: AsSelect<PgAttribute, Pg> = PgAttribute::as_select();
- pg_attribute::table
- .filter(pg_attribute::dsl::attrelid.eq(oid))
- .filter(pg_attribute::dsl::attnum.gt(0i16))
- .filter(pg_attribute::dsl::attisdropped.eq(false))
- .select(select)
-}
diff --git a/mdengine/src/pg_class.rs b/mdengine/src/pg_class.rs
deleted file mode 100644
index c38c314..0000000
--- a/mdengine/src/pg_class.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-use diesel::{pg::Pg, prelude::*};
-
-use crate::schema::pg_class;
-
-pub use crate::schema::pg_class::{dsl, table};
-
-#[derive(Clone, Debug, Queryable, Selectable)]
-#[diesel(check_for_backend(Pg))]
-#[diesel(table_name = pg_class)]
-#[diesel(primary_key(oid))]
-pub struct PgClass {
- pub oid: u32,
- pub relname: String,
-}
diff --git a/mdengine/src/pg_namespace.rs b/mdengine/src/pg_namespace.rs
deleted file mode 100644
index 0285156..0000000
--- a/mdengine/src/pg_namespace.rs
+++ /dev/null
@@ -1,13 +0,0 @@
-use diesel::{pg::Pg, prelude::*};
-
-use crate::schema::pg_namespace;
-
-pub use crate::schema::pg_namespace::{dsl, table};
-
-#[derive(Clone, Debug, Queryable, Selectable)]
-#[diesel(check_for_backend(Pg))]
-#[diesel(table_name = pg_namespace)]
-#[diesel(primary_key(oid))]
-pub struct PgNamespace {
- pub oid: u32,
-}
diff --git a/mdengine/src/pg_roles.rs b/mdengine/src/pg_roles.rs
deleted file mode 100644
index 252cd02..0000000
--- a/mdengine/src/pg_roles.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-use chrono::{DateTime, Utc};
-use diesel::{pg::Pg, prelude::*};
-
-use crate::schema::pg_roles;
-
-pub use crate::schema::pg_roles::{dsl, table};
-
-#[derive(Clone, Debug, Queryable, Selectable)]
-#[diesel(check_for_backend(Pg))]
-#[diesel(table_name = pg_roles)]
-#[diesel(primary_key(oid))]
-pub struct PgRole {
- /// Role name
- pub rolname: String,
- /// Role has superuser privileges
- pub rolsuper: bool,
- /// Role automatically inherits privileges of roles it is a member of
- pub rolinherit: bool,
- /// Role can create more roles
- pub rolcreaterole: bool,
- /// Role can create databases
- pub rolcreatedb: bool,
- /// Role can log in. That is, this role can be given as the initial session authorization identifier
- pub rolcanlogin: bool,
- /// Role is a replication role. A replication role can initiate replication connections and create and drop replication slots.
- pub rolreplication: bool,
- /// For roles that can log in, this sets maximum number of concurrent connections this role can make. -1 means no limit.
- pub rolconnlimit: i32,
- /// Not the password (always reads as ********)
- pub rolpassword: String,
- /// Password expiry time (only used for password authentication); null if no expiration
-    pub rolvaliduntil: Option<DateTime<Utc>>,
- /// Role bypasses every row-level security policy, see Section 5.9 for more information.
- pub rolbypassrls: bool,
- /// Role-specific defaults for run-time configuration variables
-    pub rolconfig: Option<Vec<String>>,
- /// ID of role
- pub oid: u32,
-}
diff --git a/mdengine/src/schema.rs b/mdengine/src/schema.rs
deleted file mode 100644
index 3805f87..0000000
--- a/mdengine/src/schema.rs
+++ /dev/null
@@ -1,195 +0,0 @@
-use diesel::{allow_tables_to_appear_in_same_query, joinable, table};
-
-table! {
- pg_class (oid) {
- /// Row identifier
- oid -> Oid,
- /// Name of the table, index, view, etc.
- relname -> Text,
- /// The OID of the namespace that contains this relation
- relnamespace -> Oid,
- /// The OID of the data type that corresponds to this table's row type, if any; zero for indexes, sequences, and toast tables, which have no pg_type entry
- reltype -> Oid,
- /// For typed tables, the OID of the underlying composite type; zero for all other relations
- reloftype -> Oid,
- /// Owner of the relation
- relowner -> Oid,
- /// The access method used to access this table or index. Not meaningful if the relation is a sequence or has no on-disk file, except for partitioned tables, where, if set, it takes precedence over default_table_access_method when determining the access method to use for partitions created when one is not specified in the creation command.
- relam -> Oid,
- /// Name of the on-disk file of this relation; zero means this is a “mapped” relation whose disk file name is determined by low-level state
- relfilenode -> Oid,
- /// The tablespace in which this relation is stored. If zero, the database's default tablespace is implied. Not meaningful if the relation has no on-disk file, except for partitioned tables, where this is the tablespace in which partitions will be created when one is not specified in the creation command.
- reltablespace -> Oid,
- /// Size of the on-disk representation of this table in pages (of size BLCKSZ). This is only an estimate used by the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as CREATE INDEX.
- relpages -> Integer,
- /// Number of live rows in the table. This is only an estimate used by the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as CREATE INDEX. If the table has never yet been vacuumed or analyzed, reltuples contains -1 indicating that the row count is unknown.
- reltuples -> Float,
- /// Number of pages that are marked all-visible in the table's visibility map. This is only an estimate used by the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as CREATE INDEX.
- relallvisible -> Integer,
- /// OID of the TOAST table associated with this table, zero if none. The TOAST table stores large attributes “out of line” in a secondary table.
- reltoastrelid -> Oid,
- /// True if this is a table and it has (or recently had) any indexes
- relhasindex -> Bool,
- /// True if this table is shared across all databases in the cluster. Only certain system catalogs (such as pg_database) are shared.
- relisshared -> Bool,
- /// p = permanent table/sequence, u = unlogged table/sequence, t = temporary table/sequence
- relpersistence -> CChar,
- /// r = ordinary table, i = index, S = sequence, t = TOAST table, v = view, m = materialized view, c = composite type, f = foreign table, p = partitioned table, I = partitioned index
- relkind -> CChar,
- /// Number of user columns in the relation (system columns not counted). There must be this many corresponding entries in pg_attribute. See also pg_attribute.attnum.
- relnatts -> SmallInt,
- /// Number of CHECK constraints on the table; see pg_constraint catalog
- relchecks -> SmallInt,
- /// True if table has (or once had) rules; see pg_rewrite catalog
- relhasrules -> Bool,
- /// True if table has (or once had) triggers; see pg_trigger catalog
- relhastriggers -> Bool,
- /// True if table or index has (or once had) any inheritance children or partitions
- relhassubclass -> Bool,
- /// True if table has row-level security enabled; see pg_policy catalog
- relrowsecurity -> Bool,
- /// True if row-level security (when enabled) will also apply to table owner; see pg_policy catalog
- relforcerowsecurity -> Bool,
- /// True if relation is populated (this is true for all relations other than some materialized views)
- relispopulated -> Bool,
- /// Columns used to form “replica identity” for rows: d = default (primary key, if any), n = nothing, f = all columns, i = index with indisreplident set (same as nothing if the index used has been dropped)
- relreplident -> CChar,
- /// True if table or index is a partition
- relispartition -> Bool,
- /// For new relations being written during a DDL operation that requires a table rewrite, this contains the OID of the original relation; otherwise zero. That state is only visible internally; this field should never contain anything other than zero for a user-visible relation.
- relrewrite -> Oid,
- /// All transaction IDs before this one have been replaced with a permanent (“frozen”) transaction ID in this table. This is used to track whether the table needs to be vacuumed in order to prevent transaction ID wraparound or to allow pg_xact to be shrunk. Zero (InvalidTransactionId) if the relation is not a table.
- /// Access-method-specific options, as “keyword=value” strings
-        reloptions -> Array<Text>,
- }
-}
-
-table! {
- pg_roles (oid) {
- /// Role name
- rolname -> Text,
- /// Role has superuser privileges
- rolsuper -> Bool,
- /// Role automatically inherits privileges of roles it is a member of
- rolinherit -> Bool,
- /// Role can create more roles
- rolcreaterole -> Bool,
- /// Role can create databases
- rolcreatedb -> Bool,
- /// Role can log in. That is, this role can be given as the initial session authorization identifier
- rolcanlogin -> Bool,
- /// Role is a replication role. A replication role can initiate replication connections and create and drop replication slots.
- rolreplication -> Bool,
- /// For roles that can log in, this sets maximum number of concurrent connections this role can make. -1 means no limit.
- rolconnlimit -> Integer,
- /// Not the password (always reads as ********)
- rolpassword -> Text,
- /// Password expiry time (only used for password authentication); null if no expiration
-        rolvaliduntil -> Nullable<Timestamptz>,
- /// Role bypasses every row-level security policy, see Section 5.9 for more information.
- rolbypassrls -> Bool,
- /// Role-specific defaults for run-time configuration variables
-        rolconfig -> Nullable<Array<Text>>,
- /// ID of role
- oid -> Oid,
- }
-}
-
-table! {
- pg_namespace (oid) {
- /// Row identifier
- oid -> Oid,
- /// Name of the namespace
- nspname -> Text,
-        /// Owner of the namespace
- nspowner -> Oid,
- /// Access privileges; see Section 5.8 for details
-        nspacl -> Array<Text>,
- }
-}
-
-table! {
- pg_attribute (attrelid, attname) {
- /// The table this column belongs to
- attrelid -> Oid,
- /// The column name
- attname -> Text,
- /// The data type of this column (zero for a dropped column)
- atttypid -> Oid,
- /// A copy of pg_type.typlen of this column's type
- attlen -> SmallInt,
- /// The number of the column. Ordinary columns are numbered from 1 up. System columns, such as ctid, have (arbitrary) negative numbers.
- attnum -> SmallInt,
- /// Always -1 in storage, but when loaded into a row descriptor in memory this might be updated to cache the offset of the attribute within the row
- attcacheoff -> Integer,
- /// atttypmod records type-specific data supplied at table creation time (for example, the maximum length of a varchar column). It is passed to type-specific input functions and length coercion functions. The value will generally be -1 for types that do not need atttypmod.
- atttypmod -> Integer,
- /// Number of dimensions, if the column is an array type; otherwise 0. (Presently, the number of dimensions of an array is not enforced, so any nonzero value effectively means “it's an array”.)
- attndims -> SmallInt,
- /// A copy of pg_type.typbyval of this column's type
- attbyval -> Bool,
- /// A copy of pg_type.typalign of this column's type
- attalign -> CChar,
- /// Normally a copy of pg_type.typstorage of this column's type. For TOAST-able data types, this can be altered after column creation to control storage policy.
- attstorage -> CChar,
- /// The current compression method of the column. Typically this is '\0' to specify use of the current default setting (see default_toast_compression). Otherwise, 'p' selects pglz compression, while 'l' selects LZ4 compression. However, this field is ignored whenever attstorage does not allow compression.
-        attcompression -> Nullable<CChar>,
- /// This represents a not-null constraint.
- attnotnull -> Bool,
- /// This column has a default expression or generation expression, in which case there will be a corresponding entry in the pg_attrdef catalog that actually defines the expression. (Check attgenerated to determine whether this is a default or a generation expression.)
- atthasdef -> Bool,
- /// This column has a value which is used where the column is entirely missing from the row, as happens when a column is added with a non-volatile DEFAULT value after the row is created. The actual value used is stored in the attmissingval column.
- atthasmissing -> Bool,
- /// If a zero byte (''), then not an identity column. Otherwise, a = generated always, d = generated by default.
-        attidentity -> Nullable<CChar>,
- /// If a zero byte (''), then not a generated column. Otherwise, s = stored. (Other values might be added in the future.)
-        attgenerated -> Nullable<CChar>,
- /// This column has been dropped and is no longer valid. A dropped column is still physically present in the table, but is ignored by the parser and so cannot be accessed via SQL.
- attisdropped -> Bool,
- /// This column is defined locally in the relation. Note that a column can be locally defined and inherited simultaneously.
- attislocal -> Bool,
- /// The number of direct ancestors this column has. A column with a nonzero number of ancestors cannot be dropped nor renamed.
- attinhcount -> SmallInt,
- /// The defined collation of the column, or zero if the column is not of a collatable data type
- attcollation -> Oid,
- /// attstattarget controls the level of detail of statistics accumulated for this column by ANALYZE. A zero value indicates that no statistics should be collected. A null value says to use the system default statistics target. The exact meaning of positive values is data type-dependent. For scalar data types, attstattarget is both the target number of “most common values” to collect, and the target number of histogram bins to create.
-        attstattarget -> Nullable<SmallInt>,
- /// Column-level access privileges, if any have been granted specifically on this column
-        attacl -> Nullable<Array<Text>>,
- /// Attribute-level options, as “keyword=value” strings
-        attoptions -> Nullable<Array<Text>>,
- /// Attribute-level foreign data wrapper options, as “keyword=value” strings
-        attfdwoptions -> Nullable<Array<Text>>,
- }
-}
-
-table! {
- information_schema.table_privileges (table_catalog, table_schema, table_name, grantor, grantee) {
- /// Name of the role that granted the privilege
- grantor -> Text,
- /// Name of the role that the privilege was granted to
- grantee -> Text,
- /// Name of the database that contains the table (always the current database)
- table_catalog -> Text,
- /// Name of the schema that contains the table
- table_schema -> Text,
- /// Name of the table
- table_name -> Text,
- /// Type of the privilege: SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, or TRIGGER
- privilege_type -> Text,
- /// YES if the privilege is grantable, NO if not
- is_grantable -> Text,
- /// In the SQL standard, WITH HIERARCHY OPTION is a separate (sub-)privilege allowing certain operations on table inheritance hierarchies. In PostgreSQL, this is included in the SELECT privilege, so this column shows YES if the privilege is SELECT, else NO.
- with_hierarchy -> Text,
- }
-}
-
-allow_tables_to_appear_in_same_query!(
- pg_attribute,
- pg_class,
- pg_namespace,
- pg_roles,
- table_privileges
-);
-joinable!(pg_class -> pg_roles (relowner));
-joinable!(pg_attribute -> pg_class (attrelid));
diff --git a/mdengine/src/table_privileges.rs b/mdengine/src/table_privileges.rs
deleted file mode 100644
index 947eba9..0000000
--- a/mdengine/src/table_privileges.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-use diesel::{pg::Pg, prelude::*};
-
-use crate::schema::table_privileges;
-
-pub use crate::schema::table_privileges::{dsl, table};
-
-#[derive(Clone, Debug, Queryable, Selectable)]
-#[diesel(check_for_backend(Pg))]
-#[diesel(table_name = table_privileges)]
-pub struct TablePrivilege {
- /// Name of the role that granted the privilege
- pub grantor: String,
- /// Name of the role that the privilege was granted to
- pub grantee: String,
- /// Name of the database that contains the table (always the current database)
- pub table_catalog: String,
- /// Name of the schema that contains the table
- pub table_schema: String,
- /// Name of the table
- pub table_name: String,
- /// Type of the privilege: SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, or TRIGGER
- pub privilege_type: String,
- /// YES if the privilege is grantable, NO if not
- pub is_grantable: String,
- /// In the SQL standard, WITH HIERARCHY OPTION is a separate (sub-)privilege allowing certain operations on table inheritance hierarchies. In PostgreSQL, this is included in the SELECT privilege, so this column shows YES if the privilege is SELECT, else NO.
- pub with_hierarchy: String,
-}
diff --git a/migrations/00000000000000_diesel_initial_setup/down.sql b/migrations/00000000000000_diesel_initial_setup/down.sql
deleted file mode 100644
index a9f5260..0000000
--- a/migrations/00000000000000_diesel_initial_setup/down.sql
+++ /dev/null
@@ -1,6 +0,0 @@
--- This file was automatically created by Diesel to setup helper functions
--- and other internal bookkeeping. This file is safe to edit, any future
--- changes will be added to existing projects as new migrations.
-
-DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
-DROP FUNCTION IF EXISTS diesel_set_updated_at();
diff --git a/migrations/00000000000000_diesel_initial_setup/up.sql b/migrations/00000000000000_diesel_initial_setup/up.sql
deleted file mode 100644
index d68895b..0000000
--- a/migrations/00000000000000_diesel_initial_setup/up.sql
+++ /dev/null
@@ -1,36 +0,0 @@
--- This file was automatically created by Diesel to setup helper functions
--- and other internal bookkeeping. This file is safe to edit, any future
--- changes will be added to existing projects as new migrations.
-
-
-
-
--- Sets up a trigger for the given table to automatically set a column called
--- `updated_at` whenever the row is modified (unless `updated_at` was included
--- in the modified columns)
---
--- # Example
---
--- ```sql
--- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
---
--- SELECT diesel_manage_updated_at('users');
--- ```
-CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
-BEGIN
- EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
- FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
-BEGIN
- IF (
- NEW IS DISTINCT FROM OLD AND
- NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
- ) THEN
- NEW.updated_at := current_timestamp;
- END IF;
- RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;
diff --git a/migrations/2024-11-25-232658_init/down.sql b/migrations/2024-11-25-232658_init/down.sql
deleted file mode 100644
index c99ddcd..0000000
--- a/migrations/2024-11-25-232658_init/down.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP TABLE IF EXISTS users;
diff --git a/migrations/2024-11-25-232658_init/up.sql b/migrations/2024-11-25-232658_init/up.sql
deleted file mode 100644
index a876762..0000000
--- a/migrations/2024-11-25-232658_init/up.sql
+++ /dev/null
@@ -1,6 +0,0 @@
-CREATE TABLE IF NOT EXISTS users (
- id UUID NOT NULL PRIMARY KEY,
- uid TEXT UNIQUE NOT NULL,
- email TEXT NOT NULL
-);
-CREATE INDEX ON users (uid);
diff --git a/migrations/20241125232658_users.down.sql b/migrations/20241125232658_users.down.sql
new file mode 100644
index 0000000..2482754
--- /dev/null
+++ b/migrations/20241125232658_users.down.sql
@@ -0,0 +1 @@
+drop table if exists users;
diff --git a/migrations/20241125232658_users.up.sql b/migrations/20241125232658_users.up.sql
new file mode 100644
index 0000000..93ff111
--- /dev/null
+++ b/migrations/20241125232658_users.up.sql
@@ -0,0 +1,6 @@
+create table if not exists users (
+ id uuid not null primary key,
+ uid text unique not null,
+ email text not null
+);
+create index on users (uid);
diff --git a/migrations/2025-01-08-211839_sessions/down.sql b/migrations/2025-01-08-211839_sessions/down.sql
deleted file mode 100644
index e51c609..0000000
--- a/migrations/2025-01-08-211839_sessions/down.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP TABLE IF EXISTS browser_sessions;
diff --git a/migrations/2025-01-08-211839_sessions/up.sql b/migrations/2025-01-08-211839_sessions/up.sql
deleted file mode 100644
index 8cba36c..0000000
--- a/migrations/2025-01-08-211839_sessions/up.sql
+++ /dev/null
@@ -1,8 +0,0 @@
-CREATE TABLE browser_sessions (
- id TEXT NOT NULL PRIMARY KEY,
- serialized TEXT NOT NULL,
- created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
- expiry TIMESTAMPTZ
-);
-CREATE INDEX ON browser_sessions (expiry);
-CREATE INDEX ON browser_sessions (created_at);
diff --git a/migrations/20250108211839_sessions.down.sql b/migrations/20250108211839_sessions.down.sql
new file mode 100644
index 0000000..97738a8
--- /dev/null
+++ b/migrations/20250108211839_sessions.down.sql
@@ -0,0 +1 @@
+drop table if exists browser_sessions;
diff --git a/migrations/20250108211839_sessions.up.sql b/migrations/20250108211839_sessions.up.sql
new file mode 100644
index 0000000..fdc96f2
--- /dev/null
+++ b/migrations/20250108211839_sessions.up.sql
@@ -0,0 +1,8 @@
+create table if not exists browser_sessions (
+ id text not null primary key,
+ serialized text not null,
+ created_at timestamptz not null default now(),
+ expiry timestamptz
+);
+create index on browser_sessions (expiry);
+create index on browser_sessions (created_at);
diff --git a/migrations/20250522224809_bases.down.sql b/migrations/20250522224809_bases.down.sql
new file mode 100644
index 0000000..c14b6a4
--- /dev/null
+++ b/migrations/20250522224809_bases.down.sql
@@ -0,0 +1,2 @@
+drop table if exists base_user_perms;
+drop table if exists bases;
diff --git a/migrations/20250522224809_bases.up.sql b/migrations/20250522224809_bases.up.sql
new file mode 100644
index 0000000..7833fab
--- /dev/null
+++ b/migrations/20250522224809_bases.up.sql
@@ -0,0 +1,19 @@
+create table if not exists bases (
+ id uuid not null primary key,
+ name text not null default '',
+ url text not null,
+ owner_id uuid not null references users(id)
+ on delete restrict,
+ user_role_prefix text not null default '__itmu__'
+);
+create index on bases (owner_id);
+
+create table if not exists base_user_perms (
+ id uuid not null primary key,
+ base_id uuid not null references bases(id),
+ user_id uuid not null references users(id),
+ perm text not null,
+ unique (base_id, user_id, perm)
+);
+create index on base_user_perms (user_id);
+create index on base_user_perms (base_id);
diff --git a/src/abstract_.rs b/src/abstract_.rs
index f4127cd..8b13789 100644
--- a/src/abstract_.rs
+++ b/src/abstract_.rs
@@ -1,41 +1 @@
-use anyhow::{Context as _, Result};
-use diesel::{prelude::*, sql_query};
-use mdengine::pg_roles::{self, PgRole};
-use uuid::Uuid;
-pub fn escape_identifier(identifier: &str) -> String {
- // Escaping identifiers for Postgres is fairly easy, provided that the input is
- // already known to contain no invalid multi-byte sequences. Backslashes may
- // remain as-is, and embedded double quotes are escaped simply by doubling
- // them (`"` becomes `""`). Refer to the PQescapeInternal() function in
- // libpq (fe-exec.c) and Diesel's PgQueryBuilder::push_identifier().
- format!("\"{}\"", identifier.replace('"', "\"\""))
-}
-
-pub fn diesel_set_user_id(
- pg_user_role_prefix: &str,
- user_id: Uuid,
- conn: &mut PgConnection,
-) -> Result<()> {
- let role = pg_roles::table
- .select(PgRole::as_select())
- .filter(pg_roles::dsl::rolname.eq(format!("{}{}", pg_user_role_prefix, user_id.simple())))
- .first(conn)
- .optional()
- .context("error reading role")?;
- if role.is_none() {
- sql_query(format!(
- "CREATE ROLE {}",
- escape_identifier(&format!("{}{}", pg_user_role_prefix, user_id.simple()))
- ))
- .execute(conn)
- .context("error creating role")?;
- }
- sql_query(format!(
- "SET ROLE {}",
- escape_identifier(&format!("{}{}", pg_user_role_prefix, user_id.simple()))
- ))
- .execute(conn)
- .context("error setting role to user")?;
- Ok(())
-}
diff --git a/src/app_state.rs b/src/app_state.rs
index 25bdf34..1336b83 100644
--- a/src/app_state.rs
+++ b/src/app_state.rs
@@ -6,15 +6,17 @@ use axum::{
http::request::Parts,
};
use oauth2::basic::BasicClient;
+use sqlx::{pool::PoolConnection, postgres::PgPoolOptions, Postgres};
-use crate::{app_error::AppError, auth, nav::NavbarBuilder, sessions::PgStore, settings::Settings};
+use crate::{
+ app_error::AppError, auth, base_pooler::BasePooler, sessions::PgStore, settings::Settings,
+};
/// Global app configuration
pub struct App {
- pub diesel_pool: deadpool_diesel::postgres::Pool,
- pub navbar_template: NavbarBuilder,
+ pub app_db: sqlx::PgPool,
+ pub base_pooler: BasePooler,
pub oauth_client: BasicClient,
- pub pg_pool: deadpool_postgres::Pool,
pub reqwest_client: reqwest::Client,
pub session_store: PgStore,
pub settings: Settings,
@@ -23,41 +25,21 @@ pub struct App {
impl App {
/// Initialize global application functions based on config values
     pub async fn from_settings(settings: Settings) -> Result<Self> {
- let database_url = settings.database_url.clone();
- let diesel_manager = deadpool_diesel::postgres::Manager::from_config(
- database_url.clone(),
- deadpool_diesel::Runtime::Tokio1,
- deadpool_diesel::ManagerConfig {
- // Reset role after each interaction is recycled so that user
- // sessions remain isolated by deadpool interaction
- recycling_method: deadpool_diesel::RecyclingMethod::CustomQuery(
- std::borrow::Cow::Owned("RESET ROLE;".to_owned()),
- ),
- },
- );
- let diesel_pool = deadpool_diesel::postgres::Pool::builder(diesel_manager).build()?;
+ let app_db = PgPoolOptions::new()
+ .max_connections(settings.app_db_max_connections)
+ .connect(&settings.database_url)
+ .await?;
- let pg_config = deadpool_postgres::Config {
- url: Some(database_url),
- manager: Some(deadpool_postgres::ManagerConfig {
- recycling_method: deadpool_postgres::RecyclingMethod::Clean,
- }),
- ..Default::default()
- };
- let pg_pool = pg_config.create_pool(
- Some(deadpool_postgres::Runtime::Tokio1),
- postgres_native_tls::MakeTlsConnector::new(native_tls::TlsConnector::new()?),
- )?;
-
- let session_store = PgStore::new(diesel_pool.clone());
+ let session_store = PgStore::new(app_db.clone());
let reqwest_client = reqwest::ClientBuilder::new().https_only(true).build()?;
let oauth_client = auth::new_oauth_client(&settings)?;
+ let base_pooler = BasePooler::new_with_app_db(app_db.clone());
+
Ok(Self {
- diesel_pool,
- navbar_template: NavbarBuilder::default().with_base_path(&settings.base_path),
+ app_db,
+ base_pooler,
oauth_client,
- pg_pool,
reqwest_client,
session_store,
settings,
@@ -82,9 +64,9 @@ where
}
/// Extractor to automatically obtain a Deadpool Diesel connection
-pub struct DieselConn(pub deadpool_diesel::postgres::Connection);
+pub struct AppDbConn(pub PoolConnection<Postgres>);
-impl<S> FromRequestParts<S> for DieselConn
+impl<S> FromRequestParts<S> for AppDbConn
where
     S: Into<AppState> + Clone + Sync,
{
@@ -92,24 +74,9 @@ where
     async fn from_request_parts(_: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
         let conn = Into::<AppState>::into(state.clone())
- .diesel_pool
- .get()
+ .app_db
+ .acquire()
.await?;
Ok(Self(conn))
}
}
-
-/// Extractor to automatically obtain a Deadpool tokio-postgres connection
-pub struct PgConn(pub deadpool_postgres::Object);
-
-impl<S> FromRequestParts<S> for PgConn
-where
-    S: Into<AppState> + Clone + Sync,
-{
- type Rejection = AppError;
-
-    async fn from_request_parts(_: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
-        let conn = Into::<AppState>::into(state.clone()).pg_pool.get().await?;
- Ok(Self(conn))
- }
-}
diff --git a/src/base_pooler.rs b/src/base_pooler.rs
new file mode 100644
index 0000000..e50dea6
--- /dev/null
+++ b/src/base_pooler.rs
@@ -0,0 +1,111 @@
+use std::{collections::HashMap, sync::Arc, time::Duration};
+
+use anyhow::{Context as _, Result};
+use axum::extract::FromRef;
+use sqlx::{pool::PoolConnection, postgres::PgPoolOptions, raw_sql, Executor, PgPool, Postgres};
+use tokio::sync::{OnceCell, RwLock};
+use uuid::Uuid;
+
+use crate::{app_state::AppState, bases::Base};
+
+const MAX_CONNECTIONS: u32 = 4;
+const IDLE_SECONDS: u64 = 3600;
+
+// NOTE: The Arc this uses will probably need to be cleaned up for
+// performance eventually.
+
+/// A collection of multiple SQLx Pools.
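+/// Pools are created lazily, keyed by base id, and each connects to that base's own database URL.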
+#[derive(Clone)]
+pub struct BasePooler {
+    pools: Arc<RwLock<HashMap<Uuid, OnceCell<PgPool>>>>,
+ app_db: PgPool,
+}
+
+impl BasePooler {
+ pub fn new_with_app_db(app_db: PgPool) -> Self {
+ Self {
+ app_db,
+ pools: Arc::new(RwLock::new(HashMap::new())),
+ }
+ }
+
+    async fn get_pool_for(&mut self, base_id: Uuid) -> Result<PgPool> {
+ let init_cell = || async {
+ let base = Base::fetch_by_id(base_id, &self.app_db)
+ .await?
+ .context("no such base")?;
+ Ok(PgPoolOptions::new()
+ .min_connections(0)
+ .max_connections(MAX_CONNECTIONS)
+ .idle_timeout(Some(Duration::from_secs(IDLE_SECONDS)))
+ .after_release(|conn, _| {
+ Box::pin(async move {
+ // Essentially "DISCARD ALL" without "DEALLOCATE ALL"
+ conn.execute(raw_sql(
+ "
+close all;
+set session authorization default;
+reset all;
+unlisten *;
+select pg_advisory_unlock_all();
+discard plans;
+discard temp;
+discard sequences;
+",
+ ))
+ .await?;
+ Ok(true)
+ })
+ })
+ .connect(&base.url)
+ .await?)
+ };
+
+ // Attempt to get an existing pool without write-locking the map
+ let pools = self.pools.read().await;
+ if let Some(cell) = pools.get(&base_id) {
+ return Ok(cell
+                .get_or_try_init::<anyhow::Error, _, _>(init_cell)
+ .await?
+ .clone());
+ }
+ drop(pools); // Release read lock
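+        // Slow path: take the write lock and insert an empty cell; get_or_try_init
+        // ensures only one caller performs the initial connect even if several race here.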
+ let mut pools = self.pools.write().await;
+ let entry = pools.entry(base_id).or_insert(OnceCell::new());
+ Ok(entry
+            .get_or_try_init::<anyhow::Error, _, _>(init_cell)
+ .await?
+ .clone())
+ }
+
+    pub async fn acquire_for(&mut self, base_id: Uuid) -> Result<PoolConnection<Postgres>> {
+ let pool = self.get_pool_for(base_id).await?;
+ Ok(pool.acquire().await?)
+ }
+
+ pub async fn close_for(&mut self, base_id: Uuid) -> Result<()> {
+ let pools = self.pools.read().await;
+ if let Some(cell) = pools.get(&base_id) {
+ if let Some(pool) = cell.get() {
+ let pool = pool.clone();
+ drop(pools); // Release read lock
+ let mut pools = self.pools.write().await;
+ pools.remove(&base_id);
+ drop(pools); // Release write lock
+ pool.close().await;
+ }
+ }
+ Ok(())
+ }
+
+ // TODO: Add a cleanup method to remove entries with no connections
+}
+
+impl<S> FromRef<S> for BasePooler
+where
+    S: Into<AppState> + Clone,
+{
+ fn from_ref(state: &S) -> Self {
+        Into::<AppState>::into(state.clone()).base_pooler.clone()
+ }
+}
diff --git a/src/base_user_perms.rs b/src/base_user_perms.rs
new file mode 100644
index 0000000..7795af2
--- /dev/null
+++ b/src/base_user_perms.rs
@@ -0,0 +1,80 @@
+use std::collections::HashSet;
+
+use anyhow::{Context as _, Result};
+use sqlx::{query, PgConnection};
+use uuid::Uuid;
+
+use crate::{
+ bases::Base,
+ pg_acls::PgPrivilegeType,
+ pg_databases::PgDatabase,
+ pg_roles::{user_id_from_rolname, PgRole, RoleTree},
+};
+
+pub struct BaseUserPerm {
+ pub id: Uuid,
+ pub base_id: Uuid,
+ pub user_id: Uuid,
+ pub perm: String,
+}
+
+pub async fn sync_perms_for_base(
+ base_id: Uuid,
+ app_db: &mut PgConnection,
+ client: &mut PgConnection,
+) -> Result<()> {
+ let db = PgDatabase::fetch_current(&mut *client).await?;
+ let explicit_roles = PgRole::fetch_by_names_any(
+ db.datacl
+ .unwrap_or(vec![])
+ .into_iter()
+ .filter(|item| {
+ item.privileges
+ .iter()
+ .any(|privilege| privilege.privilege == PgPrivilegeType::Connect)
+ })
+ .map(|item| item.grantee)
+ .collect(),
+ &mut *client,
+ )
+ .await?;
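+    // Expand each explicitly granted role through its membership tree so users who
+    // only inherit CONNECT indirectly are captured as well.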
+    let mut all_roles: HashSet<PgRole> = HashSet::new();
+ for explicit_role in explicit_roles {
+ if let Some(role_tree) = RoleTree::fetch_members(explicit_role.oid, &mut *client).await? {
+ for implicit_role in role_tree.flatten_inherited() {
+ all_roles.insert(implicit_role.clone());
+ }
+ }
+ }
+ let base = Base::fetch_by_id(base_id, &mut *app_db)
+ .await?
+ .context("base with that id not found")?;
+    let user_ids: Vec<Uuid> = all_roles
+ .iter()
+ .filter_map(|role| user_id_from_rolname(&role.rolname, &base.user_role_prefix).ok())
+ .collect();
+ dbg!(&all_roles);
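+    // Drop perms for users that no longer have access, then upsert a 'connect' perm
+    // for every user that still does.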
+ query!(
+ "delete from base_user_perms where base_id = $1 and not (user_id = any($2))",
+ base_id,
+ user_ids.as_slice(),
+ )
+ .execute(&mut *app_db)
+ .await?;
+ for user_id in user_ids {
+ query!(
+ "
+insert into base_user_perms
+ (id, base_id, user_id, perm)
+values ($1, $2, $3, 'connect')
+on conflict (base_id, user_id, perm) do nothing
+",
+ Uuid::now_v7(),
+ base.id,
+ user_id
+ )
+ .execute(&mut *app_db)
+ .await?;
+ }
+ Ok(())
+}
diff --git a/src/bases.rs b/src/bases.rs
new file mode 100644
index 0000000..cb768e0
--- /dev/null
+++ b/src/bases.rs
@@ -0,0 +1,75 @@
+use derive_builder::Builder;
+use sqlx::{query_as, PgExecutor};
+use uuid::Uuid;
+
+pub struct Base {
+ pub id: Uuid,
+ pub name: String,
+ pub url: String,
+ pub owner_id: Uuid,
+ pub user_role_prefix: String,
+}
+
+impl Base {
+ pub fn insertable_builder() -> InsertableBaseBuilder {
+ InsertableBaseBuilder::default()
+ }
+
+ pub async fn fetch_by_id<'a, E: PgExecutor<'a>>(
+ id: Uuid,
+ client: E,
+ ) -> Result