sqlx etc
This commit is contained in:
parent
ae4b4707d9
commit
ced7eced4a
65 changed files with 2777 additions and 1470 deletions
3
.gitignore
vendored
3
.gitignore
vendored
|
|
@ -1,3 +1,6 @@
|
|||
target
|
||||
.env
|
||||
.DS_Store
|
||||
node_modules
|
||||
js_dist
|
||||
pgdata
|
||||
|
|
|
|||
893
Cargo.lock
generated
893
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
34
Cargo.toml
34
Cargo.toml
|
|
@ -4,38 +4,42 @@ version = "0.0.1"
|
|||
edition = "2021"
|
||||
|
||||
[workspace]
|
||||
members = ["mdengine"]
|
||||
resolver = "2"
|
||||
|
||||
[workspace.dependencies]
|
||||
anyhow = { version = "1.0.91", features = ["backtrace"] }
|
||||
chrono = { version = "0.4.41", features = ["serde"] }
|
||||
reqwest = { version = "0.12.8", features = ["json"] }
|
||||
serde = { version = "1.0.213", features = ["derive"] }
|
||||
sqlx = { version = "0.8.6", features = ["runtime-tokio", "tls-rustls-ring-native-roots", "postgres", "derive", "uuid", "chrono", "json", "macros"] }
|
||||
uuid = { version = "1.11.0", features = ["serde", "v4", "v7"] }
|
||||
tokio = { version = "1.42.0", features = ["full"] }
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.91"
|
||||
anyhow = { workspace = true }
|
||||
askama = { version = "0.14.0", features = ["urlencode"] }
|
||||
async-session = "3.0.0"
|
||||
axum = { version = "0.8.1", features = ["macros"] }
|
||||
axum-extra = { version = "0.10.0", features = ["cookie", "form", "typed-header"] }
|
||||
mdengine = { path = "./mdengine" }
|
||||
chrono = { version = "0.4.39", features = ["serde"] }
|
||||
chrono = { workspace = true }
|
||||
clap = { version = "4.5.31", features = ["derive"] }
|
||||
config = "0.14.1"
|
||||
deadpool-diesel = { version = "0.6.1", features = ["postgres", "serde"] }
|
||||
deadpool-postgres = { version = "0.14.1", features = ["serde"] }
|
||||
derive_builder = "0.20.2"
|
||||
diesel = { version = "2.2.10", features = ["postgres", "chrono", "uuid", "serde_json"] }
|
||||
diesel_migrations = { version = "2.2.0", features = ["postgres"] }
|
||||
dotenvy = "0.15.7"
|
||||
futures = "0.3.31"
|
||||
native-tls = "0.2.14"
|
||||
nom = "8.0.0"
|
||||
oauth2 = "4.4.2"
|
||||
percent-encoding = "2.3.1"
|
||||
postgres-native-tls = "0.5.1"
|
||||
rand = "0.8.5"
|
||||
reqwest = { version = "0.12.8", features = ["json"] }
|
||||
serde = { version = "1.0.213", features = ["derive"] }
|
||||
reqwest = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = "1.0.132"
|
||||
tokio = { version = "1.42.0", features = ["full"] }
|
||||
sqlx = { workspace = true }
|
||||
thiserror = "2.0.12"
|
||||
tokio = { workspace = true }
|
||||
tower = "0.5.2"
|
||||
tower-http = { version = "0.6.2", features = ["compression-gzip", "fs", "normalize-path", "set-header", "trace"] }
|
||||
tracing = "0.1.40"
|
||||
tracing-subscriber = { version = "0.3.19", features = ["chrono", "env-filter"] }
|
||||
uuid = { version = "1.11.0", features = ["serde", "v4", "v7"] }
|
||||
uuid = { workspace = true }
|
||||
validator = { version = "0.20.0", features = ["derive"] }
|
||||
tokio-postgres = { version = "0.7.13", features = ["with-uuid-1", "with-chrono-0_4"] }
|
||||
|
|
|
|||
4
Makefile
Normal file
4
Makefile
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
.PHONY: run-postgres
|
||||
|
||||
run-postgres:
|
||||
docker run --rm -it -e POSTGRES_PASSWORD=guest -v "./pgdata:/var/lib/postgresql/data" -p 127.0.0.1:5432:5432 postgres:17
|
||||
|
|
@ -85,10 +85,7 @@ default_watch = false
|
|||
# If you often use this job, it makes sense to override the 'r' key by adding
|
||||
# a binding `r = job:run-long` at the end of this file .
|
||||
[jobs.run-server]
|
||||
command = [
|
||||
"cargo", "run", "serve"
|
||||
# put launch parameters for your program behind a `--` separator
|
||||
]
|
||||
command = ["cargo", "run", "serve"]
|
||||
need_stdout = true
|
||||
allow_warnings = true
|
||||
background = false
|
||||
|
|
|
|||
16
components/deno.json
Normal file
16
components/deno.json
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
{
|
||||
"compilerOptions": {
|
||||
"lib": ["deno.ns", "dom"],
|
||||
"experimentalDecorators": true,
|
||||
"useDefineForClassFields": false
|
||||
},
|
||||
"tasks": {
|
||||
"dev": "deno run -A --node-modules-dir npm:vite",
|
||||
"build": "deno run -A --node-modules-dir npm:vite build"
|
||||
},
|
||||
"imports": {
|
||||
"@lit/context": "npm:@lit/context@^1.1.2",
|
||||
"lit": "npm:lit@^3.2.0",
|
||||
"vite": "npm:vite@^5.2.10"
|
||||
}
|
||||
}
|
||||
347
components/deno.lock
generated
Normal file
347
components/deno.lock
generated
Normal file
|
|
@ -0,0 +1,347 @@
|
|||
{
|
||||
"version": "5",
|
||||
"specifiers": {
|
||||
"npm:@lit/context@^1.1.2": "1.1.2",
|
||||
"npm:lit@^3.2.0": "3.2.1",
|
||||
"npm:vite@*": "5.4.5",
|
||||
"npm:vite@^5.2.10": "5.4.5"
|
||||
},
|
||||
"npm": {
|
||||
"@esbuild/aix-ppc64@0.21.5": {
|
||||
"integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==",
|
||||
"os": ["aix"],
|
||||
"cpu": ["ppc64"]
|
||||
},
|
||||
"@esbuild/android-arm64@0.21.5": {
|
||||
"integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==",
|
||||
"os": ["android"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@esbuild/android-arm@0.21.5": {
|
||||
"integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==",
|
||||
"os": ["android"],
|
||||
"cpu": ["arm"]
|
||||
},
|
||||
"@esbuild/android-x64@0.21.5": {
|
||||
"integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==",
|
||||
"os": ["android"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@esbuild/darwin-arm64@0.21.5": {
|
||||
"integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==",
|
||||
"os": ["darwin"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@esbuild/darwin-x64@0.21.5": {
|
||||
"integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==",
|
||||
"os": ["darwin"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@esbuild/freebsd-arm64@0.21.5": {
|
||||
"integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==",
|
||||
"os": ["freebsd"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@esbuild/freebsd-x64@0.21.5": {
|
||||
"integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==",
|
||||
"os": ["freebsd"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@esbuild/linux-arm64@0.21.5": {
|
||||
"integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@esbuild/linux-arm@0.21.5": {
|
||||
"integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm"]
|
||||
},
|
||||
"@esbuild/linux-ia32@0.21.5": {
|
||||
"integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["ia32"]
|
||||
},
|
||||
"@esbuild/linux-loong64@0.21.5": {
|
||||
"integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["loong64"]
|
||||
},
|
||||
"@esbuild/linux-mips64el@0.21.5": {
|
||||
"integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["mips64el"]
|
||||
},
|
||||
"@esbuild/linux-ppc64@0.21.5": {
|
||||
"integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["ppc64"]
|
||||
},
|
||||
"@esbuild/linux-riscv64@0.21.5": {
|
||||
"integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["riscv64"]
|
||||
},
|
||||
"@esbuild/linux-s390x@0.21.5": {
|
||||
"integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["s390x"]
|
||||
},
|
||||
"@esbuild/linux-x64@0.21.5": {
|
||||
"integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@esbuild/netbsd-x64@0.21.5": {
|
||||
"integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==",
|
||||
"os": ["netbsd"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@esbuild/openbsd-x64@0.21.5": {
|
||||
"integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==",
|
||||
"os": ["openbsd"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@esbuild/sunos-x64@0.21.5": {
|
||||
"integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==",
|
||||
"os": ["sunos"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@esbuild/win32-arm64@0.21.5": {
|
||||
"integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==",
|
||||
"os": ["win32"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@esbuild/win32-ia32@0.21.5": {
|
||||
"integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==",
|
||||
"os": ["win32"],
|
||||
"cpu": ["ia32"]
|
||||
},
|
||||
"@esbuild/win32-x64@0.21.5": {
|
||||
"integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==",
|
||||
"os": ["win32"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@lit-labs/ssr-dom-shim@1.2.1": {
|
||||
"integrity": "sha512-wx4aBmgeGvFmOKucFKY+8VFJSYZxs9poN3SDNQFF6lT6NrQUnHiPB2PWz2sc4ieEcAaYYzN+1uWahEeTq2aRIQ=="
|
||||
},
|
||||
"@lit/context@1.1.2": {
|
||||
"integrity": "sha512-S0nw2C6Tkm7fVX5TGYqeROGD+Z9Coa2iFpW+ysYBDH3YvCqOY3wVQvSgwbaliLJkjTnSEYCBe9qFqKV8WUFpVw==",
|
||||
"dependencies": [
|
||||
"@lit/reactive-element"
|
||||
]
|
||||
},
|
||||
"@lit/reactive-element@2.0.4": {
|
||||
"integrity": "sha512-GFn91inaUa2oHLak8awSIigYz0cU0Payr1rcFsrkf5OJ5eSPxElyZfKh0f2p9FsTiZWXQdWGJeXZICEfXXYSXQ==",
|
||||
"dependencies": [
|
||||
"@lit-labs/ssr-dom-shim"
|
||||
]
|
||||
},
|
||||
"@rollup/rollup-android-arm-eabi@4.21.3": {
|
||||
"integrity": "sha512-MmKSfaB9GX+zXl6E8z4koOr/xU63AMVleLEa64v7R0QF/ZloMs5vcD1sHgM64GXXS1csaJutG+ddtzcueI/BLg==",
|
||||
"os": ["android"],
|
||||
"cpu": ["arm"]
|
||||
},
|
||||
"@rollup/rollup-android-arm64@4.21.3": {
|
||||
"integrity": "sha512-zrt8ecH07PE3sB4jPOggweBjJMzI1JG5xI2DIsUbkA+7K+Gkjys6eV7i9pOenNSDJH3eOr/jLb/PzqtmdwDq5g==",
|
||||
"os": ["android"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@rollup/rollup-darwin-arm64@4.21.3": {
|
||||
"integrity": "sha512-P0UxIOrKNBFTQaXTxOH4RxuEBVCgEA5UTNV6Yz7z9QHnUJ7eLX9reOd/NYMO3+XZO2cco19mXTxDMXxit4R/eQ==",
|
||||
"os": ["darwin"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@rollup/rollup-darwin-x64@4.21.3": {
|
||||
"integrity": "sha512-L1M0vKGO5ASKntqtsFEjTq/fD91vAqnzeaF6sfNAy55aD+Hi2pBI5DKwCO+UNDQHWsDViJLqshxOahXyLSh3EA==",
|
||||
"os": ["darwin"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@rollup/rollup-linux-arm-gnueabihf@4.21.3": {
|
||||
"integrity": "sha512-btVgIsCjuYFKUjopPoWiDqmoUXQDiW2A4C3Mtmp5vACm7/GnyuprqIDPNczeyR5W8rTXEbkmrJux7cJmD99D2g==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm"]
|
||||
},
|
||||
"@rollup/rollup-linux-arm-musleabihf@4.21.3": {
|
||||
"integrity": "sha512-zmjbSphplZlau6ZTkxd3+NMtE4UKVy7U4aVFMmHcgO5CUbw17ZP6QCgyxhzGaU/wFFdTfiojjbLG3/0p9HhAqA==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm"]
|
||||
},
|
||||
"@rollup/rollup-linux-arm64-gnu@4.21.3": {
|
||||
"integrity": "sha512-nSZfcZtAnQPRZmUkUQwZq2OjQciR6tEoJaZVFvLHsj0MF6QhNMg0fQ6mUOsiCUpTqxTx0/O6gX0V/nYc7LrgPw==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@rollup/rollup-linux-arm64-musl@4.21.3": {
|
||||
"integrity": "sha512-MnvSPGO8KJXIMGlQDYfvYS3IosFN2rKsvxRpPO2l2cum+Z3exiExLwVU+GExL96pn8IP+GdH8Tz70EpBhO0sIQ==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@rollup/rollup-linux-powerpc64le-gnu@4.21.3": {
|
||||
"integrity": "sha512-+W+p/9QNDr2vE2AXU0qIy0qQE75E8RTwTwgqS2G5CRQ11vzq0tbnfBd6brWhS9bCRjAjepJe2fvvkvS3dno+iw==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["ppc64"]
|
||||
},
|
||||
"@rollup/rollup-linux-riscv64-gnu@4.21.3": {
|
||||
"integrity": "sha512-yXH6K6KfqGXaxHrtr+Uoy+JpNlUlI46BKVyonGiaD74ravdnF9BUNC+vV+SIuB96hUMGShhKV693rF9QDfO6nQ==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["riscv64"]
|
||||
},
|
||||
"@rollup/rollup-linux-s390x-gnu@4.21.3": {
|
||||
"integrity": "sha512-R8cwY9wcnApN/KDYWTH4gV/ypvy9yZUHlbJvfaiXSB48JO3KpwSpjOGqO4jnGkLDSk1hgjYkTbTt6Q7uvPf8eg==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["s390x"]
|
||||
},
|
||||
"@rollup/rollup-linux-x64-gnu@4.21.3": {
|
||||
"integrity": "sha512-kZPbX/NOPh0vhS5sI+dR8L1bU2cSO9FgxwM8r7wHzGydzfSjLRCFAT87GR5U9scj2rhzN3JPYVC7NoBbl4FZ0g==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@rollup/rollup-linux-x64-musl@4.21.3": {
|
||||
"integrity": "sha512-S0Yq+xA1VEH66uiMNhijsWAafffydd2X5b77eLHfRmfLsRSpbiAWiRHV6DEpz6aOToPsgid7TI9rGd6zB1rhbg==",
|
||||
"os": ["linux"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@rollup/rollup-win32-arm64-msvc@4.21.3": {
|
||||
"integrity": "sha512-9isNzeL34yquCPyerog+IMCNxKR8XYmGd0tHSV+OVx0TmE0aJOo9uw4fZfUuk2qxobP5sug6vNdZR6u7Mw7Q+Q==",
|
||||
"os": ["win32"],
|
||||
"cpu": ["arm64"]
|
||||
},
|
||||
"@rollup/rollup-win32-ia32-msvc@4.21.3": {
|
||||
"integrity": "sha512-nMIdKnfZfzn1Vsk+RuOvl43ONTZXoAPUUxgcU0tXooqg4YrAqzfKzVenqqk2g5efWh46/D28cKFrOzDSW28gTA==",
|
||||
"os": ["win32"],
|
||||
"cpu": ["ia32"]
|
||||
},
|
||||
"@rollup/rollup-win32-x64-msvc@4.21.3": {
|
||||
"integrity": "sha512-fOvu7PCQjAj4eWDEuD8Xz5gpzFqXzGlxHZozHP4b9Jxv9APtdxL6STqztDzMLuRXEc4UpXGGhx029Xgm91QBeA==",
|
||||
"os": ["win32"],
|
||||
"cpu": ["x64"]
|
||||
},
|
||||
"@types/estree@1.0.5": {
|
||||
"integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw=="
|
||||
},
|
||||
"@types/trusted-types@2.0.7": {
|
||||
"integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw=="
|
||||
},
|
||||
"esbuild@0.21.5": {
|
||||
"integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==",
|
||||
"optionalDependencies": [
|
||||
"@esbuild/aix-ppc64",
|
||||
"@esbuild/android-arm",
|
||||
"@esbuild/android-arm64",
|
||||
"@esbuild/android-x64",
|
||||
"@esbuild/darwin-arm64",
|
||||
"@esbuild/darwin-x64",
|
||||
"@esbuild/freebsd-arm64",
|
||||
"@esbuild/freebsd-x64",
|
||||
"@esbuild/linux-arm",
|
||||
"@esbuild/linux-arm64",
|
||||
"@esbuild/linux-ia32",
|
||||
"@esbuild/linux-loong64",
|
||||
"@esbuild/linux-mips64el",
|
||||
"@esbuild/linux-ppc64",
|
||||
"@esbuild/linux-riscv64",
|
||||
"@esbuild/linux-s390x",
|
||||
"@esbuild/linux-x64",
|
||||
"@esbuild/netbsd-x64",
|
||||
"@esbuild/openbsd-x64",
|
||||
"@esbuild/sunos-x64",
|
||||
"@esbuild/win32-arm64",
|
||||
"@esbuild/win32-ia32",
|
||||
"@esbuild/win32-x64"
|
||||
],
|
||||
"scripts": true,
|
||||
"bin": true
|
||||
},
|
||||
"fsevents@2.3.3": {
|
||||
"integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
|
||||
"os": ["darwin"],
|
||||
"scripts": true
|
||||
},
|
||||
"lit-element@4.1.1": {
|
||||
"integrity": "sha512-HO9Tkkh34QkTeUmEdNYhMT8hzLid7YlMlATSi1q4q17HE5d9mrrEHJ/o8O2D0cMi182zK1F3v7x0PWFjrhXFew==",
|
||||
"dependencies": [
|
||||
"@lit-labs/ssr-dom-shim",
|
||||
"@lit/reactive-element",
|
||||
"lit-html"
|
||||
]
|
||||
},
|
||||
"lit-html@3.2.1": {
|
||||
"integrity": "sha512-qI/3lziaPMSKsrwlxH/xMgikhQ0EGOX2ICU73Bi/YHFvz2j/yMCIrw4+puF2IpQ4+upd3EWbvnHM9+PnJn48YA==",
|
||||
"dependencies": [
|
||||
"@types/trusted-types"
|
||||
]
|
||||
},
|
||||
"lit@3.2.1": {
|
||||
"integrity": "sha512-1BBa1E/z0O9ye5fZprPtdqnc0BFzxIxTTOO/tQFmyC/hj1O3jL4TfmLBw0WEwjAokdLwpclkvGgDJwTIh0/22w==",
|
||||
"dependencies": [
|
||||
"@lit/reactive-element",
|
||||
"lit-element",
|
||||
"lit-html"
|
||||
]
|
||||
},
|
||||
"nanoid@3.3.7": {
|
||||
"integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==",
|
||||
"bin": true
|
||||
},
|
||||
"picocolors@1.1.0": {
|
||||
"integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw=="
|
||||
},
|
||||
"postcss@8.4.47": {
|
||||
"integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==",
|
||||
"dependencies": [
|
||||
"nanoid",
|
||||
"picocolors",
|
||||
"source-map-js"
|
||||
]
|
||||
},
|
||||
"rollup@4.21.3": {
|
||||
"integrity": "sha512-7sqRtBNnEbcBtMeRVc6VRsJMmpI+JU1z9VTvW8D4gXIYQFz0aLcsE6rRkyghZkLfEgUZgVvOG7A5CVz/VW5GIA==",
|
||||
"dependencies": [
|
||||
"@types/estree"
|
||||
],
|
||||
"optionalDependencies": [
|
||||
"@rollup/rollup-android-arm-eabi",
|
||||
"@rollup/rollup-android-arm64",
|
||||
"@rollup/rollup-darwin-arm64",
|
||||
"@rollup/rollup-darwin-x64",
|
||||
"@rollup/rollup-linux-arm-gnueabihf",
|
||||
"@rollup/rollup-linux-arm-musleabihf",
|
||||
"@rollup/rollup-linux-arm64-gnu",
|
||||
"@rollup/rollup-linux-arm64-musl",
|
||||
"@rollup/rollup-linux-powerpc64le-gnu",
|
||||
"@rollup/rollup-linux-riscv64-gnu",
|
||||
"@rollup/rollup-linux-s390x-gnu",
|
||||
"@rollup/rollup-linux-x64-gnu",
|
||||
"@rollup/rollup-linux-x64-musl",
|
||||
"@rollup/rollup-win32-arm64-msvc",
|
||||
"@rollup/rollup-win32-ia32-msvc",
|
||||
"@rollup/rollup-win32-x64-msvc",
|
||||
"fsevents"
|
||||
],
|
||||
"bin": true
|
||||
},
|
||||
"source-map-js@1.2.1": {
|
||||
"integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="
|
||||
},
|
||||
"vite@5.4.5": {
|
||||
"integrity": "sha512-pXqR0qtb2bTwLkev4SE3r4abCNioP3GkjvIDLlzziPpXtHgiJIjuKl+1GN6ESOT3wMjG3JTeARopj2SwYaHTOA==",
|
||||
"dependencies": [
|
||||
"esbuild",
|
||||
"postcss",
|
||||
"rollup"
|
||||
],
|
||||
"optionalDependencies": [
|
||||
"fsevents"
|
||||
],
|
||||
"bin": true
|
||||
}
|
||||
},
|
||||
"workspace": {
|
||||
"dependencies": [
|
||||
"npm:@lit/context@^1.1.2",
|
||||
"npm:lit@^3.2.0",
|
||||
"npm:vite@^5.2.10"
|
||||
]
|
||||
}
|
||||
}
|
||||
29
components/src/cell-text.ts
Normal file
29
components/src/cell-text.ts
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
import { html, LitElement } from "lit";
|
||||
import { customElement, property } from "lit/decorators.js";
|
||||
|
||||
@customElement("cell-text")
|
||||
export class CellText extends LitElement {
|
||||
@property({ attribute: true, type: String, reflect: true })
|
||||
value = "null";
|
||||
|
||||
@property({ attribute: "editable", type: Boolean, reflect: true })
|
||||
editable = false;
|
||||
|
||||
@property({ type: Boolean })
|
||||
editing = false;
|
||||
|
||||
protected override render() {
|
||||
const parsed = JSON.parse(this.value);
|
||||
if (parsed === null) {
|
||||
return html`
|
||||
<code>---</code>
|
||||
`;
|
||||
}
|
||||
|
||||
return html`
|
||||
<div>
|
||||
${parsed}
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
}
|
||||
20
components/src/cell-uuid.ts
Normal file
20
components/src/cell-uuid.ts
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
import { html, LitElement } from "lit";
|
||||
import { customElement, property } from "lit/decorators.js";
|
||||
|
||||
@customElement("cell-uuid")
|
||||
export class CellUuid extends LitElement {
|
||||
@property({ attribute: "is-null", type: Boolean, reflect: true })
|
||||
isNull = false;
|
||||
|
||||
protected override render() {
|
||||
if (this.isNull) {
|
||||
return html`
|
||||
<code>NULL</code>
|
||||
`;
|
||||
}
|
||||
|
||||
return html`
|
||||
<code><slot></slot></code>
|
||||
`;
|
||||
}
|
||||
}
|
||||
2
components/src/cells.ts
Normal file
2
components/src/cells.ts
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
export { CellText } from "./cell-text.ts";
|
||||
export { CellUuid } from "./cell-uuid.ts";
|
||||
5
components/src/grid-size-context.ts
Normal file
5
components/src/grid-size-context.ts
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
import { createContext } from "@lit/context";
|
||||
|
||||
export const gridSizePxContext = createContext<number>(
|
||||
Symbol("grid-size-px-context"),
|
||||
);
|
||||
44
components/src/my-grid.ts
Normal file
44
components/src/my-grid.ts
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
import { provide } from "@lit/context";
|
||||
import { css, html, LitElement } from "lit";
|
||||
import { customElement, property } from "lit/decorators.js";
|
||||
|
||||
import { gridSizePxContext } from "./grid-size-context.ts";
|
||||
|
||||
@customElement("my-grid")
|
||||
export class MyGrid extends LitElement {
|
||||
@provide({ context: gridSizePxContext })
|
||||
@property({ attribute: "grid-size-px", type: Number })
|
||||
gridSizePx = 32;
|
||||
|
||||
@property()
|
||||
gridColor = "#ddd";
|
||||
|
||||
@property()
|
||||
width = "100%";
|
||||
|
||||
@property()
|
||||
height = "100%";
|
||||
|
||||
static override styles = css`
|
||||
:host {
|
||||
display: block;
|
||||
position: relative;
|
||||
}
|
||||
`;
|
||||
|
||||
protected override render() {
|
||||
return html`
|
||||
<style>
|
||||
:host {
|
||||
/* Dotted grid CSS inspired by: https://stackoverflow.com/a/55364821 */
|
||||
background-image: radial-gradient(circle at 1px 1px, ${this
|
||||
.gridColor} 1px, transparent 0);
|
||||
background-size: ${this.gridSizePx}px ${this.gridSizePx}px;
|
||||
height: ${this.height};
|
||||
width: ${this.width};
|
||||
}
|
||||
</style>
|
||||
<slot></slot>
|
||||
`;
|
||||
}
|
||||
}
|
||||
7
components/tsconfig.json
Normal file
7
components/tsconfig.json
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
"compilerOptions": {
|
||||
"lib": ["deno.ns", "dom"],
|
||||
"experimentalDecorators": true,
|
||||
"useDefineForClassFields": false
|
||||
}
|
||||
}
|
||||
18
components/vite.config.mjs
Normal file
18
components/vite.config.mjs
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
import { defineConfig } from "vite";
|
||||
|
||||
import "lit";
|
||||
|
||||
// https://vitejs.dev/config/
|
||||
export default defineConfig({
|
||||
build: {
|
||||
lib: {
|
||||
entry: ["src/cells.ts"],
|
||||
formats: ["es"],
|
||||
},
|
||||
outDir: "../js_dist",
|
||||
emptyOutDir: true,
|
||||
rollupOptions: {
|
||||
// external: /^lit/,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
# For documentation on how to configure this file,
|
||||
# see https://diesel.rs/guides/configuring-diesel-cli
|
||||
|
||||
[print_schema]
|
||||
file = "do_not_use.txt"
|
||||
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]
|
||||
|
||||
[migrations_directory]
|
||||
dir = "migrations"
|
||||
199
mdengine/Cargo.lock
generated
199
mdengine/Cargo.lock
generated
|
|
@ -1,199 +0,0 @@
|
|||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
version = 4
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "2.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
|
||||
|
||||
[[package]]
|
||||
name = "darling"
|
||||
version = "0.20.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"darling_macro",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling_core"
|
||||
version = "0.20.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
|
||||
dependencies = [
|
||||
"fnv",
|
||||
"ident_case",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"strsim",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "darling_macro"
|
||||
version = "0.20.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "diesel"
|
||||
version = "2.2.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ff3e1edb1f37b4953dd5176916347289ed43d7119cc2e6c7c3f7849ff44ea506"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"byteorder",
|
||||
"diesel_derives",
|
||||
"itoa",
|
||||
"pq-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "diesel_derives"
|
||||
version = "2.2.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "68d4216021b3ea446fd2047f5c8f8fe6e98af34508a254a01e4d6bc1e844f84d"
|
||||
dependencies = [
|
||||
"diesel_table_macro_syntax",
|
||||
"dsl_auto_type",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "diesel_table_macro_syntax"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25"
|
||||
dependencies = [
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dsl_auto_type"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "139ae9aca7527f85f26dd76483eb38533fd84bd571065da1739656ef71c5ff5b"
|
||||
dependencies = [
|
||||
"darling",
|
||||
"either",
|
||||
"heck",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "either"
|
||||
version = "1.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
|
||||
|
||||
[[package]]
|
||||
name = "fnv"
|
||||
version = "1.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
|
||||
|
||||
[[package]]
|
||||
name = "heck"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
|
||||
|
||||
[[package]]
|
||||
name = "ident_case"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
|
||||
|
||||
[[package]]
|
||||
name = "info_schema"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"diesel",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "1.0.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.172"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
|
||||
|
||||
[[package]]
|
||||
name = "pq-sys"
|
||||
version = "0.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "41c852911b98f5981956037b2ca976660612e548986c30af075e753107bc3400"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.95"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.40"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.11.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.101"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
|
||||
|
||||
[[package]]
|
||||
name = "vcpkg"
|
||||
version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
[package]
|
||||
name = "mdengine"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
chrono = "0.4.41"
|
||||
diesel = { version = "2.2.10", features = ["64-column-tables", "chrono", "postgres"], default-features = false }
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
.PHONY: run-postgres
|
||||
|
||||
run-postgres:
|
||||
docker run --rm -it -e POSTGRES_PASSWORD=guest -p 127.0.0.1:5432:5432 postgres:17
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
# Interim Metadata Engine (mdengine)
|
||||
|
||||
This crate is responsible for navigating the PostgreSQL `information_schema`
|
||||
and catalogs tables to extract rich understanding of database structure and
|
||||
permissions.
|
||||
|
||||
It does not fetch data directly from any user defined tables.
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
# For documentation on how to configure this file,
|
||||
# see https://diesel.rs/guides/configuring-diesel-cli
|
||||
|
||||
[print_schema]
|
||||
file = "src/schema.rs"
|
||||
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]
|
||||
schema = "information_schema"
|
||||
filter = { except_tables = ["sql_features", "sql_implementation_info", "sql_parts", "sql_sizing"] }
|
||||
|
||||
[migrations_directory]
|
||||
dir = "migrations"
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
use crate::{pg_class::PgClass, table_privileges::TablePrivilege};
|
||||
use diesel::{
|
||||
dsl::{AsSelect, auto_type},
|
||||
pg::Pg,
|
||||
prelude::*,
|
||||
};
|
||||
|
||||
pub mod pg_attribute;
|
||||
pub mod pg_class;
|
||||
pub mod pg_namespace;
|
||||
pub mod pg_roles;
|
||||
mod schema;
|
||||
pub mod table_privileges;
|
||||
|
||||
// Still waiting for Postgres to gain class consciousness
|
||||
#[derive(Clone, Queryable, Selectable)]
|
||||
pub struct PgClassPrivilege {
|
||||
#[diesel(embed)]
|
||||
pub class: PgClass,
|
||||
#[diesel(embed)]
|
||||
pub privilege: TablePrivilege,
|
||||
}
|
||||
|
||||
/// Query for the list of tables any of the provided roles has access to. A Vec
|
||||
/// of grantees is accepted in case you wish to query for multiple roles of
|
||||
/// which a user is a member.
|
||||
#[auto_type(no_type_alias)]
|
||||
pub fn class_privileges_for_grantees(grantees: Vec<String>) -> _ {
|
||||
let select: AsSelect<PgClassPrivilege, Pg> = PgClassPrivilege::as_select();
|
||||
pg_class::table
|
||||
.inner_join(pg_namespace::table.on(pg_namespace::dsl::oid.eq(pg_class::dsl::relnamespace)))
|
||||
.inner_join(
|
||||
table_privileges::table.on(table_privileges::dsl::table_schema
|
||||
.eq(pg_namespace::dsl::nspname)
|
||||
.and(table_privileges::dsl::table_name.eq(pg_class::dsl::relname))),
|
||||
)
|
||||
// Excude indexes, series, etc.
|
||||
.filter(pg_class::dsl::relkind.eq(b'r'))
|
||||
.filter(table_privileges::dsl::grantee.eq_any(grantees))
|
||||
.select(select)
|
||||
}
|
||||
|
|
@ -1,75 +0,0 @@
|
|||
use diesel::{
|
||||
dsl::{AsSelect, auto_type},
|
||||
pg::Pg,
|
||||
prelude::*,
|
||||
};
|
||||
|
||||
use crate::schema::pg_attribute;
|
||||
|
||||
pub use crate::schema::pg_attribute::{dsl, table};
|
||||
|
||||
#[derive(Clone, Debug, Queryable, Selectable)]
|
||||
#[diesel(check_for_backend(Pg))]
|
||||
#[diesel(table_name = pg_attribute)]
|
||||
pub struct PgAttribute {
|
||||
/// The table this column belongs to
|
||||
pub attrelid: u32,
|
||||
/// The column name
|
||||
pub attname: String,
|
||||
/// The data type of this column (zero for a dropped column)
|
||||
pub atttypid: u32,
|
||||
/// A copy of pg_type.typlen of this column's type
|
||||
pub attlen: i16,
|
||||
/// The number of the column. Ordinary columns are numbered from 1 up. System columns, such as ctid, have (arbitrary) negative numbers.
|
||||
pub attnum: i16,
|
||||
/// Always -1 in storage, but when loaded into a row descriptor in memory this might be updated to cache the offset of the attribute within the row
|
||||
pub attcacheoff: i32,
|
||||
/// atttypmod records type-specific data supplied at table creation time (for example, the maximum length of a varchar column). It is passed to type-specific input functions and length coercion functions. The value will generally be -1 for types that do not need atttypmod.
|
||||
pub atttypmod: i32,
|
||||
/// Number of dimensions, if the column is an array type; otherwise 0. (Presently, the number of dimensions of an array is not enforced, so any nonzero value effectively means “it's an array”.)
|
||||
pub attndims: i16,
|
||||
/// A copy of pg_type.typbyval of this column's type
|
||||
pub attbyval: bool,
|
||||
/// A copy of pg_type.typalign of this column's type
|
||||
pub attalign: u8,
|
||||
/// Normally a copy of pg_type.typstorage of this column's type. For TOAST-able data types, this can be altered after column creation to control storage policy.
|
||||
pub attstorage: u8,
|
||||
/// The current compression method of the column. Typically this is '\0' to specify use of the current default setting (see default_toast_compression). Otherwise, 'p' selects pglz compression, while 'l' selects LZ4 compression. However, this field is ignored whenever attstorage does not allow compression.
|
||||
pub attcompression: Option<u8>,
|
||||
/// This represents a not-null constraint.
|
||||
pub attnotnull: bool,
|
||||
/// This column has a default expression or generation expression, in which case there will be a corresponding entry in the pg_attrdef catalog that actually defines the expression. (Check attgenerated to determine whether this is a default or a generation expression.)
|
||||
pub atthasdef: bool,
|
||||
/// This column has a value which is used where the column is entirely missing from the row, as happens when a column is added with a non-volatile DEFAULT value after the row is created. The actual value used is stored in the attmissingval column.
|
||||
pub atthasmissing: bool,
|
||||
/// If a zero byte (''), then not an identity column. Otherwise, a = generated always, d = generated by default.
|
||||
pub attidentity: Option<u8>,
|
||||
/// If a zero byte (''), then not a generated column. Otherwise, s = stored. (Other values might be added in the future.)
|
||||
pub attgenerated: Option<u8>,
|
||||
/// This column has been dropped and is no longer valid. A dropped column is still physically present in the table, but is ignored by the parser and so cannot be accessed via SQL.
|
||||
pub attisdropped: bool,
|
||||
/// This column is defined locally in the relation. Note that a column can be locally defined and inherited simultaneously.
|
||||
pub attislocal: bool,
|
||||
/// The number of direct ancestors this column has. A column with a nonzero number of ancestors cannot be dropped nor renamed.
|
||||
pub attinhcount: i16,
|
||||
/// The defined collation of the column, or zero if the column is not of a collatable data type
|
||||
pub attcollation: u32,
|
||||
/// attstattarget controls the level of detail of statistics accumulated for this column by ANALYZE. A zero value indicates that no statistics should be collected. A null value says to use the system default statistics target. The exact meaning of positive values is data type-dependent. For scalar data types, attstattarget is both the target number of “most common values” to collect, and the target number of histogram bins to create.
|
||||
pub attstattarget: Option<i16>,
|
||||
/// Column-level access privileges, if any have been granted specifically on this column
|
||||
pub attacl: Option<Vec<String>>,
|
||||
/// Attribute-level options, as “keyword=value” strings
|
||||
pub attoptions: Option<Vec<String>>,
|
||||
/// Attribute-level foreign data wrapper options, as “keyword=value” strings
|
||||
pub attfdwoptions: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
#[auto_type(no_type_alias)]
|
||||
pub fn attributes_for_rel(oid: u32) -> _ {
|
||||
let select: AsSelect<PgAttribute, Pg> = PgAttribute::as_select();
|
||||
pg_attribute::table
|
||||
.filter(pg_attribute::dsl::attrelid.eq(oid))
|
||||
.filter(pg_attribute::dsl::attnum.gt(0i16))
|
||||
.filter(pg_attribute::dsl::attisdropped.eq(false))
|
||||
.select(select)
|
||||
}
|
||||
|
|
@ -1,14 +0,0 @@
|
|||
use diesel::{pg::Pg, prelude::*};
|
||||
|
||||
use crate::schema::pg_class;
|
||||
|
||||
pub use crate::schema::pg_class::{dsl, table};
|
||||
|
||||
#[derive(Clone, Debug, Queryable, Selectable)]
|
||||
#[diesel(check_for_backend(Pg))]
|
||||
#[diesel(table_name = pg_class)]
|
||||
#[diesel(primary_key(oid))]
|
||||
pub struct PgClass {
|
||||
pub oid: u32,
|
||||
pub relname: String,
|
||||
}
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
use diesel::{pg::Pg, prelude::*};
|
||||
|
||||
use crate::schema::pg_namespace;
|
||||
|
||||
pub use crate::schema::pg_namespace::{dsl, table};
|
||||
|
||||
#[derive(Clone, Debug, Queryable, Selectable)]
|
||||
#[diesel(check_for_backend(Pg))]
|
||||
#[diesel(table_name = pg_namespace)]
|
||||
#[diesel(primary_key(oid))]
|
||||
pub struct PgNamespace {
|
||||
pub oid: u32,
|
||||
}
|
||||
|
|
@ -1,39 +0,0 @@
|
|||
use chrono::{DateTime, Utc};
|
||||
use diesel::{pg::Pg, prelude::*};
|
||||
|
||||
use crate::schema::pg_roles;
|
||||
|
||||
pub use crate::schema::pg_roles::{dsl, table};
|
||||
|
||||
#[derive(Clone, Debug, Queryable, Selectable)]
|
||||
#[diesel(check_for_backend(Pg))]
|
||||
#[diesel(table_name = pg_roles)]
|
||||
#[diesel(primary_key(oid))]
|
||||
pub struct PgRole {
|
||||
/// Role name
|
||||
pub rolname: String,
|
||||
/// Role has superuser privileges
|
||||
pub rolsuper: bool,
|
||||
/// Role automatically inherits privileges of roles it is a member of
|
||||
pub rolinherit: bool,
|
||||
/// Role can create more roles
|
||||
pub rolcreaterole: bool,
|
||||
/// Role can create databases
|
||||
pub rolcreatedb: bool,
|
||||
/// Role can log in. That is, this role can be given as the initial session authorization identifier
|
||||
pub rolcanlogin: bool,
|
||||
/// Role is a replication role. A replication role can initiate replication connections and create and drop replication slots.
|
||||
pub rolreplication: bool,
|
||||
/// For roles that can log in, this sets maximum number of concurrent connections this role can make. -1 means no limit.
|
||||
pub rolconnlimit: i32,
|
||||
/// Not the password (always reads as ********)
|
||||
pub rolpassword: String,
|
||||
/// Password expiry time (only used for password authentication); null if no expiration
|
||||
pub rolvaliduntil: Option<DateTime<Utc>>,
|
||||
/// Role bypasses every row-level security policy, see Section 5.9 for more information.
|
||||
pub rolbypassrls: bool,
|
||||
/// Role-specific defaults for run-time configuration variables
|
||||
pub rolconfig: Option<Vec<String>>,
|
||||
/// ID of role
|
||||
pub oid: u32,
|
||||
}
|
||||
|
|
@ -1,195 +0,0 @@
|
|||
use diesel::{allow_tables_to_appear_in_same_query, joinable, table};
|
||||
|
||||
table! {
|
||||
pg_class (oid) {
|
||||
/// Row identifier
|
||||
oid -> Oid,
|
||||
/// Name of the table, index, view, etc.
|
||||
relname -> Text,
|
||||
/// The OID of the namespace that contains this relation
|
||||
relnamespace -> Oid,
|
||||
/// The OID of the data type that corresponds to this table's row type, if any; zero for indexes, sequences, and toast tables, which have no pg_type entry
|
||||
reltype -> Oid,
|
||||
/// For typed tables, the OID of the underlying composite type; zero for all other relations
|
||||
reloftype -> Oid,
|
||||
/// Owner of the relation
|
||||
relowner -> Oid,
|
||||
/// The access method used to access this table or index. Not meaningful if the relation is a sequence or has no on-disk file, except for partitioned tables, where, if set, it takes precedence over default_table_access_method when determining the access method to use for partitions created when one is not specified in the creation command.
|
||||
relam -> Oid,
|
||||
/// Name of the on-disk file of this relation; zero means this is a “mapped” relation whose disk file name is determined by low-level state
|
||||
relfilenode -> Oid,
|
||||
/// The tablespace in which this relation is stored. If zero, the database's default tablespace is implied. Not meaningful if the relation has no on-disk file, except for partitioned tables, where this is the tablespace in which partitions will be created when one is not specified in the creation command.
|
||||
reltablespace -> Oid,
|
||||
/// Size of the on-disk representation of this table in pages (of size BLCKSZ). This is only an estimate used by the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as CREATE INDEX.
|
||||
relpages -> Integer,
|
||||
/// Number of live rows in the table. This is only an estimate used by the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as CREATE INDEX. If the table has never yet been vacuumed or analyzed, reltuples contains -1 indicating that the row count is unknown.
|
||||
reltuples -> Float,
|
||||
/// Number of pages that are marked all-visible in the table's visibility map. This is only an estimate used by the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as CREATE INDEX.
|
||||
relallvisible -> Integer,
|
||||
/// OID of the TOAST table associated with this table, zero if none. The TOAST table stores large attributes “out of line” in a secondary table.
|
||||
reltoastrelid -> Oid,
|
||||
/// True if this is a table and it has (or recently had) any indexes
|
||||
relhasindex -> Bool,
|
||||
/// True if this table is shared across all databases in the cluster. Only certain system catalogs (such as pg_database) are shared.
|
||||
relisshared -> Bool,
|
||||
/// p = permanent table/sequence, u = unlogged table/sequence, t = temporary table/sequence
|
||||
relpersistence -> CChar,
|
||||
/// r = ordinary table, i = index, S = sequence, t = TOAST table, v = view, m = materialized view, c = composite type, f = foreign table, p = partitioned table, I = partitioned index
|
||||
relkind -> CChar,
|
||||
/// Number of user columns in the relation (system columns not counted). There must be this many corresponding entries in pg_attribute. See also pg_attribute.attnum.
|
||||
relnatts -> SmallInt,
|
||||
/// Number of CHECK constraints on the table; see pg_constraint catalog
|
||||
relchecks -> SmallInt,
|
||||
/// True if table has (or once had) rules; see pg_rewrite catalog
|
||||
relhasrules -> Bool,
|
||||
/// True if table has (or once had) triggers; see pg_trigger catalog
|
||||
relhastriggers -> Bool,
|
||||
/// True if table or index has (or once had) any inheritance children or partitions
|
||||
relhassubclass -> Bool,
|
||||
/// True if table has row-level security enabled; see pg_policy catalog
|
||||
relrowsecurity -> Bool,
|
||||
/// True if row-level security (when enabled) will also apply to table owner; see pg_policy catalog
|
||||
relforcerowsecurity -> Bool,
|
||||
/// True if relation is populated (this is true for all relations other than some materialized views)
|
||||
relispopulated -> Bool,
|
||||
/// Columns used to form “replica identity” for rows: d = default (primary key, if any), n = nothing, f = all columns, i = index with indisreplident set (same as nothing if the index used has been dropped)
|
||||
relreplident -> CChar,
|
||||
/// True if table or index is a partition
|
||||
relispartition -> Bool,
|
||||
/// For new relations being written during a DDL operation that requires a table rewrite, this contains the OID of the original relation; otherwise zero. That state is only visible internally; this field should never contain anything other than zero for a user-visible relation.
|
||||
relrewrite -> Oid,
|
||||
/// All transaction IDs before this one have been replaced with a permanent (“frozen”) transaction ID in this table. This is used to track whether the table needs to be vacuumed in order to prevent transaction ID wraparound or to allow pg_xact to be shrunk. Zero (InvalidTransactionId) if the relation is not a table.
|
||||
/// Access-method-specific options, as “keyword=value” strings
|
||||
reloptions -> Array<Text>,
|
||||
}
|
||||
}
|
||||
|
||||
table! {
|
||||
pg_roles (oid) {
|
||||
/// Role name
|
||||
rolname -> Text,
|
||||
/// Role has superuser privileges
|
||||
rolsuper -> Bool,
|
||||
/// Role automatically inherits privileges of roles it is a member of
|
||||
rolinherit -> Bool,
|
||||
/// Role can create more roles
|
||||
rolcreaterole -> Bool,
|
||||
/// Role can create databases
|
||||
rolcreatedb -> Bool,
|
||||
/// Role can log in. That is, this role can be given as the initial session authorization identifier
|
||||
rolcanlogin -> Bool,
|
||||
/// Role is a replication role. A replication role can initiate replication connections and create and drop replication slots.
|
||||
rolreplication -> Bool,
|
||||
/// For roles that can log in, this sets maximum number of concurrent connections this role can make. -1 means no limit.
|
||||
rolconnlimit -> Integer,
|
||||
/// Not the password (always reads as ********)
|
||||
rolpassword -> Text,
|
||||
/// Password expiry time (only used for password authentication); null if no expiration
|
||||
rolvaliduntil -> Nullable<Timestamptz>,
|
||||
/// Role bypasses every row-level security policy, see Section 5.9 for more information.
|
||||
rolbypassrls -> Bool,
|
||||
/// Role-specific defaults for run-time configuration variables
|
||||
rolconfig -> Nullable<Array<Text>>,
|
||||
/// ID of role
|
||||
oid -> Oid,
|
||||
}
|
||||
}
|
||||
|
||||
table! {
|
||||
pg_namespace (oid) {
|
||||
/// Row identifier
|
||||
oid -> Oid,
|
||||
/// Name of the namespace
|
||||
nspname -> Text,
|
||||
/// Onwer of the namespace
|
||||
nspowner -> Oid,
|
||||
/// Access privileges; see Section 5.8 for details
|
||||
nspacl -> Array<Text>,
|
||||
}
|
||||
}
|
||||
|
||||
table! {
|
||||
pg_attribute (attrelid, attname) {
|
||||
/// The table this column belongs to
|
||||
attrelid -> Oid,
|
||||
/// The column name
|
||||
attname -> Text,
|
||||
/// The data type of this column (zero for a dropped column)
|
||||
atttypid -> Oid,
|
||||
/// A copy of pg_type.typlen of this column's type
|
||||
attlen -> SmallInt,
|
||||
/// The number of the column. Ordinary columns are numbered from 1 up. System columns, such as ctid, have (arbitrary) negative numbers.
|
||||
attnum -> SmallInt,
|
||||
/// Always -1 in storage, but when loaded into a row descriptor in memory this might be updated to cache the offset of the attribute within the row
|
||||
attcacheoff -> Integer,
|
||||
/// atttypmod records type-specific data supplied at table creation time (for example, the maximum length of a varchar column). It is passed to type-specific input functions and length coercion functions. The value will generally be -1 for types that do not need atttypmod.
|
||||
atttypmod -> Integer,
|
||||
/// Number of dimensions, if the column is an array type; otherwise 0. (Presently, the number of dimensions of an array is not enforced, so any nonzero value effectively means “it's an array”.)
|
||||
attndims -> SmallInt,
|
||||
/// A copy of pg_type.typbyval of this column's type
|
||||
attbyval -> Bool,
|
||||
/// A copy of pg_type.typalign of this column's type
|
||||
attalign -> CChar,
|
||||
/// Normally a copy of pg_type.typstorage of this column's type. For TOAST-able data types, this can be altered after column creation to control storage policy.
|
||||
attstorage -> CChar,
|
||||
/// The current compression method of the column. Typically this is '\0' to specify use of the current default setting (see default_toast_compression). Otherwise, 'p' selects pglz compression, while 'l' selects LZ4 compression. However, this field is ignored whenever attstorage does not allow compression.
|
||||
attcompression -> Nullable<CChar>,
|
||||
/// This represents a not-null constraint.
|
||||
attnotnull -> Bool,
|
||||
/// This column has a default expression or generation expression, in which case there will be a corresponding entry in the pg_attrdef catalog that actually defines the expression. (Check attgenerated to determine whether this is a default or a generation expression.)
|
||||
atthasdef -> Bool,
|
||||
/// This column has a value which is used where the column is entirely missing from the row, as happens when a column is added with a non-volatile DEFAULT value after the row is created. The actual value used is stored in the attmissingval column.
|
||||
atthasmissing -> Bool,
|
||||
/// If a zero byte (''), then not an identity column. Otherwise, a = generated always, d = generated by default.
|
||||
attidentity -> Nullable<CChar>,
|
||||
/// If a zero byte (''), then not a generated column. Otherwise, s = stored. (Other values might be added in the future.)
|
||||
attgenerated -> Nullable<CChar>,
|
||||
/// This column has been dropped and is no longer valid. A dropped column is still physically present in the table, but is ignored by the parser and so cannot be accessed via SQL.
|
||||
attisdropped -> Bool,
|
||||
/// This column is defined locally in the relation. Note that a column can be locally defined and inherited simultaneously.
|
||||
attislocal -> Bool,
|
||||
/// The number of direct ancestors this column has. A column with a nonzero number of ancestors cannot be dropped nor renamed.
|
||||
attinhcount -> SmallInt,
|
||||
/// The defined collation of the column, or zero if the column is not of a collatable data type
|
||||
attcollation -> Oid,
|
||||
/// attstattarget controls the level of detail of statistics accumulated for this column by ANALYZE. A zero value indicates that no statistics should be collected. A null value says to use the system default statistics target. The exact meaning of positive values is data type-dependent. For scalar data types, attstattarget is both the target number of “most common values” to collect, and the target number of histogram bins to create.
|
||||
attstattarget -> Nullable<SmallInt>,
|
||||
/// Column-level access privileges, if any have been granted specifically on this column
|
||||
attacl -> Nullable<Array<Text>>,
|
||||
/// Attribute-level options, as “keyword=value” strings
|
||||
attoptions -> Nullable<Array<Text>>,
|
||||
/// Attribute-level foreign data wrapper options, as “keyword=value” strings
|
||||
attfdwoptions -> Nullable<Array<Text>>,
|
||||
}
|
||||
}
|
||||
|
||||
table! {
|
||||
information_schema.table_privileges (table_catalog, table_schema, table_name, grantor, grantee) {
|
||||
/// Name of the role that granted the privilege
|
||||
grantor -> Text,
|
||||
/// Name of the role that the privilege was granted to
|
||||
grantee -> Text,
|
||||
/// Name of the database that contains the table (always the current database)
|
||||
table_catalog -> Text,
|
||||
/// Name of the schema that contains the table
|
||||
table_schema -> Text,
|
||||
/// Name of the table
|
||||
table_name -> Text,
|
||||
/// Type of the privilege: SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, or TRIGGER
|
||||
privilege_type -> Text,
|
||||
/// YES if the privilege is grantable, NO if not
|
||||
is_grantable -> Text,
|
||||
/// In the SQL standard, WITH HIERARCHY OPTION is a separate (sub-)privilege allowing certain operations on table inheritance hierarchies. In PostgreSQL, this is included in the SELECT privilege, so this column shows YES if the privilege is SELECT, else NO.
|
||||
with_hierarchy -> Text,
|
||||
}
|
||||
}
|
||||
|
||||
allow_tables_to_appear_in_same_query!(
|
||||
pg_attribute,
|
||||
pg_class,
|
||||
pg_namespace,
|
||||
pg_roles,
|
||||
table_privileges
|
||||
);
|
||||
joinable!(pg_class -> pg_roles (relowner));
|
||||
joinable!(pg_attribute -> pg_class (attrelid));
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
use diesel::{pg::Pg, prelude::*};
|
||||
|
||||
use crate::schema::table_privileges;
|
||||
|
||||
pub use crate::schema::table_privileges::{dsl, table};
|
||||
|
||||
#[derive(Clone, Debug, Queryable, Selectable)]
|
||||
#[diesel(check_for_backend(Pg))]
|
||||
#[diesel(table_name = table_privileges)]
|
||||
pub struct TablePrivilege {
|
||||
/// Name of the role that granted the privilege
|
||||
pub grantor: String,
|
||||
/// Name of the role that the privilege was granted to
|
||||
pub grantee: String,
|
||||
/// Name of the database that contains the table (always the current database)
|
||||
pub table_catalog: String,
|
||||
/// Name of the schema that contains the table
|
||||
pub table_schema: String,
|
||||
/// Name of the table
|
||||
pub table_name: String,
|
||||
/// Type of the privilege: SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, or TRIGGER
|
||||
pub privilege_type: String,
|
||||
/// YES if the privilege is grantable, NO if not
|
||||
pub is_grantable: String,
|
||||
/// In the SQL standard, WITH HIERARCHY OPTION is a separate (sub-)privilege allowing certain operations on table inheritance hierarchies. In PostgreSQL, this is included in the SELECT privilege, so this column shows YES if the privilege is SELECT, else NO.
|
||||
pub with_hierarchy: String,
|
||||
}
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
-- This file was automatically created by Diesel to setup helper functions
|
||||
-- and other internal bookkeeping. This file is safe to edit, any future
|
||||
-- changes will be added to existing projects as new migrations.
|
||||
|
||||
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
|
||||
DROP FUNCTION IF EXISTS diesel_set_updated_at();
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
-- This file was automatically created by Diesel to setup helper functions
|
||||
-- and other internal bookkeeping. This file is safe to edit, any future
|
||||
-- changes will be added to existing projects as new migrations.
|
||||
|
||||
|
||||
|
||||
|
||||
-- Sets up a trigger for the given table to automatically set a column called
|
||||
-- `updated_at` whenever the row is modified (unless `updated_at` was included
|
||||
-- in the modified columns)
|
||||
--
|
||||
-- # Example
|
||||
--
|
||||
-- ```sql
|
||||
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
|
||||
--
|
||||
-- SELECT diesel_manage_updated_at('users');
|
||||
-- ```
|
||||
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
|
||||
BEGIN
|
||||
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
|
||||
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
|
||||
BEGIN
|
||||
IF (
|
||||
NEW IS DISTINCT FROM OLD AND
|
||||
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
|
||||
) THEN
|
||||
NEW.updated_at := current_timestamp;
|
||||
END IF;
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
|
@ -1 +0,0 @@
|
|||
DROP TABLE IF EXISTS users;
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
CREATE TABLE IF NOT EXISTS users (
|
||||
id UUID NOT NULL PRIMARY KEY,
|
||||
uid TEXT UNIQUE NOT NULL,
|
||||
email TEXT NOT NULL
|
||||
);
|
||||
CREATE INDEX ON users (uid);
|
||||
1
migrations/20241125232658_users.down.sql
Normal file
1
migrations/20241125232658_users.down.sql
Normal file
|
|
@ -0,0 +1 @@
|
|||
drop table if exists users;
|
||||
6
migrations/20241125232658_users.up.sql
Normal file
6
migrations/20241125232658_users.up.sql
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
create table if not exists users (
|
||||
id uuid not null primary key,
|
||||
uid text unique not null,
|
||||
email text not null
|
||||
);
|
||||
create index on users (uid);
|
||||
|
|
@ -1 +0,0 @@
|
|||
DROP TABLE IF EXISTS browser_sessions;
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
CREATE TABLE browser_sessions (
|
||||
id TEXT NOT NULL PRIMARY KEY,
|
||||
serialized TEXT NOT NULL,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
expiry TIMESTAMPTZ
|
||||
);
|
||||
CREATE INDEX ON browser_sessions (expiry);
|
||||
CREATE INDEX ON browser_sessions (created_at);
|
||||
1
migrations/20250108211839_sessions.down.sql
Normal file
1
migrations/20250108211839_sessions.down.sql
Normal file
|
|
@ -0,0 +1 @@
|
|||
drop table if exists browser_sessions;
|
||||
8
migrations/20250108211839_sessions.up.sql
Normal file
8
migrations/20250108211839_sessions.up.sql
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
create table if not exists browser_sessions (
|
||||
id text not null primary key,
|
||||
serialized text not null,
|
||||
created_at timestamptz not null default now(),
|
||||
expiry timestamptz
|
||||
);
|
||||
create index on browser_sessions (expiry);
|
||||
create index on browser_sessions (created_at);
|
||||
2
migrations/20250522224809_bases.down.sql
Normal file
2
migrations/20250522224809_bases.down.sql
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
drop table if exists base_user_perms;
|
||||
drop table if exists bases;
|
||||
19
migrations/20250522224809_bases.up.sql
Normal file
19
migrations/20250522224809_bases.up.sql
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
create table if not exists bases (
|
||||
id uuid not null primary key,
|
||||
name text not null default '',
|
||||
url text not null,
|
||||
owner_id uuid not null references users(id)
|
||||
on delete restrict,
|
||||
user_role_prefix text not null default '__itmu__'
|
||||
);
|
||||
create index on bases (owner_id);
|
||||
|
||||
create table if not exists base_user_perms (
|
||||
id uuid not null primary key,
|
||||
base_id uuid not null references bases(id),
|
||||
user_id uuid not null references users(id),
|
||||
perm text not null,
|
||||
unique (base_id, user_id, perm)
|
||||
);
|
||||
create index on base_user_perms (user_id);
|
||||
create index on base_user_perms (base_id);
|
||||
|
|
@ -1,41 +1 @@
|
|||
use anyhow::{Context as _, Result};
|
||||
use diesel::{prelude::*, sql_query};
|
||||
use mdengine::pg_roles::{self, PgRole};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub fn escape_identifier(identifier: &str) -> String {
|
||||
// Escaping identifiers for Postgres is fairly easy, provided that the input is
|
||||
// already known to contain no invalid multi-byte sequences. Backslashes may
|
||||
// remain as-is, and embedded double quotes are escaped simply by doubling
|
||||
// them (`"` becomes `""`). Refer to the PQescapeInternal() function in
|
||||
// libpq (fe-exec.c) and Diesel's PgQueryBuilder::push_identifier().
|
||||
format!("\"{}\"", identifier.replace('"', "\"\""))
|
||||
}
|
||||
|
||||
pub fn diesel_set_user_id(
|
||||
pg_user_role_prefix: &str,
|
||||
user_id: Uuid,
|
||||
conn: &mut PgConnection,
|
||||
) -> Result<()> {
|
||||
let role = pg_roles::table
|
||||
.select(PgRole::as_select())
|
||||
.filter(pg_roles::dsl::rolname.eq(format!("{}{}", pg_user_role_prefix, user_id.simple())))
|
||||
.first(conn)
|
||||
.optional()
|
||||
.context("error reading role")?;
|
||||
if role.is_none() {
|
||||
sql_query(format!(
|
||||
"CREATE ROLE {}",
|
||||
escape_identifier(&format!("{}{}", pg_user_role_prefix, user_id.simple()))
|
||||
))
|
||||
.execute(conn)
|
||||
.context("error creating role")?;
|
||||
}
|
||||
sql_query(format!(
|
||||
"SET ROLE {}",
|
||||
escape_identifier(&format!("{}{}", pg_user_role_prefix, user_id.simple()))
|
||||
))
|
||||
.execute(conn)
|
||||
.context("error setting role to user")?;
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,15 +6,17 @@ use axum::{
|
|||
http::request::Parts,
|
||||
};
|
||||
use oauth2::basic::BasicClient;
|
||||
use sqlx::{pool::PoolConnection, postgres::PgPoolOptions, Postgres};
|
||||
|
||||
use crate::{app_error::AppError, auth, nav::NavbarBuilder, sessions::PgStore, settings::Settings};
|
||||
use crate::{
|
||||
app_error::AppError, auth, base_pooler::BasePooler, sessions::PgStore, settings::Settings,
|
||||
};
|
||||
|
||||
/// Global app configuration
|
||||
pub struct App {
|
||||
pub diesel_pool: deadpool_diesel::postgres::Pool,
|
||||
pub navbar_template: NavbarBuilder,
|
||||
pub app_db: sqlx::PgPool,
|
||||
pub base_pooler: BasePooler,
|
||||
pub oauth_client: BasicClient,
|
||||
pub pg_pool: deadpool_postgres::Pool,
|
||||
pub reqwest_client: reqwest::Client,
|
||||
pub session_store: PgStore,
|
||||
pub settings: Settings,
|
||||
|
|
@ -23,41 +25,21 @@ pub struct App {
|
|||
impl App {
|
||||
/// Initialize global application functions based on config values
|
||||
pub async fn from_settings(settings: Settings) -> Result<Self> {
|
||||
let database_url = settings.database_url.clone();
|
||||
let diesel_manager = deadpool_diesel::postgres::Manager::from_config(
|
||||
database_url.clone(),
|
||||
deadpool_diesel::Runtime::Tokio1,
|
||||
deadpool_diesel::ManagerConfig {
|
||||
// Reset role after each interaction is recycled so that user
|
||||
// sessions remain isolated by deadpool interaction
|
||||
recycling_method: deadpool_diesel::RecyclingMethod::CustomQuery(
|
||||
std::borrow::Cow::Owned("RESET ROLE;".to_owned()),
|
||||
),
|
||||
},
|
||||
);
|
||||
let diesel_pool = deadpool_diesel::postgres::Pool::builder(diesel_manager).build()?;
|
||||
let app_db = PgPoolOptions::new()
|
||||
.max_connections(settings.app_db_max_connections)
|
||||
.connect(&settings.database_url)
|
||||
.await?;
|
||||
|
||||
let pg_config = deadpool_postgres::Config {
|
||||
url: Some(database_url),
|
||||
manager: Some(deadpool_postgres::ManagerConfig {
|
||||
recycling_method: deadpool_postgres::RecyclingMethod::Clean,
|
||||
}),
|
||||
..Default::default()
|
||||
};
|
||||
let pg_pool = pg_config.create_pool(
|
||||
Some(deadpool_postgres::Runtime::Tokio1),
|
||||
postgres_native_tls::MakeTlsConnector::new(native_tls::TlsConnector::new()?),
|
||||
)?;
|
||||
|
||||
let session_store = PgStore::new(diesel_pool.clone());
|
||||
let session_store = PgStore::new(app_db.clone());
|
||||
let reqwest_client = reqwest::ClientBuilder::new().https_only(true).build()?;
|
||||
let oauth_client = auth::new_oauth_client(&settings)?;
|
||||
|
||||
let base_pooler = BasePooler::new_with_app_db(app_db.clone());
|
||||
|
||||
Ok(Self {
|
||||
diesel_pool,
|
||||
navbar_template: NavbarBuilder::default().with_base_path(&settings.base_path),
|
||||
app_db,
|
||||
base_pooler,
|
||||
oauth_client,
|
||||
pg_pool,
|
||||
reqwest_client,
|
||||
session_store,
|
||||
settings,
|
||||
|
|
@ -82,9 +64,9 @@ where
|
|||
}
|
||||
|
||||
/// Extractor to automatically obtain a Deadpool Diesel connection
|
||||
pub struct DieselConn(pub deadpool_diesel::postgres::Connection);
|
||||
pub struct AppDbConn(pub PoolConnection<Postgres>);
|
||||
|
||||
impl<S> FromRequestParts<S> for DieselConn
|
||||
impl<S> FromRequestParts<S> for AppDbConn
|
||||
where
|
||||
S: Into<AppState> + Clone + Sync,
|
||||
{
|
||||
|
|
@ -92,24 +74,9 @@ where
|
|||
|
||||
async fn from_request_parts(_: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
|
||||
let conn = Into::<AppState>::into(state.clone())
|
||||
.diesel_pool
|
||||
.get()
|
||||
.app_db
|
||||
.acquire()
|
||||
.await?;
|
||||
Ok(Self(conn))
|
||||
}
|
||||
}
|
||||
|
||||
/// Extractor to automatically obtain a Deadpool tokio-postgres connection
|
||||
pub struct PgConn(pub deadpool_postgres::Object);
|
||||
|
||||
impl<S> FromRequestParts<S> for PgConn
|
||||
where
|
||||
S: Into<AppState> + Clone + Sync,
|
||||
{
|
||||
type Rejection = AppError;
|
||||
|
||||
async fn from_request_parts(_: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
|
||||
let conn = Into::<AppState>::into(state.clone()).pg_pool.get().await?;
|
||||
Ok(Self(conn))
|
||||
}
|
||||
}
|
||||
|
|
|
|||
111
src/base_pooler.rs
Normal file
111
src/base_pooler.rs
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
use std::{collections::HashMap, sync::Arc, time::Duration};
|
||||
|
||||
use anyhow::{Context as _, Result};
|
||||
use axum::extract::FromRef;
|
||||
use sqlx::{pool::PoolConnection, postgres::PgPoolOptions, raw_sql, Executor, PgPool, Postgres};
|
||||
use tokio::sync::{OnceCell, RwLock};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{app_state::AppState, bases::Base};
|
||||
|
||||
const MAX_CONNECTIONS: u32 = 4;
|
||||
const IDLE_SECONDS: u64 = 3600;
|
||||
|
||||
// NOTE: The Arc<RwLock> this uses will probably need to be cleaned up for
|
||||
// performance eventually.
|
||||
|
||||
/// A collection of multiple SQLx Pools.
|
||||
#[derive(Clone)]
|
||||
pub struct BasePooler {
|
||||
pools: Arc<RwLock<HashMap<Uuid, OnceCell<PgPool>>>>,
|
||||
app_db: PgPool,
|
||||
}
|
||||
|
||||
impl BasePooler {
|
||||
pub fn new_with_app_db(app_db: PgPool) -> Self {
|
||||
Self {
|
||||
app_db,
|
||||
pools: Arc::new(RwLock::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_pool_for(&mut self, base_id: Uuid) -> Result<PgPool> {
|
||||
let init_cell = || async {
|
||||
let base = Base::fetch_by_id(base_id, &self.app_db)
|
||||
.await?
|
||||
.context("no such base")?;
|
||||
Ok(PgPoolOptions::new()
|
||||
.min_connections(0)
|
||||
.max_connections(MAX_CONNECTIONS)
|
||||
.idle_timeout(Some(Duration::from_secs(IDLE_SECONDS)))
|
||||
.after_release(|conn, _| {
|
||||
Box::pin(async move {
|
||||
// Essentially "DISCARD ALL" without "DEALLOCATE ALL"
|
||||
conn.execute(raw_sql(
|
||||
"
|
||||
close all;
|
||||
set session authorization default;
|
||||
reset all;
|
||||
unlisten *;
|
||||
select pg_advisory_unlock_all();
|
||||
discard plans;
|
||||
discard temp;
|
||||
discard sequences;
|
||||
",
|
||||
))
|
||||
.await?;
|
||||
Ok(true)
|
||||
})
|
||||
})
|
||||
.connect(&base.url)
|
||||
.await?)
|
||||
};
|
||||
|
||||
// Attempt to get an existing pool without write-locking the map
|
||||
let pools = self.pools.read().await;
|
||||
if let Some(cell) = pools.get(&base_id) {
|
||||
return Ok(cell
|
||||
.get_or_try_init::<anyhow::Error, _, _>(init_cell)
|
||||
.await?
|
||||
.clone());
|
||||
}
|
||||
drop(pools); // Release read lock
|
||||
let mut pools = self.pools.write().await;
|
||||
let entry = pools.entry(base_id).or_insert(OnceCell::new());
|
||||
Ok(entry
|
||||
.get_or_try_init::<anyhow::Error, _, _>(init_cell)
|
||||
.await?
|
||||
.clone())
|
||||
}
|
||||
|
||||
pub async fn acquire_for(&mut self, base_id: Uuid) -> Result<PoolConnection<Postgres>> {
|
||||
let pool = self.get_pool_for(base_id).await?;
|
||||
Ok(pool.acquire().await?)
|
||||
}
|
||||
|
||||
pub async fn close_for(&mut self, base_id: Uuid) -> Result<()> {
|
||||
let pools = self.pools.read().await;
|
||||
if let Some(cell) = pools.get(&base_id) {
|
||||
if let Some(pool) = cell.get() {
|
||||
let pool = pool.clone();
|
||||
drop(pools); // Release read lock
|
||||
let mut pools = self.pools.write().await;
|
||||
pools.remove(&base_id);
|
||||
drop(pools); // Release write lock
|
||||
pool.close().await;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// TODO: Add a cleanup method to remove entries with no connections
|
||||
}
|
||||
|
||||
impl<S> FromRef<S> for BasePooler
|
||||
where
|
||||
S: Into<AppState> + Clone,
|
||||
{
|
||||
fn from_ref(state: &S) -> Self {
|
||||
Into::<AppState>::into(state.clone()).base_pooler.clone()
|
||||
}
|
||||
}
|
||||
80
src/base_user_perms.rs
Normal file
80
src/base_user_perms.rs
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::{Context as _, Result};
|
||||
use sqlx::{query, PgConnection};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
bases::Base,
|
||||
pg_acls::PgPrivilegeType,
|
||||
pg_databases::PgDatabase,
|
||||
pg_roles::{user_id_from_rolname, PgRole, RoleTree},
|
||||
};
|
||||
|
||||
pub struct BaseUserPerm {
|
||||
pub id: Uuid,
|
||||
pub base_id: Uuid,
|
||||
pub user_id: Uuid,
|
||||
pub perm: String,
|
||||
}
|
||||
|
||||
pub async fn sync_perms_for_base(
|
||||
base_id: Uuid,
|
||||
app_db: &mut PgConnection,
|
||||
client: &mut PgConnection,
|
||||
) -> Result<()> {
|
||||
let db = PgDatabase::fetch_current(&mut *client).await?;
|
||||
let explicit_roles = PgRole::fetch_by_names_any(
|
||||
db.datacl
|
||||
.unwrap_or(vec![])
|
||||
.into_iter()
|
||||
.filter(|item| {
|
||||
item.privileges
|
||||
.iter()
|
||||
.any(|privilege| privilege.privilege == PgPrivilegeType::Connect)
|
||||
})
|
||||
.map(|item| item.grantee)
|
||||
.collect(),
|
||||
&mut *client,
|
||||
)
|
||||
.await?;
|
||||
let mut all_roles: HashSet<PgRole> = HashSet::new();
|
||||
for explicit_role in explicit_roles {
|
||||
if let Some(role_tree) = RoleTree::fetch_members(explicit_role.oid, &mut *client).await? {
|
||||
for implicit_role in role_tree.flatten_inherited() {
|
||||
all_roles.insert(implicit_role.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
let base = Base::fetch_by_id(base_id, &mut *app_db)
|
||||
.await?
|
||||
.context("base with that id not found")?;
|
||||
let user_ids: Vec<Uuid> = all_roles
|
||||
.iter()
|
||||
.filter_map(|role| user_id_from_rolname(&role.rolname, &base.user_role_prefix).ok())
|
||||
.collect();
|
||||
dbg!(&all_roles);
|
||||
query!(
|
||||
"delete from base_user_perms where base_id = $1 and not (user_id = any($2))",
|
||||
base_id,
|
||||
user_ids.as_slice(),
|
||||
)
|
||||
.execute(&mut *app_db)
|
||||
.await?;
|
||||
for user_id in user_ids {
|
||||
query!(
|
||||
"
|
||||
insert into base_user_perms
|
||||
(id, base_id, user_id, perm)
|
||||
values ($1, $2, $3, 'connect')
|
||||
on conflict (base_id, user_id, perm) do nothing
|
||||
",
|
||||
Uuid::now_v7(),
|
||||
base.id,
|
||||
user_id
|
||||
)
|
||||
.execute(&mut *app_db)
|
||||
.await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
75
src/bases.rs
Normal file
75
src/bases.rs
Normal file
|
|
@ -0,0 +1,75 @@
|
|||
use derive_builder::Builder;
|
||||
use sqlx::{query_as, PgExecutor};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub struct Base {
|
||||
pub id: Uuid,
|
||||
pub name: String,
|
||||
pub url: String,
|
||||
pub owner_id: Uuid,
|
||||
pub user_role_prefix: String,
|
||||
}
|
||||
|
||||
impl Base {
|
||||
pub fn insertable_builder() -> InsertableBaseBuilder {
|
||||
InsertableBaseBuilder::default()
|
||||
}
|
||||
|
||||
pub async fn fetch_by_id<'a, E: PgExecutor<'a>>(
|
||||
id: Uuid,
|
||||
client: E,
|
||||
) -> Result<Option<Base>, sqlx::Error> {
|
||||
query_as!(Self, "select * from bases where id = $1", &id)
|
||||
.fetch_optional(client)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn fetch_by_perm_any<'a, E: PgExecutor<'a>>(
|
||||
user_id: Uuid,
|
||||
perms: Vec<&str>,
|
||||
client: E,
|
||||
) -> Result<Vec<Base>, sqlx::Error> {
|
||||
let perms = perms
|
||||
.into_iter()
|
||||
.map(ToOwned::to_owned)
|
||||
.collect::<Vec<String>>();
|
||||
query_as!(
|
||||
Self,
|
||||
"
|
||||
select bases.*
|
||||
from bases inner join base_user_perms as p
|
||||
on p.base_id = bases.id
|
||||
where p.user_id = $1 and perm = ANY($2)
|
||||
",
|
||||
user_id,
|
||||
perms.as_slice(),
|
||||
)
|
||||
.fetch_all(client)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Builder)]
|
||||
pub struct InsertableBase {
|
||||
url: String,
|
||||
owner_id: Uuid,
|
||||
}
|
||||
|
||||
impl InsertableBase {
|
||||
pub async fn insert<'a, E: PgExecutor<'a>>(self, client: E) -> Result<Base, sqlx::Error> {
|
||||
query_as!(
|
||||
Base,
|
||||
"
|
||||
insert into bases
|
||||
(id, url, owner_id)
|
||||
values ($1, $2, $3)
|
||||
returning *
|
||||
",
|
||||
Uuid::now_v7(),
|
||||
self.url,
|
||||
self.owner_id
|
||||
)
|
||||
.fetch_one(client)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
|
@ -2,23 +2,21 @@ use std::fmt::Display;
|
|||
|
||||
use anyhow::Result;
|
||||
use chrono::{DateTime, Utc};
|
||||
use deadpool_postgres::tokio_postgres::{
|
||||
row::RowIndex,
|
||||
types::{FromSql, Type},
|
||||
Row,
|
||||
};
|
||||
use derive_builder::Builder;
|
||||
use sqlx::{
|
||||
error::BoxDynError,
|
||||
postgres::{PgRow, PgTypeInfo, PgValueRef},
|
||||
ColumnIndex, Decode, Postgres, Row as _, TypeInfo as _, ValueRef as _,
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub enum Contents {
|
||||
Text(String),
|
||||
Integer(i32),
|
||||
Timestamptz(DateTime<Utc>),
|
||||
Uuid(Uuid),
|
||||
pub enum Value {
|
||||
Text(Option<String>),
|
||||
Integer(Option<i32>),
|
||||
Timestamptz(Option<DateTime<Utc>>),
|
||||
Uuid(Option<Uuid>),
|
||||
}
|
||||
|
||||
pub struct Value(Option<Contents>);
|
||||
|
||||
#[derive(Builder)]
|
||||
#[builder(pattern = "owned", setter(prefix = "with"))]
|
||||
pub struct FieldOptions {
|
||||
|
|
@ -64,48 +62,85 @@ impl FromSqlError {
|
|||
}
|
||||
|
||||
impl Value {
|
||||
pub fn get_from_row<I: RowIndex + Display>(row: &Row, idx: I) -> Result<Self> {
|
||||
Ok(Self(row.try_get::<_, Option<Contents>>(idx)?))
|
||||
pub fn get_from_row<I: ColumnIndex<PgRow> + Display>(
|
||||
row: &PgRow,
|
||||
idx: I,
|
||||
) -> Result<Self, BoxDynError> {
|
||||
let value_ref = row.try_get_raw(idx)?;
|
||||
Self::decode(value_ref)
|
||||
}
|
||||
}
|
||||
|
||||
impl ToHtmlString for Value {
|
||||
fn to_html_string(&self, options: &FieldOptions) -> String {
|
||||
if let Self(Some(contents)) = self {
|
||||
match contents {
|
||||
Contents::Text(value) => value.clone(),
|
||||
&Contents::Integer(value) => value.to_string(),
|
||||
&Contents::Timestamptz(value) => value.format(&options.date_format).to_string(),
|
||||
&Contents::Uuid(value) => format!(
|
||||
"<span class=\"pg-value-uuid\">{}</span>",
|
||||
value.hyphenated()
|
||||
),
|
||||
macro_rules! cell_html {
|
||||
($component:expr, $value:expr$(, $attr_name:expr => $attr_val:expr)*) => {
|
||||
{
|
||||
let value = $value.clone();
|
||||
let attrs: Vec<String> = vec![
|
||||
format!("value=\"{}\"", serde_json::to_string(&value).unwrap().replace('"', "\\\"")),
|
||||
$(format!("{}=\"{}\"", $attr_name, $attr_val.replace('"', "\\\"")),)*
|
||||
];
|
||||
format!(
|
||||
"<{} {}>{}</{}>",
|
||||
$component,
|
||||
attrs.join(" "),
|
||||
value.map(|value| value.to_string()).unwrap_or("-".to_owned()),
|
||||
$component,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
"<span class=\"pg-value-null\">NULL</span>".to_owned()
|
||||
};
|
||||
}
|
||||
match self {
|
||||
Self::Text(value) => cell_html!("cell-text", value),
|
||||
Self::Integer(value) => cell_html!("cell-integer", value),
|
||||
Self::Timestamptz(value) => cell_html!(
|
||||
"cell-timestamptz",
|
||||
value,
|
||||
"format" => options.date_format
|
||||
),
|
||||
Self::Uuid(value) => cell_html!("cell-uuid", value),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> FromSql<'a> for Contents {
|
||||
fn from_sql(
|
||||
ty: &Type,
|
||||
raw: &'a [u8],
|
||||
) -> Result<Self, Box<dyn std::error::Error + Sync + Send>> {
|
||||
impl sqlx::Type<Postgres> for Value {
|
||||
fn type_info() -> <Postgres as sqlx::Database>::TypeInfo {
|
||||
PgTypeInfo::with_name("XXX");
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Decode<'a, Postgres> for Value {
|
||||
fn decode(value: PgValueRef<'a>) -> Result<Self, BoxDynError> {
|
||||
let type_info = value.type_info();
|
||||
let ty = type_info.name();
|
||||
match ty {
|
||||
&Type::INT4 => Ok(Self::Integer(i32::from_sql(ty, raw)?)),
|
||||
&Type::TEXT | &Type::VARCHAR => Ok(Self::Text(String::from_sql(ty, raw)?)),
|
||||
&Type::TIMESTAMPTZ => Ok(Self::Timestamptz(DateTime::<Utc>::from_sql(ty, raw)?)),
|
||||
&Type::UUID => Ok(Self::Uuid(<Uuid as FromSql>::from_sql(ty, raw)?)),
|
||||
"INT" | "INT4" => Ok(Self::Integer(if value.is_null() {
|
||||
None
|
||||
} else {
|
||||
Some(<i32 as Decode<Postgres>>::decode(value)?)
|
||||
})),
|
||||
"TEXT" | "VARCHAR" => Ok(Self::Text(if value.is_null() {
|
||||
None
|
||||
} else {
|
||||
Some(<String as Decode<Postgres>>::decode(value)?)
|
||||
})),
|
||||
"TIMESTAMPTZ" => Ok(Self::Timestamptz(if value.is_null() {
|
||||
None
|
||||
} else {
|
||||
Some(<DateTime<Utc> as Decode<Postgres>>::decode(value)?)
|
||||
})),
|
||||
"UUID" => Ok(Self::Uuid(if value.is_null() {
|
||||
None
|
||||
} else {
|
||||
Some(<Uuid as Decode<Postgres>>::decode(value)?)
|
||||
})),
|
||||
_ => Err(Box::new(FromSqlError::new(
|
||||
"unsupported pg type for interim Value",
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn accepts(ty: &Type) -> bool {
|
||||
matches!(ty, &Type::TEXT | &Type::VARCHAR | &Type::UUID)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Lens {
|
||||
|
|
|
|||
39
src/db_conns.rs
Normal file
39
src/db_conns.rs
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
use sqlx::{query, PgConnection, Row as _};
|
||||
|
||||
pub fn escape_identifier(identifier: &str) -> String {
|
||||
// Escaping identifiers for Postgres is fairly easy, provided that the input is
|
||||
// already known to contain no invalid multi-byte sequences. Backslashes may
|
||||
// remain as-is, and embedded double quotes are escaped simply by doubling
|
||||
// them (`"` becomes `""`). Refer to the PQescapeInternal() function in
|
||||
// libpq (fe-exec.c) and Diesel's PgQueryBuilder::push_identifier().
|
||||
format!("\"{}\"", identifier.replace('"', "\"\""))
|
||||
}
|
||||
|
||||
pub async fn init_role(rolname: &str, client: &mut PgConnection) -> Result<(), sqlx::Error> {
|
||||
let session_user = query!("select session_user;")
|
||||
.fetch_one(&mut *client)
|
||||
.await?
|
||||
.session_user
|
||||
.unwrap();
|
||||
if !query("select exists(select 1 from pg_roles where rolname = $1)")
|
||||
.bind(rolname)
|
||||
.fetch_one(&mut *client)
|
||||
.await?
|
||||
.try_get(0)?
|
||||
{
|
||||
query(&format!("create role {}", escape_identifier(rolname)))
|
||||
.execute(&mut *client)
|
||||
.await?;
|
||||
query(&format!(
|
||||
"grant {} to {}",
|
||||
escape_identifier(rolname),
|
||||
escape_identifier(&session_user),
|
||||
))
|
||||
.execute(&mut *client)
|
||||
.await?;
|
||||
}
|
||||
query(&format!("set role {}", escape_identifier(rolname)))
|
||||
.execute(&mut *client)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
33
src/flexi_row.rs
Normal file
33
src/flexi_row.rs
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
use std::collections::HashMap;
|
||||
|
||||
use diesel::{
|
||||
pg::{Pg,
|
||||
row::{NamedRow, Row},
|
||||
QueryableByName,
|
||||
};
|
||||
|
||||
/// Internally a HashMap mapping field names to a custom sum type capable of
|
||||
/// deserializing common SQL types. This allows Diesel to load rows without a
|
||||
/// hard-coded structure.
|
||||
pub struct FlexiRow {
|
||||
internal: HashMap<String, FlexiField>,
|
||||
}
|
||||
|
||||
/// Sum type representing a range of SQL data types.
|
||||
pub enum FlexiField {
|
||||
Text(String),
|
||||
Int(i32),
|
||||
Unknown,
|
||||
}
|
||||
|
||||
impl QueryableByName<Pg> for FlexiRow {
|
||||
fn build<'a>(row: &impl NamedRow<'a, Pg>) -> diesel::deserialize::Result<Self> {
|
||||
let mut hm: HashMap<String, FlexiField> = HashMap::new();
|
||||
for i in 0..row.field_count() {
|
||||
if let Some(field) = diesel::row::Row::<'a, Pg>::get(&row, i) {
|
||||
let name = field.field_name().or("Unnamed");
|
||||
}
|
||||
}
|
||||
diesel::deserialize::Result::Ok(FlexiRow { internal: hm })
|
||||
}
|
||||
}
|
||||
50
src/iclient.rs
Normal file
50
src/iclient.rs
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
use anyhow::Result;
|
||||
use sqlx::{postgres::types::Oid, query, query_as, PgConnection, Row as _};
|
||||
|
||||
use crate::abstract_::escape_identifier;
|
||||
|
||||
pub struct PgRole {
|
||||
oid: Option<Oid>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DbSession {
|
||||
conn: PgConnection,
|
||||
}
|
||||
|
||||
impl DbSession {
|
||||
pub async fn set_role(&mut self, rolname: &str) -> Result<()> {
|
||||
if !query("select exists(select 1 from pg_roles where rolname = $1)")
|
||||
.bind(&rolname)
|
||||
.fetch_one(&mut self.conn)
|
||||
.await?
|
||||
.try_get(0)?
|
||||
{
|
||||
query(&format!("create role {}", escape_identifier(&rolname)))
|
||||
.execute(&mut self.conn)
|
||||
.await?;
|
||||
}
|
||||
query(&format!("set role {}", escape_identifier(&rolname)))
|
||||
.execute(&mut self.conn)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_users_with_db_connect(
|
||||
&mut self,
|
||||
user_role_prefix: &str,
|
||||
) -> Result<Vec<PgRole>> {
|
||||
Ok(query_as!(
|
||||
PgRole,
|
||||
"
|
||||
select oid
|
||||
from pg_roles
|
||||
where has_database_privilege(rolname, current_database(), 'connect')
|
||||
and starts_with(rolname, $1)
|
||||
",
|
||||
&user_role_prefix,
|
||||
)
|
||||
.fetch_all(&mut self.conn)
|
||||
.await?)
|
||||
}
|
||||
}
|
||||
23
src/main.rs
23
src/main.rs
|
|
@ -1,26 +1,30 @@
|
|||
use clap::Parser as _;
|
||||
use diesel_migrations::MigrationHarness;
|
||||
use dotenvy::dotenv;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
use crate::{
|
||||
app_state::{App, AppState},
|
||||
cli::{serve_command, worker_command, Cli, Commands},
|
||||
migrations::MIGRATIONS,
|
||||
settings::Settings,
|
||||
};
|
||||
|
||||
mod abstract_;
|
||||
mod app_error;
|
||||
mod app_state;
|
||||
mod auth;
|
||||
mod base_pooler;
|
||||
mod base_user_perms;
|
||||
mod bases;
|
||||
mod cli;
|
||||
mod data_layer;
|
||||
mod db_conns;
|
||||
mod middleware;
|
||||
mod migrations;
|
||||
mod nav;
|
||||
mod pg_acls;
|
||||
mod pg_attributes;
|
||||
mod pg_classes;
|
||||
mod pg_databases;
|
||||
mod pg_roles;
|
||||
mod router;
|
||||
mod schema;
|
||||
mod routes;
|
||||
mod sessions;
|
||||
mod settings;
|
||||
mod users;
|
||||
|
|
@ -40,12 +44,7 @@ async fn main() {
|
|||
let state: AppState = App::from_settings(settings.clone()).await.unwrap().into();
|
||||
|
||||
if settings.run_database_migrations == Some(1) {
|
||||
// Run migrations on server startup
|
||||
let conn = state.diesel_pool.get().await.unwrap();
|
||||
conn.interact(|conn| conn.run_pending_migrations(MIGRATIONS).and(Ok(())))
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
sqlx::migrate!().run(&state.app_db).await.unwrap();
|
||||
}
|
||||
|
||||
let cli = Cli::parse();
|
||||
|
|
|
|||
|
|
@ -1,3 +0,0 @@
|
|||
use diesel_migrations::{embed_migrations, EmbeddedMigrations};
|
||||
|
||||
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/");
|
||||
154
src/pg_acls.rs
Normal file
154
src/pg_acls.rs
Normal file
|
|
@ -0,0 +1,154 @@
|
|||
use nom::{
|
||||
branch::alt,
|
||||
bytes::complete::{is_not, tag, take_till},
|
||||
character::char,
|
||||
combinator::{opt, value},
|
||||
error::ParseError,
|
||||
multi::{many0, many1},
|
||||
sequence::delimited,
|
||||
AsChar as _, IResult, Parser,
|
||||
};
|
||||
use sqlx::{
|
||||
error::BoxDynError,
|
||||
postgres::{PgHasArrayType, PgTypeInfo, PgValueRef},
|
||||
Decode, Postgres,
|
||||
};
|
||||
|
||||
/// This type will automatically decode Postgres "aclitem" values, provided that
|
||||
/// the query is cast to a TEXT type and selected with type annotations. For
|
||||
/// example:
|
||||
/// ```sql
|
||||
/// select datacl::text[] as "datacl: Vec<PgAclItem>" from pg_database;
|
||||
/// ```
|
||||
/// The TEXT cast is necessary because the aclitem type itself is incompatible
|
||||
/// with binary value format, which makes it incompatible with SQLx.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct PgAclItem {
|
||||
pub grantee: String,
|
||||
pub privileges: Vec<PgPrivilege>,
|
||||
pub grantor: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct PgPrivilege {
|
||||
pub grant_option: bool,
|
||||
pub privilege: PgPrivilegeType,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
|
||||
pub enum PgPrivilegeType {
|
||||
Select,
|
||||
Insert,
|
||||
Update,
|
||||
Delete,
|
||||
Truncate,
|
||||
References,
|
||||
Trigger,
|
||||
Create,
|
||||
Connect,
|
||||
Temporary,
|
||||
Execute,
|
||||
Usage,
|
||||
Set,
|
||||
AlterSystem,
|
||||
Maintain,
|
||||
}
|
||||
|
||||
impl<'a> Decode<'a, Postgres> for PgAclItem {
|
||||
fn decode(value: PgValueRef<'a>) -> Result<Self, BoxDynError> {
|
||||
let acl_item_str = <&str as Decode<Postgres>>::decode(value)?;
|
||||
let (remainder, acl_item) = parse_acl_item::<(_, nom::error::ErrorKind)>(acl_item_str)
|
||||
.map_err(|err| err.to_owned())?;
|
||||
assert_eq!(remainder, "");
|
||||
Ok(acl_item)
|
||||
}
|
||||
}
|
||||
|
||||
impl sqlx::Type<Postgres> for PgAclItem {
|
||||
fn type_info() -> PgTypeInfo {
|
||||
PgTypeInfo::with_name("aclitem")
|
||||
}
|
||||
}
|
||||
|
||||
impl PgHasArrayType for PgAclItem {
|
||||
fn array_type_info() -> PgTypeInfo {
|
||||
PgTypeInfo::array_of("aclitem")
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_acl_item<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, PgAclItem, E> {
|
||||
let (remainder, grantee) = parse_identifier(input)?;
|
||||
let (remainder, _) = char('=').parse(remainder)?;
|
||||
let (remainder, privileges) = parse_privileges(remainder)?;
|
||||
let (remainder, _) = char('/').parse(remainder)?;
|
||||
let (remainder, grantor) = parse_identifier(remainder)?;
|
||||
Ok((
|
||||
remainder,
|
||||
PgAclItem {
|
||||
grantee,
|
||||
privileges,
|
||||
grantor,
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
fn parse_identifier<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, String, E> {
|
||||
alt((
|
||||
delimited(char('"'), parse_escaped_identifier, char('"')),
|
||||
parse_plain_identifier,
|
||||
))
|
||||
.parse(input)
|
||||
}
|
||||
|
||||
/// WARNING: This works correctly only for identifiers read from aclitem
|
||||
/// strings, as they are quote-escaped whenever the identifier contains
|
||||
/// characters other than alphanumerics and underscores.
|
||||
fn parse_plain_identifier<'a, E: ParseError<&'a str>>(
|
||||
input: &'a str,
|
||||
) -> IResult<&'a str, String, E> {
|
||||
take_till(|c: char| !c.is_alphanum() && c != '_')
|
||||
.parse(input)
|
||||
.map(|(remainder, parsed)| (remainder, parsed.to_owned()))
|
||||
}
|
||||
|
||||
fn parse_escaped_identifier<'a, E: ParseError<&'a str>>(
|
||||
input: &'a str,
|
||||
) -> IResult<&'a str, String, E> {
|
||||
let (remainder, parsed) = many1(alt((value("\"", tag("\"\"")), is_not("\"")))).parse(input)?;
|
||||
Ok((remainder, parsed.join("")))
|
||||
}
|
||||
|
||||
fn parse_privileges<'a, E: ParseError<&'a str>>(
|
||||
input: &'a str,
|
||||
) -> IResult<&'a str, Vec<PgPrivilege>, E> {
|
||||
many0(parse_privilege).parse(input)
|
||||
}
|
||||
|
||||
fn parse_privilege<'a, E: ParseError<&'a str>>(input: &'a str) -> IResult<&'a str, PgPrivilege, E> {
|
||||
let (remainder, priv_type) = alt((
|
||||
value(PgPrivilegeType::Select, char('r')),
|
||||
value(PgPrivilegeType::Insert, char('a')),
|
||||
value(PgPrivilegeType::Update, char('w')),
|
||||
value(PgPrivilegeType::Delete, char('d')),
|
||||
value(PgPrivilegeType::Truncate, char('D')),
|
||||
value(PgPrivilegeType::References, char('x')),
|
||||
value(PgPrivilegeType::Trigger, char('t')),
|
||||
value(PgPrivilegeType::Create, char('C')),
|
||||
value(PgPrivilegeType::Connect, char('c')),
|
||||
value(PgPrivilegeType::Temporary, char('T')),
|
||||
value(PgPrivilegeType::Execute, char('X')),
|
||||
value(PgPrivilegeType::Usage, char('U')),
|
||||
value(PgPrivilegeType::Set, char('s')),
|
||||
value(PgPrivilegeType::AlterSystem, char('A')),
|
||||
value(PgPrivilegeType::Maintain, char('m')),
|
||||
))
|
||||
.parse(input)?;
|
||||
let (remainder, parsed_grant_option) = opt(char('*')).parse(remainder)?;
|
||||
Ok((
|
||||
remainder,
|
||||
PgPrivilege {
|
||||
grant_option: parsed_grant_option.is_some(),
|
||||
privilege: priv_type,
|
||||
},
|
||||
))
|
||||
}
|
||||
65
src/pg_attributes.rs
Normal file
65
src/pg_attributes.rs
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
use sqlx::{postgres::types::Oid, query_as, PgExecutor};
|
||||
|
||||
pub struct PgAttribute {
|
||||
/// The table this column belongs to
|
||||
pub attrelid: Oid,
|
||||
/// The column name
|
||||
pub attname: String,
|
||||
/// The data type of this column (zero for a dropped column)
|
||||
pub atttypid: Oid,
|
||||
/// A copy of pg_type.typlen of this column's type
|
||||
pub attlen: i16,
|
||||
/// The number of the column. Ordinary columns are numbered from 1 up. System columns, such as ctid, have (arbitrary) negative numbers.
|
||||
pub attnum: i16,
|
||||
/// This represents a not-null constraint.
|
||||
pub attnotnull: Option<bool>,
|
||||
/// This column has a default expression or generation expression, in which case there will be a corresponding entry in the pg_attrdef catalog that actually defines the expression. (Check attgenerated to determine whether this is a default or a generation expression.)
|
||||
pub atthasdef: bool,
|
||||
/// This column has a value which is used where the column is entirely missing from the row, as happens when a column is added with a non-volatile DEFAULT value after the row is created. The actual value used is stored in the attmissingval column.
|
||||
pub atthasmissing: bool,
|
||||
/// If a zero byte (''), then not an identity column. Otherwise, a = generated always, d = generated by default.
|
||||
pub attidentity: Option<i8>,
|
||||
/// If a zero byte (''), then not a generated column. Otherwise, s = stored. (Other values might be added in the future.)
|
||||
pub attgenerated: Option<i8>,
|
||||
/// This column has been dropped and is no longer valid. A dropped column is still physically present in the table, but is ignored by the parser and so cannot be accessed via SQL.
|
||||
pub attisdropped: bool,
|
||||
/// This column is defined locally in the relation. Note that a column can be locally defined and inherited simultaneously.
|
||||
pub attislocal: bool,
|
||||
// /// Column-level access privileges, if any have been granted specifically on this column
|
||||
// pub attacl: Option<Vec<String>>,
|
||||
/// Attribute-level options, as “keyword=value” strings
|
||||
pub attoptions: Option<Vec<String>>,
|
||||
/// Attribute-level foreign data wrapper options, as “keyword=value” strings
|
||||
pub attfdwoptions: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
pub async fn fetch_attributes_for_rel<'a, E: PgExecutor<'a>>(
|
||||
oid: Oid,
|
||||
client: E,
|
||||
) -> Result<Vec<PgAttribute>, sqlx::Error> {
|
||||
query_as!(
|
||||
PgAttribute,
|
||||
r#"
|
||||
select
|
||||
attrelid,
|
||||
attname,
|
||||
atttypid,
|
||||
attlen,
|
||||
attnum,
|
||||
attnotnull as "attnotnull?",
|
||||
atthasdef,
|
||||
atthasmissing,
|
||||
attidentity,
|
||||
attgenerated,
|
||||
attisdropped,
|
||||
attislocal,
|
||||
attoptions,
|
||||
attfdwoptions
|
||||
from pg_attribute
|
||||
where attrelid = $1 and attnum > 0 and not attisdropped
|
||||
"#,
|
||||
&oid
|
||||
)
|
||||
.fetch_all(client)
|
||||
.await
|
||||
}
|
||||
111
src/pg_classes.rs
Normal file
111
src/pg_classes.rs
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
use sqlx::{postgres::types::Oid, query_as, PgExecutor};
|
||||
|
||||
use crate::pg_acls::PgAclItem;
|
||||
|
||||
pub struct PgClass {
|
||||
/// Row identifier
|
||||
pub oid: Oid,
|
||||
/// Name of the table, index, view, etc.
|
||||
pub relname: String,
|
||||
/// The OID of the namespace that contains this relation
|
||||
pub relnamespace: Oid,
|
||||
/// The OID of the data type that corresponds to this table's row type, if any; zero for indexes, sequences, and toast tables, which have no pg_type entry
|
||||
pub reltype: Oid,
|
||||
/// For typed tables, the OID of the underlying composite type; zero for all other relations
|
||||
pub reloftype: Oid,
|
||||
/// Owner of the relation
|
||||
pub relowner: Oid,
|
||||
/// r = ordinary table, i = index, S = sequence, t = TOAST table, v = view, m = materialized view, c = composite type, f = foreign table, p = partitioned table, I = partitioned index
|
||||
pub relkind: i8,
|
||||
/// Number of user columns in the relation (system columns not counted). There must be this many corresponding entries in pg_attribute. See also pg_attribute.attnum.
|
||||
pub relnatts: i16,
|
||||
/// Number of CHECK constraints on the table; see pg_constraint catalog
|
||||
pub relchecks: i16,
|
||||
/// True if table has (or once had) rules; see pg_rewrite catalog
|
||||
pub relhasrules: bool,
|
||||
/// True if table has (or once had) triggers; see pg_trigger catalog
|
||||
pub relhastriggers: bool,
|
||||
/// True if table or index has (or once had) any inheritance children or partitions
|
||||
pub relhassubclass: bool,
|
||||
/// True if table has row-level security enabled; see pg_policy catalog
|
||||
pub relrowsecurity: bool,
|
||||
/// True if row-level security (when enabled) will also apply to table owner; see pg_policy catalog
|
||||
pub relforcerowsecurity: bool,
|
||||
/// True if relation is populated (this is true for all relations other than some materialized views)
|
||||
pub relispopulated: bool,
|
||||
/// True if table or index is a partition
|
||||
pub relispartition: bool,
|
||||
pub relacl: Option<Vec<PgAclItem>>,
|
||||
}
|
||||
|
||||
impl PgClass {
|
||||
pub async fn fetch_all_by_kind_any<'a, I: IntoIterator<Item = PgRelKind>, E: PgExecutor<'a>>(
|
||||
kinds: I,
|
||||
client: E,
|
||||
) -> Result<Vec<PgClass>, sqlx::Error> {
|
||||
let kinds_i8 = kinds
|
||||
.into_iter()
|
||||
.map(|kind| kind.to_u8() as i8)
|
||||
.collect::<Vec<i8>>();
|
||||
query_as!(
|
||||
PgClass,
|
||||
r#"
|
||||
select
|
||||
oid,
|
||||
relname,
|
||||
relnamespace,
|
||||
reltype,
|
||||
reloftype,
|
||||
relowner,
|
||||
relkind,
|
||||
relnatts,
|
||||
relchecks,
|
||||
relhasrules,
|
||||
relhastriggers,
|
||||
relhassubclass,
|
||||
relrowsecurity,
|
||||
relforcerowsecurity,
|
||||
relispopulated,
|
||||
relispartition,
|
||||
relacl::text[] as "relacl: Vec<PgAclItem>"
|
||||
from pg_class
|
||||
where
|
||||
relkind = any($1)
|
||||
"#,
|
||||
kinds_i8.as_slice(),
|
||||
)
|
||||
.fetch_all(client)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
pub enum PgRelKind {
|
||||
OrdinaryTable,
|
||||
Index,
|
||||
Sequence,
|
||||
ToastTable,
|
||||
View,
|
||||
MaterializedView,
|
||||
CompositeType,
|
||||
ForeignTable,
|
||||
PartitionedTable,
|
||||
PartitionedIndex,
|
||||
}
|
||||
|
||||
impl PgRelKind {
|
||||
pub fn to_u8(&self) -> u8 {
|
||||
let ch = match self {
|
||||
Self::OrdinaryTable => 'r',
|
||||
Self::Index => 'i',
|
||||
Self::Sequence => 'S',
|
||||
Self::ToastTable => 't',
|
||||
Self::View => 'v',
|
||||
Self::MaterializedView => 'm',
|
||||
Self::CompositeType => 'c',
|
||||
Self::ForeignTable => 'f',
|
||||
Self::PartitionedTable => 'p',
|
||||
Self::PartitionedIndex => 'I',
|
||||
};
|
||||
ch as u8
|
||||
}
|
||||
}
|
||||
72
src/pg_databases.rs
Normal file
72
src/pg_databases.rs
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
use sqlx::{postgres::types::Oid, query_as, PgExecutor};
|
||||
|
||||
use crate::pg_acls::PgAclItem;
|
||||
|
||||
/// A row of the `pg_database` system catalog: one entry per database in
/// the cluster. Field docs follow the PostgreSQL catalog documentation.
#[derive(Clone, Debug)]
pub struct PgDatabase {
    /// Row identifier
    pub oid: Oid,
    /// Database name
    pub datname: String,
    /// Owner of the database, usually the user who created it
    pub datdba: Oid,
    /// Character encoding for this database (pg_encoding_to_char() can translate this number to the encoding name)
    pub encoding: i32,
    /// Locale provider for this database: b = builtin, c = libc, i = icu
    pub datlocprovider: i8,
    /// If true, then this database can be cloned by any user with CREATEDB privileges; if false, then only superusers or the owner of the database can clone it.
    pub datistemplate: bool,
    /// If false then no one can connect to this database. This is used to protect the template0 database from being altered.
    pub datallowconn: bool,
    /// Indicates that there are login event triggers defined for this database. This flag is used to avoid extra lookups on the pg_event_trigger table during each backend startup. This flag is used internally by PostgreSQL and should not be manually altered or read for monitoring purposes.
    pub dathasloginevt: bool,
    /// Sets maximum number of concurrent connections that can be made to this database. -1 means no limit, -2 indicates the database is invalid.
    pub datconnlimit: i32,
    /// The default tablespace for the database. Within this database, all tables for which pg_class.reltablespace is zero will be stored in this tablespace; in particular, all the non-shared system catalogs will be there.
    pub dattablespace: Oid,
    /// LC_COLLATE for this database
    pub datcollate: String,
    /// LC_CTYPE for this database
    pub datctype: String,
    /// Collation provider locale name for this database. If the provider is libc, datlocale is NULL; datcollate and datctype are used instead.
    pub datlocale: Option<String>,
    /// ICU collation rules for this database
    pub daticurules: Option<String>,
    /// Provider-specific version of the collation. This is recorded when the database is created and then checked when it is used, to detect changes in the collation definition that could lead to data corruption.
    pub datcollversion: Option<String>,
    /// Access privileges; see Section 5.8 for details
    pub datacl: Option<Vec<PgAclItem>>,
}
|
||||
|
||||
impl PgDatabase {
    /// Load the `pg_database` row for the database the given executor is
    /// currently connected to (matched via `current_database()`).
    ///
    /// `datacl` is cast to `text[]` so sqlx can decode it into
    /// `Vec<PgAclItem>`; a NULL acl decodes as `None`.
    pub async fn fetch_current<'a, E: PgExecutor<'a>>(
        client: E,
    ) -> Result<PgDatabase, sqlx::Error> {
        query_as!(
            PgDatabase,
            r#"
select
oid,
datname,
datdba,
encoding,
datlocprovider,
datistemplate,
datallowconn,
dathasloginevt,
datconnlimit,
dattablespace,
datcollate,
datctype,
datlocale,
daticurules,
datcollversion,
datacl::text[] as "datacl: Vec<PgAclItem>"
from pg_database
where datname = current_database()
"#,
        )
        .fetch_one(client)
        .await
    }
}
|
||||
187
src/pg_roles.rs
Normal file
187
src/pg_roles.rs
Normal file
|
|
@ -0,0 +1,187 @@
|
|||
use chrono::{DateTime, Utc};
|
||||
use sqlx::{postgres::types::Oid, prelude::FromRow, query_as, PgExecutor};
|
||||
use thiserror::Error;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// A row of the `pg_roles` view. Password, config, and membership columns
/// are not included here.
#[derive(Clone, Debug, Eq, Hash, FromRow, PartialEq)]
pub struct PgRole {
    /// ID of role
    pub oid: Oid,
    /// Role name
    pub rolname: String,
    /// Role has superuser privileges
    pub rolsuper: bool,
    /// Role automatically inherits privileges of roles it is a member of
    pub rolinherit: bool,
    /// Role can create more roles
    pub rolcreaterole: bool,
    /// Role can create databases
    pub rolcreatedb: bool,
    /// Role can log in. That is, this role can be given as the initial session authorization identifier
    pub rolcanlogin: bool,
    /// Role is a replication role. A replication role can initiate replication connections and create and drop replication slots.
    pub rolreplication: bool,
    /// For roles that can log in, this sets maximum number of concurrent connections this role can make. -1 means no limit.
    pub rolconnlimit: i32,
    /// Password expiry time (only used for password authentication); null if no expiration
    pub rolvaliduntil: Option<DateTime<Utc>>,
    /// Role bypasses every row-level security policy, see Section 5.9 for more information.
    pub rolbypassrls: bool,
}
|
||||
|
||||
impl PgRole {
    /// Fetch every `pg_roles` row whose `rolname` appears in `names`.
    ///
    /// Names that match no role are silently absent from the result.
    /// The `pg_roles` view reports its columns as nullable to sqlx, so
    /// each alias carries a `!` override to decode as non-optional.
    pub async fn fetch_by_names_any<'a, E: PgExecutor<'a>>(
        names: Vec<String>,
        client: E,
    ) -> Result<Vec<PgRole>, sqlx::Error> {
        query_as!(
            PgRole,
            r#"
select
oid as "oid!",
rolname as "rolname!",
rolsuper as "rolsuper!",
rolinherit as "rolinherit!",
rolcreaterole as "rolcreaterole!",
rolcreatedb as "rolcreatedb!",
rolcanlogin as "rolcanlogin!",
rolreplication as "rolreplication!",
rolconnlimit as "rolconnlimit!",
rolvaliduntil,
rolbypassrls as "rolbypassrls!"
from pg_roles where rolname = any($1)"#,
            names.as_slice()
        )
        .fetch_all(client)
        .await
    }
}
|
||||
|
||||
/// A role plus the roles related to it one membership step at a time,
/// as a tree rooted at the role the fetch started from (members for
/// `fetch_members`, granted roles for `fetch_granted`).
#[derive(Clone, Debug)]
pub struct RoleTree {
    /// The role at this node.
    pub role: PgRole,
    /// Roles one membership step away from `role`.
    pub branches: Vec<RoleTree>,
    /// Whether privileges flow across the edge to this node — the AND of
    /// `inherit_option` along the path from the root, OR-ed over
    /// duplicate paths (see the recursive CTEs in the fetch queries).
    pub inherit: bool,
}
|
||||
|
||||
/// Flat row produced by the recursive membership CTEs before the rows
/// are stitched into a `RoleTree`.
#[derive(Debug, FromRow)]
struct RoleTreeRow {
    // All pg_roles columns decode straight into the embedded PgRole.
    #[sqlx(flatten)]
    role: PgRole,
    // Oid of this row's parent node; None marks the root row.
    branch: Option<Oid>,
    // Effective inherit flag for the path to this row.
    inherit: bool,
}
|
||||
|
||||
impl RoleTree {
|
||||
pub async fn fetch_members<'a, E: PgExecutor<'a>>(
|
||||
role_oid: Oid,
|
||||
client: E,
|
||||
) -> Result<Option<RoleTree>, sqlx::Error> {
|
||||
let rows: Vec<RoleTreeRow> = query_as(
|
||||
"
|
||||
with recursive cte as (
|
||||
select $1 as roleid, null::oid as branch, true as inherit
|
||||
union all
|
||||
select m.member, m.roleid, c.inherit and m.inherit_option
|
||||
from cte as c
|
||||
join pg_auth_members m on m.roleid = c.roleid
|
||||
)
|
||||
select pg_roles.*, branch, inherit
|
||||
from (
|
||||
select roleid, branch, bool_or(inherit) as inherit
|
||||
from cte
|
||||
group by roleid, branch
|
||||
) as subquery
|
||||
join pg_roles on pg_roles.oid = subquery.roleid
|
||||
",
|
||||
)
|
||||
.bind(role_oid)
|
||||
.fetch_all(client)
|
||||
.await?;
|
||||
Ok(rows
|
||||
.iter()
|
||||
.find(|row| row.branch.is_none())
|
||||
.map(|root_row| RoleTree {
|
||||
role: root_row.role.clone(),
|
||||
branches: compute_members(&rows, root_row.role.oid),
|
||||
inherit: root_row.inherit,
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn fetch_granted<'a, E: PgExecutor<'a>>(
|
||||
role_oid: Oid,
|
||||
client: E,
|
||||
) -> Result<Option<RoleTree>, sqlx::Error> {
|
||||
let rows: Vec<RoleTreeRow> = query_as(
|
||||
"
|
||||
with recursive cte as (
|
||||
select $1 as roleid, null::oid as branch, true as inherit
|
||||
union all
|
||||
select m.roleid, m.member as branch, c.inherit and m.inherit_option
|
||||
from cte as c
|
||||
join pg_auth_members m on m.member = c.roleid
|
||||
)
|
||||
select pg_roles.*, branch, inherit
|
||||
from (
|
||||
select roleid, branch, bool_or(inherit) as inherit
|
||||
from cte
|
||||
group by roleid, branch
|
||||
) as subquery
|
||||
join pg_roles on pg_roles.oid = subquery.roleid
|
||||
",
|
||||
)
|
||||
.bind(role_oid)
|
||||
.fetch_all(client)
|
||||
.await?;
|
||||
Ok(rows
|
||||
.iter()
|
||||
.find(|row| row.branch.is_none())
|
||||
.map(|root_row| RoleTree {
|
||||
role: root_row.role.clone(),
|
||||
branches: compute_members(&rows, root_row.role.oid),
|
||||
inherit: root_row.inherit,
|
||||
}))
|
||||
}
|
||||
|
||||
pub fn flatten_inherited(&self) -> Vec<&PgRole> {
|
||||
[
|
||||
vec![&self.role],
|
||||
self.branches
|
||||
.iter()
|
||||
.filter(|member| member.inherit)
|
||||
.map(|member| member.flatten_inherited())
|
||||
.collect::<Vec<_>>()
|
||||
.concat(),
|
||||
]
|
||||
.concat()
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_members(rows: &Vec<RoleTreeRow>, root: Oid) -> Vec<RoleTree> {
|
||||
rows.iter()
|
||||
.filter(|row| row.branch == Some(root))
|
||||
.map(|row| RoleTree {
|
||||
role: row.role.clone(),
|
||||
branches: compute_members(rows, row.role.oid),
|
||||
inherit: row.inherit,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Failure modes when extracting an application user id from a database
/// role name of the form `<prefix><uuid-simple>`.
#[derive(Debug, Error)]
pub enum RolnameParseError {
    /// The role name did not start with the configured user-role prefix.
    #[error("rolname does not have interim user prefix")]
    MissingPrefix,
    /// The remainder after the prefix was not a valid UUID.
    #[error("unable to parse uuid from rolname: {0}")]
    BadUuid(uuid::Error),
}
|
||||
|
||||
pub fn user_id_from_rolname(rolname: &str, role_prefix: &str) -> Result<Uuid, RolnameParseError> {
|
||||
if !rolname.starts_with(role_prefix) {
|
||||
Err(RolnameParseError::MissingPrefix)
|
||||
} else {
|
||||
let mut rolname = rolname.to_owned();
|
||||
rolname.replace_range(0..role_prefix.len(), "");
|
||||
Uuid::parse_str(&rolname).map_err(RolnameParseError::BadUuid)
|
||||
}
|
||||
}
|
||||
263
src/router.rs
263
src/router.rs
|
|
@ -1,48 +1,60 @@
|
|||
use anyhow::{Context as _, Result};
|
||||
use askama::Template;
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
http::{header::CACHE_CONTROL, HeaderValue},
|
||||
response::{Html, IntoResponse as _, Response},
|
||||
routing::get,
|
||||
routing::{get, post},
|
||||
Router,
|
||||
};
|
||||
use deadpool_postgres::{tokio_postgres::Row, GenericClient};
|
||||
use diesel::prelude::*;
|
||||
use mdengine::{
|
||||
class_privileges_for_grantees,
|
||||
pg_attribute::{attributes_for_rel, PgAttribute},
|
||||
pg_class::{self, PgClass},
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use tower::ServiceBuilder;
|
||||
use tower_http::{
|
||||
services::{ServeDir, ServeFile},
|
||||
set_header::SetResponseHeaderLayer,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
abstract_::{diesel_set_user_id, escape_identifier},
|
||||
app_error::AppError,
|
||||
app_state::{AppState, DieselConn, PgConn},
|
||||
auth,
|
||||
data_layer::{Field, FieldOptionsBuilder, ToHtmlString as _, Value},
|
||||
settings::Settings,
|
||||
users::CurrentUser,
|
||||
};
|
||||
|
||||
const FRONTEND_ROW_LIMIT: i64 = 1000;
|
||||
use crate::{app_state::AppState, auth, routes};
|
||||
|
||||
pub fn new_router(state: AppState) -> Router<()> {
|
||||
let base_path = state.settings.base_path.clone();
|
||||
let app = Router::new()
|
||||
.route("/", get(landing_page))
|
||||
.route("/c/{oid}/viewer", get(viewer_page))
|
||||
.route("/databases", get(routes::bases::list_bases_page))
|
||||
.route("/databases/add", post(routes::bases::add_base_page))
|
||||
.route(
|
||||
"/d/{base_id}/config",
|
||||
get(routes::bases::base_config_page_get),
|
||||
)
|
||||
.route(
|
||||
"/d/{base_id}/config",
|
||||
post(routes::bases::base_config_page_post),
|
||||
)
|
||||
.route(
|
||||
"/d/{base_id}/relations",
|
||||
get(routes::relations::list_relations_page),
|
||||
)
|
||||
.route(
|
||||
"/d/{base_id}/r/{class_oid}/viewer",
|
||||
get(routes::relations::viewer_page),
|
||||
)
|
||||
.nest("/auth", auth::new_router())
|
||||
.layer(SetResponseHeaderLayer::if_not_present(
|
||||
CACHE_CONTROL,
|
||||
HeaderValue::from_static("no-cache"),
|
||||
))
|
||||
.nest_service(
|
||||
"/js_dist",
|
||||
ServiceBuilder::new()
|
||||
.layer(SetResponseHeaderLayer::if_not_present(
|
||||
CACHE_CONTROL,
|
||||
HeaderValue::from_static("max-age=21600, stale-while-revalidate=86400"),
|
||||
))
|
||||
.service(
|
||||
ServeDir::new("js_dist").not_found_service(
|
||||
ServiceBuilder::new()
|
||||
.layer(SetResponseHeaderLayer::if_not_present(
|
||||
CACHE_CONTROL,
|
||||
HeaderValue::from_static("no-cache"),
|
||||
))
|
||||
.service(ServeFile::new("static/_404.html")),
|
||||
),
|
||||
),
|
||||
)
|
||||
.fallback_service(
|
||||
ServiceBuilder::new()
|
||||
.layer(SetResponseHeaderLayer::if_not_present(
|
||||
|
|
@ -70,130 +82,75 @@ pub fn new_router(state: AppState) -> Router<()> {
|
|||
}
|
||||
}
|
||||
|
||||
/// GET `/` — landing page listing every relation the current user's
/// database role has privileges on.
async fn landing_page(
    State(Settings {
        base_path,
        pg_user_role_prefix,
        ..
    }): State<Settings>,
    DieselConn(db_conn): DieselConn,
    CurrentUser(current_user): CurrentUser,
) -> Result<Response, AppError> {
    // The grantee to look up is the per-user role: `<prefix><uuid-simple>`.
    let grantees = vec![format!(
        "{}{}",
        pg_user_role_prefix,
        current_user.id.simple()
    )];
    let visible_tables = db_conn
        .interact(move |conn| -> Result<Vec<_>> {
            // Switch to the user's own role first so the privilege query
            // runs under their grants.
            diesel_set_user_id(&pg_user_role_prefix, current_user.id, conn)?;
            let privileges = class_privileges_for_grantees(grantees)
                .load(conn)
                .context("error reading classes")?;
            Ok(privileges.into_iter().map(|value| value.class).collect())
        })
        .await
        .unwrap()?;
    // One-off template struct rendered server-side with askama.
    #[derive(Template)]
    #[template(path = "tmp.html")]
    struct ResponseTemplate {
        base_path: String,
        relations: Vec<PgClass>,
    }
    Ok(Html(
        ResponseTemplate {
            base_path,
            relations: visible_tables,
        }
        .render()?,
    )
    .into_response())
}
|
||||
|
||||
/// Path parameters for `GET /c/{oid}/viewer`.
#[derive(Deserialize)]
struct ViewerPagePath {
    // pg_class oid of the relation to view.
    oid: u32,
}
|
||||
|
||||
/// GET `/c/{oid}/viewer` — render up to `FRONTEND_ROW_LIMIT` rows of the
/// relation identified by `oid`, queried under the current user's role.
async fn viewer_page(
    State(Settings {
        base_path,
        pg_user_role_prefix,
        ..
    }): State<Settings>,
    DieselConn(diesel_conn): DieselConn,
    PgConn(pg_client): PgConn,
    CurrentUser(current_user): CurrentUser,
    Path(params): Path<ViewerPagePath>,
) -> Result<Response, AppError> {
    // Switch the tokio_postgres connection to the user's own role so the
    // data query below is subject to their grants.
    pg_client
        .query(
            &format!(
                "SET ROLE {};",
                escape_identifier(&format!(
                    "{}{}",
                    pg_user_role_prefix,
                    current_user.id.simple()
                )),
            ),
            &[],
        )
        .await?;

    // FIXME: Ensure user has access to relation

    // One-off helper struct to hold Diesel results
    struct RelMeta {
        class: PgClass,
        attrs: Vec<PgAttribute>,
    }
    let RelMeta { class, attrs } = diesel_conn
        .interact(move |conn| -> Result<_> {
            Ok(RelMeta {
                class: pg_class::table
                    .filter(pg_class::dsl::oid.eq(params.oid))
                    .select(PgClass::as_select())
                    .first(conn)?,
                attrs: attributes_for_rel(params.oid).load(conn)?,
            })
        })
        .await
        .unwrap()?;
    // NOTE(review): column names are interpolated unquoted here while
    // relname is escaped — confirm attnames are safe or escape them too.
    let query = [
        "SELECT",
        &attrs
            .iter()
            .map(|attr| attr.attname.clone())
            .collect::<Vec<_>>()
            .join(", "),
        "FROM",
        &escape_identifier(&class.relname),
        "LIMIT",
        &FRONTEND_ROW_LIMIT.to_string(),
        ";",
    ]
    .join(" ");
    let rows = pg_client.query(&query, &[]).await?;
    // One-off template struct rendered server-side with askama.
    #[derive(Template)]
    #[template(path = "class-viewer.html")]
    struct ResponseTemplate {
        base_path: String,
        fields: Vec<Field>,
        rows: Vec<Row>,
    }
    Ok(Html(
        ResponseTemplate {
            base_path,
            fields: attrs
                .into_iter()
                .map(|attr| Field {
                    options: FieldOptionsBuilder::default().build().unwrap(),
                    name: attr.attname,
                })
                .collect(),
            rows,
        }
        .render()?,
    )
    .into_response())
}
|
||||
// #[derive(Deserialize)]
|
||||
// struct RbacIndexPath {
|
||||
// oid: u32,
|
||||
// }
|
||||
//
|
||||
// async fn rbac_index(
|
||||
// State(Settings {
|
||||
// base_path,
|
||||
// pg_user_role_prefix: role_prefix,
|
||||
// ..
|
||||
// }): State<Settings>,
|
||||
// DieselConn(diesel_conn): DieselConn,
|
||||
// PgConn(pg_client): PgConn,
|
||||
// CurrentUser(current_user): CurrentUser,
|
||||
// Path(params): Path<RbacIndexPath>,
|
||||
// ) -> Result<Response, AppError> {
|
||||
// pg_set_role(&role_prefix, ¤t_user.id, &pg_client, &diesel_conn)
|
||||
// .await
|
||||
// .context("failed to set tokio_postgres role")?;
|
||||
//
|
||||
// struct UserDetails {
|
||||
// user: User,
|
||||
// roles: Vec<String>,
|
||||
// }
|
||||
// let all_users = {
|
||||
// let role_prefix = role_prefix.clone();
|
||||
// diesel_conn
|
||||
// .interact(move |conn| -> Result<_> {
|
||||
// let pg_users: Vec<PgRole> =
|
||||
// .get_results(conn)
|
||||
// .context("failed to query pg users with database access")?;
|
||||
// let user_ids: Vec<Uuid> = pg_users
|
||||
// .iter()
|
||||
// .filter_map(|role| {
|
||||
// let mut rolname = role.rolname.clone();
|
||||
// rolname.replace_range(0..role_prefix.len(), "");
|
||||
// Uuid::parse_str(&rolname).ok()
|
||||
// })
|
||||
// .collect();
|
||||
// let all_users: Vec<User> = users::table
|
||||
// .filter(users::dsl::id.eq_any(user_ids))
|
||||
// .get_results(conn)
|
||||
// .context("failed to query users with database access")?;
|
||||
// Ok(all_users)
|
||||
// })
|
||||
// .await
|
||||
// .unwrap()?
|
||||
// };
|
||||
// #[derive(Template)]
|
||||
// #[template(path = "rbac.html")]
|
||||
// struct ResponseTemplate {
|
||||
// base_path: String,
|
||||
// role_prefix: String,
|
||||
// users: Vec<UserDetails>,
|
||||
// }
|
||||
//
|
||||
// Ok(Html(
|
||||
// ResponseTemplate {
|
||||
// base_path,
|
||||
// role_prefix,
|
||||
// users: all_users
|
||||
// .into_iter()
|
||||
// .map(|user| UserDetails {
|
||||
// user,
|
||||
// roles: vec![],
|
||||
// })
|
||||
// .collect(),
|
||||
// }
|
||||
// .render()?,
|
||||
// )
|
||||
// .into_response())
|
||||
// }
|
||||
|
|
|
|||
141
src/routes/bases.rs
Normal file
141
src/routes/bases.rs
Normal file
|
|
@ -0,0 +1,141 @@
|
|||
use anyhow::Context as _;
|
||||
use askama::Template;
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
response::{Html, IntoResponse as _, Redirect, Response},
|
||||
};
|
||||
use axum_extra::extract::Form;
|
||||
use serde::Deserialize;
|
||||
use sqlx::{query, query_scalar};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
app_error::AppError,
|
||||
app_state::AppDbConn,
|
||||
base_pooler::BasePooler,
|
||||
base_user_perms::sync_perms_for_base,
|
||||
bases::Base,
|
||||
db_conns::{escape_identifier, init_role},
|
||||
settings::Settings,
|
||||
users::CurrentUser,
|
||||
};
|
||||
|
||||
/// GET `/databases` — list every base the current user may either
/// configure or connect to.
pub async fn list_bases_page(
    State(Settings { base_path, .. }): State<Settings>,
    AppDbConn(mut app_db): AppDbConn,
    CurrentUser(current_user): CurrentUser,
) -> Result<Response, AppError> {
    let bases =
        Base::fetch_by_perm_any(current_user.id, vec!["configure", "connect"], &mut *app_db)
            .await?;
    // One-off template struct rendered server-side with askama.
    #[derive(Template)]
    #[template(path = "list_bases.html")]
    struct ResponseTemplate {
        base_path: String,
        bases: Vec<Base>,
    }
    Ok(Html(ResponseTemplate { base_path, bases }.render()?).into_response())
}
|
||||
|
||||
/// POST `/databases/add` — create an empty base owned by the current
/// user, grant them the `configure` permission on it, and redirect to
/// the new base's config page.
pub async fn add_base_page(
    State(Settings { base_path, .. }): State<Settings>,
    AppDbConn(mut app_db): AppDbConn,
    CurrentUser(current_user): CurrentUser,
) -> Result<Response, AppError> {
    // FIXME: CSRF
    // The base starts with an empty url; the user fills it in on the
    // config page they are redirected to below.
    let base = Base::insertable_builder()
        .url("".to_owned())
        .owner_id(current_user.id)
        .build()?
        .insert(&mut *app_db)
        .await?;
    // NOTE(review): the base insert above and the perm insert below are
    // two separate statements — presumably they should share a
    // transaction so a failure here cannot strand an unconfigurable
    // base; confirm.
    query!(
        "
insert into base_user_perms
(id, base_id, user_id, perm)
values ($1, $2, $3, 'configure')",
        Uuid::now_v7(),
        base.id,
        current_user.id
    )
    .execute(&mut *app_db)
    .await?;
    Ok(Redirect::to(&format!("{}/d/{}/config", base_path, base.id)).into_response())
}
|
||||
|
||||
/// Path parameters for the `/d/{base_id}/config` routes.
#[derive(Deserialize)]
pub struct BaseConfigPagePath {
    base_id: Uuid,
}
|
||||
|
||||
/// GET `/d/{base_id}/config` — render the configuration form for one
/// base. 404s when no base has that id.
pub async fn base_config_page_get(
    State(Settings { base_path, .. }): State<Settings>,
    AppDbConn(mut app_db): AppDbConn,
    // The extractor enforces an authenticated session; the value itself
    // is not otherwise used yet (see the auth FIXME below).
    CurrentUser(current_user): CurrentUser,
    Path(params): Path<BaseConfigPagePath>,
) -> Result<Response, AppError> {
    // FIXME: auth
    let base = Base::fetch_by_id(params.base_id, &mut *app_db)
        .await?
        .ok_or(AppError::NotFound("no base found with that id".to_owned()))?;
    // One-off template struct rendered server-side with askama.
    #[derive(Template)]
    #[template(path = "base_config.html")]
    struct ResponseTemplate {
        base: Base,
        base_path: String,
    }
    Ok(Html(ResponseTemplate { base, base_path }.render()?).into_response())
}
|
||||
|
||||
/// Form body submitted by the base config page.
#[derive(Deserialize)]
pub struct BaseConfigPageForm {
    // Display name for the base.
    name: String,
    // postgresql:// connection string for the base's database.
    url: String,
}
|
||||
|
||||
/// POST `/d/{base_id}/config` — persist the submitted name/url. When the
/// connection URL changed, recycle the base's connection pool and
/// re-bootstrap the current user's role and permissions against the
/// newly configured database, then redirect back to the config page.
pub async fn base_config_page_post(
    State(Settings { base_path, .. }): State<Settings>,
    State(mut base_pooler): State<BasePooler>,
    AppDbConn(mut app_db): AppDbConn,
    CurrentUser(current_user): CurrentUser,
    Path(BaseConfigPagePath { base_id }): Path<BaseConfigPagePath>,
    Form(form): Form<BaseConfigPageForm>,
) -> Result<Response, AppError> {
    // FIXME: CSRF
    // FIXME: auth
    // Fetch the pre-update row so the old url can be compared below.
    let base = Base::fetch_by_id(base_id, &mut *app_db)
        .await?
        .ok_or(AppError::NotFound("no base found with that id".to_owned()))?;
    query!(
        "update bases set name = $1, url = $2 where id = $3",
        &form.name,
        &form.url,
        &base_id
    )
    .execute(&mut *app_db)
    .await?;
    if form.url != base.url {
        // Drop pooled connections to the old database before opening a
        // connection against the new url.
        base_pooler.close_for(base_id).await?;
        let mut client = base_pooler.acquire_for(base.id).await?;
        let rolname = format!("{}{}", base.user_role_prefix, current_user.id.simple());
        // Bootstrap user role with database connect privilege. If the user was
        // able to successfully authenticate a connection string, it should be
        // safe to say that they should be allowed to connect as an Interim
        // user.
        init_role(&rolname, &mut client).await?;
        let db_name: String = query_scalar!("select current_database()")
            .fetch_one(&mut *client)
            .await?
            .context("unable to select current_database()")?;
        // init_role leaves the session on the user's role; switch back so
        // the grant below runs with the connection's own privileges.
        query!("reset role").execute(&mut *client).await?;
        // Identifiers cannot be bound as parameters, hence the escaped
        // string interpolation.
        query(&format!(
            "grant connect on database {} to {}",
            escape_identifier(&db_name),
            escape_identifier(&rolname)
        ))
        .execute(&mut *client)
        .await?;
        sync_perms_for_base(base.id, &mut app_db, &mut client).await?;
    }
    Ok(Redirect::to(&format!("{}/d/{}/config", base_path, base_id)).into_response())
}
|
||||
2
src/routes/mod.rs
Normal file
2
src/routes/mod.rs
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
pub mod bases;
|
||||
pub mod relations;
|
||||
170
src/routes/relations.rs
Normal file
170
src/routes/relations.rs
Normal file
|
|
@ -0,0 +1,170 @@
|
|||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::Context as _;
|
||||
use askama::Template;
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
response::{Html, IntoResponse as _, Response},
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use sqlx::{
|
||||
postgres::{types::Oid, PgRow},
|
||||
query,
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
app_error::AppError,
|
||||
app_state::AppDbConn,
|
||||
base_pooler::{self, BasePooler},
|
||||
bases::Base,
|
||||
data_layer::{Field, FieldOptionsBuilder, ToHtmlString as _, Value},
|
||||
db_conns::{escape_identifier, init_role},
|
||||
pg_acls::{PgAclItem, PgPrivilegeType},
|
||||
pg_attributes::fetch_attributes_for_rel,
|
||||
pg_classes::{PgClass, PgRelKind},
|
||||
pg_roles::{PgRole, RoleTree},
|
||||
settings::Settings,
|
||||
users::CurrentUser,
|
||||
};
|
||||
|
||||
/// Path parameters for `GET /d/{base_id}/relations`.
#[derive(Deserialize)]
pub struct ListRelationsPagePath {
    base_id: Uuid,
}
|
||||
|
||||
pub async fn list_relations_page(
|
||||
State(Settings { base_path, .. }): State<Settings>,
|
||||
State(mut base_pooler): State<BasePooler>,
|
||||
AppDbConn(mut app_db): AppDbConn,
|
||||
CurrentUser(current_user): CurrentUser,
|
||||
Path(ListRelationsPagePath { base_id }): Path<ListRelationsPagePath>,
|
||||
) -> Result<Response, AppError> {
|
||||
// FIXME auth
|
||||
let base = Base::fetch_by_id(base_id, &mut *app_db)
|
||||
.await?
|
||||
.ok_or(AppError::NotFound("no base found with that id".to_owned()))?;
|
||||
let mut client = base_pooler.acquire_for(base_id).await?;
|
||||
let rolname = format!("{}{}", &base.user_role_prefix, current_user.id.simple());
|
||||
init_role(&rolname, &mut client).await?;
|
||||
|
||||
let roles = PgRole::fetch_by_names_any(vec![rolname], &mut *client).await?;
|
||||
let role = roles.first().context("role not found in pg_roles")?;
|
||||
let granted_role_tree = RoleTree::fetch_granted(role.oid, &mut *client)
|
||||
.await?
|
||||
.context("unable to construct role tree")?;
|
||||
let granted_roles: HashSet<String> = granted_role_tree
|
||||
.flatten_inherited()
|
||||
.into_iter()
|
||||
.map(|role| role.rolname.clone())
|
||||
.collect();
|
||||
|
||||
let all_rels = PgClass::fetch_all_by_kind_any([PgRelKind::OrdinaryTable], &mut *client).await?;
|
||||
let accessible_rels: Vec<PgClass> = all_rels
|
||||
.into_iter()
|
||||
.filter(|rel| {
|
||||
let privileges: HashSet<PgPrivilegeType> = rel
|
||||
.relacl
|
||||
.clone()
|
||||
.unwrap_or(vec![])
|
||||
.into_iter()
|
||||
.filter(|item| granted_roles.contains(&item.grantee))
|
||||
.flat_map(|item| item.privileges)
|
||||
.map(|privilege| privilege.privilege)
|
||||
.collect();
|
||||
privileges.contains(&PgPrivilegeType::Select)
|
||||
})
|
||||
.collect();
|
||||
|
||||
#[derive(Template)]
|
||||
#[template(path = "list_rels.html")]
|
||||
struct ResponseTemplate {
|
||||
base_path: String,
|
||||
base: Base,
|
||||
rels: Vec<PgClass>,
|
||||
}
|
||||
|
||||
Ok(Html(
|
||||
ResponseTemplate {
|
||||
base,
|
||||
base_path,
|
||||
rels: accessible_rels,
|
||||
}
|
||||
.render()?,
|
||||
)
|
||||
.into_response())
|
||||
}
|
||||
|
||||
/// Path parameters for `GET /d/{base_id}/r/{class_oid}/viewer`.
#[derive(Deserialize)]
pub struct ViewerPagePath {
    base_id: Uuid,
    // pg_class oid of the relation to view.
    class_oid: u32,
}
|
||||
|
||||
pub async fn viewer_page(
|
||||
State(Settings { base_path, .. }): State<Settings>,
|
||||
State(mut base_pooler): State<BasePooler>,
|
||||
AppDbConn(mut app_db): AppDbConn,
|
||||
CurrentUser(current_user): CurrentUser,
|
||||
Path(params): Path<ViewerPagePath>,
|
||||
) -> Result<Response, AppError> {
|
||||
let base = Base::fetch_by_id(params.base_id, &mut *app_db)
|
||||
.await?
|
||||
.ok_or(AppError::NotFound("no base found with that id".to_owned()))?;
|
||||
let mut client = base_pooler.acquire_for(params.base_id).await?;
|
||||
|
||||
init_role(
|
||||
&format!("{}{}", &base.user_role_prefix, ¤t_user.id.simple()),
|
||||
&mut client,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// FIXME: Ensure user has access to database and relation
|
||||
|
||||
let class = query!(
|
||||
"select relname from pg_class where oid = $1",
|
||||
Oid(params.class_oid)
|
||||
)
|
||||
.fetch_optional(&mut *client)
|
||||
.await?
|
||||
.ok_or(AppError::NotFound(
|
||||
"no relation found with that oid".to_owned(),
|
||||
))?;
|
||||
let attrs = fetch_attributes_for_rel(Oid(params.class_oid), &mut *client).await?;
|
||||
|
||||
const FRONTEND_ROW_LIMIT: i64 = 1000;
|
||||
let rows = query(&format!(
|
||||
"select {} from {} limit $1",
|
||||
attrs
|
||||
.iter()
|
||||
.map(|attr| attr.attname.clone())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", "),
|
||||
escape_identifier(&class.relname),
|
||||
))
|
||||
.bind(FRONTEND_ROW_LIMIT)
|
||||
.fetch_all(&mut *client)
|
||||
.await?;
|
||||
#[derive(Template)]
|
||||
#[template(path = "class-viewer.html")]
|
||||
struct ResponseTemplate {
|
||||
base_path: String,
|
||||
fields: Vec<Field>,
|
||||
rows: Vec<PgRow>,
|
||||
}
|
||||
Ok(Html(
|
||||
ResponseTemplate {
|
||||
base_path,
|
||||
fields: attrs
|
||||
.into_iter()
|
||||
.map(|attr| Field {
|
||||
options: FieldOptionsBuilder::default().build().unwrap(),
|
||||
name: attr.attname,
|
||||
})
|
||||
.collect(),
|
||||
rows,
|
||||
}
|
||||
.render()?,
|
||||
)
|
||||
.into_response())
|
||||
}
|
||||
|
|
@ -7,29 +7,27 @@ use axum::{
|
|||
};
|
||||
use axum_extra::extract::CookieJar;
|
||||
use chrono::{DateTime, TimeDelta, Utc};
|
||||
use diesel::{pg::Pg, prelude::*, upsert::excluded};
|
||||
use sqlx::{query, query_as, Executor, PgPool};
|
||||
use tracing::{trace_span, Instrument};
|
||||
|
||||
use crate::{app_error::AppError, app_state::AppState, schema::browser_sessions};
|
||||
use crate::{app_error::AppError, app_state::AppState};
|
||||
|
||||
const EXPIRY_DAYS: i64 = 7;
|
||||
|
||||
#[derive(Clone, Debug, Identifiable, Queryable, Selectable)]
|
||||
#[diesel(table_name = browser_sessions)]
|
||||
#[diesel(check_for_backend(Pg))]
|
||||
pub struct BrowserSession {
|
||||
pub id: String,
|
||||
pub serialized: String,
|
||||
pub expiry: Option<DateTime<Utc>>,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct PgStore {
|
||||
pool: deadpool_diesel::postgres::Pool,
|
||||
pool: PgPool,
|
||||
}
|
||||
|
||||
impl PgStore {
|
||||
pub fn new(pool: deadpool_diesel::postgres::Pool) -> PgStore {
|
||||
pub fn new(pool: PgPool) -> PgStore {
|
||||
Self { pool }
|
||||
}
|
||||
}
|
||||
|
|
@ -51,22 +49,16 @@ impl FromRef<AppState> for PgStore {
|
|||
impl SessionStore for PgStore {
|
||||
async fn load_session(&self, cookie_value: String) -> Result<Option<Session>> {
|
||||
let session_id = Session::id_from_cookie_value(&cookie_value)?;
|
||||
let conn = self.pool.get().await?;
|
||||
let row = conn
|
||||
.interact(move |conn| {
|
||||
// Drop all sessions without recent activity
|
||||
diesel::delete(
|
||||
browser_sessions::table.filter(browser_sessions::expiry.lt(diesel::dsl::now)),
|
||||
query!("delete from browser_sessions where expiry < now()")
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
let row = query_as!(
|
||||
BrowserSession,
|
||||
"select * from browser_sessions where id = $1",
|
||||
session_id
|
||||
)
|
||||
.execute(conn)?;
|
||||
browser_sessions::table
|
||||
.filter(browser_sessions::id.eq(session_id))
|
||||
.select(BrowserSession::as_select())
|
||||
.first(conn)
|
||||
.optional()
|
||||
})
|
||||
.await
|
||||
.unwrap()?;
|
||||
.fetch_optional(&self.pool)
|
||||
.await?;
|
||||
Ok(match row {
|
||||
Some(session) => Some(serde_json::from_str::<Session>(
|
||||
session.serialized.as_str(),
|
||||
|
|
@ -79,47 +71,38 @@ impl SessionStore for PgStore {
|
|||
let serialized_data = serde_json::to_string(&session)?;
|
||||
let session_id = session.id().to_string();
|
||||
let expiry = session.expiry().copied();
|
||||
let conn = self.pool.get().await?;
|
||||
conn.interact(move |conn| {
|
||||
diesel::insert_into(browser_sessions::table)
|
||||
.values((
|
||||
browser_sessions::id.eq(session_id),
|
||||
browser_sessions::serialized.eq(serialized_data),
|
||||
browser_sessions::expiry.eq(expiry),
|
||||
))
|
||||
.on_conflict(browser_sessions::id)
|
||||
.do_update()
|
||||
.set((
|
||||
browser_sessions::serialized.eq(excluded(browser_sessions::serialized)),
|
||||
browser_sessions::expiry.eq(excluded(browser_sessions::expiry)),
|
||||
))
|
||||
.execute(conn)
|
||||
})
|
||||
.await
|
||||
.unwrap()?;
|
||||
query!(
|
||||
"
|
||||
insert into browser_sessions
|
||||
(id, serialized, expiry)
|
||||
values ($1, $2, $3)
|
||||
on conflict (id) do update set
|
||||
serialized = excluded.serialized,
|
||||
expiry = excluded.expiry
|
||||
",
|
||||
session_id,
|
||||
serialized_data,
|
||||
expiry
|
||||
)
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
Ok(session.into_cookie_value())
|
||||
}
|
||||
|
||||
async fn destroy_session(&self, session: Session) -> Result<()> {
|
||||
let session_id = session.id().to_owned();
|
||||
let conn = self.pool.get().await?;
|
||||
conn.interact(move |conn| {
|
||||
diesel::delete(
|
||||
browser_sessions::table.filter(browser_sessions::id.eq(session.id().to_string())),
|
||||
)
|
||||
.execute(conn)
|
||||
})
|
||||
.await
|
||||
.unwrap()?;
|
||||
query!("delete from browser_sessions where id = $1", session_id)
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
tracing::debug!("destroyed session {}", session_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn clear_store(&self) -> Result<()> {
|
||||
let conn = self.pool.get().await?;
|
||||
conn.interact(move |conn| diesel::delete(browser_sessions::table).execute(conn))
|
||||
.await
|
||||
.unwrap()?;
|
||||
query!("truncate browser_sessions")
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
tracing::info!("cleared session store");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,17 +14,13 @@ pub struct Settings {
|
|||
#[serde(default)]
|
||||
pub base_path: String,
|
||||
|
||||
/// postgresql:// URL.
|
||||
/// postgresql:// URL for Interim's application database.
|
||||
pub database_url: String,
|
||||
|
||||
/// Super-user role the server will use to create new user roles and manage
|
||||
/// database resources.
|
||||
pub pg_root_role: String,
|
||||
#[serde(default = "default_app_db_max_connections")]
|
||||
pub app_db_max_connections: u32,
|
||||
|
||||
#[serde(default = "default_pg_user_role_prefix")]
|
||||
pub pg_user_role_prefix: String,
|
||||
|
||||
/// When set to 1, embedded Diesel migrations will be run on startup.
|
||||
/// When set to 1, embedded SQLx migrations will be run on startup.
|
||||
pub run_database_migrations: Option<u8>,
|
||||
|
||||
/// Address for server to bind to
|
||||
|
|
@ -41,6 +37,10 @@ pub struct Settings {
|
|||
pub auth: AuthSettings,
|
||||
}
|
||||
|
||||
fn default_app_db_max_connections() -> u32 {
|
||||
16
|
||||
}
|
||||
|
||||
fn default_port() -> u16 {
|
||||
8080
|
||||
}
|
||||
|
|
@ -49,10 +49,6 @@ fn default_host() -> String {
|
|||
"127.0.0.1".to_owned()
|
||||
}
|
||||
|
||||
fn default_pg_user_role_prefix() -> String {
|
||||
"__interim_user__".to_owned()
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct AuthSettings {
|
||||
pub client_id: String,
|
||||
|
|
@ -67,7 +63,7 @@ pub struct AuthSettings {
|
|||
}
|
||||
|
||||
fn default_cookie_name() -> String {
|
||||
"INTERIM_SESSION".to_string()
|
||||
"ITM_SESSION".to_string()
|
||||
}
|
||||
|
||||
impl Settings {
|
||||
|
|
|
|||
94
src/users.rs
94
src/users.rs
|
|
@ -1,4 +1,3 @@
|
|||
use anyhow::Context;
|
||||
use async_session::{Session, SessionStore as _};
|
||||
use axum::{
|
||||
extract::{FromRequestParts, OriginalUri},
|
||||
|
|
@ -10,47 +9,23 @@ use axum_extra::extract::{
|
|||
cookie::{Cookie, SameSite},
|
||||
CookieJar,
|
||||
};
|
||||
use diesel::{
|
||||
associations::Identifiable,
|
||||
deserialize::Queryable,
|
||||
dsl::{auto_type, insert_into, AsSelect, Eq, Select},
|
||||
pg::Pg,
|
||||
prelude::*,
|
||||
Selectable,
|
||||
};
|
||||
use sqlx::query_as;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
app_error::AppError,
|
||||
app_state::AppState,
|
||||
auth::{AuthInfo, SESSION_KEY_AUTH_INFO, SESSION_KEY_AUTH_REDIRECT},
|
||||
schema::users,
|
||||
sessions::AppSession,
|
||||
};
|
||||
|
||||
#[allow(unused_imports)]
|
||||
pub use crate::schema::users::{dsl, table};
|
||||
|
||||
#[derive(Clone, Debug, Identifiable, Insertable, Queryable, Selectable)]
|
||||
#[diesel(table_name = users)]
|
||||
#[diesel(check_for_backend(Pg))]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct User {
|
||||
pub id: Uuid,
|
||||
pub uid: String,
|
||||
pub email: String,
|
||||
}
|
||||
|
||||
impl User {
|
||||
pub fn all() -> Select<users::table, AsSelect<User, Pg>> {
|
||||
users::table.select(User::as_select())
|
||||
}
|
||||
|
||||
#[auto_type(no_type_alias)]
|
||||
pub fn with_uid(uid_value: &str) -> _ {
|
||||
users::uid.eq(uid_value)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CurrentUser(pub User);
|
||||
|
||||
|
|
@ -104,46 +79,35 @@ where
|
|||
format!("{}/auth/login", app_state.settings.base_path),
|
||||
));
|
||||
};
|
||||
let db_conn = app_state.diesel_pool.get().await?;
|
||||
let current_user = db_conn
|
||||
.interact(move |conn| {
|
||||
let maybe_current_user = User::all()
|
||||
.filter(User::with_uid(&auth_info.sub))
|
||||
.first(conn)
|
||||
.optional()
|
||||
.context("failed to load maybe_current_user")?;
|
||||
if let Some(current_user) = maybe_current_user {
|
||||
return Ok(current_user);
|
||||
}
|
||||
let new_user = User {
|
||||
id: Uuid::now_v7(),
|
||||
uid: auth_info.sub.clone(),
|
||||
email: auth_info.email,
|
||||
};
|
||||
match insert_into(users::table)
|
||||
.values(new_user)
|
||||
.on_conflict(users::uid)
|
||||
.do_nothing()
|
||||
.returning(User::as_returning())
|
||||
.get_result(conn)
|
||||
let current_user = if let Some(value) =
|
||||
query_as!(User, "select * from users where uid = $1", &auth_info.sub)
|
||||
.fetch_optional(&app_state.app_db)
|
||||
.await?
|
||||
{
|
||||
QueryResult::Err(diesel::result::Error::NotFound) => {
|
||||
tracing::debug!("detected race to insert current user record");
|
||||
User::all()
|
||||
.filter(User::with_uid(&auth_info.sub))
|
||||
.first(conn)
|
||||
.context(
|
||||
"failed to load record after detecting race to insert current user",
|
||||
value
|
||||
} else if let Some(value) = query_as!(
|
||||
User,
|
||||
"
|
||||
insert into users
|
||||
(id, uid, email)
|
||||
values ($1, $2, $3)
|
||||
on conflict (uid) do nothing
|
||||
returning *
|
||||
",
|
||||
Uuid::now_v7(),
|
||||
&auth_info.sub,
|
||||
&auth_info.email
|
||||
)
|
||||
}
|
||||
QueryResult::Err(err) => {
|
||||
Err(err).context("failed to insert current user record")
|
||||
}
|
||||
QueryResult::Ok(result) => Ok(result),
|
||||
}
|
||||
})
|
||||
.await
|
||||
.unwrap()?;
|
||||
.fetch_optional(&app_state.app_db)
|
||||
.await?
|
||||
{
|
||||
value
|
||||
} else {
|
||||
tracing::debug!("detected race to insert current user record");
|
||||
query_as!(User, "select * from users where uid = $1", &auth_info.sub)
|
||||
.fetch_one(&app_state.app_db)
|
||||
.await?
|
||||
};
|
||||
Ok(CurrentUser(current_user))
|
||||
}
|
||||
}
|
||||
|
|
|
|||
32
templates/class-viewer.html
Normal file
32
templates/class-viewer.html
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
{% extends "base.html" %}
|
||||
|
||||
{% block main %}
|
||||
<script type="module" src="{{ base_path }}/js_dist/cells.mjs"></script>
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
{% for field in fields %}
|
||||
<th>
|
||||
<div>{{ field.options.label.clone().unwrap_or(field.name.clone()) }}</div>
|
||||
</th>
|
||||
{% endfor %}
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{% for row in rows %}
|
||||
<tr>
|
||||
{% for field in fields %}
|
||||
<td>
|
||||
{% match Value::get_from_row(row, field.name.as_str()) %}
|
||||
{% when Ok with (value) %}
|
||||
{{ value.to_html_string(&field.options) | safe }}
|
||||
{% when Err with (err) %}
|
||||
<span class="pg-value-error">{{ err }}</span>
|
||||
{% endmatch %}
|
||||
</td>
|
||||
{% endfor %}
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
{% endblock %}
|
||||
20
templates/list_bases.html
Normal file
20
templates/list_bases.html
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
{% extends "base.html" %}
|
||||
|
||||
{% block main %}
|
||||
<form method="post" action="{{ base_path }}/databases/add">
|
||||
<button type="submit">Add Database</button>
|
||||
</form>
|
||||
<table>
|
||||
<tbody>
|
||||
{% for base in bases %}
|
||||
<tr>
|
||||
<td>
|
||||
<a href="{{ base_path }}/d/{{ base.id.simple() }}/config">
|
||||
{{ base.name }}
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
{% endblock %}
|
||||
17
templates/list_rels.html
Normal file
17
templates/list_rels.html
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
{% extends "base.html" %}
|
||||
|
||||
{% block main %}
|
||||
<table>
|
||||
<tbody>
|
||||
{% for rel in rels %}
|
||||
<tr>
|
||||
<td>
|
||||
<a href="{{ base_path }}/d/{{ base.id.simple() }}/r/{{ rel.oid.0 }}">
|
||||
{{ rel.relname }}
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
{% endblock %}
|
||||
Loading…
Add table
Reference in a new issue