From 09f55dfe67692406715ffe9dfd96d92552045f43 Mon Sep 17 00:00:00 2001 From: jamey Date: Fri, 13 Mar 2026 13:33:29 +0000 Subject: [PATCH] add database backup and restore admin page - SQLCipher-encrypted backup creation via VACUUM INTO - Backup history with auto-pruning (keeps last 5) - Pre-restore automatic backup for safety - Restore from history or uploaded file - Stats display with table breakdown - Download hook for client-side file download - SECRET_KEY_DB config for encryption at rest Co-Authored-By: Claude Opus 4.5 --- .gitignore | 1 + assets/css/admin/components.css | 276 ++++++ assets/css/admin/icons.css | 96 +++ assets/js/app.js | 22 +- config/runtime.exs | 21 + docs/plans/database-encryption.md | 353 ++++++++ lib/berrypod/backup.ex | 814 ++++++++++++++++++ .../components/layouts/admin.html.heex | 8 + lib/berrypod_web/live/admin/backup.ex | 592 +++++++++++++ lib/berrypod_web/router.ex | 1 + 10 files changed, 2183 insertions(+), 1 deletion(-) create mode 100644 docs/plans/database-encryption.md create mode 100644 lib/berrypod/backup.ex create mode 100644 lib/berrypod_web/live/admin/backup.ex diff --git a/.gitignore b/.gitignore index 7c8bed2..7115729 100644 --- a/.gitignore +++ b/.gitignore @@ -72,6 +72,7 @@ package-lock.json # Environment variables (API tokens, secrets) .env +.envrc # API reference specs (development only) /docs/api-specs/ diff --git a/assets/css/admin/components.css b/assets/css/admin/components.css index c622057..6bfec2e 100644 --- a/assets/css/admin/components.css +++ b/assets/css/admin/components.css @@ -5158,6 +5158,17 @@ color: var(--t-status-error, oklch(0.6 0.2 25)); } +.admin-btn-danger { + background: oklch(0.55 0.2 25); + border-color: oklch(0.55 0.2 25); + color: white; + + &:hover:not(:disabled) { + background: oklch(0.5 0.22 25); + border-color: oklch(0.5 0.22 25); + } +} + /* ── Provider group headings ── */ .card-radio-group-heading { @@ -5840,4 +5851,269 @@ .sm\:scale-100 { scale: 1; } } +/* ── Backup page ── */ + 
+.admin-backup { + max-width: 48rem; +} + +.admin-link { + display: inline-flex; + align-items: center; + gap: 0.25rem; + font-size: 0.875rem; + color: var(--t-primary); + cursor: pointer; + background: none; + border: none; + padding: 0; + + &:hover { + text-decoration: underline; + } +} + +.admin-error { + color: var(--admin-error); + font-size: 0.875rem; + margin-bottom: 0.75rem; +} + +.admin-table-compact { + font-size: 0.8125rem; + + th, td { + padding: 0.5rem 0.75rem; + } +} + +.backup-tables { + margin-top: 1rem; + max-height: 20rem; + overflow-y: auto; + border: 1px solid var(--t-border-subtle); + border-radius: var(--radius-md); +} + +.backup-tables .admin-table { + margin: 0; +} + +.backup-actions { + display: flex; + gap: 0.5rem; + margin-top: 0.5rem; +} + +.backup-list { + display: flex; + flex-direction: column; + gap: 0.5rem; +} + +.backup-item { + display: flex; + justify-content: space-between; + align-items: center; + gap: 1rem; + padding: 0.625rem 0.875rem; + background: var(--t-surface-raised); + border-radius: var(--radius-md); +} + +.backup-item-info { + display: flex; + flex-direction: column; + gap: 0.125rem; + min-width: 0; +} + +.backup-item-date { + font-size: 0.875rem; + font-weight: 500; +} + +.backup-item-meta { + font-size: 0.75rem; + color: var(--admin-text-muted); +} + +.backup-item-actions { + display: flex; + align-items: center; + gap: 0.375rem; + flex-shrink: 0; +} + +.backup-item-confirm { + font-size: 0.8125rem; + color: var(--admin-text-muted); + margin-right: 0.25rem; +} + +.backup-progress { + display: flex; + align-items: center; + gap: 0.75rem; + padding: 1rem; + background: var(--t-surface-raised); + border-radius: var(--radius-md); +} + +.backup-progress-text { + font-weight: 500; +} + +.backup-progress-hint { + font-size: 0.8125rem; + color: var(--admin-text-muted); +} + +.backup-dropzone { + border: 2px dashed var(--t-border-subtle); + border-radius: var(--radius-md); + padding: 1.5rem; + text-align: center; + 
transition: border-color 0.15s, background-color 0.15s; + color: var(--admin-text-muted); + + &:hover, &.phx-drop-target { + border-color: var(--t-primary); + background: oklch(from var(--t-primary) l c h / 0.05); + } +} + +.backup-dropzone-content { + display: flex; + flex-direction: column; + align-items: center; + gap: 0.5rem; +} + +.backup-dropzone-link { + color: var(--t-primary); + cursor: pointer; + text-decoration: underline; +} + +.backup-upload-entry { + display: flex; + align-items: center; + gap: 1rem; + padding: 0.625rem 0.875rem; + background: var(--t-surface-raised); + border-radius: var(--radius-md); + margin-top: 0.75rem; + font-size: 0.875rem; + + progress { + flex: 1; + height: 0.375rem; + border-radius: 9999px; + overflow: hidden; + background: var(--t-surface-inset); + + &::-webkit-progress-bar { + background: var(--t-surface-inset); + } + + &::-webkit-progress-value { + background: var(--t-primary); + } + + &::-moz-progress-bar { + background: var(--t-primary); + } + } +} + +.backup-comparison { + margin-top: 0.5rem; +} + +.backup-comparison-grid { + display: grid; + grid-template-columns: 1fr auto 1fr; + gap: 0.75rem; + align-items: start; + margin-bottom: 1rem; +} + +.backup-comparison-col { + padding: 0.75rem 1rem; + background: var(--t-surface-raised); + border-radius: var(--radius-md); +} + +.backup-comparison-label { + font-size: 0.6875rem; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.05em; + color: var(--admin-text-muted); + margin-bottom: 0.5rem; +} + +.backup-comparison-arrow { + display: flex; + align-items: center; + justify-content: center; + padding-top: 1.5rem; + color: var(--admin-text-muted); +} + +.backup-comparison-stats { + display: flex; + flex-direction: column; + gap: 0.25rem; + font-size: 0.8125rem; + + > div { + display: flex; + justify-content: space-between; + gap: 1rem; + } + + dt { + color: var(--admin-text-muted); + } + + dd { + font-weight: 500; + font-variant-numeric: tabular-nums; + } +} 
+ +.backup-validation { + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0.5rem 0.75rem; + border-radius: var(--radius-md); + font-size: 0.8125rem; + margin-bottom: 0.75rem; +} + +.backup-validation-ok { + background: oklch(0.95 0.1 145); + color: oklch(0.35 0.15 145); +} + +.backup-validation-error { + background: oklch(0.95 0.05 25); + color: oklch(0.35 0.1 25); +} + +.backup-warning { + padding: 0.75rem 1rem; + background: oklch(0.96 0.03 60); + border-radius: var(--radius-md); + font-size: 0.875rem; + color: oklch(0.35 0.1 60); + + p { + margin-bottom: 0.75rem; + } +} + } /* @layer admin */ diff --git a/assets/css/admin/icons.css b/assets/css/admin/icons.css index 5a24850..1a72ed5 100644 --- a/assets/css/admin/icons.css +++ b/assets/css/admin/icons.css @@ -14,6 +14,18 @@ height: 1.5rem; } +.hero-arrow-down-tray-mini { + --hero-arrow-down-tray-mini: url('data:image/svg+xml;utf8,%20%20%20%20'); + -webkit-mask: var(--hero-arrow-down-tray-mini); + mask: var(--hero-arrow-down-tray-mini); + mask-repeat: no-repeat; + background-color: currentColor; + vertical-align: middle; + display: inline-block; + width: 1.25rem; + height: 1.25rem; +} + .hero-arrow-left { --hero-arrow-left: url('data:image/svg+xml;utf8,%20%20'); -webkit-mask: var(--hero-arrow-left); @@ -62,6 +74,18 @@ height: 1.25rem; } +.hero-arrow-right { + --hero-arrow-right: url('data:image/svg+xml;utf8,%20%20'); + -webkit-mask: var(--hero-arrow-right); + mask: var(--hero-arrow-right); + mask-repeat: no-repeat; + background-color: currentColor; + vertical-align: middle; + display: inline-block; + width: 1.5rem; + height: 1.5rem; +} + .hero-arrow-right-start-on-rectangle { --hero-arrow-right-start-on-rectangle: url('data:image/svg+xml;utf8,%20%20'); -webkit-mask: var(--hero-arrow-right-start-on-rectangle); @@ -110,6 +134,18 @@ height: 1.5rem; } +.hero-arrow-up-tray-mini { + --hero-arrow-up-tray-mini: url('data:image/svg+xml;utf8,%20%20%20%20'); + -webkit-mask: var(--hero-arrow-up-tray-mini); + 
mask: var(--hero-arrow-up-tray-mini); + mask-repeat: no-repeat; + background-color: currentColor; + vertical-align: middle; + display: inline-block; + width: 1.25rem; + height: 1.25rem; +} + .hero-arrow-uturn-left { --hero-arrow-uturn-left: url('data:image/svg+xml;utf8,%20%20'); -webkit-mask: var(--hero-arrow-uturn-left); @@ -386,6 +422,18 @@ height: 1.25rem; } +.hero-circle-stack { + --hero-circle-stack: url('data:image/svg+xml;utf8,%20%20'); + -webkit-mask: var(--hero-circle-stack); + mask: var(--hero-circle-stack); + mask-repeat: no-repeat; + background-color: currentColor; + vertical-align: middle; + display: inline-block; + width: 1.5rem; + height: 1.5rem; +} + .hero-clipboard { --hero-clipboard: url('data:image/svg+xml;utf8,%20%20'); -webkit-mask: var(--hero-clipboard); @@ -506,6 +554,18 @@ height: 1rem; } +.hero-credit-card { + --hero-credit-card: url('data:image/svg+xml;utf8,%20%20'); + -webkit-mask: var(--hero-credit-card); + mask: var(--hero-credit-card); + mask-repeat: no-repeat; + background-color: currentColor; + vertical-align: middle; + display: inline-block; + width: 1.5rem; + height: 1.5rem; +} + .hero-cube { --hero-cube: url('data:image/svg+xml;utf8,%20%20'); -webkit-mask: var(--hero-cube); @@ -602,6 +662,18 @@ height: 1.5rem; } +.hero-exclamation-circle-mini { + --hero-exclamation-circle-mini: url('data:image/svg+xml;utf8,%20%20'); + -webkit-mask: var(--hero-exclamation-circle-mini); + mask: var(--hero-exclamation-circle-mini); + mask-repeat: no-repeat; + background-color: currentColor; + vertical-align: middle; + display: inline-block; + width: 1.25rem; + height: 1.25rem; +} + .hero-exclamation-triangle { --hero-exclamation-triangle: url('data:image/svg+xml;utf8,%20%20'); -webkit-mask: var(--hero-exclamation-triangle); @@ -758,6 +830,30 @@ height: 1.5rem; } +.hero-lock-closed-mini { + --hero-lock-closed-mini: url('data:image/svg+xml;utf8,%20%20'); + -webkit-mask: var(--hero-lock-closed-mini); + mask: var(--hero-lock-closed-mini); + mask-repeat: 
no-repeat; + background-color: currentColor; + vertical-align: middle; + display: inline-block; + width: 1.25rem; + height: 1.25rem; +} + +.hero-lock-open-mini { + --hero-lock-open-mini: url('data:image/svg+xml;utf8,%20%20'); + -webkit-mask: var(--hero-lock-open-mini); + mask: var(--hero-lock-open-mini); + mask-repeat: no-repeat; + background-color: currentColor; + vertical-align: middle; + display: inline-block; + width: 1.25rem; + height: 1.25rem; +} + .hero-magnifying-glass { --hero-magnifying-glass: url('data:image/svg+xml;utf8,%20%20'); -webkit-mask: var(--hero-magnifying-glass); diff --git a/assets/js/app.js b/assets/js/app.js index 87b535b..97af535 100644 --- a/assets/js/app.js +++ b/assets/js/app.js @@ -932,10 +932,25 @@ const EditorKeyboard = { } } +// Hook to trigger file downloads from LiveView +const Download = { + mounted() { + this.handleEvent("download", ({filename, content, content_type}) => { + const blob = new Blob([Uint8Array.from(atob(content), c => c.charCodeAt(0))], {type: content_type}) + const url = URL.createObjectURL(blob) + const a = document.createElement("a") + a.href = url + a.download = filename + a.click() + URL.revokeObjectURL(url) + }) + } +} + const csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content") const liveSocket = new LiveSocket("/live", Socket, { params: {_csrf_token: csrfToken, screen_width: window.innerWidth}, - hooks: {...colocatedHooks, ColorSync, Lightbox, CartPersist, CartDrawer, ProductImageScroll, SearchModal, MobileNavDrawer, CollectionFilters, AnalyticsInit, AnalyticsExport, ChartTooltip, Clipboard, DirtyGuard, EditorKeyboard, EditorSheet}, + hooks: {...colocatedHooks, ColorSync, Lightbox, CartPersist, CartDrawer, ProductImageScroll, SearchModal, MobileNavDrawer, CollectionFilters, AnalyticsInit, AnalyticsExport, ChartTooltip, Clipboard, DirtyGuard, EditorKeyboard, EditorSheet, Download}, }) // Show progress bar on live navigation and form submits @@ -956,6 +971,11 @@ 
window.addEventListener("phx:scroll-top", () => { window.scrollTo({top: 0, behavior: 'instant'}) }) +// Scroll element into view (used by flash messages) +window.addEventListener("scroll-into-view", (e) => { + e.target.scrollIntoView({behavior: 'smooth', block: 'nearest'}) +}) + // connect if there are any LiveViews on the page liveSocket.connect() diff --git a/config/runtime.exs b/config/runtime.exs index f95a858..67bbb1d 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -20,6 +20,15 @@ if System.get_env("PHX_SERVER") do config :berrypod, BerrypodWeb.Endpoint, server: true end +# SQLCipher encryption key — optional in dev/test, required in prod. +# x'...' tells SQLCipher to use the hex directly as the AES key, +# skipping PBKDF2 derivation (256k iterations per connection). +if config_env() != :prod do + if db_key = System.get_env("SECRET_KEY_DB") do + config :berrypod, Berrypod.Repo, key: "\"x'#{db_key}'\"" + end +end + if config_env() == :prod do database_path = System.get_env("DATABASE_PATH") || @@ -28,8 +37,20 @@ if config_env() == :prod do For example: /data/berrypod.db """ + # Database encryption via SQLCipher (required in production) + db_key = + System.get_env("SECRET_KEY_DB") || + raise """ + environment variable SECRET_KEY_DB is missing. + This key encrypts the entire database at rest using SQLCipher. + You can generate one by calling: mix phx.gen.secret + """ + + # x'...' tells SQLCipher to use the hex directly as the AES key, + # skipping PBKDF2 derivation (256k iterations per connection). 
config :berrypod, Berrypod.Repo, database: database_path, + key: "\"x'#{db_key}'\"", pool_size: String.to_integer(System.get_env("POOL_SIZE") || "5"), journal_mode: :wal, busy_timeout: 15_000, diff --git a/docs/plans/database-encryption.md b/docs/plans/database-encryption.md new file mode 100644 index 0000000..272897c --- /dev/null +++ b/docs/plans/database-encryption.md @@ -0,0 +1,353 @@ +# Database encryption at rest + +> Status: Complete (awaiting production deployment) +> Tier: 2 (Security / Infrastructure) + +## Goal + +The entire Berrypod shop is a single encrypted SQLite file. Portable, private, encrypted. Copy the file, set your encryption key, and host anywhere. + +## Why + +1. **True encryption at rest** — not just sensitive fields, the entire database +2. **Safe backups** — can store backup files anywhere without additional encryption +3. **Simple migration** — copy file + set env var = working shop on new server +4. **Privacy by design** — even if someone gets the file, data is protected + +## Current state + +- Standard SQLite 3.51.1 (no encryption) +- Sensitive fields (API keys, TOTP secrets) encrypted with Cloak.Ecto using `SECRET_KEY_BASE` +- exqlite 0.34.0 compiled without SQLCipher + +## Target state + +- SQLCipher-encrypted database file +- Encryption key via `SECRET_KEY_DB` environment variable +- Existing Cloak encryption remains (defence in depth for secrets) +- Safe backup via `VACUUM INTO` works on encrypted database +- Admin backup page with database stats and restore + +--- + +## Security model + +Two independent secrets, defence in depth: + +| Secret | Purpose | Protects against | +|--------|---------|------------------| +| `SECRET_KEY_BASE` | Phoenix sessions, Cloak field encryption | SQL access without app secret | +| `SECRET_KEY_DB` | SQLCipher whole-database encryption | File access without DB key | + +Both are required for production. If one is compromised, the other layer still protects. 
+ +**SQLCipher spec:** +- AES-256 in CBC mode +- HMAC-SHA512 per page (tamper detection) +- PBKDF2 key derivation (256,000 iterations) +- Each page independently encrypted + +--- + +## Implementation + +### Phase 1: Install SQLCipher and recompile exqlite + +**Dev machine (Debian/Ubuntu):** +```bash +# Debian bookworm: use backports for SQLCipher 4.6.1 (stable has 3.4.1 which is too old) +sudo apt install -t bookworm-backports libsqlcipher-dev + +# Ubuntu 24.04+: standard repos have a recent enough version +sudo apt install libsqlcipher-dev +``` + +**Dev machine (macOS):** +```bash +brew install sqlcipher +``` + +**Set build environment and recompile:** +```bash +# Tell exqlite to use system SQLCipher instead of bundled SQLite +export EXQLITE_USE_SYSTEM=1 +export EXQLITE_SYSTEM_CFLAGS="-I/usr/include/sqlcipher" +export EXQLITE_SYSTEM_LDFLAGS="-lsqlcipher" + +# Force recompile +mix deps.clean exqlite --build +mix deps.compile exqlite +``` + +**Verify SQLCipher is active:** +```elixir +{:ok, conn} = Exqlite.Basic.open(":memory:") +{:ok, _q, result, _c} = Exqlite.Basic.exec(conn, "PRAGMA cipher_version;") +# Should return [["4.x.x"]] — if empty, SQLCipher not linked +``` + +### Phase 2: Configure encryption key + +**Generate keys:** +```bash +mix phx.gen.secret # → SECRET_KEY_BASE +mix phx.gen.secret # → SECRET_KEY_DB +``` + +**Configure exqlite (runtime.exs):** +```elixir +# config/runtime.exs + +# Database encryption (optional for dev, required for production) +db_key = System.get_env("SECRET_KEY_DB") + +config :berrypod, Berrypod.Repo, + database: database_path, + key: db_key # nil = unencrypted, string = SQLCipher encryption +``` + +The `:key` option is native to exqlite — it handles the `PRAGMA key` automatically on connection. + +**Dev mode:** No `SECRET_KEY_DB` set = unencrypted database (easier local development). + +**Production mode:** `SECRET_KEY_DB` required = encrypted database. 
+ +### Phase 3: Fresh database with encryption + +Since we're starting fresh (no migration needed): + +```bash +# Delete old unencrypted database +rm berrypod_dev.db berrypod_dev.db-shm berrypod_dev.db-wal + +# Start with encryption enabled +SECRET_KEY_DB="$(mix phx.gen.secret)" mix ecto.create +SECRET_KEY_DB="your-key" mix ecto.migrate +SECRET_KEY_DB="your-key" mix phx.server +``` + +### Phase 4: Fly.io deployment + +**Update Dockerfile:** +```dockerfile +# Install SQLCipher +RUN apt-get update -y && \ + apt-get install -y libsqlcipher-dev && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + +# Build exqlite with system SQLCipher +ENV EXQLITE_USE_SYSTEM=1 +ENV EXQLITE_SYSTEM_CFLAGS="-I/usr/include/sqlcipher" +ENV EXQLITE_SYSTEM_LDFLAGS="-lsqlcipher" +``` + +**Set the secret:** +```bash +fly secrets set SECRET_KEY_DB="$(mix phx.gen.secret)" +``` + +**Deploy:** +```bash +fly deploy +``` + +--- + +## Admin backup page + +Route: `/admin/backup` + +### Database stats display + +Show useful context before backup/restore: + +**Overview section:** +- Total database size (formatted: "12.3 MB") +- Encryption status (SQLCipher version or "Unencrypted") +- Database created date +- Last backup date (if tracked) + +**Table breakdown:** + +| Table | Rows | Size | +|-------|------|------| +| products | 16 | 45 KB | +| product_variants | 142 | 28 KB | +| product_images | 89 | 12 KB | +| orders | 23 | 18 KB | +| images | 156 | 8.2 MB | +| settings | 42 | 4 KB | +| ... 
| | | + +**Key counts:** +- Products: 16 +- Orders: 23 +- Media files: 156 +- Newsletter subscribers: 89 + +**Queries for stats:** +```sql +-- Total database size +SELECT page_count * page_size as size +FROM pragma_page_count(), pragma_page_size(); + +-- Row counts per table +SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'; +-- Then COUNT(*) each + +-- Table sizes (via dbstat virtual table) +SELECT name, SUM(pgsize) as size +FROM dbstat +GROUP BY name +ORDER BY size DESC; + +-- SQLCipher version +PRAGMA cipher_version; +``` + +### Download backup + +Use SQLite's `VACUUM INTO` for a safe, consistent backup: + +```elixir +def create_backup do + timestamp = DateTime.utc_now() |> Calendar.strftime("%Y%m%d-%H%M%S") + backup_path = Path.join(System.tmp_dir!(), "berrypod-backup-#{timestamp}.db") + + Ecto.Adapters.SQL.query!(Repo, "VACUUM INTO ?", [backup_path]) + + {:ok, backup_path} +end +``` + +The backup file is encrypted with the same key — portable to any server with that key. + +**UI:** +- "Download backup" button +- Shows estimated file size +- Filename: `berrypod-backup-YYYYMMDD-HHMMSS.db` + +### Restore backup + +1. Upload encrypted backup file +2. Validate it opens with the current key +3. Show comparison: current vs uploaded (row counts, size) +4. Confirm with explicit action ("Replace current database") +5. Stop accepting requests (maintenance mode) +6. Replace database file +7. 
Restart application + +**UI:** +- File upload dropzone +- Validation feedback (valid/invalid/wrong key) +- Side-by-side comparison before restore +- Confirmation modal with warnings + +--- + +## Task breakdown + +| # | Task | Est | Notes | +|---|------|-----|-------| +| 1 | ~~Install SQLCipher on dev machine~~ | ✓ | `apt install -t bookworm-backports libsqlcipher-dev` (4.6.1) | +| 2 | ~~Set build flags, recompile exqlite~~ | ✓ | Env vars, `mix deps.clean/compile` | +| 3 | ~~Verify SQLCipher with `PRAGMA cipher_version`~~ | ✓ | Returns "4.6.1 community" | +| 4 | ~~Add `:key` config to runtime.exs~~ | ✓ | Required in prod, optional in dev | +| 5 | ~~Test fresh encrypted database~~ | ✓ | Verified encryption works | +| 6 | ~~Update Dockerfile for Fly.io~~ | ✓ | Install package, set build flags | +| 7 | Deploy encrypted to Fly.io | 15m | Set secret, deploy, verify | +| 8 | ~~Database stats context module~~ | ✓ | `Berrypod.Backup` with sizes, counts, encryption status | +| 9 | ~~Admin backup page — stats display~~ | ✓ | `/admin/backup` LiveView | +| 10 | ~~Admin backup page — download~~ | ✓ | VACUUM INTO, JS download hook | +| 11 | ~~Admin backup page — restore upload~~ | ✓ | Upload, validation, comparison | +| 12 | ~~Admin backup page — restore action~~ | ✓ | Maintenance mode, swap, restart | +| 13 | ~~Update README with key management~~ | ✓ | Document backup procedures | + +**Total: ~8-9 hours** + +--- + +## Security notes + +- **Key length:** 256-bit minimum. `mix phx.gen.secret` produces 512-bit which is fine. +- **Key storage:** Environment variables only. Never commit to code. +- **Key rotation:** Requires re-encrypting entire database. Rare operation. +- **Lost key = lost data:** No recovery possible. Document key backup procedures clearly. +- **Defence in depth:** Keep Cloak encryption for API keys even with DB encryption. 
+ +--- + +## Dev workflow + +For convenience, add to `.envrc` (direnv) or shell profile: + +```bash +# Build flags (needed once per machine after installing SQLCipher) +export EXQLITE_USE_SYSTEM=1 +export EXQLITE_SYSTEM_CFLAGS="-I/usr/include/sqlcipher" +export EXQLITE_SYSTEM_LDFLAGS="-lsqlcipher" + +# Optional: dev database encryption (or omit for unencrypted dev) +# export SECRET_KEY_DB="dev-only-key-not-for-production" +``` + +### Encrypted dev database + +If you want to test encryption locally: + +```bash +export SECRET_KEY_DB="dev-test-key-12345" +mix ecto.reset # recreates with encryption +mix phx.server +``` + +### Unencrypted dev database + +For simpler local development, just don't set `SECRET_KEY_DB`. The database will be unencrypted but otherwise identical. + +--- + +## Compatibility + +- **Litestream:** Works with SQLCipher. Replicates encrypted bytes to S3. +- **sqlite3 CLI:** Use `sqlcipher` CLI to open encrypted databases. +- **DB Browser for SQLite:** Supports SQLCipher — enter key when opening. +- **Tests:** Run unencrypted (faster) unless specifically testing encryption. + +--- + +## Verification checklist + +After implementation, verify: + +```bash +# 1. SQLCipher is linked +mix run -e '{:ok, c} = Exqlite.Basic.open(":memory:"); {:ok, _, r, _} = Exqlite.Basic.exec(c, "PRAGMA cipher_version;"); IO.inspect(r.rows)' +# Should print [["4.x.x"]] + +# 2. Encrypted database is unreadable without key +file berrypod_prod.db +# Should show "data" not "SQLite 3.x database" + +# 3. Encrypted database opens with key +SECRET_KEY_DB="your-key" mix run -e 'Berrypod.Repo.query!("SELECT 1")' +# Should succeed + +# 4. 
Encrypted database fails without key +mix run -e 'Berrypod.Repo.query!("SELECT 1")' +# Should fail with "file is not a database" +``` + +--- + +## Files to modify + +| File | Change | +|------|--------| +| `config/runtime.exs` | Add `:key` option to Repo config | +| `Dockerfile` | Install SQLCipher, set build env vars | +| `fly.toml` | (no change, key via secrets) | +| `lib/berrypod/backup.ex` | New — backup/restore context | +| `lib/berrypod_web/live/admin/backup_live.ex` | New — backup admin page | +| `lib/berrypod_web/router.ex` | Add `/admin/backup` route | +| `README.md` | Document key management | diff --git a/lib/berrypod/backup.ex b/lib/berrypod/backup.ex new file mode 100644 index 0000000..37e9d22 --- /dev/null +++ b/lib/berrypod/backup.ex @@ -0,0 +1,814 @@ +defmodule Berrypod.Backup do + @moduledoc """ + Database backup and restore functionality. + + Provides database statistics, backup creation via VACUUM INTO, + backup history management, and restore operations for SQLCipher-encrypted databases. + + Backups are stored in the configured backup directory (default: priv/backups/). + Before any restore, an automatic backup of the current database is created. + Old backups are automatically pruned to keep the most recent N backups. + """ + + alias Berrypod.Repo + require Logger + + # Tables to show row counts for in the stats display + @key_tables ~w(products product_variants orders images pages newsletter_subscribers) + + # Critical tables that must exist for a valid Berrypod database + @required_tables ~w(users settings products orders pages images schema_migrations) + + # Maximum number of backups to keep (configurable via :berrypod, :backup, :max_backups) + @default_max_backups 5 + + @doc """ + Returns the directory where backups are stored. + Defaults to priv/backups/ but can be configured via :berrypod, :backup, :directory. 
+ """ + def backup_dir do + config = Application.get_env(:berrypod, :backup, []) + + dir = + Keyword.get_lazy(config, :directory, fn -> + Path.join(:code.priv_dir(:berrypod), "backups") + end) + + # Ensure directory exists + File.mkdir_p!(dir) + dir + end + + @doc """ + Returns the maximum number of backups to keep. + """ + def max_backups do + config = Application.get_env(:berrypod, :backup, []) + Keyword.get(config, :max_backups, @default_max_backups) + end + + @doc """ + Lists all available backups, sorted by date (newest first). + + Returns a list of maps with: + - filename: the backup filename + - path: full path to the backup file + - size: file size in bytes + - created_at: DateTime when the backup was created (parsed from filename) + - type: :manual or :pre_restore + """ + def list_backups do + dir = backup_dir() + + dir + |> File.ls!() + |> Enum.filter(&String.ends_with?(&1, ".db")) + |> Enum.map(fn filename -> + path = Path.join(dir, filename) + stat = File.stat!(path) + + %{ + filename: filename, + path: path, + size: stat.size, + created_at: parse_backup_timestamp(filename), + type: parse_backup_type(filename) + } + end) + |> Enum.sort_by(& &1.created_at, {:desc, DateTime}) + end + + defp parse_backup_timestamp(filename) do + # Expected format: berrypod-backup-YYYYMMDD-HHMMSS.db or pre-restore-YYYYMMDD-HHMMSS.db + case Regex.run(~r/(\d{8})-(\d{6})\.db$/, filename) do + [_, date, time] -> + <> = date + <> = time + + case NaiveDateTime.new( + String.to_integer(y), + String.to_integer(m), + String.to_integer(d), + String.to_integer(hh), + String.to_integer(mm), + String.to_integer(ss) + ) do + {:ok, naive} -> DateTime.from_naive!(naive, "Etc/UTC") + _ -> DateTime.utc_now() + end + + _ -> + DateTime.utc_now() + end + end + + defp parse_backup_type(filename) do + if String.starts_with?(filename, "pre-restore-") do + :pre_restore + else + :manual + end + end + + @doc """ + Returns comprehensive database statistics. 
+ + Includes: + - Total database size + - Encryption status (SQLCipher version or nil) + - Per-table row counts and sizes + - Key entity counts + """ + def get_stats do + %{ + total_size: get_total_size(), + encryption_status: get_encryption_status(), + tables: get_table_stats(), + key_counts: get_key_counts(), + schema_version: get_current_schema_version() + } + end + + @doc """ + Returns the total database file size in bytes. + """ + def get_total_size do + case Repo.query( + "SELECT page_count * page_size as size FROM pragma_page_count(), pragma_page_size()" + ) do + {:ok, %{rows: [[size]]}} -> size + _ -> 0 + end + end + + @doc """ + Returns the SQLCipher version if encryption is enabled, nil otherwise. + """ + def get_encryption_status do + case Repo.query("PRAGMA cipher_version") do + {:ok, %{rows: [[version]]}} when is_binary(version) and version != "" -> version + _ -> nil + end + end + + @doc """ + Returns a list of tables with their row counts and sizes. + """ + def get_table_stats do + # Get all user tables (exclude sqlite internals and FTS shadow tables) + case Repo.query(""" + SELECT name FROM sqlite_master + WHERE type='table' + AND name NOT LIKE 'sqlite_%' + AND name NOT LIKE '%_content' + AND name NOT LIKE '%_data' + AND name NOT LIKE '%_idx' + AND name NOT LIKE '%_docsize' + AND name NOT LIKE '%_config' + ORDER BY name + """) do + {:ok, %{rows: tables}} -> + process_table_stats(tables) + + {:error, _} -> + [] + end + end + + defp process_table_stats(tables) do + + table_names = Enum.map(tables, fn [name] -> name end) + + # Get sizes via dbstat if available + sizes = get_table_sizes() + + # Get row counts + Enum.map(table_names, fn name -> + count = get_row_count(name) + size = Map.get(sizes, name, 0) + + %{ + name: name, + rows: count, + size: size + } + end) + |> Enum.sort_by(& &1.size, :desc) + end + + defp get_table_sizes do + # Try dbstat first (most accurate, but requires ENABLE_DBSTAT_VTAB compile flag) + case Repo.query(""" + SELECT name, 
SUM(pgsize) as size + FROM dbstat + GROUP BY name + """) do + {:ok, %{rows: rows}} -> + Map.new(rows, fn [name, size] -> {name, size || 0} end) + + _ -> + # Fallback: estimate sizes by summing column data lengths + # This gives a reasonable approximation for display purposes + estimate_table_sizes() + end + end + + defp estimate_table_sizes do + # Get all user tables + case Repo.query(""" + SELECT name FROM sqlite_master + WHERE type='table' + AND name NOT LIKE 'sqlite_%' + AND name NOT LIKE '%_content' + AND name NOT LIKE '%_data' + AND name NOT LIKE '%_idx' + AND name NOT LIKE '%_docsize' + AND name NOT LIKE '%_config' + """) do + {:ok, %{rows: tables}} -> + tables + |> Enum.map(fn [name] -> {name, estimate_table_size(name)} end) + |> Enum.into(%{}) + + _ -> + %{} + end + end + + defp estimate_table_size(table_name) do + # Get column names for this table + case Repo.query("PRAGMA table_info(\"#{table_name}\")") do + {:ok, %{rows: columns}} when columns != [] -> + column_names = Enum.map(columns, fn [_cid, name | _] -> name end) + + # Build a query that sums the length of all columns + # Using COALESCE and length() for text, or 8 bytes for numeric types + length_exprs = + column_names + |> Enum.map(fn col -> + "COALESCE(LENGTH(CAST(\"#{col}\" AS BLOB)), 0)" + end) + |> Enum.join(" + ") + + query = "SELECT SUM(#{length_exprs}) FROM \"#{table_name}\"" + + case Repo.query(query) do + {:ok, %{rows: [[size]]}} when is_integer(size) -> size + {:ok, %{rows: [[size]]}} when is_float(size) -> round(size) + _ -> 0 + end + + _ -> + 0 + end + end + + defp get_row_count(table_name) do + # Safe since table_name comes from sqlite_master + case Repo.query("SELECT COUNT(*) FROM \"#{table_name}\"") do + {:ok, %{rows: [[count]]}} -> count + _ -> 0 + end + end + + @doc """ + Returns counts for key entities (products, orders, etc). 
+ """ + def get_key_counts do + @key_tables + |> Enum.map(fn table -> + {table, get_row_count(table)} + end) + |> Enum.into(%{}) + end + + @doc """ + Creates a backup of the database using VACUUM INTO. + + Returns `{:ok, backup_path}` on success. + The backup is encrypted with the same key as the source database. + """ + def create_backup(opts \\ []) do + prefix = Keyword.get(opts, :prefix, "berrypod-backup") + save_to_history = Keyword.get(opts, :save_to_history, true) + + timestamp = DateTime.utc_now() |> Calendar.strftime("%Y%m%d-%H%M%S") + filename = "#{prefix}-#{timestamp}.db" + + backup_path = + if save_to_history do + Path.join(backup_dir(), filename) + else + Path.join(System.tmp_dir!(), filename) + end + + case Repo.query("VACUUM INTO ?", [backup_path]) do + {:ok, _} -> + if save_to_history, do: prune_old_backups() + {:ok, backup_path} + + {:error, error} -> + {:error, error} + end + end + + @doc """ + Creates a pre-restore backup of the current database. + This is automatically called before any restore operation. + """ + def create_pre_restore_backup do + create_backup(prefix: "pre-restore", save_to_history: true) + end + + @doc """ + Deletes old backups, keeping only the most recent N backups. + """ + def prune_old_backups do + max = max_backups() + backups = list_backups() + + if length(backups) > max do + backups + |> Enum.drop(max) + |> Enum.each(fn backup -> + Logger.info("[Backup] Pruning old backup: #{backup.filename}") + File.rm(backup.path) + end) + end + + :ok + end + + @doc """ + Deletes a specific backup by filename. + """ + def delete_backup(filename) do + path = Path.join(backup_dir(), filename) + + if File.exists?(path) and String.ends_with?(filename, ".db") do + File.rm(path) + else + {:error, :not_found} + end + end + + @doc """ + Validates that a backup file can be opened and is a valid Berrypod database. + + Performs comprehensive checks: + 1. File can be opened with current encryption key + 2. 
Contains required tables (users, settings, products, etc.) + 3. Has schema_migrations table with valid versions + 4. Integrity check passes + + Returns: + - `{:ok, validation_result}` with detailed stats and validation info + - `{:error, reason}` with specific error details + """ + def validate_backup(path) do + config = Application.get_env(:berrypod, Berrypod.Repo) + key = Keyword.get(config, :key) + + case Exqlite.Sqlite3.open(path, mode: :readonly) do + {:ok, conn} -> + result = validate_backup_connection(conn, key) + Exqlite.Sqlite3.close(conn) + result + + {:error, reason} -> + {:error, {:open_failed, reason}} + end + end + + defp validate_backup_connection(conn, key) do + # Set the encryption key if we have one + if key do + case Exqlite.Sqlite3.execute(conn, "PRAGMA key = #{key}") do + :ok -> :ok + {:error, reason} -> throw({:error, {:key_failed, reason}}) + end + end + + # Try to read from the database (will fail if wrong key) + case Exqlite.Sqlite3.execute(conn, "SELECT COUNT(*) FROM sqlite_master") do + :ok -> :ok + {:error, "file is not a database"} -> throw({:error, :invalid_key}) + {:error, reason} -> throw({:error, {:read_failed, reason}}) + end + + # Run integrity check + case run_integrity_check(conn) do + :ok -> :ok + {:error, reason} -> throw({:error, {:integrity_failed, reason}}) + end + + # Check required tables exist + case check_required_tables(conn) do + :ok -> :ok + {:error, missing} -> throw({:error, {:missing_tables, missing}}) + end + + # Check schema migrations + case check_schema_migrations(conn) do + {:ok, migration_info} -> migration_info + {:error, reason} -> throw({:error, {:migrations_failed, reason}}) + end + + # Get comprehensive stats + stats = get_backup_stats(conn) + {:ok, stats} + catch + {:error, reason} -> {:error, reason} + end + + defp run_integrity_check(conn) do + {:ok, stmt} = Exqlite.Sqlite3.prepare(conn, "PRAGMA integrity_check(1)") + + case Exqlite.Sqlite3.step(conn, stmt) do + {:row, ["ok"]} -> + 
Exqlite.Sqlite3.release(conn, stmt) + :ok + + {:row, [error]} -> + Exqlite.Sqlite3.release(conn, stmt) + {:error, error} + + {:error, reason} -> + Exqlite.Sqlite3.release(conn, stmt) + {:error, reason} + end + end + + defp check_required_tables(conn) do + {:ok, stmt} = + Exqlite.Sqlite3.prepare(conn, "SELECT name FROM sqlite_master WHERE type='table'") + + tables = collect_rows(conn, stmt, []) + table_names = Enum.map(tables, fn [name] -> name end) + + missing = @required_tables -- table_names + + if Enum.empty?(missing) do + :ok + else + {:error, missing} + end + end + + defp check_schema_migrations(conn) do + {:ok, stmt} = + Exqlite.Sqlite3.prepare( + conn, + "SELECT version FROM schema_migrations ORDER BY version DESC LIMIT 1" + ) + + case Exqlite.Sqlite3.step(conn, stmt) do + {:row, [latest_version]} -> + Exqlite.Sqlite3.release(conn, stmt) + {:ok, %{latest_migration: latest_version}} + + :done -> + Exqlite.Sqlite3.release(conn, stmt) + {:error, "no migrations found"} + + {:error, reason} -> + Exqlite.Sqlite3.release(conn, stmt) + {:error, reason} + end + end + + defp collect_rows(conn, stmt, acc) do + case Exqlite.Sqlite3.step(conn, stmt) do + {:row, row} -> collect_rows(conn, stmt, [row | acc]) + :done -> Exqlite.Sqlite3.release(conn, stmt) && Enum.reverse(acc) + end + end + + defp get_backup_stats(conn) do + # Get table count (excluding sqlite internals and FTS shadow tables, same as get_table_stats) + {:ok, stmt} = + Exqlite.Sqlite3.prepare(conn, """ + SELECT COUNT(*) FROM sqlite_master + WHERE type='table' + AND name NOT LIKE 'sqlite_%' + AND name NOT LIKE '%_content' + AND name NOT LIKE '%_data' + AND name NOT LIKE '%_idx' + AND name NOT LIKE '%_docsize' + AND name NOT LIKE '%_config' + """) + + {:row, [table_count]} = Exqlite.Sqlite3.step(conn, stmt) + Exqlite.Sqlite3.release(conn, stmt) + + # Get page count for size estimate + {:ok, stmt} = + Exqlite.Sqlite3.prepare( + conn, + "SELECT page_count * page_size FROM pragma_page_count(), pragma_page_size()" 
+ ) + + {:row, [size]} = Exqlite.Sqlite3.step(conn, stmt) + Exqlite.Sqlite3.release(conn, stmt) + + # Get latest migration version + {:ok, stmt} = + Exqlite.Sqlite3.prepare( + conn, + "SELECT version FROM schema_migrations ORDER BY version DESC LIMIT 1" + ) + + latest_migration = + case Exqlite.Sqlite3.step(conn, stmt) do + {:row, [version]} -> version + _ -> nil + end + + Exqlite.Sqlite3.release(conn, stmt) + + # Get key entity counts + key_counts = get_backup_key_counts(conn) + + %{ + table_count: table_count, + size: size, + latest_migration: latest_migration, + key_counts: key_counts + } + end + + defp get_backup_key_counts(conn) do + @key_tables + |> Enum.map(fn table -> + count = + case Exqlite.Sqlite3.prepare(conn, "SELECT COUNT(*) FROM \"#{table}\"") do + {:ok, stmt} -> + result = + case Exqlite.Sqlite3.step(conn, stmt) do + {:row, [count]} -> count + _ -> 0 + end + + Exqlite.Sqlite3.release(conn, stmt) + result + + _ -> + 0 + end + + {table, count} + end) + |> Enum.into(%{}) + end + + @doc """ + Restores a database from a backup file. + + This performs a full file-based restore by: + + 1. Validating the backup (including schema version match) + 2. Stopping Oban completely + 3. Checkpointing WAL and draining all database connections + 4. Stopping the Repo + 5. Replacing the database file + 6. Restarting the Repo + 7. Restarting Oban + 8. Clearing and warming caches + + Returns `:ok` on success, `{:error, reason}` on failure. 
+ """ + def restore_backup(backup_path) do + config = Application.get_env(:berrypod, Berrypod.Repo) + db_path = Keyword.fetch!(config, :database) + + Logger.info("[Backup] Starting database restore from #{backup_path}") + + with :ok <- validate_backup_before_restore(backup_path), + {:ok, pre_restore_path} <- create_pre_restore_backup(), + :ok <- broadcast_maintenance_mode(:entering), + :ok <- stop_oban(), + :ok <- drain_and_stop_repo(), + :ok <- swap_database_file(backup_path, db_path), + :ok <- start_repo(), + :ok <- start_oban(), + :ok <- clear_ets_caches(), + :ok <- warm_caches() do + broadcast_maintenance_mode(:exited) + Logger.info("[Backup] Database restore completed successfully") + Logger.info("[Backup] Pre-restore backup saved: #{pre_restore_path}") + :ok + else + {:error, reason} -> + Logger.error("[Backup] Restore failed: #{inspect(reason)}") + # Try to recover + start_repo() + start_oban() + broadcast_maintenance_mode(:exited) + {:error, reason} + end + end + + defp stop_oban do + # Terminate Oban child from the application supervisor + # This stops all Oban processes including plugins and reporters + try do + Supervisor.terminate_child(Berrypod.Supervisor, Oban) + catch + _, _ -> :ok + end + + # Give processes time to fully terminate + Process.sleep(500) + :ok + end + + defp start_oban do + # Restart Oban child in the application supervisor + try do + Supervisor.restart_child(Berrypod.Supervisor, Oban) + catch + _, _ -> :ok + end + + # Wait for Oban to be ready + Process.sleep(500) + :ok + end + + defp drain_and_stop_repo do + # First checkpoint WAL while we still have connections + try do + Repo.query!("PRAGMA wal_checkpoint(TRUNCATE)") + rescue + _ -> :ok + end + + # Disconnect all connections - this forces them to close gracefully + Ecto.Adapters.SQL.disconnect_all(Repo, 0) + + # Give connections time to release their file handles + Process.sleep(500) + + # Use GenServer.stop which properly shuts down the pool + repo_pid = Process.whereis(Repo) + + 
if repo_pid do + ref = Process.monitor(repo_pid) + + # GenServer.stop sends a :stop call which is handled gracefully + try do + GenServer.stop(repo_pid, :normal, 10_000) + catch + :exit, _ -> :ok + end + + # Wait for the process to actually terminate + receive do + {:DOWN, ^ref, :process, ^repo_pid, _reason} -> :ok + after + 5000 -> :ok + end + end + + # Wait for file handles to be fully released by the OS + Process.sleep(500) + :ok + end + + defp start_repo do + # The supervisor will restart the Repo automatically since we stopped it + # Wait for it to come back up + wait_for_repo(100) + end + + defp wait_for_repo(0), do: {:error, :repo_start_timeout} + + defp wait_for_repo(attempts) do + case Process.whereis(Repo) do + nil -> + Process.sleep(100) + wait_for_repo(attempts - 1) + + _pid -> + # Give it a moment to fully initialize the connection pool + Process.sleep(200) + + # Verify we can actually query + try do + case Repo.query("SELECT 1") do + {:ok, _} -> :ok + {:error, _} -> + Process.sleep(100) + wait_for_repo(attempts - 1) + end + catch + _, _ -> + Process.sleep(100) + wait_for_repo(attempts - 1) + end + end + end + + defp swap_database_file(backup_path, db_path) do + # Remove WAL and SHM files if they exist (they're part of the old database state) + File.rm("#{db_path}-wal") + File.rm("#{db_path}-shm") + + # Replace the database file + case File.cp(backup_path, db_path) do + :ok -> + File.rm(backup_path) + :ok + + {:error, reason} -> + {:error, {:file_copy_failed, reason}} + end + end + + defp validate_backup_before_restore(path) do + case validate_backup(path) do + {:ok, backup_stats} -> + # Check schema versions match + current_version = get_current_schema_version() + + if backup_stats.latest_migration == current_version do + :ok + else + {:error, {:schema_mismatch, backup_stats.latest_migration, current_version}} + end + + {:error, reason} -> + {:error, {:validation_failed, reason}} + end + end + + defp get_current_schema_version do + case 
Repo.query("SELECT version FROM schema_migrations ORDER BY version DESC LIMIT 1") do + {:ok, %{rows: [[version]]}} -> version + _ -> nil + end + end + + defp broadcast_maintenance_mode(status) do + # Broadcast to all connected LiveViews that maintenance is happening + Phoenix.PubSub.broadcast(Berrypod.PubSub, "maintenance", {:maintenance, status}) + :ok + end + + + defp clear_ets_caches do + # Clear known ETS caches to ensure they get rebuilt from the new database + caches = [ + Berrypod.Theme.CSSCache, + Berrypod.Pages.PageCache, + Berrypod.Redirects.Cache + ] + + for cache <- caches do + try do + :ets.delete_all_objects(cache) + rescue + ArgumentError -> :ok + end + end + + :ok + end + + defp warm_caches do + # Warm up caches after restore + try do + Berrypod.Pages.PageCache.warm() + rescue + _ -> :ok + end + + try do + Berrypod.Redirects.warm_cache() + rescue + _ -> :ok + end + + try do + Berrypod.Theme.CSSCache.warm() + rescue + _ -> :ok + end + + :ok + end + + @doc """ + Formats a byte size into a human-readable string. + """ + def format_size(bytes) when is_integer(bytes) do + cond do + bytes >= 1_073_741_824 -> "#{Float.round(bytes / 1_073_741_824, 1)} GB" + bytes >= 1_048_576 -> "#{Float.round(bytes / 1_048_576, 1)} MB" + bytes >= 1024 -> "#{Float.round(bytes / 1024, 1)} KB" + true -> "#{bytes} B" + end + end + + def format_size(_), do: "0 B" +end diff --git a/lib/berrypod_web/components/layouts/admin.html.heex b/lib/berrypod_web/components/layouts/admin.html.heex index c340895..1fec834 100644 --- a/lib/berrypod_web/components/layouts/admin.html.heex +++ b/lib/berrypod_web/components/layouts/admin.html.heex @@ -177,6 +177,14 @@ <.icon name="hero-arrow-uturn-right" class="size-5" /> Redirects +
  • + <.link + navigate={~p"/admin/backup"} + class={admin_nav_active?(@current_path, "/admin/backup")} + > + <.icon name="hero-circle-stack" class="size-5" /> Backup + +
  • diff --git a/lib/berrypod_web/live/admin/backup.ex b/lib/berrypod_web/live/admin/backup.ex new file mode 100644 index 0000000..c84d071 --- /dev/null +++ b/lib/berrypod_web/live/admin/backup.ex @@ -0,0 +1,592 @@ +defmodule BerrypodWeb.Admin.Backup do + use BerrypodWeb, :live_view + + alias Berrypod.Backup + + @impl true + def mount(_params, _session, socket) do + stats = Backup.get_stats() + backups = Backup.list_backups() + + {:ok, + socket + |> assign(:page_title, "Backup") + |> assign(:stats, stats) + |> assign(:backups, backups) + |> assign(:create_backup_status, :idle) + |> assign(:uploaded_backup, nil) + |> assign(:upload_error, nil) + |> assign(:confirming_restore, false) + |> assign(:restoring, false) + |> assign(:confirming_history_restore, nil) + |> assign(:confirming_delete, nil) + |> assign(:show_tables, false) + |> allow_upload(:backup, + accept: :any, + max_entries: 1, + max_file_size: 500_000_000 + )} + end + + @impl true + def handle_event("refresh_stats", _params, socket) do + stats = Backup.get_stats() + backups = Backup.list_backups() + {:noreply, socket |> assign(:stats, stats) |> assign(:backups, backups)} + end + + def handle_event("toggle_tables", _params, socket) do + {:noreply, assign(socket, :show_tables, !socket.assigns.show_tables)} + end + + def handle_event("create_backup", _params, socket) do + case Backup.create_backup() do + {:ok, _path} -> + {:noreply, + socket + |> assign(:backups, Backup.list_backups()) + |> assign(:create_backup_status, :saved)} + + {:error, error} -> + {:noreply, + socket + |> assign(:create_backup_status, :error) + |> put_flash(:error, "Failed to create backup: #{inspect(error)}")} + end + end + + def handle_event("download_history_backup", %{"filename" => filename}, socket) do + path = Path.join(Backup.backup_dir(), filename) + + if File.exists?(path) do + data = File.read!(path) + + {:noreply, + socket + |> push_event("download", %{ + filename: filename, + content: Base.encode64(data), + content_type: 
"application/octet-stream" + })} + else + {:noreply, put_flash(socket, :error, "Backup file not found")} + end + end + + + def handle_event("validate_upload", _params, socket) do + {:noreply, socket} + end + + def handle_event("upload_backup", _params, socket) do + [result] = + consume_uploaded_entries(socket, :backup, fn %{path: path}, _entry -> + # Copy to temp location since consume deletes the original + temp_path = Path.join(System.tmp_dir!(), "berrypod-restore-#{System.unique_integer()}.db") + File.cp!(path, temp_path) + + case Backup.validate_backup(temp_path) do + {:ok, backup_stats} -> + # Use actual file size instead of internal page calculation + file_size = File.stat!(temp_path).size + {:ok, {:ok, temp_path, Map.put(backup_stats, :file_size, file_size)}} + + {:error, reason} -> + File.rm(temp_path) + {:ok, {:error, reason}} + end + end) + + case result do + {:ok, path, backup_stats} -> + {:noreply, + socket + |> assign(:uploaded_backup, %{path: path, stats: backup_stats}) + |> assign(:upload_error, nil)} + + {:error, :invalid_key} -> + {:noreply, + assign( + socket, + :upload_error, + "Wrong encryption key — this backup was created with a different key" + )} + + {:error, reason} -> + {:noreply, assign(socket, :upload_error, "Invalid backup file: #{inspect(reason)}")} + end + end + + def handle_event("cancel_restore", _params, socket) do + # Clean up temp file + if socket.assigns.uploaded_backup do + File.rm(socket.assigns.uploaded_backup.path) + end + + {:noreply, + socket + |> assign(:uploaded_backup, nil) + |> assign(:confirming_restore, false)} + end + + def handle_event("confirm_restore", _params, socket) do + {:noreply, assign(socket, :confirming_restore, true)} + end + + def handle_event("execute_restore", _params, socket) do + # Show loading state immediately, then do the restore async + send(self(), :do_restore) + {:noreply, assign(socket, :restoring, true)} + end + + # Backup history actions + def handle_event("confirm_history_restore", 
%{"filename" => filename}, socket) do + {:noreply, assign(socket, :confirming_history_restore, filename)} + end + + def handle_event("cancel_history_restore", _params, socket) do + {:noreply, assign(socket, :confirming_history_restore, nil)} + end + + def handle_event("execute_history_restore", %{"filename" => filename}, socket) do + send(self(), {:do_history_restore, filename}) + {:noreply, socket |> assign(:restoring, true) |> assign(:confirming_history_restore, nil)} + end + + def handle_event("confirm_delete", %{"filename" => filename}, socket) do + {:noreply, assign(socket, :confirming_delete, filename)} + end + + def handle_event("cancel_delete", _params, socket) do + {:noreply, assign(socket, :confirming_delete, nil)} + end + + def handle_event("execute_delete", %{"filename" => filename}, socket) do + case Backup.delete_backup(filename) do + :ok -> + {:noreply, + socket + |> assign(:confirming_delete, nil) + |> assign(:backups, Backup.list_backups()) + |> put_flash(:info, "Backup deleted")} + + {:error, _} -> + {:noreply, + socket + |> assign(:confirming_delete, nil) + |> put_flash(:error, "Failed to delete backup")} + end + end + + @impl true + def handle_info(:do_restore, socket) do + backup_path = socket.assigns.uploaded_backup.path + + case Backup.restore_backup(backup_path) do + :ok -> + {:noreply, + socket + |> assign(:uploaded_backup, nil) + |> assign(:confirming_restore, false) + |> assign(:restoring, false) + |> assign(:stats, Backup.get_stats()) + |> assign(:backups, Backup.list_backups()) + |> put_flash(:info, "Database restored successfully")} + + {:error, {:schema_mismatch, backup_version, current_version}} -> + {:noreply, + socket + |> assign(:confirming_restore, false) + |> assign(:restoring, false) + |> put_flash( + :error, + "Schema version mismatch: backup is #{backup_version}, current is #{current_version}. " <> + "Backups can only be restored to a database with the same schema version." 
+         )}

+      {:error, reason} ->
+        {:noreply,
+         socket
+         |> assign(:confirming_restore, false)
+         |> assign(:restoring, false)
+         |> put_flash(:error, "Restore failed: #{inspect(reason)}")}
+    end
+  end
+
+  # Restores the database from a previously saved backup in the history
+  # directory. Mirrors :do_restore, but resolves the file by name inside
+  # backup_dir/0 and reports the filename in the success flash.
+  def handle_info({:do_history_restore, filename}, socket) do
+    path = Path.join(Backup.backup_dir(), filename)
+
+    case Backup.restore_backup(path) do
+      :ok ->
+        {:noreply,
+         socket
+         |> assign(:restoring, false)
+         |> assign(:stats, Backup.get_stats())
+         |> assign(:backups, Backup.list_backups())
+         # Fix: message previously contained the garbled literal "#(unknown)"
+         # instead of interpolating the restored backup's filename.
+         |> put_flash(:info, "Database restored from #{filename}")}
+
+      {:error, {:schema_mismatch, backup_version, current_version}} ->
+        {:noreply,
+         socket
+         |> assign(:restoring, false)
+         |> put_flash(
+           :error,
+           "Schema version mismatch: backup is #{backup_version}, current is #{current_version}."
+         )}
+
+      {:error, reason} ->
+        {:noreply,
+         socket
+         |> assign(:restoring, false)
+         |> put_flash(:error, "Restore failed: #{inspect(reason)}")}
+    end
+  end
+
+  @impl true
+  def render(assigns) do
+    ~H"""
    + <.header> + Backup + + + <%!-- Database status --%> +
    +
    +

    Database

    + <%= if @stats.encryption_status do %> + <.status_pill color="green"> + <.icon name="hero-lock-closed-mini" class="size-3" /> Encrypted + + <% else %> + <.status_pill color="amber"> + <.icon name="hero-lock-open-mini" class="size-3" /> Not encrypted + + <% end %> +
    +

    + {Backup.format_size(@stats.total_size)} total · + {length(@stats.tables)} tables · + {@stats.key_counts["products"] || 0} products · + {@stats.key_counts["orders"] || 0} orders · + {@stats.key_counts["images"] || 0} images +

    +
    + +
    + + <%= if @show_tables do %> +
    + + + + + + + + + + + + + + + +
    TableRowsSize
    {table.name}{table.rows}{Backup.format_size(table.size)}
    +
    + <% end %> +
    + + <%!-- Create backup --%> +
    +
    +

    Create backup

    + <.status_pill color="zinc">{length(@backups)} saved +
    +

    + Creates an encrypted snapshot of your database. Backups are stored locally and the last 5 are kept automatically. +

    + +
    +
    + + <.inline_feedback status={@create_backup_status} /> +
    +
    +
    + + <%!-- Backup history --%> + <%= if @backups != [] do %> +
    +

    Saved backups

    + + <%= if @restoring do %> +
    + <.icon name="hero-arrow-path" class="size-5 animate-spin" /> +
    +

    Restoring database...

    +

    This may take a few seconds.

    +
    +
    + <% else %> +
    + <%= for backup <- @backups do %> +
    +
    + {format_backup_date(backup.created_at)} + + {Backup.format_size(backup.size)} + <%= if backup.type == :pre_restore do %> + · auto-saved before restore + <% end %> + +
    + +
    + <%= if @confirming_history_restore == backup.filename do %> + Replace current database? + + + <% else %> + <%= if @confirming_delete == backup.filename do %> + Delete this backup? + + + <% else %> + + + + <% end %> + <% end %> +
    +
    + <% end %> +
    + <% end %> +
    + <% end %> + + <%!-- Restore from file --%> +
    +

    Restore from file

    +

    + Upload a backup file to restore. Must be encrypted with the same key as this database. +

    + + <%= if @upload_error do %> +

    {@upload_error}

    + <% end %> + + <%= if @uploaded_backup do %> +
    +
    +
    +

    Current

    +
    +
    Size
    {Backup.format_size(@stats.total_size)}
    +
    Products
    {@stats.key_counts["products"] || 0}
    +
    Orders
    {@stats.key_counts["orders"] || 0}
    +
    Images
    {@stats.key_counts["images"] || 0}
    +
    +
    +
    + <.icon name="hero-arrow-right" class="size-5" /> +
    +
    +

    Uploaded

    +
    +
    Size
    {Backup.format_size(@uploaded_backup.stats.file_size)}
    +
    Products
    {@uploaded_backup.stats.key_counts["products"] || 0}
    +
    Orders
    {@uploaded_backup.stats.key_counts["orders"] || 0}
    +
    Images
    {@uploaded_backup.stats.key_counts["images"] || 0}
    +
    +
    +
    + + <%= if @uploaded_backup.stats.latest_migration == @stats.schema_version do %> +
    + <.icon name="hero-check-circle-mini" class="size-4" /> + Backup validated · Schema version {@uploaded_backup.stats.latest_migration} +
    + + <%= if @restoring do %> +
    + <.icon name="hero-arrow-path" class="size-5 animate-spin" /> +
    +

    Restoring database...

    +

    This may take a few seconds.

    +
    +
    + <% else %> + <%= if @confirming_restore do %> +
    +

    This will replace your current database. A backup will be saved automatically.

    +
    + + +
    +
    + <% else %> +
    + + +
    + <% end %> + <% end %> + <% else %> +
    + <.icon name="hero-x-circle-mini" class="size-4" /> + + Schema mismatch: backup is v{@uploaded_backup.stats.latest_migration}, + current is v{@stats.schema_version} + +
    +
    + +
    + <% end %> +
    + <% else %> +
    +
    + <.live_file_input upload={@uploads.backup} class="sr-only" /> +
    + <.icon name="hero-arrow-up-tray" class="size-6" /> +

    + Drop a backup file here or + +

    +
    +
    + + <%= for entry <- @uploads.backup.entries do %> +
    + {entry.client_name} + {Backup.format_size(entry.client_size)} + {entry.progress}% +
    + + <%= for err <- upload_errors(@uploads.backup, entry) do %> +

    {upload_error_to_string(err)}

    + <% end %> + <% end %> + + <%= if length(@uploads.backup.entries) > 0 do %> +
    + +
    + <% end %> +
    + <% end %> +
    +
    + """ + end + + defp format_backup_date(nil), do: "unknown date" + + defp format_backup_date(datetime) do + Calendar.strftime(datetime, "%d %b %Y, %H:%M") + end + + defp upload_error_to_string(:too_large), do: "File is too large (max 500 MB)" + defp upload_error_to_string(:too_many_files), do: "Only one file allowed" + defp upload_error_to_string(err), do: "Upload error: #{inspect(err)}" + + attr :color, :string, default: "zinc" + slot :inner_block, required: true + + defp status_pill(assigns) do + modifier = + case assigns.color do + "green" -> "admin-status-pill-green" + "amber" -> "admin-status-pill-amber" + _ -> "admin-status-pill-zinc" + end + + assigns = assign(assigns, :modifier, modifier) + + ~H""" + + {render_slot(@inner_block)} + + """ + end +end diff --git a/lib/berrypod_web/router.ex b/lib/berrypod_web/router.ex index 34099be..501c621 100644 --- a/lib/berrypod_web/router.ex +++ b/lib/berrypod_web/router.ex @@ -170,6 +170,7 @@ defmodule BerrypodWeb.Router do live "/providers/:id/edit", Admin.Providers.Form, :edit live "/settings", Admin.Settings, :index live "/settings/email", Admin.EmailSettings, :index + live "/backup", Admin.Backup, :index live "/account", Admin.Account, :index live "/pages", Admin.Pages.Index, :index live "/pages/new", Admin.Pages.CustomForm, :new