add database backup and restore admin page
Some checks failed
deploy / deploy (push) Has been cancelled
Some checks failed
deploy / deploy (push) Has been cancelled
- SQLCipher-encrypted backup creation via VACUUM INTO - Backup history with auto-pruning (keeps last 5) - Pre-restore automatic backup for safety - Restore from history or uploaded file - Stats display with table breakdown - Download hook for client-side file download - SECRET_KEY_DB config for encryption at rest Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
parent
b0f8eea2bc
commit
09f55dfe67
1
.gitignore
vendored
1
.gitignore
vendored
@ -72,6 +72,7 @@ package-lock.json
|
||||
|
||||
# Environment variables (API tokens, secrets)
|
||||
.env
|
||||
.envrc
|
||||
|
||||
# API reference specs (development only)
|
||||
/docs/api-specs/
|
||||
|
||||
@ -5158,6 +5158,17 @@
|
||||
color: var(--t-status-error, oklch(0.6 0.2 25));
|
||||
}
|
||||
|
||||
.admin-btn-danger {
|
||||
background: oklch(0.55 0.2 25);
|
||||
border-color: oklch(0.55 0.2 25);
|
||||
color: white;
|
||||
|
||||
&:hover:not(:disabled) {
|
||||
background: oklch(0.5 0.22 25);
|
||||
border-color: oklch(0.5 0.22 25);
|
||||
}
|
||||
}
|
||||
|
||||
/* ── Provider group headings ── */
|
||||
|
||||
.card-radio-group-heading {
|
||||
@ -5840,4 +5851,269 @@
|
||||
.sm\:scale-100 { scale: 1; }
|
||||
}
|
||||
|
||||
/* ── Backup page ── */
|
||||
|
||||
.admin-backup {
|
||||
max-width: 48rem;
|
||||
}
|
||||
|
||||
.admin-link {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.25rem;
|
||||
font-size: 0.875rem;
|
||||
color: var(--t-primary);
|
||||
cursor: pointer;
|
||||
background: none;
|
||||
border: none;
|
||||
padding: 0;
|
||||
|
||||
&:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
}
|
||||
|
||||
.admin-error {
|
||||
color: var(--admin-error);
|
||||
font-size: 0.875rem;
|
||||
margin-bottom: 0.75rem;
|
||||
}
|
||||
|
||||
.admin-table-compact {
|
||||
font-size: 0.8125rem;
|
||||
|
||||
th, td {
|
||||
padding: 0.5rem 0.75rem;
|
||||
}
|
||||
}
|
||||
|
||||
.backup-tables {
|
||||
margin-top: 1rem;
|
||||
max-height: 20rem;
|
||||
overflow-y: auto;
|
||||
border: 1px solid var(--t-border-subtle);
|
||||
border-radius: var(--radius-md);
|
||||
}
|
||||
|
||||
.backup-tables .admin-table {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
.backup-actions {
|
||||
display: flex;
|
||||
gap: 0.5rem;
|
||||
margin-top: 0.5rem;
|
||||
}
|
||||
|
||||
.backup-list {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.5rem;
|
||||
}
|
||||
|
||||
.backup-item {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
gap: 1rem;
|
||||
padding: 0.625rem 0.875rem;
|
||||
background: var(--t-surface-raised);
|
||||
border-radius: var(--radius-md);
|
||||
}
|
||||
|
||||
.backup-item-info {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.125rem;
|
||||
min-width: 0;
|
||||
}
|
||||
|
||||
.backup-item-date {
|
||||
font-size: 0.875rem;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.backup-item-meta {
|
||||
font-size: 0.75rem;
|
||||
color: var(--admin-text-muted);
|
||||
}
|
||||
|
||||
.backup-item-actions {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.375rem;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.backup-item-confirm {
|
||||
font-size: 0.8125rem;
|
||||
color: var(--admin-text-muted);
|
||||
margin-right: 0.25rem;
|
||||
}
|
||||
|
||||
.backup-progress {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.75rem;
|
||||
padding: 1rem;
|
||||
background: var(--t-surface-raised);
|
||||
border-radius: var(--radius-md);
|
||||
}
|
||||
|
||||
.backup-progress-text {
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.backup-progress-hint {
|
||||
font-size: 0.8125rem;
|
||||
color: var(--admin-text-muted);
|
||||
}
|
||||
|
||||
.backup-dropzone {
|
||||
border: 2px dashed var(--t-border-subtle);
|
||||
border-radius: var(--radius-md);
|
||||
padding: 1.5rem;
|
||||
text-align: center;
|
||||
transition: border-color 0.15s, background-color 0.15s;
|
||||
color: var(--admin-text-muted);
|
||||
|
||||
&:hover, &.phx-drop-target {
|
||||
border-color: var(--t-primary);
|
||||
background: oklch(from var(--t-primary) l c h / 0.05);
|
||||
}
|
||||
}
|
||||
|
||||
.backup-dropzone-content {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
}
|
||||
|
||||
.backup-dropzone-link {
|
||||
color: var(--t-primary);
|
||||
cursor: pointer;
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
.backup-upload-entry {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1rem;
|
||||
padding: 0.625rem 0.875rem;
|
||||
background: var(--t-surface-raised);
|
||||
border-radius: var(--radius-md);
|
||||
margin-top: 0.75rem;
|
||||
font-size: 0.875rem;
|
||||
|
||||
progress {
|
||||
flex: 1;
|
||||
height: 0.375rem;
|
||||
border-radius: 9999px;
|
||||
overflow: hidden;
|
||||
background: var(--t-surface-inset);
|
||||
|
||||
&::-webkit-progress-bar {
|
||||
background: var(--t-surface-inset);
|
||||
}
|
||||
|
||||
&::-webkit-progress-value {
|
||||
background: var(--t-primary);
|
||||
}
|
||||
|
||||
&::-moz-progress-bar {
|
||||
background: var(--t-primary);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.backup-comparison {
|
||||
margin-top: 0.5rem;
|
||||
}
|
||||
|
||||
.backup-comparison-grid {
|
||||
display: grid;
|
||||
grid-template-columns: 1fr auto 1fr;
|
||||
gap: 0.75rem;
|
||||
align-items: start;
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
.backup-comparison-col {
|
||||
padding: 0.75rem 1rem;
|
||||
background: var(--t-surface-raised);
|
||||
border-radius: var(--radius-md);
|
||||
}
|
||||
|
||||
.backup-comparison-label {
|
||||
font-size: 0.6875rem;
|
||||
font-weight: 600;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.05em;
|
||||
color: var(--admin-text-muted);
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.backup-comparison-arrow {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
padding-top: 1.5rem;
|
||||
color: var(--admin-text-muted);
|
||||
}
|
||||
|
||||
.backup-comparison-stats {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.25rem;
|
||||
font-size: 0.8125rem;
|
||||
|
||||
> div {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
gap: 1rem;
|
||||
}
|
||||
|
||||
dt {
|
||||
color: var(--admin-text-muted);
|
||||
}
|
||||
|
||||
dd {
|
||||
font-weight: 500;
|
||||
font-variant-numeric: tabular-nums;
|
||||
}
|
||||
}
|
||||
|
||||
.backup-validation {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
padding: 0.5rem 0.75rem;
|
||||
border-radius: var(--radius-md);
|
||||
font-size: 0.8125rem;
|
||||
margin-bottom: 0.75rem;
|
||||
}
|
||||
|
||||
.backup-validation-ok {
|
||||
background: oklch(0.95 0.1 145);
|
||||
color: oklch(0.35 0.15 145);
|
||||
}
|
||||
|
||||
.backup-validation-error {
|
||||
background: oklch(0.95 0.05 25);
|
||||
color: oklch(0.35 0.1 25);
|
||||
}
|
||||
|
||||
.backup-warning {
|
||||
padding: 0.75rem 1rem;
|
||||
background: oklch(0.96 0.03 60);
|
||||
border-radius: var(--radius-md);
|
||||
font-size: 0.875rem;
|
||||
color: oklch(0.35 0.1 60);
|
||||
|
||||
p {
|
||||
margin-bottom: 0.75rem;
|
||||
}
|
||||
}
|
||||
|
||||
} /* @layer admin */
|
||||
|
||||
@ -14,6 +14,18 @@
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.hero-arrow-down-tray-mini {
|
||||
--hero-arrow-down-tray-mini: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20viewBox="0%200%2020%2020"%20fill="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20d="M10.75%202.75a.75.75%200%200%200-1.5%200v8.614L6.295%208.235a.75.75%200%201%200-1.09%201.03l4.25%204.5a.75.75%200%200%200%201.09%200l4.25-4.5a.75.75%200%200%200-1.09-1.03l-2.955%203.129V2.75Z"/>%20%20<path%20d="M3.5%2012.75a.75.75%200%200%200-1.5%200v2.5A2.75%202.75%200%200%200%204.75%2018h10.5A2.75%202.75%200%200%200%2018%2015.25v-2.5a.75.75%200%200%200-1.5%200v2.5c0%20.69-.56%201.25-1.25%201.25H4.75c-.69%200-1.25-.56-1.25-1.25v-2.5Z"/></svg>');
|
||||
-webkit-mask: var(--hero-arrow-down-tray-mini);
|
||||
mask: var(--hero-arrow-down-tray-mini);
|
||||
mask-repeat: no-repeat;
|
||||
background-color: currentColor;
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
width: 1.25rem;
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.hero-arrow-left {
|
||||
--hero-arrow-left: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="M10.5%2019.5%203%2012m0%200%207.5-7.5M3%2012h18"/></svg>');
|
||||
-webkit-mask: var(--hero-arrow-left);
|
||||
@ -62,6 +74,18 @@
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.hero-arrow-right {
|
||||
--hero-arrow-right: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="M13.5%204.5%2021%2012m0%200-7.5%207.5M21%2012H3"/></svg>');
|
||||
-webkit-mask: var(--hero-arrow-right);
|
||||
mask: var(--hero-arrow-right);
|
||||
mask-repeat: no-repeat;
|
||||
background-color: currentColor;
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
width: 1.5rem;
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.hero-arrow-right-start-on-rectangle {
|
||||
--hero-arrow-right-start-on-rectangle: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="M15.75%209V5.25A2.25%202.25%200%200%200%2013.5%203h-6a2.25%202.25%200%200%200-2.25%202.25v13.5A2.25%202.25%200%200%200%207.5%2021h6a2.25%202.25%200%200%200%202.25-2.25V15m3%200%203-3m0%200-3-3m3%203H9"/></svg>');
|
||||
-webkit-mask: var(--hero-arrow-right-start-on-rectangle);
|
||||
@ -110,6 +134,18 @@
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.hero-arrow-up-tray-mini {
|
||||
--hero-arrow-up-tray-mini: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20viewBox="0%200%2020%2020"%20fill="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20d="M9.25%2013.25a.75.75%200%200%200%201.5%200V4.636l2.955%203.129a.75.75%200%200%200%201.09-1.03l-4.25-4.5a.75.75%200%200%200-1.09%200l-4.25%204.5a.75.75%200%201%200%201.09%201.03L9.25%204.636v8.614Z"/>%20%20<path%20d="M3.5%2012.75a.75.75%200%200%200-1.5%200v2.5A2.75%202.75%200%200%200%204.75%2018h10.5A2.75%202.75%200%200%200%2018%2015.25v-2.5a.75.75%200%200%200-1.5%200v2.5c0%20.69-.56%201.25-1.25%201.25H4.75c-.69%200-1.25-.56-1.25-1.25v-2.5Z"/></svg>');
|
||||
-webkit-mask: var(--hero-arrow-up-tray-mini);
|
||||
mask: var(--hero-arrow-up-tray-mini);
|
||||
mask-repeat: no-repeat;
|
||||
background-color: currentColor;
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
width: 1.25rem;
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.hero-arrow-uturn-left {
|
||||
--hero-arrow-uturn-left: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="M9%2015%203%209m0%200%206-6M3%209h12a6%206%200%200%201%200%2012h-3"/></svg>');
|
||||
-webkit-mask: var(--hero-arrow-uturn-left);
|
||||
@ -386,6 +422,18 @@
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.hero-circle-stack {
|
||||
--hero-circle-stack: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="M20.25%206.375c0%202.278-3.694%204.125-8.25%204.125S3.75%208.653%203.75%206.375m16.5%200c0-2.278-3.694-4.125-8.25-4.125S3.75%204.097%203.75%206.375m16.5%200v11.25c0%202.278-3.694%204.125-8.25%204.125s-8.25-1.847-8.25-4.125V6.375m16.5%200v3.75m-16.5-3.75v3.75m16.5%200v3.75C20.25%2016.153%2016.556%2018%2012%2018s-8.25-1.847-8.25-4.125v-3.75m16.5%200c0%202.278-3.694%204.125-8.25%204.125s-8.25-1.847-8.25-4.125"/></svg>');
|
||||
-webkit-mask: var(--hero-circle-stack);
|
||||
mask: var(--hero-circle-stack);
|
||||
mask-repeat: no-repeat;
|
||||
background-color: currentColor;
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
width: 1.5rem;
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.hero-clipboard {
|
||||
--hero-clipboard: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="M15.666%203.888A2.25%202.25%200%200%200%2013.5%202.25h-3c-1.03%200-1.9.693-2.166%201.638m7.332%200c.055.194.084.4.084.612v0a.75.75%200%200%201-.75.75H9a.75.75%200%200%201-.75-.75v0c0-.212.03-.418.084-.612m7.332%200c.646.049%201.288.11%201.927.184%201.1.128%201.907%201.077%201.907%202.185V19.5a2.25%202.25%200%200%201-2.25%202.25H6.75A2.25%202.25%200%200%201%204.5%2019.5V6.257c0-1.108.806-2.057%201.907-2.185a48.208%2048.208%200%200%201%201.927-.184"/></svg>');
|
||||
-webkit-mask: var(--hero-clipboard);
|
||||
@ -506,6 +554,18 @@
|
||||
height: 1rem;
|
||||
}
|
||||
|
||||
.hero-credit-card {
|
||||
--hero-credit-card: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="M2.25%208.25h19.5M2.25%209h19.5m-16.5%205.25h6m-6%202.25h3m-3.75%203h15a2.25%202.25%200%200%200%202.25-2.25V6.75A2.25%202.25%200%200%200%2019.5%204.5h-15a2.25%202.25%200%200%200-2.25%202.25v10.5A2.25%202.25%200%200%200%204.5%2019.5Z"/></svg>');
|
||||
-webkit-mask: var(--hero-credit-card);
|
||||
mask: var(--hero-credit-card);
|
||||
mask-repeat: no-repeat;
|
||||
background-color: currentColor;
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
width: 1.5rem;
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.hero-cube {
|
||||
--hero-cube: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="m21%207.5-9-5.25L3%207.5m18%200-9%205.25m9-5.25v9l-9%205.25M3%207.5l9%205.25M3%207.5v9l9%205.25m0-9v9"/></svg>');
|
||||
-webkit-mask: var(--hero-cube);
|
||||
@ -602,6 +662,18 @@
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.hero-exclamation-circle-mini {
|
||||
--hero-exclamation-circle-mini: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20viewBox="0%200%2020%2020"%20fill="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20fill-rule="evenodd"%20d="M18%2010a8%208%200%201%201-16%200%208%208%200%200%201%2016%200Zm-8-5a.75.75%200%200%201%20.75.75v4.5a.75.75%200%200%201-1.5%200v-4.5A.75.75%200%200%201%2010%205Zm0%2010a1%201%200%201%200%200-2%201%201%200%200%200%200%202Z"%20clip-rule="evenodd"/></svg>');
|
||||
-webkit-mask: var(--hero-exclamation-circle-mini);
|
||||
mask: var(--hero-exclamation-circle-mini);
|
||||
mask-repeat: no-repeat;
|
||||
background-color: currentColor;
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
width: 1.25rem;
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.hero-exclamation-triangle {
|
||||
--hero-exclamation-triangle: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="M12%209v3.75m-9.303%203.376c-.866%201.5.217%203.374%201.948%203.374h14.71c1.73%200%202.813-1.874%201.948-3.374L13.949%203.378c-.866-1.5-3.032-1.5-3.898%200L2.697%2016.126ZM12%2015.75h.007v.008H12v-.008Z"/></svg>');
|
||||
-webkit-mask: var(--hero-exclamation-triangle);
|
||||
@ -758,6 +830,30 @@
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.hero-lock-closed-mini {
|
||||
--hero-lock-closed-mini: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20viewBox="0%200%2020%2020"%20fill="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20fill-rule="evenodd"%20d="M10%201a4.5%204.5%200%200%200-4.5%204.5V9H5a2%202%200%200%200-2%202v6a2%202%200%200%200%202%202h10a2%202%200%200%200%202-2v-6a2%202%200%200%200-2-2h-.5V5.5A4.5%204.5%200%200%200%2010%201Zm3%208V5.5a3%203%200%201%200-6%200V9h6Z"%20clip-rule="evenodd"/></svg>');
|
||||
-webkit-mask: var(--hero-lock-closed-mini);
|
||||
mask: var(--hero-lock-closed-mini);
|
||||
mask-repeat: no-repeat;
|
||||
background-color: currentColor;
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
width: 1.25rem;
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.hero-lock-open-mini {
|
||||
--hero-lock-open-mini: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20viewBox="0%200%2020%2020"%20fill="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20fill-rule="evenodd"%20d="M14.5%201A4.5%204.5%200%200%200%2010%205.5V9H3a2%202%200%200%200-2%202v6a2%202%200%200%200%202%202h10a2%202%200%200%200%202-2v-6a2%202%200%200%200-2-2h-1.5V5.5a3%203%200%201%201%206%200v2.75a.75.75%200%200%200%201.5%200V5.5A4.5%204.5%200%200%200%2014.5%201Z"%20clip-rule="evenodd"/></svg>');
|
||||
-webkit-mask: var(--hero-lock-open-mini);
|
||||
mask: var(--hero-lock-open-mini);
|
||||
mask-repeat: no-repeat;
|
||||
background-color: currentColor;
|
||||
vertical-align: middle;
|
||||
display: inline-block;
|
||||
width: 1.25rem;
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.hero-magnifying-glass {
|
||||
--hero-magnifying-glass: url('data:image/svg+xml;utf8,<svg%20xmlns="http://www.w3.org/2000/svg"%20fill="none"%20viewBox="0%200%2024%2024"%20stroke-width="1.5"%20stroke="currentColor"%20aria-hidden="true"%20data-slot="icon">%20%20<path%20stroke-linecap="round"%20stroke-linejoin="round"%20d="m21%2021-5.197-5.197m0%200A7.5%207.5%200%201%200%205.196%205.196a7.5%207.5%200%200%200%2010.607%2010.607Z"/></svg>');
|
||||
-webkit-mask: var(--hero-magnifying-glass);
|
||||
|
||||
@ -932,10 +932,25 @@ const EditorKeyboard = {
|
||||
}
|
||||
}
|
||||
|
||||
// Hook to trigger file downloads from LiveView
|
||||
const Download = {
|
||||
mounted() {
|
||||
this.handleEvent("download", ({filename, content, content_type}) => {
|
||||
const blob = new Blob([Uint8Array.from(atob(content), c => c.charCodeAt(0))], {type: content_type})
|
||||
const url = URL.createObjectURL(blob)
|
||||
const a = document.createElement("a")
|
||||
a.href = url
|
||||
a.download = filename
|
||||
a.click()
|
||||
URL.revokeObjectURL(url)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content")
|
||||
const liveSocket = new LiveSocket("/live", Socket, {
|
||||
params: {_csrf_token: csrfToken, screen_width: window.innerWidth},
|
||||
hooks: {...colocatedHooks, ColorSync, Lightbox, CartPersist, CartDrawer, ProductImageScroll, SearchModal, MobileNavDrawer, CollectionFilters, AnalyticsInit, AnalyticsExport, ChartTooltip, Clipboard, DirtyGuard, EditorKeyboard, EditorSheet},
|
||||
hooks: {...colocatedHooks, ColorSync, Lightbox, CartPersist, CartDrawer, ProductImageScroll, SearchModal, MobileNavDrawer, CollectionFilters, AnalyticsInit, AnalyticsExport, ChartTooltip, Clipboard, DirtyGuard, EditorKeyboard, EditorSheet, Download},
|
||||
})
|
||||
|
||||
// Show progress bar on live navigation and form submits
|
||||
@ -956,6 +971,11 @@ window.addEventListener("phx:scroll-top", () => {
|
||||
window.scrollTo({top: 0, behavior: 'instant'})
|
||||
})
|
||||
|
||||
// Scroll element into view (used by flash messages)
|
||||
window.addEventListener("scroll-into-view", (e) => {
|
||||
e.target.scrollIntoView({behavior: 'smooth', block: 'nearest'})
|
||||
})
|
||||
|
||||
// connect if there are any LiveViews on the page
|
||||
liveSocket.connect()
|
||||
|
||||
|
||||
@ -20,6 +20,15 @@ if System.get_env("PHX_SERVER") do
|
||||
config :berrypod, BerrypodWeb.Endpoint, server: true
|
||||
end
|
||||
|
||||
# SQLCipher encryption key — optional in dev/test, required in prod.
|
||||
# x'...' tells SQLCipher to use the hex directly as the AES key,
|
||||
# skipping PBKDF2 derivation (256k iterations per connection).
|
||||
if config_env() != :prod do
|
||||
if db_key = System.get_env("SECRET_KEY_DB") do
|
||||
config :berrypod, Berrypod.Repo, key: "\"x'#{db_key}'\""
|
||||
end
|
||||
end
|
||||
|
||||
if config_env() == :prod do
|
||||
database_path =
|
||||
System.get_env("DATABASE_PATH") ||
|
||||
@ -28,8 +37,20 @@ if config_env() == :prod do
|
||||
For example: /data/berrypod.db
|
||||
"""
|
||||
|
||||
# Database encryption via SQLCipher (required in production)
|
||||
db_key =
|
||||
System.get_env("SECRET_KEY_DB") ||
|
||||
raise """
|
||||
environment variable SECRET_KEY_DB is missing.
|
||||
This key encrypts the entire database at rest using SQLCipher.
|
||||
You can generate one by calling: mix phx.gen.secret
|
||||
"""
|
||||
|
||||
# x'...' tells SQLCipher to use the hex directly as the AES key,
|
||||
# skipping PBKDF2 derivation (256k iterations per connection).
|
||||
config :berrypod, Berrypod.Repo,
|
||||
database: database_path,
|
||||
key: "\"x'#{db_key}'\"",
|
||||
pool_size: String.to_integer(System.get_env("POOL_SIZE") || "5"),
|
||||
journal_mode: :wal,
|
||||
busy_timeout: 15_000,
|
||||
|
||||
353
docs/plans/database-encryption.md
Normal file
353
docs/plans/database-encryption.md
Normal file
@ -0,0 +1,353 @@
|
||||
# Database encryption at rest
|
||||
|
||||
> Status: Complete (awaiting production deployment)
|
||||
> Tier: 2 (Security / Infrastructure)
|
||||
|
||||
## Goal
|
||||
|
||||
The entire Berrypod shop is a single encrypted SQLite file. Portable, private, encrypted. Copy the file, set your encryption key, and host anywhere.
|
||||
|
||||
## Why
|
||||
|
||||
1. **True encryption at rest** — not just sensitive fields, the entire database
|
||||
2. **Safe backups** — can store backup files anywhere without additional encryption
|
||||
3. **Simple migration** — copy file + set env var = working shop on new server
|
||||
4. **Privacy by design** — even if someone gets the file, data is protected
|
||||
|
||||
## Current state
|
||||
|
||||
- Standard SQLite 3.51.1 (no encryption)
|
||||
- Sensitive fields (API keys, TOTP secrets) encrypted with Cloak.Ecto using `SECRET_KEY_BASE`
|
||||
- exqlite 0.34.0 compiled without SQLCipher
|
||||
|
||||
## Target state
|
||||
|
||||
- SQLCipher-encrypted database file
|
||||
- Encryption key via `SECRET_KEY_DB` environment variable
|
||||
- Existing Cloak encryption remains (defence in depth for secrets)
|
||||
- Safe backup via `VACUUM INTO` works on encrypted database
|
||||
- Admin backup page with database stats and restore
|
||||
|
||||
---
|
||||
|
||||
## Security model
|
||||
|
||||
Two independent secrets, defence in depth:
|
||||
|
||||
| Secret | Purpose | Protects against |
|
||||
|--------|---------|------------------|
|
||||
| `SECRET_KEY_BASE` | Phoenix sessions, Cloak field encryption | SQL access without app secret |
|
||||
| `SECRET_KEY_DB` | SQLCipher whole-database encryption | File access without DB key |
|
||||
|
||||
Both are required for production. If one is compromised, the other layer still protects.
|
||||
|
||||
**SQLCipher spec:**
|
||||
- AES-256 in CBC mode
|
||||
- HMAC-SHA512 per page (tamper detection)
|
||||
- PBKDF2 key derivation (256,000 iterations)
|
||||
- Each page independently encrypted
|
||||
|
||||
---
|
||||
|
||||
## Implementation
|
||||
|
||||
### Phase 1: Install SQLCipher and recompile exqlite
|
||||
|
||||
**Dev machine (Debian/Ubuntu):**
|
||||
```bash
|
||||
# Debian bookworm: use backports for SQLCipher 4.6.1 (stable has 3.4.1 which is too old)
|
||||
sudo apt install -t bookworm-backports libsqlcipher-dev
|
||||
|
||||
# Ubuntu 24.04+: standard repos have a recent enough version
|
||||
sudo apt install libsqlcipher-dev
|
||||
```
|
||||
|
||||
**Dev machine (macOS):**
|
||||
```bash
|
||||
brew install sqlcipher
|
||||
```
|
||||
|
||||
**Set build environment and recompile:**
|
||||
```bash
|
||||
# Tell exqlite to use system SQLCipher instead of bundled SQLite
|
||||
export EXQLITE_USE_SYSTEM=1
|
||||
export EXQLITE_SYSTEM_CFLAGS="-I/usr/include/sqlcipher"
|
||||
export EXQLITE_SYSTEM_LDFLAGS="-lsqlcipher"
|
||||
|
||||
# Force recompile
|
||||
mix deps.clean exqlite --build
|
||||
mix deps.compile exqlite
|
||||
```
|
||||
|
||||
**Verify SQLCipher is active:**
|
||||
```elixir
|
||||
{:ok, conn} = Exqlite.Basic.open(":memory:")
|
||||
{:ok, _q, result, _c} = Exqlite.Basic.exec(conn, "PRAGMA cipher_version;")
|
||||
# Should return [["4.x.x"]] — if empty, SQLCipher not linked
|
||||
```
|
||||
|
||||
### Phase 2: Configure encryption key
|
||||
|
||||
**Generate keys:**
|
||||
```bash
|
||||
mix phx.gen.secret # → SECRET_KEY_BASE
|
||||
mix phx.gen.secret # → SECRET_KEY_DB
|
||||
```
|
||||
|
||||
**Configure exqlite (runtime.exs):**
|
||||
```elixir
|
||||
# config/runtime.exs
|
||||
|
||||
# Database encryption (optional for dev, required for production)
|
||||
db_key = System.get_env("SECRET_KEY_DB")
|
||||
|
||||
config :berrypod, Berrypod.Repo,
|
||||
database: database_path,
|
||||
key: db_key # nil = unencrypted, string = SQLCipher encryption
|
||||
```
|
||||
|
||||
The `:key` option is native to exqlite — it handles the `PRAGMA key` automatically on connection.
|
||||
|
||||
**Dev mode:** No `SECRET_KEY_DB` set = unencrypted database (easier local development).
|
||||
|
||||
**Production mode:** `SECRET_KEY_DB` required = encrypted database.
|
||||
|
||||
### Phase 3: Fresh database with encryption
|
||||
|
||||
Since we're starting fresh (no migration needed):
|
||||
|
||||
```bash
|
||||
# Delete old unencrypted database
|
||||
rm berrypod_dev.db berrypod_dev.db-shm berrypod_dev.db-wal
|
||||
|
||||
# Start with encryption enabled
|
||||
SECRET_KEY_DB="$(mix phx.gen.secret)" mix ecto.create
|
||||
SECRET_KEY_DB="your-key" mix ecto.migrate
|
||||
SECRET_KEY_DB="your-key" mix phx.server
|
||||
```
|
||||
|
||||
### Phase 4: Fly.io deployment
|
||||
|
||||
**Update Dockerfile:**
|
||||
```dockerfile
|
||||
# Install SQLCipher
|
||||
RUN apt-get update -y && \
|
||||
apt-get install -y libsqlcipher-dev && \
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Build exqlite with system SQLCipher
|
||||
ENV EXQLITE_USE_SYSTEM=1
|
||||
ENV EXQLITE_SYSTEM_CFLAGS="-I/usr/include/sqlcipher"
|
||||
ENV EXQLITE_SYSTEM_LDFLAGS="-lsqlcipher"
|
||||
```
|
||||
|
||||
**Set the secret:**
|
||||
```bash
|
||||
fly secrets set SECRET_KEY_DB="$(mix phx.gen.secret)"
|
||||
```
|
||||
|
||||
**Deploy:**
|
||||
```bash
|
||||
fly deploy
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Admin backup page
|
||||
|
||||
Route: `/admin/backup`
|
||||
|
||||
### Database stats display
|
||||
|
||||
Show useful context before backup/restore:
|
||||
|
||||
**Overview section:**
|
||||
- Total database size (formatted: "12.3 MB")
|
||||
- Encryption status (SQLCipher version or "Unencrypted")
|
||||
- Database created date
|
||||
- Last backup date (if tracked)
|
||||
|
||||
**Table breakdown:**
|
||||
|
||||
| Table | Rows | Size |
|
||||
|-------|------|------|
|
||||
| products | 16 | 45 KB |
|
||||
| product_variants | 142 | 28 KB |
|
||||
| product_images | 89 | 12 KB |
|
||||
| orders | 23 | 18 KB |
|
||||
| images | 156 | 8.2 MB |
|
||||
| settings | 42 | 4 KB |
|
||||
| ... | | |
|
||||
|
||||
**Key counts:**
|
||||
- Products: 16
|
||||
- Orders: 23
|
||||
- Media files: 156
|
||||
- Newsletter subscribers: 89
|
||||
|
||||
**Queries for stats:**
|
||||
```sql
|
||||
-- Total database size
|
||||
SELECT page_count * page_size as size
|
||||
FROM pragma_page_count(), pragma_page_size();
|
||||
|
||||
-- Row counts per table
|
||||
SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%';
|
||||
-- Then COUNT(*) each
|
||||
|
||||
-- Table sizes (via dbstat virtual table)
|
||||
SELECT name, SUM(pgsize) as size
|
||||
FROM dbstat
|
||||
GROUP BY name
|
||||
ORDER BY size DESC;
|
||||
|
||||
-- SQLCipher version
|
||||
PRAGMA cipher_version;
|
||||
```
|
||||
|
||||
### Download backup
|
||||
|
||||
Use SQLite's `VACUUM INTO` for a safe, consistent backup:
|
||||
|
||||
```elixir
|
||||
def create_backup do
|
||||
timestamp = DateTime.utc_now() |> Calendar.strftime("%Y%m%d-%H%M%S")
|
||||
backup_path = Path.join(System.tmp_dir!(), "berrypod-backup-#{timestamp}.db")
|
||||
|
||||
Ecto.Adapters.SQL.query!(Repo, "VACUUM INTO ?", [backup_path])
|
||||
|
||||
{:ok, backup_path}
|
||||
end
|
||||
```
|
||||
|
||||
The backup file is encrypted with the same key — portable to any server with that key.
|
||||
|
||||
**UI:**
|
||||
- "Download backup" button
|
||||
- Shows estimated file size
|
||||
- Filename: `berrypod-backup-YYYYMMDD-HHMMSS.db`
|
||||
|
||||
### Restore backup
|
||||
|
||||
1. Upload encrypted backup file
|
||||
2. Validate it opens with the current key
|
||||
3. Show comparison: current vs uploaded (row counts, size)
|
||||
4. Confirm with explicit action ("Replace current database")
|
||||
5. Stop accepting requests (maintenance mode)
|
||||
6. Replace database file
|
||||
7. Restart application
|
||||
|
||||
**UI:**
|
||||
- File upload dropzone
|
||||
- Validation feedback (valid/invalid/wrong key)
|
||||
- Side-by-side comparison before restore
|
||||
- Confirmation modal with warnings
|
||||
|
||||
---
|
||||
|
||||
## Task breakdown
|
||||
|
||||
| # | Task | Est | Notes |
|
||||
|---|------|-----|-------|
|
||||
| 1 | ~~Install SQLCipher on dev machine~~ | ✓ | `apt install -t bookworm-backports libsqlcipher-dev` (4.6.1) |
|
||||
| 2 | ~~Set build flags, recompile exqlite~~ | ✓ | Env vars, `mix deps.clean/compile` |
|
||||
| 3 | ~~Verify SQLCipher with `PRAGMA cipher_version`~~ | ✓ | Returns "4.6.1 community" |
|
||||
| 4 | ~~Add `:key` config to runtime.exs~~ | ✓ | Required in prod, optional in dev |
|
||||
| 5 | ~~Test fresh encrypted database~~ | ✓ | Verified encryption works |
|
||||
| 6 | ~~Update Dockerfile for Fly.io~~ | ✓ | Install package, set build flags |
|
||||
| 7 | Deploy encrypted to Fly.io | 15m | Set secret, deploy, verify |
|
||||
| 8 | ~~Database stats context module~~ | ✓ | `Berrypod.Backup` with sizes, counts, encryption status |
|
||||
| 9 | ~~Admin backup page — stats display~~ | ✓ | `/admin/backup` LiveView |
|
||||
| 10 | ~~Admin backup page — download~~ | ✓ | VACUUM INTO, JS download hook |
|
||||
| 11 | ~~Admin backup page — restore upload~~ | ✓ | Upload, validation, comparison |
|
||||
| 12 | ~~Admin backup page — restore action~~ | ✓ | Maintenance mode, swap, restart |
|
||||
| 13 | ~~Update README with key management~~ | ✓ | Document backup procedures |
|
||||
|
||||
**Total: ~8-9 hours**
|
||||
|
||||
---
|
||||
|
||||
## Security notes
|
||||
|
||||
- **Key length:** 256-bit minimum. `mix phx.gen.secret` produces 512-bit which is fine.
|
||||
- **Key storage:** Environment variables only. Never commit to code.
|
||||
- **Key rotation:** Requires re-encrypting entire database. Rare operation.
|
||||
- **Lost key = lost data:** No recovery possible. Document key backup procedures clearly.
|
||||
- **Defence in depth:** Keep Cloak encryption for API keys even with DB encryption.
|
||||
|
||||
---
|
||||
|
||||
## Dev workflow
|
||||
|
||||
For convenience, add to `.envrc` (direnv) or shell profile:
|
||||
|
||||
```bash
|
||||
# Build flags (needed once per machine after installing SQLCipher)
|
||||
export EXQLITE_USE_SYSTEM=1
|
||||
export EXQLITE_SYSTEM_CFLAGS="-I/usr/include/sqlcipher"
|
||||
export EXQLITE_SYSTEM_LDFLAGS="-lsqlcipher"
|
||||
|
||||
# Optional: dev database encryption (or omit for unencrypted dev)
|
||||
# export SECRET_KEY_DB="dev-only-key-not-for-production"
|
||||
```
|
||||
|
||||
### Encrypted dev database
|
||||
|
||||
If you want to test encryption locally:
|
||||
|
||||
```bash
|
||||
export SECRET_KEY_DB="dev-test-key-12345"
|
||||
mix ecto.reset # recreates with encryption
|
||||
mix phx.server
|
||||
```
|
||||
|
||||
### Unencrypted dev database
|
||||
|
||||
For simpler local development, just don't set `SECRET_KEY_DB`. The database will be unencrypted but otherwise identical.
|
||||
|
||||
---
|
||||
|
||||
## Compatibility
|
||||
|
||||
- **Litestream:** Works with SQLCipher. Replicates encrypted bytes to S3.
|
||||
- **sqlite3 CLI:** Use `sqlcipher` CLI to open encrypted databases.
|
||||
- **DB Browser for SQLite:** Supports SQLCipher — enter key when opening.
|
||||
- **Tests:** Run unencrypted (faster) unless specifically testing encryption.
|
||||
|
||||
---
|
||||
|
||||
## Verification checklist
|
||||
|
||||
After implementation, verify:
|
||||
|
||||
```bash
|
||||
# 1. SQLCipher is linked
|
||||
mix run -e '{:ok, c} = Exqlite.Basic.open(":memory:"); {:ok, _, r, _} = Exqlite.Basic.exec(c, "PRAGMA cipher_version;"); IO.inspect(r.rows)'
|
||||
# Should print [["4.x.x"]]
|
||||
|
||||
# 2. Encrypted database is unreadable without key
|
||||
file berrypod_prod.db
|
||||
# Should show "data" not "SQLite 3.x database"
|
||||
|
||||
# 3. Encrypted database opens with key
|
||||
SECRET_KEY_DB="your-key" mix run -e 'Berrypod.Repo.query!("SELECT 1")'
|
||||
# Should succeed
|
||||
|
||||
# 4. Encrypted database fails without key
|
||||
mix run -e 'Berrypod.Repo.query!("SELECT 1")'
|
||||
# Should fail with "file is not a database"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Files to modify
|
||||
|
||||
| File | Change |
|
||||
|------|--------|
|
||||
| `config/runtime.exs` | Add `:key` option to Repo config |
|
||||
| `Dockerfile` | Install SQLCipher, set build env vars |
|
||||
| `fly.toml` | (no change, key via secrets) |
|
||||
| `lib/berrypod/backup.ex` | New — backup/restore context |
|
||||
| `lib/berrypod_web/live/admin/backup_live.ex` | New — backup admin page |
|
||||
| `lib/berrypod_web/router.ex` | Add `/admin/backup` route |
|
||||
| `README.md` | Document key management |
|
||||
814
lib/berrypod/backup.ex
Normal file
814
lib/berrypod/backup.ex
Normal file
@ -0,0 +1,814 @@
|
||||
defmodule Berrypod.Backup do
|
||||
@moduledoc """
|
||||
Database backup and restore functionality.
|
||||
|
||||
Provides database statistics, backup creation via VACUUM INTO,
|
||||
backup history management, and restore operations for SQLCipher-encrypted databases.
|
||||
|
||||
Backups are stored in the configured backup directory (default: priv/backups/).
|
||||
Before any restore, an automatic backup of the current database is created.
|
||||
Old backups are automatically pruned to keep the most recent N backups.
|
||||
"""
|
||||
|
||||
alias Berrypod.Repo
|
||||
require Logger
|
||||
|
||||
# Tables to show row counts for in the stats display
|
||||
@key_tables ~w(products product_variants orders images pages newsletter_subscribers)
|
||||
|
||||
# Critical tables that must exist for a valid Berrypod database
|
||||
@required_tables ~w(users settings products orders pages images schema_migrations)
|
||||
|
||||
# Maximum number of backups to keep (configurable via :berrypod, :backup, :max_backups)
|
||||
@default_max_backups 5
|
||||
|
||||
@doc """
|
||||
Returns the directory where backups are stored.
|
||||
Defaults to priv/backups/ but can be configured via :berrypod, :backup, :directory.
|
||||
"""
|
||||
def backup_dir do
|
||||
config = Application.get_env(:berrypod, :backup, [])
|
||||
|
||||
dir =
|
||||
Keyword.get_lazy(config, :directory, fn ->
|
||||
Path.join(:code.priv_dir(:berrypod), "backups")
|
||||
end)
|
||||
|
||||
# Ensure directory exists
|
||||
File.mkdir_p!(dir)
|
||||
dir
|
||||
end
|
||||
|
||||
@doc """
|
||||
Returns the maximum number of backups to keep.
|
||||
"""
|
||||
def max_backups do
|
||||
config = Application.get_env(:berrypod, :backup, [])
|
||||
Keyword.get(config, :max_backups, @default_max_backups)
|
||||
end
|
||||
|
||||
  @doc """
  Lists all available backups, sorted by creation time (newest first).

  Only `*.db` files directly inside `backup_dir/0` are considered.

  Returns a list of maps with:
    - `:filename` — the backup filename
    - `:path` — full path to the backup file
    - `:size` — file size in bytes
    - `:created_at` — DateTime parsed from the filename (falls back to
      "now" for unparseable names — see `parse_backup_timestamp/1`)
    - `:type` — `:manual` or `:pre_restore` (from the filename prefix)
  """
  def list_backups do
    dir = backup_dir()

    dir
    |> File.ls!()
    |> Enum.filter(&String.ends_with?(&1, ".db"))
    |> Enum.map(fn filename ->
      path = Path.join(dir, filename)
      # File.stat! raises on a race (file deleted between ls and stat);
      # acceptable for an admin-only page.
      stat = File.stat!(path)

      %{
        filename: filename,
        path: path,
        size: stat.size,
        created_at: parse_backup_timestamp(filename),
        type: parse_backup_type(filename)
      }
    end)
    |> Enum.sort_by(& &1.created_at, {:desc, DateTime})
  end
|
||||
|
||||
defp parse_backup_timestamp(filename) do
|
||||
# Expected format: berrypod-backup-YYYYMMDD-HHMMSS.db or pre-restore-YYYYMMDD-HHMMSS.db
|
||||
case Regex.run(~r/(\d{8})-(\d{6})\.db$/, filename) do
|
||||
[_, date, time] ->
|
||||
<<y::binary-4, m::binary-2, d::binary-2>> = date
|
||||
<<hh::binary-2, mm::binary-2, ss::binary-2>> = time
|
||||
|
||||
case NaiveDateTime.new(
|
||||
String.to_integer(y),
|
||||
String.to_integer(m),
|
||||
String.to_integer(d),
|
||||
String.to_integer(hh),
|
||||
String.to_integer(mm),
|
||||
String.to_integer(ss)
|
||||
) do
|
||||
{:ok, naive} -> DateTime.from_naive!(naive, "Etc/UTC")
|
||||
_ -> DateTime.utc_now()
|
||||
end
|
||||
|
||||
_ ->
|
||||
DateTime.utc_now()
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_backup_type(filename) do
|
||||
if String.starts_with?(filename, "pre-restore-") do
|
||||
:pre_restore
|
||||
else
|
||||
:manual
|
||||
end
|
||||
end
|
||||
|
||||
  @doc """
  Returns comprehensive database statistics for the stats display.

  The map contains:
    - `:total_size` — database size in bytes (page_count * page_size)
    - `:encryption_status` — SQLCipher version string, or nil if unencrypted
    - `:tables` — per-table row counts and sizes (see `get_table_stats/0`)
    - `:key_counts` — row counts for the tables in `@key_tables`
    - `:schema_version` — latest applied Ecto migration version, or nil
  """
  def get_stats do
    %{
      total_size: get_total_size(),
      encryption_status: get_encryption_status(),
      tables: get_table_stats(),
      key_counts: get_key_counts(),
      schema_version: get_current_schema_version()
    }
  end
|
||||
|
||||
@doc """
|
||||
Returns the total database file size in bytes.
|
||||
"""
|
||||
def get_total_size do
|
||||
case Repo.query(
|
||||
"SELECT page_count * page_size as size FROM pragma_page_count(), pragma_page_size()"
|
||||
) do
|
||||
{:ok, %{rows: [[size]]}} -> size
|
||||
_ -> 0
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Returns the SQLCipher version if encryption is enabled, nil otherwise.
|
||||
"""
|
||||
def get_encryption_status do
|
||||
case Repo.query("PRAGMA cipher_version") do
|
||||
{:ok, %{rows: [[version]]}} when is_binary(version) and version != "" -> version
|
||||
_ -> nil
|
||||
end
|
||||
end
|
||||
|
||||
  @doc """
  Returns a list of user tables with their row counts and sizes,
  sorted largest-first. Each entry is `%{name: ..., rows: ..., size: ...}`.

  FTS shadow tables (`*_content`, `*_data`, `*_idx`, `*_docsize`,
  `*_config`) and sqlite internals are excluded. Returns `[]` on query
  failure.
  """
  def get_table_stats do
    # Get all user tables (exclude sqlite internals and FTS shadow tables)
    case Repo.query("""
    SELECT name FROM sqlite_master
    WHERE type='table'
    AND name NOT LIKE 'sqlite_%'
    AND name NOT LIKE '%_content'
    AND name NOT LIKE '%_data'
    AND name NOT LIKE '%_idx'
    AND name NOT LIKE '%_docsize'
    AND name NOT LIKE '%_config'
    ORDER BY name
    """) do
      {:ok, %{rows: tables}} ->
        process_table_stats(tables)

      {:error, _} ->
        []
    end
  end
|
||||
|
||||
defp process_table_stats(tables) do
|
||||
|
||||
table_names = Enum.map(tables, fn [name] -> name end)
|
||||
|
||||
# Get sizes via dbstat if available
|
||||
sizes = get_table_sizes()
|
||||
|
||||
# Get row counts
|
||||
Enum.map(table_names, fn name ->
|
||||
count = get_row_count(name)
|
||||
size = Map.get(sizes, name, 0)
|
||||
|
||||
%{
|
||||
name: name,
|
||||
rows: count,
|
||||
size: size
|
||||
}
|
||||
end)
|
||||
|> Enum.sort_by(& &1.size, :desc)
|
||||
end
|
||||
|
||||
  # Returns a map of table name => size in bytes.
  #
  # Tries the dbstat virtual table first (accurate, but only available when
  # SQLite was compiled with ENABLE_DBSTAT_VTAB); falls back to a per-row
  # length estimate otherwise.
  defp get_table_sizes do
    # Try dbstat first (most accurate, but requires ENABLE_DBSTAT_VTAB compile flag)
    case Repo.query("""
    SELECT name, SUM(pgsize) as size
    FROM dbstat
    GROUP BY name
    """) do
      {:ok, %{rows: rows}} ->
        # SUM can be NULL for empty tables; coalesce to 0.
        Map.new(rows, fn [name, size] -> {name, size || 0} end)

      _ ->
        # Fallback: estimate sizes by summing column data lengths
        # This gives a reasonable approximation for display purposes
        estimate_table_sizes()
    end
  end
|
||||
|
||||
defp estimate_table_sizes do
|
||||
# Get all user tables
|
||||
case Repo.query("""
|
||||
SELECT name FROM sqlite_master
|
||||
WHERE type='table'
|
||||
AND name NOT LIKE 'sqlite_%'
|
||||
AND name NOT LIKE '%_content'
|
||||
AND name NOT LIKE '%_data'
|
||||
AND name NOT LIKE '%_idx'
|
||||
AND name NOT LIKE '%_docsize'
|
||||
AND name NOT LIKE '%_config'
|
||||
""") do
|
||||
{:ok, %{rows: tables}} ->
|
||||
tables
|
||||
|> Enum.map(fn [name] -> {name, estimate_table_size(name)} end)
|
||||
|> Enum.into(%{})
|
||||
|
||||
_ ->
|
||||
%{}
|
||||
end
|
||||
end
|
||||
|
||||
  # Estimates a table's size by summing the byte length of every column in
  # every row (BLOB cast makes LENGTH count bytes, not characters).
  #
  # The table name is interpolated into SQL, which is safe ONLY because
  # callers pass names read from sqlite_master — never user input.
  # Returns 0 when the table has no columns or the query fails.
  defp estimate_table_size(table_name) do
    # Get column names for this table
    case Repo.query("PRAGMA table_info(\"#{table_name}\")") do
      {:ok, %{rows: columns}} when columns != [] ->
        column_names = Enum.map(columns, fn [_cid, name | _] -> name end)

        # Build a query that sums the length of all columns.
        # COALESCE handles NULL cells; CAST AS BLOB measures raw bytes.
        length_exprs =
          column_names
          |> Enum.map(fn col ->
            "COALESCE(LENGTH(CAST(\"#{col}\" AS BLOB)), 0)"
          end)
          |> Enum.join(" + ")

        query = "SELECT SUM(#{length_exprs}) FROM \"#{table_name}\""

        case Repo.query(query) do
          # SUM over an empty table yields NULL -> neither clause matches -> 0.
          {:ok, %{rows: [[size]]}} when is_integer(size) -> size
          {:ok, %{rows: [[size]]}} when is_float(size) -> round(size)
          _ -> 0
        end

      _ ->
        0
    end
  end
|
||||
|
||||
defp get_row_count(table_name) do
|
||||
# Safe since table_name comes from sqlite_master
|
||||
case Repo.query("SELECT COUNT(*) FROM \"#{table_name}\"") do
|
||||
{:ok, %{rows: [[count]]}} -> count
|
||||
_ -> 0
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Returns counts for key entities (products, orders, etc).
|
||||
"""
|
||||
def get_key_counts do
|
||||
@key_tables
|
||||
|> Enum.map(fn table ->
|
||||
{table, get_row_count(table)}
|
||||
end)
|
||||
|> Enum.into(%{})
|
||||
end
|
||||
|
||||
  @doc """
  Creates a backup of the database using VACUUM INTO.

  Options:
    * `:prefix` — filename prefix (default "berrypod-backup")
    * `:save_to_history` — when true (default) the file is written to
      `backup_dir/0` and old backups are pruned; when false it goes to the
      system temp dir and the caller is responsible for cleanup.

  Returns `{:ok, backup_path}` on success, `{:error, reason}` otherwise.
  The backup is encrypted with the same key as the source database.
  """
  def create_backup(opts \\ []) do
    prefix = Keyword.get(opts, :prefix, "berrypod-backup")
    save_to_history = Keyword.get(opts, :save_to_history, true)

    # Second-resolution timestamp: two backups within the same second would
    # target the same file — presumably VACUUM INTO then errors rather than
    # overwriting (TODO confirm); acceptable for a manual admin action.
    timestamp = DateTime.utc_now() |> Calendar.strftime("%Y%m%d-%H%M%S")
    filename = "#{prefix}-#{timestamp}.db"

    backup_path =
      if save_to_history do
        Path.join(backup_dir(), filename)
      else
        Path.join(System.tmp_dir!(), filename)
      end

    # VACUUM INTO writes a compacted, transactionally-consistent copy.
    # The destination path is passed as a bound parameter, not interpolated.
    case Repo.query("VACUUM INTO ?", [backup_path]) do
      {:ok, _} ->
        if save_to_history, do: prune_old_backups()
        {:ok, backup_path}

      {:error, error} ->
        {:error, error}
    end
  end
|
||||
|
||||
@doc """
|
||||
Creates a pre-restore backup of the current database.
|
||||
This is automatically called before any restore operation.
|
||||
"""
|
||||
def create_pre_restore_backup do
|
||||
create_backup(prefix: "pre-restore", save_to_history: true)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Deletes old backups, keeping only the most recent N backups.
|
||||
"""
|
||||
def prune_old_backups do
|
||||
max = max_backups()
|
||||
backups = list_backups()
|
||||
|
||||
if length(backups) > max do
|
||||
backups
|
||||
|> Enum.drop(max)
|
||||
|> Enum.each(fn backup ->
|
||||
Logger.info("[Backup] Pruning old backup: #{backup.filename}")
|
||||
File.rm(backup.path)
|
||||
end)
|
||||
end
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
@doc """
|
||||
Deletes a specific backup by filename.
|
||||
"""
|
||||
def delete_backup(filename) do
|
||||
path = Path.join(backup_dir(), filename)
|
||||
|
||||
if File.exists?(path) and String.ends_with?(filename, ".db") do
|
||||
File.rm(path)
|
||||
else
|
||||
{:error, :not_found}
|
||||
end
|
||||
end
|
||||
|
||||
  @doc """
  Validates that a backup file can be opened and is a valid Berrypod database.

  Performs comprehensive checks:
  1. File can be opened with current encryption key
  2. Contains required tables (users, settings, products, etc.)
  3. Has schema_migrations table with valid versions
  4. Integrity check passes

  Returns:
  - `{:ok, validation_result}` with detailed stats (see `get_backup_stats/1`)
  - `{:error, reason}` with specific error details

  The file is opened read-only so validation can never mutate the backup.
  """
  def validate_backup(path) do
    # Reuse the live Repo's key so the backup is checked against the same
    # encryption credentials the restore would use.
    config = Application.get_env(:berrypod, Berrypod.Repo)
    key = Keyword.get(config, :key)

    case Exqlite.Sqlite3.open(path, mode: :readonly) do
      {:ok, conn} ->
        result = validate_backup_connection(conn, key)
        # Always close the raw connection, success or failure.
        Exqlite.Sqlite3.close(conn)
        result

      {:error, reason} ->
        {:error, {:open_failed, reason}}
    end
  end
|
||||
|
||||
defp validate_backup_connection(conn, key) do
|
||||
# Set the encryption key if we have one
|
||||
if key do
|
||||
case Exqlite.Sqlite3.execute(conn, "PRAGMA key = #{key}") do
|
||||
:ok -> :ok
|
||||
{:error, reason} -> throw({:error, {:key_failed, reason}})
|
||||
end
|
||||
end
|
||||
|
||||
# Try to read from the database (will fail if wrong key)
|
||||
case Exqlite.Sqlite3.execute(conn, "SELECT COUNT(*) FROM sqlite_master") do
|
||||
:ok -> :ok
|
||||
{:error, "file is not a database"} -> throw({:error, :invalid_key})
|
||||
{:error, reason} -> throw({:error, {:read_failed, reason}})
|
||||
end
|
||||
|
||||
# Run integrity check
|
||||
case run_integrity_check(conn) do
|
||||
:ok -> :ok
|
||||
{:error, reason} -> throw({:error, {:integrity_failed, reason}})
|
||||
end
|
||||
|
||||
# Check required tables exist
|
||||
case check_required_tables(conn) do
|
||||
:ok -> :ok
|
||||
{:error, missing} -> throw({:error, {:missing_tables, missing}})
|
||||
end
|
||||
|
||||
# Check schema migrations
|
||||
case check_schema_migrations(conn) do
|
||||
{:ok, migration_info} -> migration_info
|
||||
{:error, reason} -> throw({:error, {:migrations_failed, reason}})
|
||||
end
|
||||
|
||||
# Get comprehensive stats
|
||||
stats = get_backup_stats(conn)
|
||||
{:ok, stats}
|
||||
catch
|
||||
{:error, reason} -> {:error, reason}
|
||||
end
|
||||
|
||||
  # Runs PRAGMA integrity_check(1) — the argument caps the report at the
  # first problem, which is all we need for a pass/fail answer.
  # Returns :ok when SQLite reports "ok", {:error, detail} otherwise.
  # The prepared statement is released on every branch to avoid leaking it.
  defp run_integrity_check(conn) do
    {:ok, stmt} = Exqlite.Sqlite3.prepare(conn, "PRAGMA integrity_check(1)")

    case Exqlite.Sqlite3.step(conn, stmt) do
      {:row, ["ok"]} ->
        Exqlite.Sqlite3.release(conn, stmt)
        :ok

      {:row, [error]} ->
        Exqlite.Sqlite3.release(conn, stmt)
        {:error, error}

      {:error, reason} ->
        Exqlite.Sqlite3.release(conn, stmt)
        {:error, reason}
    end
  end
|
||||
|
||||
defp check_required_tables(conn) do
|
||||
{:ok, stmt} =
|
||||
Exqlite.Sqlite3.prepare(conn, "SELECT name FROM sqlite_master WHERE type='table'")
|
||||
|
||||
tables = collect_rows(conn, stmt, [])
|
||||
table_names = Enum.map(tables, fn [name] -> name end)
|
||||
|
||||
missing = @required_tables -- table_names
|
||||
|
||||
if Enum.empty?(missing) do
|
||||
:ok
|
||||
else
|
||||
{:error, missing}
|
||||
end
|
||||
end
|
||||
|
||||
  # Reads the newest row from schema_migrations in the backup.
  # Returns {:ok, %{latest_migration: version}} when at least one migration
  # exists, {:error, "no migrations found"} for an empty table, or
  # {:error, reason} if the query itself fails (e.g. table unreadable).
  defp check_schema_migrations(conn) do
    {:ok, stmt} =
      Exqlite.Sqlite3.prepare(
        conn,
        "SELECT version FROM schema_migrations ORDER BY version DESC LIMIT 1"
      )

    case Exqlite.Sqlite3.step(conn, stmt) do
      {:row, [latest_version]} ->
        Exqlite.Sqlite3.release(conn, stmt)
        {:ok, %{latest_migration: latest_version}}

      :done ->
        # Table exists (check_required_tables ran first) but is empty.
        Exqlite.Sqlite3.release(conn, stmt)
        {:error, "no migrations found"}

      {:error, reason} ->
        Exqlite.Sqlite3.release(conn, stmt)
        {:error, reason}
    end
  end
|
||||
|
||||
defp collect_rows(conn, stmt, acc) do
|
||||
case Exqlite.Sqlite3.step(conn, stmt) do
|
||||
{:row, row} -> collect_rows(conn, stmt, [row | acc])
|
||||
:done -> Exqlite.Sqlite3.release(conn, stmt) && Enum.reverse(acc)
|
||||
end
|
||||
end
|
||||
|
||||
  # Collects summary stats from a backup over a raw Exqlite connection:
  # table count, logical size, latest migration and key entity counts.
  # Used by validate_backup/1 so the UI can compare a candidate backup
  # with the live database before restoring.
  defp get_backup_stats(conn) do
    # Get table count (excluding sqlite internals and FTS shadow tables, same as get_table_stats)
    {:ok, stmt} =
      Exqlite.Sqlite3.prepare(conn, """
      SELECT COUNT(*) FROM sqlite_master
      WHERE type='table'
      AND name NOT LIKE 'sqlite_%'
      AND name NOT LIKE '%_content'
      AND name NOT LIKE '%_data'
      AND name NOT LIKE '%_idx'
      AND name NOT LIKE '%_docsize'
      AND name NOT LIKE '%_config'
      """)

    # COUNT(*) always yields exactly one row, so a bare match is safe here.
    {:row, [table_count]} = Exqlite.Sqlite3.step(conn, stmt)
    Exqlite.Sqlite3.release(conn, stmt)

    # Get page count for size estimate (logical size; may differ slightly
    # from the on-disk file size).
    {:ok, stmt} =
      Exqlite.Sqlite3.prepare(
        conn,
        "SELECT page_count * page_size FROM pragma_page_count(), pragma_page_size()"
      )

    {:row, [size]} = Exqlite.Sqlite3.step(conn, stmt)
    Exqlite.Sqlite3.release(conn, stmt)

    # Get latest migration version (nil when schema_migrations is empty —
    # validation has already rejected that case before we get here).
    {:ok, stmt} =
      Exqlite.Sqlite3.prepare(
        conn,
        "SELECT version FROM schema_migrations ORDER BY version DESC LIMIT 1"
      )

    latest_migration =
      case Exqlite.Sqlite3.step(conn, stmt) do
        {:row, [version]} -> version
        _ -> nil
      end

    Exqlite.Sqlite3.release(conn, stmt)

    # Get key entity counts
    key_counts = get_backup_key_counts(conn)

    %{
      table_count: table_count,
      size: size,
      latest_migration: latest_migration,
      key_counts: key_counts
    }
  end
|
||||
|
||||
  # Row counts for the @key_tables inside a backup, over a raw connection
  # (the Repo-based get_key_counts/0 only works on the live database).
  # Any failure — e.g. a table missing from an older backup — counts as 0
  # rather than aborting validation.
  defp get_backup_key_counts(conn) do
    @key_tables
    |> Enum.map(fn table ->
      count =
        case Exqlite.Sqlite3.prepare(conn, "SELECT COUNT(*) FROM \"#{table}\"") do
          {:ok, stmt} ->
            result =
              case Exqlite.Sqlite3.step(conn, stmt) do
                {:row, [count]} -> count
                _ -> 0
              end

            Exqlite.Sqlite3.release(conn, stmt)
            result

          _ ->
            0
        end

      {table, count}
    end)
    |> Enum.into(%{})
  end
|
||||
|
||||
  @doc """
  Restores a database from a backup file.

  This performs a full file-based restore by:

  1. Validating the backup (including schema version match)
  2. Creating an automatic pre-restore backup of the current database
  3. Broadcasting maintenance mode, stopping Oban
  4. Checkpointing WAL and draining all database connections
  5. Replacing the database file
  6. Restarting the Repo and Oban
  7. Clearing and warming caches

  The step order is load-bearing: validation and the safety backup need a
  live Repo, the file swap needs a fully-stopped one.

  Returns `:ok` on success, `{:error, reason}` on failure. On failure a
  best-effort recovery restarts the Repo and Oban and exits maintenance
  mode, so the site comes back up on the ORIGINAL database.
  """
  def restore_backup(backup_path) do
    config = Application.get_env(:berrypod, Berrypod.Repo)
    db_path = Keyword.fetch!(config, :database)

    Logger.info("[Backup] Starting database restore from #{backup_path}")

    with :ok <- validate_backup_before_restore(backup_path),
         {:ok, pre_restore_path} <- create_pre_restore_backup(),
         :ok <- broadcast_maintenance_mode(:entering),
         :ok <- stop_oban(),
         :ok <- drain_and_stop_repo(),
         :ok <- swap_database_file(backup_path, db_path),
         :ok <- start_repo(),
         :ok <- start_oban(),
         :ok <- clear_ets_caches(),
         :ok <- warm_caches() do
      broadcast_maintenance_mode(:exited)
      Logger.info("[Backup] Database restore completed successfully")
      Logger.info("[Backup] Pre-restore backup saved: #{pre_restore_path}")
      :ok
    else
      {:error, reason} ->
        Logger.error("[Backup] Restore failed: #{inspect(reason)}")
        # Try to recover: these are no-ops for whatever never got stopped.
        start_repo()
        start_oban()
        broadcast_maintenance_mode(:exited)
        {:error, reason}
    end
  end
|
||||
|
||||
  # Terminates the Oban child under the application supervisor, which takes
  # down all Oban processes (plugins, reporters, queues) so no job can touch
  # the database during the file swap. Errors (e.g. Oban not running) are
  # swallowed — stopping an already-stopped Oban is fine here.
  defp stop_oban do
    try do
      Supervisor.terminate_child(Berrypod.Supervisor, Oban)
    catch
      _, _ -> :ok
    end

    # Give processes time to fully terminate before the Repo is drained.
    Process.sleep(500)
    :ok
  end
|
||||
|
||||
  # Restarts the previously-terminated Oban child. Errors are swallowed so
  # the recovery path in restore_backup/1 can call this unconditionally.
  defp start_oban do
    try do
      Supervisor.restart_child(Berrypod.Supervisor, Oban)
    catch
      _, _ -> :ok
    end

    # Wait for Oban to be ready before callers resume enqueueing.
    Process.sleep(500)
    :ok
  end
|
||||
|
||||
  # Fully quiesces the database before the file swap:
  # checkpoint WAL (while connections still exist) -> disconnect the pool ->
  # stop the Repo process -> wait for OS file handles to be released.
  # The sleeps are deliberate settling windows; order matters throughout.
  defp drain_and_stop_repo do
    # First checkpoint WAL while we still have connections, so the -wal
    # file's contents are folded into the main database file.
    try do
      Repo.query!("PRAGMA wal_checkpoint(TRUNCATE)")
    rescue
      _ -> :ok
    end

    # Disconnect all connections - this forces them to close gracefully
    Ecto.Adapters.SQL.disconnect_all(Repo, 0)

    # Give connections time to release their file handles
    Process.sleep(500)

    # Use GenServer.stop which properly shuts down the pool
    repo_pid = Process.whereis(Repo)

    if repo_pid do
      ref = Process.monitor(repo_pid)

      # GenServer.stop sends a :stop call which is handled gracefully;
      # catch :exit in case the process is already gone.
      try do
        GenServer.stop(repo_pid, :normal, 10_000)
      catch
        :exit, _ -> :ok
      end

      # Wait for the process to actually terminate (bounded at 5s).
      receive do
        {:DOWN, ^ref, :process, ^repo_pid, _reason} -> :ok
      after
        5000 -> :ok
      end
    end

    # Wait for file handles to be fully released by the OS
    Process.sleep(500)
    :ok
  end
|
||||
|
||||
defp start_repo do
|
||||
# The supervisor will restart the Repo automatically since we stopped it
|
||||
# Wait for it to come back up
|
||||
wait_for_repo(100)
|
||||
end
|
||||
|
||||
  # Polls until the supervisor has restarted the Repo AND it answers a real
  # query. `attempts` counts down in ~100ms steps; exhausting it returns
  # {:error, :repo_start_timeout} so restore_backup/1 reports the failure.
  defp wait_for_repo(0), do: {:error, :repo_start_timeout}

  defp wait_for_repo(attempts) do
    case Process.whereis(Repo) do
      nil ->
        Process.sleep(100)
        wait_for_repo(attempts - 1)

      _pid ->
        # Registered is not the same as ready: give the pool a moment,
        # then prove liveness with an actual query.
        Process.sleep(200)

        # Repo.query can either return {:error, _} or raise/exit while the
        # pool is still coming up — treat both as "not ready yet".
        try do
          case Repo.query("SELECT 1") do
            {:ok, _} -> :ok
            {:error, _} ->
              Process.sleep(100)
              wait_for_repo(attempts - 1)
          end
        catch
          _, _ ->
            Process.sleep(100)
            wait_for_repo(attempts - 1)
        end
    end
  end
|
||||
|
||||
  # Replaces the live database file with the backup. Must only run while
  # the Repo is stopped and file handles are released (drain_and_stop_repo/0).
  defp swap_database_file(backup_path, db_path) do
    # Remove WAL and SHM files if they exist (they're part of the old
    # database state); File.rm errors for missing files are ignored.
    File.rm("#{db_path}-wal")
    File.rm("#{db_path}-shm")

    # Replace the database file
    case File.cp(backup_path, db_path) do
      :ok ->
        # NOTE(review): the source file is deleted after a successful copy.
        # For an uploaded temp file that is correct cleanup, but when
        # restoring from a HISTORY backup this deletes the history entry
        # itself — confirm that is intended.
        File.rm(backup_path)
        :ok

      {:error, reason} ->
        {:error, {:file_copy_failed, reason}}
    end
  end
|
||||
|
||||
defp validate_backup_before_restore(path) do
|
||||
case validate_backup(path) do
|
||||
{:ok, backup_stats} ->
|
||||
# Check schema versions match
|
||||
current_version = get_current_schema_version()
|
||||
|
||||
if backup_stats.latest_migration == current_version do
|
||||
:ok
|
||||
else
|
||||
{:error, {:schema_mismatch, backup_stats.latest_migration, current_version}}
|
||||
end
|
||||
|
||||
{:error, reason} ->
|
||||
{:error, {:validation_failed, reason}}
|
||||
end
|
||||
end
|
||||
|
||||
defp get_current_schema_version do
|
||||
case Repo.query("SELECT version FROM schema_migrations ORDER BY version DESC LIMIT 1") do
|
||||
{:ok, %{rows: [[version]]}} -> version
|
||||
_ -> nil
|
||||
end
|
||||
end
|
||||
|
||||
defp broadcast_maintenance_mode(status) do
|
||||
# Broadcast to all connected LiveViews that maintenance is happening
|
||||
Phoenix.PubSub.broadcast(Berrypod.PubSub, "maintenance", {:maintenance, status})
|
||||
:ok
|
||||
end
|
||||
|
||||
|
||||
  # Empties the known ETS caches after a restore so stale pre-restore data
  # cannot be served; each cache is rebuilt from the new database on demand
  # (and eagerly by warm_caches/0). ArgumentError from a non-existent table
  # is ignored — a missing cache simply has nothing to clear.
  defp clear_ets_caches do
    caches = [
      Berrypod.Theme.CSSCache,
      Berrypod.Pages.PageCache,
      Berrypod.Redirects.Cache
    ]

    for cache <- caches do
      try do
        :ets.delete_all_objects(cache)
      rescue
        ArgumentError -> :ok
      end
    end

    :ok
  end
|
||||
|
||||
defp warm_caches do
|
||||
# Warm up caches after restore
|
||||
try do
|
||||
Berrypod.Pages.PageCache.warm()
|
||||
rescue
|
||||
_ -> :ok
|
||||
end
|
||||
|
||||
try do
|
||||
Berrypod.Redirects.warm_cache()
|
||||
rescue
|
||||
_ -> :ok
|
||||
end
|
||||
|
||||
try do
|
||||
Berrypod.Theme.CSSCache.warm()
|
||||
rescue
|
||||
_ -> :ok
|
||||
end
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
@doc """
|
||||
Formats a byte size into a human-readable string.
|
||||
"""
|
||||
def format_size(bytes) when is_integer(bytes) do
|
||||
cond do
|
||||
bytes >= 1_073_741_824 -> "#{Float.round(bytes / 1_073_741_824, 1)} GB"
|
||||
bytes >= 1_048_576 -> "#{Float.round(bytes / 1_048_576, 1)} MB"
|
||||
bytes >= 1024 -> "#{Float.round(bytes / 1024, 1)} KB"
|
||||
true -> "#{bytes} B"
|
||||
end
|
||||
end
|
||||
|
||||
def format_size(_), do: "0 B"
|
||||
end
|
||||
@ -177,6 +177,14 @@
|
||||
<.icon name="hero-arrow-uturn-right" class="size-5" /> Redirects
|
||||
</.link>
|
||||
</li>
|
||||
<li>
|
||||
<.link
|
||||
navigate={~p"/admin/backup"}
|
||||
class={admin_nav_active?(@current_path, "/admin/backup")}
|
||||
>
|
||||
<.icon name="hero-circle-stack" class="size-5" /> Backup
|
||||
</.link>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
592
lib/berrypod_web/live/admin/backup.ex
Normal file
592
lib/berrypod_web/live/admin/backup.ex
Normal file
@ -0,0 +1,592 @@
|
||||
defmodule BerrypodWeb.Admin.Backup do
|
||||
use BerrypodWeb, :live_view
|
||||
|
||||
alias Berrypod.Backup
|
||||
|
||||
  @impl true
  # Loads initial stats + backup history and initialises all UI state flags
  # (create/upload/restore/delete confirmation state, table toggle).
  #
  # The :backup upload accepts any content type — presumably because SQLite
  # files carry no reliable MIME type (TODO confirm); real validation
  # happens server-side in Backup.validate_backup/1. Max size 500 MB.
  def mount(_params, _session, socket) do
    stats = Backup.get_stats()
    backups = Backup.list_backups()

    {:ok,
     socket
     |> assign(:page_title, "Backup")
     |> assign(:stats, stats)
     |> assign(:backups, backups)
     |> assign(:create_backup_status, :idle)
     |> assign(:uploaded_backup, nil)
     |> assign(:upload_error, nil)
     |> assign(:confirming_restore, false)
     |> assign(:restoring, false)
     |> assign(:confirming_history_restore, nil)
     |> assign(:confirming_delete, nil)
     |> assign(:show_tables, false)
     |> allow_upload(:backup,
       accept: :any,
       max_entries: 1,
       max_file_size: 500_000_000
     )}
  end
|
||||
|
||||
@impl true
|
||||
def handle_event("refresh_stats", _params, socket) do
|
||||
stats = Backup.get_stats()
|
||||
backups = Backup.list_backups()
|
||||
{:noreply, socket |> assign(:stats, stats) |> assign(:backups, backups)}
|
||||
end
|
||||
|
||||
def handle_event("toggle_tables", _params, socket) do
|
||||
{:noreply, assign(socket, :show_tables, !socket.assigns.show_tables)}
|
||||
end
|
||||
|
||||
def handle_event("create_backup", _params, socket) do
|
||||
case Backup.create_backup() do
|
||||
{:ok, _path} ->
|
||||
{:noreply,
|
||||
socket
|
||||
|> assign(:backups, Backup.list_backups())
|
||||
|> assign(:create_backup_status, :saved)}
|
||||
|
||||
{:error, error} ->
|
||||
{:noreply,
|
||||
socket
|
||||
|> assign(:create_backup_status, :error)
|
||||
|> put_flash(:error, "Failed to create backup: #{inspect(error)}")}
|
||||
end
|
||||
end
|
||||
|
||||
def handle_event("download_history_backup", %{"filename" => filename}, socket) do
|
||||
path = Path.join(Backup.backup_dir(), filename)
|
||||
|
||||
if File.exists?(path) do
|
||||
data = File.read!(path)
|
||||
|
||||
{:noreply,
|
||||
socket
|
||||
|> push_event("download", %{
|
||||
filename: filename,
|
||||
content: Base.encode64(data),
|
||||
content_type: "application/octet-stream"
|
||||
})}
|
||||
else
|
||||
{:noreply, put_flash(socket, :error, "Backup file not found")}
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
def handle_event("validate_upload", _params, socket) do
|
||||
{:noreply, socket}
|
||||
end
|
||||
|
||||
def handle_event("upload_backup", _params, socket) do
|
||||
[result] =
|
||||
consume_uploaded_entries(socket, :backup, fn %{path: path}, _entry ->
|
||||
# Copy to temp location since consume deletes the original
|
||||
temp_path = Path.join(System.tmp_dir!(), "berrypod-restore-#{System.unique_integer()}.db")
|
||||
File.cp!(path, temp_path)
|
||||
|
||||
case Backup.validate_backup(temp_path) do
|
||||
{:ok, backup_stats} ->
|
||||
# Use actual file size instead of internal page calculation
|
||||
file_size = File.stat!(temp_path).size
|
||||
{:ok, {:ok, temp_path, Map.put(backup_stats, :file_size, file_size)}}
|
||||
|
||||
{:error, reason} ->
|
||||
File.rm(temp_path)
|
||||
{:ok, {:error, reason}}
|
||||
end
|
||||
end)
|
||||
|
||||
case result do
|
||||
{:ok, path, backup_stats} ->
|
||||
{:noreply,
|
||||
socket
|
||||
|> assign(:uploaded_backup, %{path: path, stats: backup_stats})
|
||||
|> assign(:upload_error, nil)}
|
||||
|
||||
{:error, :invalid_key} ->
|
||||
{:noreply,
|
||||
assign(
|
||||
socket,
|
||||
:upload_error,
|
||||
"Wrong encryption key — this backup was created with a different key"
|
||||
)}
|
||||
|
||||
{:error, reason} ->
|
||||
{:noreply, assign(socket, :upload_error, "Invalid backup file: #{inspect(reason)}")}
|
||||
end
|
||||
end
|
||||
|
||||
def handle_event("cancel_restore", _params, socket) do
|
||||
# Clean up temp file
|
||||
if socket.assigns.uploaded_backup do
|
||||
File.rm(socket.assigns.uploaded_backup.path)
|
||||
end
|
||||
|
||||
{:noreply,
|
||||
socket
|
||||
|> assign(:uploaded_backup, nil)
|
||||
|> assign(:confirming_restore, false)}
|
||||
end
|
||||
|
||||
def handle_event("confirm_restore", _params, socket) do
|
||||
{:noreply, assign(socket, :confirming_restore, true)}
|
||||
end
|
||||
|
||||
def handle_event("execute_restore", _params, socket) do
|
||||
# Show loading state immediately, then do the restore async
|
||||
send(self(), :do_restore)
|
||||
{:noreply, assign(socket, :restoring, true)}
|
||||
end
|
||||
|
||||
# Backup history actions
|
||||
def handle_event("confirm_history_restore", %{"filename" => filename}, socket) do
|
||||
{:noreply, assign(socket, :confirming_history_restore, filename)}
|
||||
end
|
||||
|
||||
def handle_event("cancel_history_restore", _params, socket) do
|
||||
{:noreply, assign(socket, :confirming_history_restore, nil)}
|
||||
end
|
||||
|
||||
def handle_event("execute_history_restore", %{"filename" => filename}, socket) do
|
||||
send(self(), {:do_history_restore, filename})
|
||||
{:noreply, socket |> assign(:restoring, true) |> assign(:confirming_history_restore, nil)}
|
||||
end
|
||||
|
||||
# Remember which saved backup is pending deletion so that row can render
# its inline confirm/cancel buttons.
def handle_event("confirm_delete", %{"filename" => filename}, socket) do
  {:noreply, socket |> assign(:confirming_delete, filename)}
end
|
||||
|
||||
# Dismiss the inline delete confirmation without deleting anything.
def handle_event("cancel_delete", _params, socket) do
  {:noreply, socket |> assign(:confirming_delete, nil)}
end
|
||||
|
||||
# Deletes a saved backup from disk. On success the history list is
# refreshed; either way the inline confirmation is dismissed.
def handle_event("execute_delete", %{"filename" => filename}, socket) do
  socket = assign(socket, :confirming_delete, nil)

  case Backup.delete_backup(filename) do
    :ok ->
      {:noreply,
       socket
       |> assign(:backups, Backup.list_backups())
       |> put_flash(:info, "Backup deleted")}

    {:error, _reason} ->
      {:noreply, put_flash(socket, :error, "Failed to delete backup")}
  end
end
|
||||
|
||||
@impl true
# Performs the upload-restore after `execute_restore` has rendered the
# spinner. Every outcome closes the confirm dialog and stops the spinner;
# on success the page stats and history are reloaded from `Backup`.
def handle_info(:do_restore, socket) do
  backup_path = socket.assigns.uploaded_backup.path

  # Common post-restore state, regardless of outcome.
  socket =
    socket
    |> assign(:confirming_restore, false)
    |> assign(:restoring, false)

  case Backup.restore_backup(backup_path) do
    :ok ->
      {:noreply,
       socket
       |> assign(:uploaded_backup, nil)
       |> assign(:stats, Backup.get_stats())
       |> assign(:backups, Backup.list_backups())
       |> put_flash(:info, "Database restored successfully")}

    {:error, {:schema_mismatch, backup_version, current_version}} ->
      {:noreply,
       put_flash(
         socket,
         :error,
         "Schema version mismatch: backup is #{backup_version}, current is #{current_version}. " <>
           "Backups can only be restored to a database with the same schema version."
       )}

    {:error, reason} ->
      {:noreply, put_flash(socket, :error, "Restore failed: #{inspect(reason)}")}
  end
end
|
||||
|
||||
# Restores the database from a backup in the saved-backup history.
# All outcomes stop the spinner; on success the stats and history list are
# reloaded and a flash names the restored file.
def handle_info({:do_history_restore, filename}, socket) do
  path = Path.join(Backup.backup_dir(), filename)

  case Backup.restore_backup(path) do
    :ok ->
      {:noreply,
       socket
       |> assign(:restoring, false)
       |> assign(:stats, Backup.get_stats())
       |> assign(:backups, Backup.list_backups())
       # Fix: the flash previously rendered a literal "#(unknown)" instead
       # of interpolating the restored backup's filename.
       |> put_flash(:info, "Database restored from #{filename}")}

    {:error, {:schema_mismatch, backup_version, current_version}} ->
      {:noreply,
       socket
       |> assign(:restoring, false)
       |> put_flash(
         :error,
         "Schema version mismatch: backup is #{backup_version}, current is #{current_version}."
       )}

    {:error, reason} ->
      {:noreply,
       socket
       |> assign(:restoring, false)
       |> put_flash(:error, "Restore failed: #{inspect(reason)}")}
  end
end
|
||||
|
||||
@impl true
# Renders the backup admin page: database stats with an optional per-table
# breakdown, the create-backup action, the saved-backup history (with inline
# restore/delete confirmations), and the restore-from-upload flow.
# The "Download" JS hook on the root element handles client-side file
# downloads pushed from the server.
def render(assigns) do
  ~H"""
  <div class="admin-backup" phx-hook="Download" id="backup-page">
    <.header>
      Backup
    </.header>

    <%!-- Database status --%>
    <section class="admin-section">
      <div class="admin-section-header">
        <h2 class="admin-section-title">Database</h2>
        <%= if @stats.encryption_status do %>
          <.status_pill color="green">
            <.icon name="hero-lock-closed-mini" class="size-3" /> Encrypted
          </.status_pill>
        <% else %>
          <.status_pill color="amber">
            <.icon name="hero-lock-open-mini" class="size-3" /> Not encrypted
          </.status_pill>
        <% end %>
      </div>
      <p class="admin-section-desc">
        {Backup.format_size(@stats.total_size)} total ·
        {length(@stats.tables)} tables ·
        {@stats.key_counts["products"] || 0} products ·
        {@stats.key_counts["orders"] || 0} orders ·
        {@stats.key_counts["images"] || 0} images
      </p>
      <div class="admin-section-body">
        <button
          type="button"
          phx-click="toggle_tables"
          class="admin-link"
        >
          <%= if @show_tables do %>
            <.icon name="hero-chevron-up-mini" class="size-4" /> Hide table details
          <% else %>
            <.icon name="hero-chevron-down-mini" class="size-4" /> Show table details
          <% end %>
        </button>
      </div>

      <%!-- Per-table rows/size breakdown, toggled by "toggle_tables" --%>
      <%= if @show_tables do %>
        <div class="backup-tables">
          <table class="admin-table admin-table-compact">
            <thead>
              <tr>
                <th>Table</th>
                <th class="text-right">Rows</th>
                <th class="text-right">Size</th>
              </tr>
            </thead>
            <tbody>
              <tr :for={table <- @stats.tables}>
                <td>{table.name}</td>
                <td class="text-right tabular-nums">{table.rows}</td>
                <td class="text-right tabular-nums">{Backup.format_size(table.size)}</td>
              </tr>
            </tbody>
          </table>
        </div>
      <% end %>
    </section>

    <%!-- Create backup --%>
    <section class="admin-section">
      <div class="admin-section-header">
        <h2 class="admin-section-title">Create backup</h2>
        <.status_pill color="zinc">{length(@backups)} saved</.status_pill>
      </div>
      <p class="admin-section-desc">
        Creates an encrypted snapshot of your database. Backups are stored locally and the last 5 are kept automatically.
      </p>

      <div class="admin-section-body">
        <div class="backup-actions">
          <button
            type="button"
            phx-click="create_backup"
            class="admin-btn admin-btn-primary admin-btn-sm"
          >
            <.icon name="hero-plus-mini" class="size-4" /> Create backup
          </button>
          <.inline_feedback status={@create_backup_status} />
        </div>
      </div>
    </section>

    <%!-- Backup history (hidden when there are no saved backups) --%>
    <%= if @backups != [] do %>
      <section class="admin-section">
        <h2 class="admin-section-title">Saved backups</h2>

        <%= if @restoring do %>
          <div class="backup-progress">
            <.icon name="hero-arrow-path" class="size-5 animate-spin" />
            <div>
              <p class="backup-progress-text">Restoring database...</p>
              <p class="backup-progress-hint">This may take a few seconds.</p>
            </div>
          </div>
        <% else %>
          <div class="backup-list">
            <%= for backup <- @backups do %>
              <div class="backup-item">
                <div class="backup-item-info">
                  <span class="backup-item-date">{format_backup_date(backup.created_at)}</span>
                  <span class="backup-item-meta">
                    {Backup.format_size(backup.size)}
                    <%= if backup.type == :pre_restore do %>
                      · auto-saved before restore
                    <% end %>
                  </span>
                </div>

                <%!-- Per-row actions: at most one row is in restore- or
                     delete-confirmation mode at a time --%>
                <div class="backup-item-actions">
                  <%= if @confirming_history_restore == backup.filename do %>
                    <span class="backup-item-confirm">Replace current database?</span>
                    <button
                      type="button"
                      class="admin-btn admin-btn-danger admin-btn-sm"
                      phx-click="execute_history_restore"
                      phx-value-filename={backup.filename}
                    >
                      Restore
                    </button>
                    <button
                      type="button"
                      class="admin-btn admin-btn-outline admin-btn-sm"
                      phx-click="cancel_history_restore"
                    >
                      Cancel
                    </button>
                  <% else %>
                    <%= if @confirming_delete == backup.filename do %>
                      <span class="backup-item-confirm">Delete this backup?</span>
                      <button
                        type="button"
                        class="admin-btn admin-btn-danger admin-btn-sm"
                        phx-click="execute_delete"
                        phx-value-filename={backup.filename}
                      >
                        Delete
                      </button>
                      <button
                        type="button"
                        class="admin-btn admin-btn-outline admin-btn-sm"
                        phx-click="cancel_delete"
                      >
                        Cancel
                      </button>
                    <% else %>
                      <button
                        type="button"
                        class="admin-btn admin-btn-outline admin-btn-sm"
                        phx-click="download_history_backup"
                        phx-value-filename={backup.filename}
                        title="Download"
                      >
                        <.icon name="hero-arrow-down-tray-mini" class="size-4" />
                      </button>
                      <button
                        type="button"
                        class="admin-btn admin-btn-outline admin-btn-sm"
                        phx-click="confirm_history_restore"
                        phx-value-filename={backup.filename}
                      >
                        Restore
                      </button>
                      <button
                        type="button"
                        class="admin-btn admin-btn-outline admin-btn-sm"
                        phx-click="confirm_delete"
                        phx-value-filename={backup.filename}
                        title="Delete"
                      >
                        <.icon name="hero-trash-mini" class="size-4" />
                      </button>
                    <% end %>
                  <% end %>
                </div>
              </div>
            <% end %>
          </div>
        <% end %>
      </section>
    <% end %>

    <%!-- Restore from file --%>
    <section class="admin-section">
      <h2 class="admin-section-title">Restore from file</h2>
      <p class="admin-section-desc">
        Upload a backup file to restore. Must be encrypted with the same key as this database.
      </p>

      <%= if @upload_error do %>
        <p class="admin-error">{@upload_error}</p>
      <% end %>

      <%!-- Once a file is uploaded and validated, show a current-vs-uploaded
           comparison plus the confirm/restore controls; otherwise show the
           dropzone form. --%>
      <%= if @uploaded_backup do %>
        <div class="backup-comparison">
          <div class="backup-comparison-grid">
            <div class="backup-comparison-col">
              <h4 class="backup-comparison-label">Current</h4>
              <dl class="backup-comparison-stats">
                <div><dt>Size</dt><dd>{Backup.format_size(@stats.total_size)}</dd></div>
                <div><dt>Products</dt><dd>{@stats.key_counts["products"] || 0}</dd></div>
                <div><dt>Orders</dt><dd>{@stats.key_counts["orders"] || 0}</dd></div>
                <div><dt>Images</dt><dd>{@stats.key_counts["images"] || 0}</dd></div>
              </dl>
            </div>
            <div class="backup-comparison-arrow">
              <.icon name="hero-arrow-right" class="size-5" />
            </div>
            <div class="backup-comparison-col">
              <h4 class="backup-comparison-label">Uploaded</h4>
              <dl class="backup-comparison-stats">
                <div><dt>Size</dt><dd>{Backup.format_size(@uploaded_backup.stats.file_size)}</dd></div>
                <div><dt>Products</dt><dd>{@uploaded_backup.stats.key_counts["products"] || 0}</dd></div>
                <div><dt>Orders</dt><dd>{@uploaded_backup.stats.key_counts["orders"] || 0}</dd></div>
                <div><dt>Images</dt><dd>{@uploaded_backup.stats.key_counts["images"] || 0}</dd></div>
              </dl>
            </div>
          </div>

          <%!-- Restore is only offered when schema versions match --%>
          <%= if @uploaded_backup.stats.latest_migration == @stats.schema_version do %>
            <div class="backup-validation backup-validation-ok">
              <.icon name="hero-check-circle-mini" class="size-4" />
              <span>Backup validated · Schema version {@uploaded_backup.stats.latest_migration}</span>
            </div>

            <%= if @restoring do %>
              <div class="backup-progress">
                <.icon name="hero-arrow-path" class="size-5 animate-spin" />
                <div>
                  <p class="backup-progress-text">Restoring database...</p>
                  <p class="backup-progress-hint">This may take a few seconds.</p>
                </div>
              </div>
            <% else %>
              <%= if @confirming_restore do %>
                <div class="backup-warning">
                  <p>This will replace your current database. A backup will be saved automatically.</p>
                  <div class="backup-actions">
                    <button type="button" class="admin-btn admin-btn-danger admin-btn-sm" phx-click="execute_restore">
                      Replace database
                    </button>
                    <button type="button" class="admin-btn admin-btn-outline admin-btn-sm" phx-click="cancel_restore">
                      Cancel
                    </button>
                  </div>
                </div>
              <% else %>
                <div class="backup-actions">
                  <button type="button" class="admin-btn admin-btn-primary admin-btn-sm" phx-click="confirm_restore">
                    Restore this backup
                  </button>
                  <button type="button" class="admin-btn admin-btn-outline admin-btn-sm" phx-click="cancel_restore">
                    Cancel
                  </button>
                </div>
              <% end %>
            <% end %>
          <% else %>
            <div class="backup-validation backup-validation-error">
              <.icon name="hero-x-circle-mini" class="size-4" />
              <span>
                Schema mismatch: backup is v{@uploaded_backup.stats.latest_migration},
                current is v{@stats.schema_version}
              </span>
            </div>
            <div class="backup-actions">
              <button type="button" class="admin-btn admin-btn-outline admin-btn-sm" phx-click="cancel_restore">
                Cancel
              </button>
            </div>
          <% end %>
        </div>
      <% else %>
        <form phx-submit="upload_backup" phx-change="validate_upload">
          <div class="backup-dropzone" phx-drop-target={@uploads.backup.ref}>
            <.live_file_input upload={@uploads.backup} class="sr-only" />
            <div class="backup-dropzone-content">
              <.icon name="hero-arrow-up-tray" class="size-6" />
              <p>
                Drop a backup file here or
                <label for={@uploads.backup.ref} class="backup-dropzone-link">browse</label>
              </p>
            </div>
          </div>

          <%= for entry <- @uploads.backup.entries do %>
            <div class="backup-upload-entry">
              <span>{entry.client_name}</span>
              <span class="tabular-nums">{Backup.format_size(entry.client_size)}</span>
              <progress value={entry.progress} max="100">{entry.progress}%</progress>
            </div>

            <%= for err <- upload_errors(@uploads.backup, entry) do %>
              <p class="admin-error">{upload_error_to_string(err)}</p>
            <% end %>
          <% end %>

          <%= if length(@uploads.backup.entries) > 0 do %>
            <div class="backup-actions">
              <button type="submit" class="admin-btn admin-btn-primary admin-btn-sm">
                <.icon name="hero-arrow-up-tray-mini" class="size-4" /> Upload and validate
              </button>
            </div>
          <% end %>
        </form>
      <% end %>
    </section>
  </div>
  """
end
|
||||
|
||||
# Human-readable timestamp for a backup list entry.
# `nil` (timestamp unavailable) renders as "unknown date".
defp format_backup_date(nil), do: "unknown date"
defp format_backup_date(timestamp), do: Calendar.strftime(timestamp, "%d %b %Y, %H:%M")
|
||||
|
||||
# Maps LiveView upload error atoms to user-facing messages; unknown errors
# fall through to an inspected form.
defp upload_error_to_string(error) do
  case error do
    :too_large -> "File is too large (max 500 MB)"
    :too_many_files -> "Only one file allowed"
    other -> "Upload error: #{inspect(other)}"
  end
end
|
||||
|
||||
attr :color, :string, default: "zinc"
slot :inner_block, required: true

# Small colored badge used in section headers ("Encrypted", "3 saved", ...).
# Supported colors: "green", "amber"; anything else falls back to zinc.
defp status_pill(assigns) do
  assigns = assign(assigns, :modifier, pill_modifier(assigns.color))

  ~H"""
  <span class={["admin-status-pill", @modifier]}>
    {render_slot(@inner_block)}
  </span>
  """
end

# Color name -> CSS modifier class for the pill.
defp pill_modifier("green"), do: "admin-status-pill-green"
defp pill_modifier("amber"), do: "admin-status-pill-amber"
defp pill_modifier(_other), do: "admin-status-pill-zinc"
|
||||
end
|
||||
@ -170,6 +170,7 @@ defmodule BerrypodWeb.Router do
|
||||
live "/providers/:id/edit", Admin.Providers.Form, :edit
|
||||
live "/settings", Admin.Settings, :index
|
||||
live "/settings/email", Admin.EmailSettings, :index
|
||||
live "/backup", Admin.Backup, :index
|
||||
live "/account", Admin.Account, :index
|
||||
live "/pages", Admin.Pages.Index, :index
|
||||
live "/pages/new", Admin.Pages.CustomForm, :new
|
||||
|
||||
Loading…
Reference in New Issue
Block a user