1use crate::{
2 jwt::BasePayload,
3 models::{InsertQueryBuilder, UpdateQueryBuilder},
4 prelude::*,
5 storage::StorageUrlRetriever,
6};
7use compact_str::ToCompactString;
8use garde::Validate;
9use reqwest::StatusCode;
10use serde::{Deserialize, Serialize};
11use sqlx::{Row, postgres::PgRow, prelude::Type};
12use std::{
13 collections::BTreeMap,
14 sync::{Arc, LazyLock},
15};
16use utoipa::ToSchema;
17
/// Storage backend used to persist a server backup.
///
/// Serialized as kebab-case in the API (e.g. `ddup-bak`) and stored in
/// Postgres as the `backup_disk` enum type with SCREAMING_SNAKE_CASE
/// variant names (e.g. `DDUP_BAK`).
#[derive(Debug, ToSchema, Serialize, Deserialize, Type, PartialEq, Eq, Hash, Clone, Copy)]
#[serde(rename_all = "kebab-case")]
#[sqlx(type_name = "backup_disk", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum BackupDisk {
    Local,
    S3,
    DdupBak,
    Btrfs,
    Zfs,
    Restic,
}
29
30impl BackupDisk {
31 #[inline]
32 pub fn to_wings_adapter(self) -> wings_api::BackupAdapter {
33 match self {
34 BackupDisk::Local => wings_api::BackupAdapter::Wings,
35 BackupDisk::S3 => wings_api::BackupAdapter::S3,
36 BackupDisk::DdupBak => wings_api::BackupAdapter::DdupBak,
37 BackupDisk::Btrfs => wings_api::BackupAdapter::Btrfs,
38 BackupDisk::Zfs => wings_api::BackupAdapter::Zfs,
39 BackupDisk::Restic => wings_api::BackupAdapter::Restic,
40 }
41 }
42}
43
/// Database model for one backup of a server (table `server_backups`).
#[derive(Serialize, Deserialize, Clone)]
pub struct ServerBackup {
    pub uuid: uuid::Uuid,
    /// Owning server; `None` for detached backups (rows whose
    /// `server_uuid` is NULL — presumably the server was removed; see
    /// `by_detached_node_uuid_with_pagination`).
    pub server: Option<Fetchable<super::server::Server>>,
    /// Node the backup lives on.
    pub node: Fetchable<super::node::Node>,
    /// Backup configuration this backup was created under, if linked.
    pub backup_configuration: Option<Fetchable<super::backup_configuration::BackupConfiguration>>,

    pub name: compact_str::CompactString,
    pub successful: bool,
    pub browsable: bool,
    pub streaming: bool,
    /// Locked backups are excluded from automatic oldest-first deletion.
    pub locked: bool,
    pub shared: bool,

    /// Patterns excluded from the backup (sent to Wings joined by newlines).
    pub ignored_files: Vec<compact_str::CompactString>,
    pub checksum: Option<compact_str::CompactString>,
    pub bytes: i64,
    pub files: i64,

    /// Storage backend holding the archive.
    pub disk: BackupDisk,
    /// S3 multipart upload bookkeeping — TODO confirm exact semantics.
    pub upload_id: Option<compact_str::CompactString>,
    /// Object path of the uploaded archive (used for S3 presigned URLs).
    pub upload_path: Option<compact_str::CompactString>,

    /// Set (with `successful`) once the node finishes or fails the backup.
    pub completed: Option<chrono::NaiveDateTime>,
    /// Soft-delete timestamp; queries filter on `deleted IS NULL`.
    pub deleted: Option<chrono::NaiveDateTime>,
    pub created: chrono::NaiveDateTime,

    extension_data: super::ModelExtensionData,
}
73
impl BaseModel for ServerBackup {
    const NAME: &'static str = "server_backup";

    /// Returns the lazily-initialized, process-wide list of registered
    /// model extensions for this model.
    fn get_extension_list() -> &'static super::ModelExtensionList {
        static EXTENSIONS: LazyLock<super::ModelExtensionList> =
            LazyLock::new(|| std::sync::RwLock::new(Vec::new()));

        &EXTENSIONS
    }

    fn get_extension_data(&self) -> &super::ModelExtensionData {
        &self.extension_data
    }

    /// Maps fully-qualified column names to (optionally prefixed) result
    /// aliases, used to build `SELECT`/`RETURNING` lists via `columns_sql`.
    /// The alias prefix lets this model be embedded in joined queries
    /// without column-name collisions.
    #[inline]
    fn base_columns(prefix: Option<&str>) -> BTreeMap<&'static str, compact_str::CompactString> {
        let prefix = prefix.unwrap_or_default();

        BTreeMap::from([
            (
                "server_backups.uuid",
                compact_str::format_compact!("{prefix}uuid"),
            ),
            (
                "server_backups.server_uuid",
                compact_str::format_compact!("{prefix}server_uuid"),
            ),
            (
                "server_backups.node_uuid",
                compact_str::format_compact!("{prefix}node_uuid"),
            ),
            (
                "server_backups.backup_configuration_uuid",
                compact_str::format_compact!("{prefix}backup_configuration_uuid"),
            ),
            (
                "server_backups.name",
                compact_str::format_compact!("{prefix}name"),
            ),
            (
                "server_backups.successful",
                compact_str::format_compact!("{prefix}successful"),
            ),
            (
                "server_backups.browsable",
                compact_str::format_compact!("{prefix}browsable"),
            ),
            (
                "server_backups.streaming",
                compact_str::format_compact!("{prefix}streaming"),
            ),
            (
                "server_backups.locked",
                compact_str::format_compact!("{prefix}locked"),
            ),
            (
                "server_backups.shared",
                compact_str::format_compact!("{prefix}shared"),
            ),
            (
                "server_backups.ignored_files",
                compact_str::format_compact!("{prefix}ignored_files"),
            ),
            (
                "server_backups.checksum",
                compact_str::format_compact!("{prefix}checksum"),
            ),
            (
                "server_backups.bytes",
                compact_str::format_compact!("{prefix}bytes"),
            ),
            (
                "server_backups.files",
                compact_str::format_compact!("{prefix}files"),
            ),
            (
                "server_backups.disk",
                compact_str::format_compact!("{prefix}disk"),
            ),
            (
                "server_backups.upload_id",
                compact_str::format_compact!("{prefix}upload_id"),
            ),
            (
                "server_backups.upload_path",
                compact_str::format_compact!("{prefix}upload_path"),
            ),
            (
                "server_backups.completed",
                compact_str::format_compact!("{prefix}completed"),
            ),
            (
                "server_backups.deleted",
                compact_str::format_compact!("{prefix}deleted"),
            ),
            (
                "server_backups.created",
                compact_str::format_compact!("{prefix}created"),
            ),
        ])
    }

    /// Hydrates a `ServerBackup` from a row whose columns were selected
    /// with the same `prefix` passed to `base_columns`. Related models
    /// (`server`, `node`, `backup_configuration`) are wrapped as lazy
    /// `Fetchable`s rather than loaded eagerly.
    #[inline]
    fn map(prefix: Option<&str>, row: &PgRow) -> Result<Self, crate::database::DatabaseError> {
        let prefix = prefix.unwrap_or_default();

        Ok(Self {
            uuid: row.try_get(compact_str::format_compact!("{prefix}uuid").as_str())?,
            server: super::server::Server::get_fetchable_from_row(
                row,
                compact_str::format_compact!("{prefix}server_uuid"),
            ),
            backup_configuration:
                super::backup_configuration::BackupConfiguration::get_fetchable_from_row(
                    row,
                    compact_str::format_compact!("{prefix}backup_configuration_uuid"),
                ),
            // node_uuid is NOT NULL in practice here (try_get, not
            // get_fetchable_from_row), unlike server/backup_configuration.
            node: super::node::Node::get_fetchable(
                row.try_get(compact_str::format_compact!("{prefix}node_uuid").as_str())?,
            ),
            name: row.try_get(compact_str::format_compact!("{prefix}name").as_str())?,
            successful: row.try_get(compact_str::format_compact!("{prefix}successful").as_str())?,
            browsable: row.try_get(compact_str::format_compact!("{prefix}browsable").as_str())?,
            streaming: row.try_get(compact_str::format_compact!("{prefix}streaming").as_str())?,
            locked: row.try_get(compact_str::format_compact!("{prefix}locked").as_str())?,
            shared: row.try_get(compact_str::format_compact!("{prefix}shared").as_str())?,
            ignored_files: row
                .try_get(compact_str::format_compact!("{prefix}ignored_files").as_str())?,
            checksum: row.try_get(compact_str::format_compact!("{prefix}checksum").as_str())?,
            bytes: row.try_get(compact_str::format_compact!("{prefix}bytes").as_str())?,
            files: row.try_get(compact_str::format_compact!("{prefix}files").as_str())?,
            disk: row.try_get(compact_str::format_compact!("{prefix}disk").as_str())?,
            upload_id: row.try_get(compact_str::format_compact!("{prefix}upload_id").as_str())?,
            upload_path: row
                .try_get(compact_str::format_compact!("{prefix}upload_path").as_str())?,
            completed: row.try_get(compact_str::format_compact!("{prefix}completed").as_str())?,
            deleted: row.try_get(compact_str::format_compact!("{prefix}deleted").as_str())?,
            created: row.try_get(compact_str::format_compact!("{prefix}created").as_str())?,
            extension_data: Self::map_extensions(prefix, row)?,
        })
    }
}
216
217impl ServerBackup {
    /// Inserts a new backup row directly, bypassing the `CreatableModel`
    /// create handlers and the background Wings call that `create` does.
    ///
    /// The disk and shared flags are taken from the server's resolved
    /// backup configuration; `bytes` starts at 0.
    ///
    /// # Errors
    /// Returns a `417 Expectation Failed` display error when no backup
    /// configuration is available or it is in maintenance mode; database
    /// errors are propagated.
    pub async fn create_raw(
        state: &crate::State,
        options: CreateServerBackupOptions<'_>,
    ) -> Result<Self, anyhow::Error> {
        let backup_configuration = options
            .server
            .backup_configuration(&state.database)
            .await
            .ok_or_else(|| {
                crate::response::DisplayError::new(
                    "no backup configuration available, unable to create backup",
                )
                .with_status(StatusCode::EXPECTATION_FAILED)
            })?;

        if backup_configuration.maintenance_enabled {
            return Err(crate::response::DisplayError::new(
                "cannot create backup while backup configuration is in maintenance mode",
            )
            .with_status(StatusCode::EXPECTATION_FAILED)
            .into());
        }

        let row = sqlx::query(&format!(
            r#"
            INSERT INTO server_backups (server_uuid, node_uuid, backup_configuration_uuid, name, ignored_files, bytes, disk, shared)
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
            RETURNING {}
            "#,
            Self::columns_sql(None)
        ))
        .bind(options.server.uuid)
        .bind(options.server.node.uuid)
        .bind(backup_configuration.uuid)
        .bind(options.name)
        .bind(&options.ignored_files)
        .bind(0i64)
        .bind(backup_configuration.backup_disk)
        .bind(backup_configuration.shared)
        .fetch_one(state.database.write())
        .await?;

        Ok(Self::map(None, &row)?)
    }
262
    /// Fetches a backup by its uuid, scoped to the given server.
    /// Returns `Ok(None)` when no matching row exists. Note: soft-deleted
    /// rows are NOT filtered out here.
    pub async fn by_server_uuid_uuid(
        database: &crate::database::Database,
        server_uuid: uuid::Uuid,
        uuid: uuid::Uuid,
    ) -> Result<Option<Self>, crate::database::DatabaseError> {
        let row = sqlx::query(&format!(
            r#"
            SELECT {}
            FROM server_backups
            WHERE server_backups.server_uuid = $1 AND server_backups.uuid = $2
            "#,
            Self::columns_sql(None)
        ))
        .bind(server_uuid)
        .bind(uuid)
        .fetch_optional(database.read())
        .await?;

        row.try_map(|row| Self::map(None, &row))
    }
283
    /// Fetches a backup by its uuid, scoped to the given node.
    /// Returns `Ok(None)` when no matching row exists. Note: soft-deleted
    /// rows are NOT filtered out here.
    pub async fn by_node_uuid_uuid(
        database: &crate::database::Database,
        node_uuid: uuid::Uuid,
        uuid: uuid::Uuid,
    ) -> Result<Option<Self>, crate::database::DatabaseError> {
        let row = sqlx::query(&format!(
            r#"
            SELECT {}
            FROM server_backups
            WHERE server_backups.node_uuid = $1 AND server_backups.uuid = $2
            "#,
            Self::columns_sql(None)
        ))
        .bind(node_uuid)
        .bind(uuid)
        .fetch_optional(database.read())
        .await?;

        row.try_map(|row| Self::map(None, &row))
    }
304
    /// Lists non-deleted backups of a server, oldest first (`created` asc),
    /// with 1-based page numbering and optional case-insensitive name
    /// search. The total count is computed via a `COUNT(*) OVER()` window,
    /// so it is 0 when the requested page is empty.
    pub async fn by_server_uuid_with_pagination(
        database: &crate::database::Database,
        server_uuid: uuid::Uuid,
        page: i64,
        per_page: i64,
        search: Option<&str>,
    ) -> Result<super::Pagination<Self>, crate::database::DatabaseError> {
        let offset = (page - 1) * per_page;

        let rows = sqlx::query(&format!(
            r#"
            SELECT {}, COUNT(*) OVER() AS total_count
            FROM server_backups
            WHERE
                server_backups.server_uuid = $1
                AND server_backups.deleted IS NULL
                AND ($2 IS NULL OR server_backups.name ILIKE '%' || $2 || '%')
            ORDER BY server_backups.created
            LIMIT $3 OFFSET $4
            "#,
            Self::columns_sql(None)
        ))
        .bind(server_uuid)
        .bind(search)
        .bind(per_page)
        .bind(offset)
        .fetch_all(database.read())
        .await?;

        Ok(super::Pagination {
            total: rows
                .first()
                .map_or(Ok(0), |row| row.try_get("total_count"))?,
            per_page,
            page,
            data: rows
                .into_iter()
                .map(|row| Self::map(None, &row))
                .try_collect_vec()?,
        })
    }
346
    /// Same as `by_server_uuid_with_pagination`, additionally restricted to
    /// backups residing on the given node.
    pub async fn by_server_uuid_node_uuid_with_pagination(
        database: &crate::database::Database,
        server_uuid: uuid::Uuid,
        node_uuid: uuid::Uuid,
        page: i64,
        per_page: i64,
        search: Option<&str>,
    ) -> Result<super::Pagination<Self>, crate::database::DatabaseError> {
        let offset = (page - 1) * per_page;

        let rows = sqlx::query(&format!(
            r#"
            SELECT {}, COUNT(*) OVER() AS total_count
            FROM server_backups
            WHERE
                server_backups.server_uuid = $1
                AND server_backups.node_uuid = $2
                AND server_backups.deleted IS NULL
                AND ($3 IS NULL OR server_backups.name ILIKE '%' || $3 || '%')
            ORDER BY server_backups.created
            LIMIT $4 OFFSET $5
            "#,
            Self::columns_sql(None)
        ))
        .bind(server_uuid)
        .bind(node_uuid)
        .bind(search)
        .bind(per_page)
        .bind(offset)
        .fetch_all(database.read())
        .await?;

        Ok(super::Pagination {
            total: rows
                .first()
                .map_or(Ok(0), |row| row.try_get("total_count"))?,
            per_page,
            page,
            data: rows
                .into_iter()
                .map(|row| Self::map(None, &row))
                .try_collect_vec()?,
        })
    }
391
    /// Lists a server's non-deleted backups that live on a DIFFERENT node
    /// than the given one (`node_uuid != $2`), e.g. backups left behind on
    /// an old node. Pagination/search semantics match
    /// `by_server_uuid_with_pagination`.
    pub async fn by_partially_detached_server_uuid_node_uuid_with_pagination(
        database: &crate::database::Database,
        server_uuid: uuid::Uuid,
        node_uuid: uuid::Uuid,
        page: i64,
        per_page: i64,
        search: Option<&str>,
    ) -> Result<super::Pagination<Self>, crate::database::DatabaseError> {
        let offset = (page - 1) * per_page;

        let rows = sqlx::query(&format!(
            r#"
            SELECT {}, COUNT(*) OVER() AS total_count
            FROM server_backups
            WHERE
                server_backups.server_uuid = $1
                AND server_backups.node_uuid != $2
                AND server_backups.deleted IS NULL
                AND ($3 IS NULL OR server_backups.name ILIKE '%' || $3 || '%')
            ORDER BY server_backups.created
            LIMIT $4 OFFSET $5
            "#,
            Self::columns_sql(None)
        ))
        .bind(server_uuid)
        .bind(node_uuid)
        .bind(search)
        .bind(per_page)
        .bind(offset)
        .fetch_all(database.read())
        .await?;

        Ok(super::Pagination {
            total: rows
                .first()
                .map_or(Ok(0), |row| row.try_get("total_count"))?,
            per_page,
            page,
            data: rows
                .into_iter()
                .map(|row| Self::map(None, &row))
                .try_collect_vec()?,
        })
    }
436
    /// Lists all non-deleted backups on a node, oldest first, with 1-based
    /// pagination and optional case-insensitive name search.
    pub async fn by_node_uuid_with_pagination(
        database: &crate::database::Database,
        node_uuid: uuid::Uuid,
        page: i64,
        per_page: i64,
        search: Option<&str>,
    ) -> Result<super::Pagination<Self>, crate::database::DatabaseError> {
        let offset = (page - 1) * per_page;

        let rows = sqlx::query(&format!(
            r#"
            SELECT {}, COUNT(*) OVER() AS total_count
            FROM server_backups
            WHERE
                server_backups.node_uuid = $1
                AND server_backups.deleted IS NULL
                AND ($2 IS NULL OR server_backups.name ILIKE '%' || $2 || '%')
            ORDER BY server_backups.created
            LIMIT $3 OFFSET $4
            "#,
            Self::columns_sql(None)
        ))
        .bind(node_uuid)
        .bind(search)
        .bind(per_page)
        .bind(offset)
        .fetch_all(database.read())
        .await?;

        Ok(super::Pagination {
            total: rows
                .first()
                .map_or(Ok(0), |row| row.try_get("total_count"))?,
            per_page,
            page,
            data: rows
                .into_iter()
                .map(|row| Self::map(None, &row))
                .try_collect_vec()?,
        })
    }
478
    /// Lists all non-deleted backups created under a backup configuration,
    /// oldest first, with 1-based pagination and optional name search.
    pub async fn by_backup_configuration_uuid_with_pagination(
        database: &crate::database::Database,
        backup_configuration_uuid: uuid::Uuid,
        page: i64,
        per_page: i64,
        search: Option<&str>,
    ) -> Result<super::Pagination<Self>, crate::database::DatabaseError> {
        let offset = (page - 1) * per_page;

        let rows = sqlx::query(&format!(
            r#"
            SELECT {}, COUNT(*) OVER() AS total_count
            FROM server_backups
            WHERE
                server_backups.backup_configuration_uuid = $1
                AND server_backups.deleted IS NULL
                AND ($2 IS NULL OR server_backups.name ILIKE '%' || $2 || '%')
            ORDER BY server_backups.created
            LIMIT $3 OFFSET $4
            "#,
            Self::columns_sql(None)
        ))
        .bind(backup_configuration_uuid)
        .bind(search)
        .bind(per_page)
        .bind(offset)
        .fetch_all(database.read())
        .await?;

        Ok(super::Pagination {
            total: rows
                .first()
                .map_or(Ok(0), |row| row.try_get("total_count"))?,
            per_page,
            page,
            data: rows
                .into_iter()
                .map(|row| Self::map(None, &row))
                .try_collect_vec()?,
        })
    }
520
    /// Lists a node's non-deleted backups that no longer belong to any
    /// server (`server_uuid IS NULL`), oldest first, with 1-based
    /// pagination and optional name search.
    pub async fn by_detached_node_uuid_with_pagination(
        database: &crate::database::Database,
        node_uuid: uuid::Uuid,
        page: i64,
        per_page: i64,
        search: Option<&str>,
    ) -> Result<super::Pagination<Self>, crate::database::DatabaseError> {
        let offset = (page - 1) * per_page;

        let rows = sqlx::query(&format!(
            r#"
            SELECT {}, COUNT(*) OVER() AS total_count
            FROM server_backups
            WHERE
                server_backups.node_uuid = $1
                AND server_backups.server_uuid IS NULL
                AND server_backups.deleted IS NULL
                AND ($2 IS NULL OR server_backups.name ILIKE '%' || $2 || '%')
            ORDER BY server_backups.created
            LIMIT $3 OFFSET $4
            "#,
            Self::columns_sql(None)
        ))
        .bind(node_uuid)
        .bind(search)
        .bind(per_page)
        .bind(offset)
        .fetch_all(database.read())
        .await?;

        Ok(super::Pagination {
            total: rows
                .first()
                .map_or(Ok(0), |row| row.try_get("total_count"))?,
            per_page,
            page,
            data: rows
                .into_iter()
                .map(|row| Self::map(None, &row))
                .try_collect_vec()?,
        })
    }
563
    /// Returns the uuids of all non-deleted backups of a server.
    pub async fn all_uuids_by_server_uuid(
        database: &crate::database::Database,
        server_uuid: uuid::Uuid,
    ) -> Result<Vec<uuid::Uuid>, crate::database::DatabaseError> {
        let rows = sqlx::query(
            r#"
            SELECT server_backups.uuid
            FROM server_backups
            WHERE server_backups.server_uuid = $1 AND server_backups.deleted IS NULL
            "#,
        )
        .bind(server_uuid)
        .fetch_all(database.read())
        .await?;

        Ok(rows
            .into_iter()
            .map(|row| row.get::<uuid::Uuid, _>("uuid"))
            .collect())
    }
584
    /// Returns the uuids of all non-deleted, non-shared backups of a
    /// server (shared backups are excluded, e.g. to be kept when the
    /// server itself goes away — TODO confirm against callers).
    pub async fn all_uuids_by_server_uuid_not_shared(
        database: &crate::database::Database,
        server_uuid: uuid::Uuid,
    ) -> Result<Vec<uuid::Uuid>, crate::database::DatabaseError> {
        let rows = sqlx::query(
            r#"
            SELECT server_backups.uuid
            FROM server_backups
            WHERE server_backups.server_uuid = $1 AND server_backups.deleted IS NULL AND server_backups.shared = false
            "#,
        )
        .bind(server_uuid)
        .fetch_all(database.read())
        .await?;

        Ok(rows
            .into_iter()
            .map(|row| row.get::<uuid::Uuid, _>("uuid"))
            .collect())
    }
605
    /// Returns all non-deleted backups of a server as full models
    /// (unordered, unpaginated).
    pub async fn all_by_server_uuid(
        database: &crate::database::Database,
        server_uuid: uuid::Uuid,
    ) -> Result<Vec<Self>, crate::database::DatabaseError> {
        let rows = sqlx::query(&format!(
            r#"
            SELECT {}
            FROM server_backups
            WHERE server_backups.server_uuid = $1 AND server_backups.deleted IS NULL
            "#,
            Self::columns_sql(None)
        ))
        .bind(server_uuid)
        .fetch_all(database.read())
        .await?;

        rows.into_iter()
            .map(|row| Self::map(None, &row))
            .try_collect_vec()
    }
626
    /// Counts a server's non-deleted backups.
    pub async fn count_by_server_uuid(
        database: &crate::database::Database,
        server_uuid: uuid::Uuid,
    ) -> Result<i64, sqlx::Error> {
        sqlx::query_scalar(
            r#"
            SELECT COUNT(*)
            FROM server_backups
            WHERE server_backups.server_uuid = $1 AND server_backups.deleted IS NULL
            "#,
        )
        .bind(server_uuid)
        .fetch_one(database.read())
        .await
    }
642
643 pub async fn download_url(
644 &self,
645 state: &crate::State,
646 user: &super::user::User,
647 node: &super::node::Node,
648 archive_format: wings_api::StreamableArchiveFormat,
649 ) -> Result<String, anyhow::Error> {
650 let backup_configuration = self
651 .backup_configuration
652 .as_ref()
653 .ok_or_else(|| {
654 crate::response::DisplayError::new(
655 "no backup configuration available, unable to restore backup",
656 )
657 .with_status(StatusCode::EXPECTATION_FAILED)
658 })?
659 .fetch_cached(&state.database)
660 .await?;
661
662 if backup_configuration.maintenance_enabled {
663 return Err(crate::response::DisplayError::new(
664 "cannot restore backup while backup configuration is in maintenance mode",
665 )
666 .with_status(StatusCode::EXPECTATION_FAILED)
667 .into());
668 }
669
670 if matches!(self.disk, BackupDisk::S3)
671 && let Some(mut s3_configuration) = backup_configuration.backup_configs.s3
672 {
673 s3_configuration.decrypt(&state.database).await?;
674
675 let client = match s3_configuration.into_client() {
676 Ok(client) => client,
677 Err(err) => {
678 return Err(anyhow::Error::from(err).context("failed to create s3 client"));
679 }
680 };
681 let file_path = match &self.upload_path {
682 Some(path) => path,
683 None => {
684 return Err(crate::response::DisplayError::new(
685 "backup does not have an upload path",
686 )
687 .with_status(StatusCode::EXPECTATION_FAILED)
688 .into());
689 }
690 };
691
692 let url = client.presign_get(file_path, 15 * 60, None).await?;
693
694 return Ok(url);
695 }
696
697 #[derive(Serialize)]
698 struct BackupDownloadJwt {
699 #[serde(flatten)]
700 base: BasePayload,
701
702 backup_uuid: uuid::Uuid,
703 unique_id: uuid::Uuid,
704 }
705
706 let token = node.create_jwt(
707 &state.database,
708 &state.jwt,
709 &BackupDownloadJwt {
710 base: BasePayload {
711 issuer: "panel".into(),
712 subject: None,
713 audience: Vec::new(),
714 expiration_time: Some(chrono::Utc::now().timestamp() + 900),
715 not_before: None,
716 issued_at: Some(chrono::Utc::now().timestamp()),
717 jwt_id: user.uuid.to_string(),
718 },
719 backup_uuid: self.uuid,
720 unique_id: uuid::Uuid::new_v4(),
721 },
722 )?;
723
724 let mut url = node.public_url(state, "/download/backup").await?;
725 url.set_query(Some(&format!(
726 "token={}&archive_format={}",
727 urlencoding::encode(&token),
728 archive_format
729 )));
730
731 Ok(url.to_string())
732 }
733
    /// Asks the backup's Wings node to restore this backup onto `server`,
    /// optionally truncating the server directory first.
    ///
    /// For S3-backed backups a presigned download URL (valid 60 minutes)
    /// is generated and handed to Wings, falling back to the conventional
    /// `s3_path` when no `upload_path` was recorded; other disks let Wings
    /// resolve the archive itself (`download_url: None`).
    ///
    /// # Errors
    /// Returns a `417 Expectation Failed` display error when no backup
    /// configuration is linked or it is in maintenance mode; node API and
    /// S3 errors are propagated.
    pub async fn restore(
        self,
        database: &crate::database::Database,
        server: super::server::Server,
        truncate_directory: bool,
    ) -> Result<(), anyhow::Error> {
        let backup_configuration = self
            .backup_configuration
            .ok_or_else(|| {
                crate::response::DisplayError::new(
                    "no backup configuration available, unable to restore backup",
                )
                .with_status(StatusCode::EXPECTATION_FAILED)
            })?
            .fetch_cached(database)
            .await?;

        if backup_configuration.maintenance_enabled {
            return Err(crate::response::DisplayError::new(
                "cannot restore backup while backup configuration is in maintenance mode",
            )
            .with_status(StatusCode::EXPECTATION_FAILED)
            .into());
        }

        server
            .node
            .fetch_cached(database)
            .await?
            .api_client(database)
            .await?
            .post_servers_server_backup_backup_restore(
                server.uuid,
                self.uuid,
                &wings_api::servers_server_backup_backup_restore::post::RequestBody {
                    adapter: self.disk.to_wings_adapter(),
                    download_url: match self.disk {
                        BackupDisk::S3 => {
                            if let Some(mut s3_configuration) =
                                backup_configuration.backup_configs.s3
                            {
                                s3_configuration.decrypt(database).await?;

                                let client = s3_configuration.into_client()?;
                                let file_path = match &self.upload_path {
                                    Some(path) => path.as_str(),
                                    // Fall back to the conventional object key.
                                    None => &Self::s3_path(server.uuid, self.uuid),
                                };

                                Some(client.presign_get(file_path, 60 * 60, None).await?.into())
                            } else {
                                None
                            }
                        }
                        _ => None,
                    },
                    truncate_directory,
                },
            )
            .await?;

        Ok(())
    }
797
    /// Deletes the oldest completed, unlocked, non-deleted backup of a
    /// server (used to make room when a backup limit is hit — TODO confirm
    /// against callers).
    ///
    /// # Errors
    /// Returns `sqlx::Error::RowNotFound` when no deletable backup exists;
    /// otherwise propagates the delete result.
    pub async fn delete_oldest_by_server_uuid(
        state: &crate::State,
        server: &super::server::Server,
    ) -> Result<(), anyhow::Error> {
        let row = sqlx::query(&format!(
            r#"
            SELECT {}
            FROM server_backups
            WHERE server_backups.server_uuid = $1
                AND server_backups.locked = false
                AND server_backups.completed IS NOT NULL
                AND server_backups.deleted IS NULL
            ORDER BY server_backups.created ASC
            LIMIT 1
            "#,
            Self::columns_sql(None)
        ))
        .bind(server.uuid)
        .fetch_optional(state.database.read())
        .await?;

        if let Some(row) = row {
            let backup = Self::map(None, &row)?;

            backup.delete(state, Default::default()).await
        } else {
            Err(sqlx::Error::RowNotFound.into())
        }
    }
827
828 #[inline]
829 pub fn default_name() -> compact_str::CompactString {
830 let now = chrono::Local::now();
831
832 now.format("%Y-%m-%d %H:%M:%S %z").to_compact_string()
833 }
834
835 #[inline]
836 pub fn s3_path(server_uuid: uuid::Uuid, backup_uuid: uuid::Uuid) -> compact_str::CompactString {
837 compact_str::format_compact!("{server_uuid}/{backup_uuid}.tar.gz")
838 }
839
840 #[inline]
841 pub fn s3_content_type(name: &str) -> &'static str {
842 if name.ends_with(".tar.gz") {
843 "application/x-gzip"
844 } else {
845 "application/octet-stream"
846 }
847 }
848
    /// Converts this backup into the admin API shape used in node-scoped
    /// listings, eagerly resolving the related server (if any) and node
    /// into their own admin API objects. Timestamps are reinterpreted as
    /// UTC via `and_utc`.
    pub async fn into_admin_node_api_object(
        self,
        state: &crate::State,
        storage_url_retriever: &StorageUrlRetriever<'_>,
    ) -> Result<AdminApiNodeServerBackup, crate::database::DatabaseError> {
        Ok(AdminApiNodeServerBackup {
            uuid: self.uuid,
            server: match self.server {
                Some(server) => Some(
                    server
                        .fetch_cached(&state.database)
                        .await?
                        .into_admin_api_object(state, storage_url_retriever)
                        .await?,
                ),
                None => None,
            },
            node: self
                .node
                .fetch_cached(&state.database)
                .await?
                .into_admin_api_object(state, ())
                .await?,
            name: self.name,
            ignored_files: self.ignored_files,
            is_successful: self.successful,
            is_locked: self.locked,
            is_browsable: self.browsable,
            is_streaming: self.streaming,
            is_shared: self.shared,
            checksum: self.checksum,
            bytes: self.bytes,
            files: self.files,
            completed: self.completed.map(|dt| dt.and_utc()),
            created: self.created.and_utc(),
        })
    }
886}
887
#[async_trait::async_trait]
impl IntoAdminApiObject for ServerBackup {
    type AdminApiObject = AdminApiServerBackup;
    type ExtraArgs<'a> = &'a crate::storage::StorageUrlRetriever<'a>;

    /// Converts this backup into its admin API representation, resolving
    /// the related server (if any); extension fields are filled in by the
    /// `init_hooks`/`finish_extendible!` machinery.
    async fn into_admin_api_object<'a>(
        self,
        state: &crate::State,
        storage_url_retriever: Self::ExtraArgs<'a>,
    ) -> Result<Self::AdminApiObject, crate::database::DatabaseError> {
        let api_object = AdminApiServerBackup::init_hooks(&self, state).await?;

        let api_object = finish_extendible!(
            AdminApiServerBackup {
                uuid: self.uuid,
                server: match self.server {
                    Some(server) => Some(
                        server
                            .fetch_cached(&state.database)
                            .await?
                            .into_admin_api_object(state, storage_url_retriever)
                            .await?,
                    ),
                    None => None,
                },
                name: self.name,
                ignored_files: self.ignored_files,
                is_successful: self.successful,
                is_locked: self.locked,
                is_browsable: self.browsable,
                is_streaming: self.streaming,
                is_shared: self.shared,
                checksum: self.checksum,
                bytes: self.bytes,
                files: self.files,
                completed: self.completed.map(|dt| dt.and_utc()),
                created: self.created.and_utc(),
            },
            api_object,
            state
        )?;

        Ok(api_object)
    }
}
933
#[async_trait::async_trait]
impl IntoApiObject for ServerBackup {
    type ApiObject = ApiServerBackup;
    type ExtraArgs<'a> = ();

    /// Converts this backup into the client-facing API shape. Unlike the
    /// admin variant, no related models are embedded and the `shared` flag
    /// is not exposed.
    async fn into_api_object<'a>(
        self,
        state: &crate::State,
        _args: Self::ExtraArgs<'a>,
    ) -> Result<Self::ApiObject, crate::database::DatabaseError> {
        let api_object = ApiServerBackup::init_hooks(&self, state).await?;

        let api_object = finish_extendible!(
            ApiServerBackup {
                uuid: self.uuid,
                name: self.name,
                ignored_files: self.ignored_files,
                is_successful: self.successful,
                is_locked: self.locked,
                is_browsable: self.browsable,
                is_streaming: self.streaming,
                checksum: self.checksum,
                bytes: self.bytes,
                files: self.files,
                completed: self.completed.map(|dt| dt.and_utc()),
                created: self.created.and_utc(),
            },
            api_object,
            state
        )?;

        Ok(api_object)
    }
}
968
/// Options for creating a server backup; validated with `garde` before use.
#[derive(Validate)]
pub struct CreateServerBackupOptions<'a> {
    #[garde(skip)]
    pub server: &'a super::server::Server,
    /// Display name, 1–255 characters.
    #[garde(length(chars, min = 1, max = 255))]
    pub name: compact_str::CompactString,
    /// Patterns to exclude from the backup.
    #[garde(skip)]
    pub ignored_files: Vec<compact_str::CompactString>,
}
978
979#[async_trait::async_trait]
980impl CreatableModel for ServerBackup {
981 type CreateOptions<'a> = CreateServerBackupOptions<'a>;
982 type CreateResult = Self;
983
    /// Returns the process-wide list of create listeners for this model.
    fn get_create_handlers() -> &'static LazyLock<CreateListenerList<Self>> {
        static CREATE_LISTENERS: LazyLock<CreateListenerList<ServerBackup>> =
            LazyLock::new(|| Arc::new(ModelHandlerList::default()));

        &CREATE_LISTENERS
    }
990
    /// Unsupported for backups: creation kicks off a background node call
    /// after its own transaction commits (see `create`), so it cannot run
    /// inside a caller-provided transaction. Always returns an error.
    async fn create_with_transaction(
        _state: &crate::State,
        _options: Self::CreateOptions<'_>,
        _transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    ) -> Result<Self, crate::database::DatabaseError> {
        Err(anyhow::anyhow!("create_with_transaction is not supported for ServerBackup").into())
    }
998
999 async fn create(
1000 state: &crate::State,
1001 mut options: Self::CreateOptions<'_>,
1002 ) -> Result<Self, crate::database::DatabaseError> {
1003 options.validate()?;
1004
1005 let backup_configuration = options
1006 .server
1007 .backup_configuration(&state.database)
1008 .await
1009 .ok_or_else(|| {
1010 anyhow::Error::new(
1011 crate::response::DisplayError::new(
1012 "no backup configuration available, unable to create backup",
1013 )
1014 .with_status(StatusCode::EXPECTATION_FAILED),
1015 )
1016 })?;
1017
1018 if backup_configuration.maintenance_enabled {
1019 return Err(anyhow::Error::new(
1020 crate::response::DisplayError::new(
1021 "cannot create backup while backup configuration is in maintenance mode",
1022 )
1023 .with_status(StatusCode::EXPECTATION_FAILED),
1024 )
1025 .into());
1026 }
1027
1028 let mut transaction = state.database.write().begin().await?;
1029
1030 let mut query_builder = InsertQueryBuilder::new("server_backups");
1031
1032 Self::run_create_handlers(&mut options, &mut query_builder, state, &mut transaction)
1033 .await?;
1034
1035 query_builder
1036 .set("server_uuid", options.server.uuid)
1037 .set("node_uuid", options.server.node.uuid)
1038 .set("backup_configuration_uuid", backup_configuration.uuid)
1039 .set("name", &options.name)
1040 .set("ignored_files", &options.ignored_files)
1041 .set("bytes", 0i64)
1042 .set("disk", backup_configuration.backup_disk)
1043 .set("shared", backup_configuration.shared);
1044
1045 let row = query_builder
1046 .returning(&Self::columns_sql(None))
1047 .fetch_one(&mut *transaction)
1048 .await?;
1049 let mut backup = Self::map(None, &row)?;
1050
1051 Self::run_after_create_handlers(&mut backup, &options, state, &mut transaction).await?;
1052
1053 transaction.commit().await?;
1054
1055 let server = options.server.clone();
1056 let database = Arc::clone(&state.database);
1057 let backup_uuid = backup.uuid;
1058 let backup_disk = backup_configuration.backup_disk;
1059 let ignored_files_str = options
1060 .ignored_files
1061 .iter()
1062 .map(|s| s.as_str())
1063 .collect::<Vec<_>>()
1064 .join("\n");
1065
1066 tokio::spawn(async move {
1067 tracing::debug!(backup = %backup_uuid, "creating server backup");
1068
1069 let node = match server.node.fetch_cached(&database).await {
1070 Ok(node) => node,
1071 Err(err) => {
1072 tracing::error!(backup = %backup_uuid, "failed to create server backup: {:?}", err);
1073
1074 if let Err(err) = sqlx::query!(
1075 "UPDATE server_backups
1076 SET successful = false, completed = NOW()
1077 WHERE server_backups.uuid = $1",
1078 backup_uuid
1079 )
1080 .execute(database.write())
1081 .await
1082 {
1083 tracing::error!(backup = %backup_uuid, "failed to update server backup status: {:?}", err);
1084 }
1085
1086 return;
1087 }
1088 };
1089
1090 let api_client = match node.api_client(&database).await {
1091 Ok(api_client) => api_client,
1092 Err(err) => {
1093 tracing::error!(backup = %backup_uuid, "failed to create server backup: {:?}", err);
1094
1095 if let Err(err) = sqlx::query!(
1096 "UPDATE server_backups
1097 SET successful = false, completed = NOW()
1098 WHERE server_backups.uuid = $1",
1099 backup_uuid
1100 )
1101 .execute(database.write())
1102 .await
1103 {
1104 tracing::error!(backup = %backup_uuid, "failed to update server backup status: {:?}", err);
1105 }
1106
1107 return;
1108 }
1109 };
1110
1111 if let Err(err) = api_client
1112 .post_servers_server_backup(
1113 server.uuid,
1114 &wings_api::servers_server_backup::post::RequestBody {
1115 adapter: backup_disk.to_wings_adapter(),
1116 uuid: backup_uuid,
1117 ignore: ignored_files_str.into(),
1118 },
1119 )
1120 .await
1121 {
1122 tracing::error!(backup = %backup_uuid, "failed to create server backup: {:?}", err);
1123
1124 if let Err(err) = sqlx::query!(
1125 "UPDATE server_backups
1126 SET successful = false, completed = NOW()
1127 WHERE server_backups.uuid = $1",
1128 backup_uuid
1129 )
1130 .execute(database.write())
1131 .await
1132 {
1133 tracing::error!(backup = %backup_uuid, "failed to update server backup status: {:?}", err);
1134 }
1135 }
1136 });
1137
1138 Ok(backup)
1139 }
1140}
1141
/// Partial-update options for a backup; `None` fields are left unchanged.
#[derive(ToSchema, Serialize, Deserialize, Validate, Default)]
pub struct UpdateServerBackupOptions {
    /// New display name, 1–255 characters.
    #[garde(length(chars, min = 1, max = 255))]
    #[schema(min_length = 1, max_length = 255)]
    pub name: Option<compact_str::CompactString>,
    /// New lock state (locked backups are protected from auto-deletion).
    #[garde(skip)]
    pub locked: Option<bool>,
}
1150
#[async_trait::async_trait]
impl UpdatableModel for ServerBackup {
    type UpdateOptions = UpdateServerBackupOptions;

    /// Returns the process-wide list of update listeners for this model.
    fn get_update_handlers() -> &'static LazyLock<UpdateHandlerList<Self>> {
        static UPDATE_LISTENERS: LazyLock<UpdateHandlerList<ServerBackup>> =
            LazyLock::new(|| Arc::new(ModelHandlerList::default()));

        &UPDATE_LISTENERS
    }

    /// Applies the validated options inside the caller's transaction and
    /// mirrors them onto `self` afterwards.
    async fn update_with_transaction(
        &mut self,
        state: &crate::State,
        mut options: Self::UpdateOptions,
        transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    ) -> Result<(), crate::database::DatabaseError> {
        options.validate()?;

        let mut query_builder = UpdateQueryBuilder::new("server_backups");

        self.run_update_handlers(&mut options, &mut query_builder, state, transaction)
            .await?;

        // NOTE(review): assumes `set` with a `None` value skips the column
        // rather than writing NULL — confirm in UpdateQueryBuilder.
        query_builder
            .set("name", options.name.as_ref())
            .set("locked", options.locked)
            .where_eq("uuid", self.uuid);

        query_builder.execute(&mut **transaction).await?;

        if let Some(name) = options.name {
            self.name = name;
        }
        if let Some(locked) = options.locked {
            self.locked = locked;
        }

        self.run_after_update_handlers(state, transaction).await?;

        Ok(())
    }
}
1194
1195#[async_trait::async_trait]
1196impl ByUuid for ServerBackup {
1197 async fn by_uuid(
1198 database: &crate::database::Database,
1199 uuid: uuid::Uuid,
1200 ) -> Result<Self, crate::database::DatabaseError> {
1201 let row = sqlx::query(&format!(
1202 r#"
1203 SELECT {}
1204 FROM server_backups
1205 WHERE server_backups.uuid = $1
1206 "#,
1207 Self::columns_sql(None)
1208 ))
1209 .bind(uuid)
1210 .fetch_one(database.read())
1211 .await?;
1212
1213 Self::map(None, &row)
1214 }
1215
1216 async fn by_uuid_with_transaction(
1217 transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
1218 uuid: uuid::Uuid,
1219 ) -> Result<Self, crate::database::DatabaseError> {
1220 let row = sqlx::query(&format!(
1221 r#"
1222 SELECT {}
1223 FROM server_backups
1224 WHERE server_backups.uuid = $1
1225 "#,
1226 Self::columns_sql(None)
1227 ))
1228 .bind(uuid)
1229 .fetch_one(&mut **transaction)
1230 .await?;
1231
1232 Self::map(None, &row)
1233 }
1234}
1235
// Options controlling ServerBackup deletion.
#[derive(Clone, Default)]
pub struct DeleteServerBackupOptions {
    // When true, failures to remove the remote artifact (node or S3) are
    // logged and ignored instead of aborting the delete, and deletion is
    // allowed even when no backup configuration is attached
    // (see `DeletableModel::delete`).
    pub force: bool,
}
1240
#[async_trait::async_trait]
impl DeletableModel for ServerBackup {
    type DeleteOptions = DeleteServerBackupOptions;

    /// Process-wide, lazily-initialized list of delete handlers for this model.
    fn get_delete_handlers() -> &'static LazyLock<DeleteHandlerList<Self>> {
        static DELETE_LISTENERS: LazyLock<DeleteHandlerList<ServerBackup>> =
            LazyLock::new(|| Arc::new(ModelHandlerList::default()));

        &DELETE_LISTENERS
    }

    /// Deliberately unsupported: backup deletion talks to remote nodes/S3 and
    /// manages its own transaction, so it cannot run inside a caller-provided
    /// transaction. Use [`Self::delete`] instead.
    async fn delete_with_transaction(
        &self,
        _state: &crate::State,
        _options: Self::DeleteOptions,
        _transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    ) -> Result<(), anyhow::Error> {
        Err(anyhow::anyhow!(
            "delete_with_transaction is not supported for ServerBackup"
        ))
    }

    /// Soft-deletes the backup: removes the remote artifact (on the node, or
    /// in S3 for S3-disk backups), then sets `deleted = NOW()` on the row
    /// inside a transaction opened here.
    ///
    /// With `options.force`, remote-removal failures are logged and ignored;
    /// without it they abort the delete. NOTE(review): the work is wrapped in
    /// `tokio::spawn(...).await`, presumably so the deletion runs to
    /// completion even if the caller's future is cancelled -- confirm.
    async fn delete(
        &self,
        state: &crate::State,
        options: Self::DeleteOptions,
    ) -> Result<(), anyhow::Error> {
        let mut transaction = state.database.write().begin().await?;

        self.run_delete_handlers(&options, state, &mut transaction)
            .await?;

        let node = self.node.fetch_cached(&state.database).await?;

        let backup_configuration = match &self.backup_configuration {
            Some(backup_configuration) => {
                backup_configuration.fetch_cached(&state.database).await?
            }
            // No configuration attached but the caller forces deletion:
            // best-effort remove the artifact from the node (S3 is skipped
            // entirely -- without a configuration there is no S3 client),
            // then mark the row deleted. Only a non-404 node error is logged.
            None if options.force => {
                let database = Arc::clone(&state.database);
                let backup_uuid = self.uuid;
                let backup_disk = self.disk;

                // The open `transaction` and `node` are moved into the task.
                return tokio::spawn(async move {
                    if backup_disk != BackupDisk::S3
                        && let Err(err) = node
                            .api_client(&database)
                            .await?
                            .delete_backups_backup(
                                backup_uuid,
                                &wings_api::backups_backup::delete::RequestBody {
                                    adapter: backup_disk.to_wings_adapter(),
                                },
                            )
                            .await
                        && !matches!(
                            err,
                            wings_api::client::ApiHttpError::Http(StatusCode::NOT_FOUND, _)
                        )
                    {
                        tracing::error!(node = %node.uuid, backup = %backup_uuid, "unable to delete backup on node: {:?}", err)
                    }

                    // Soft delete: the row is kept, only `deleted` is stamped.
                    sqlx::query(
                        r#"
                        UPDATE server_backups
                        SET deleted = NOW()
                        WHERE server_backups.uuid = $1
                        "#,
                    )
                    .bind(backup_uuid)
                    .execute(&mut *transaction)
                    .await?;

                    transaction.commit().await?;

                    Ok(())
                })
                .await?;
            }
            // No configuration and no force: refuse outright.
            None => {
                return Err(crate::response::DisplayError::new(
                    "no backup configuration available, unable to delete backup",
                )
                .with_status(StatusCode::EXPECTATION_FAILED)
                .into());
            }
        };

        if backup_configuration.maintenance_enabled {
            return Err(crate::response::DisplayError::new(
                "cannot delete backup while backup configuration is in maintenance mode",
            )
            .with_status(StatusCode::EXPECTATION_FAILED)
            .into());
        }

        let backup = self.clone();
        let state = state.clone();

        tokio::spawn(async move {
            match backup.disk {
                BackupDisk::S3 => {
                    if let Some(mut s3_configuration) = backup_configuration.backup_configs.s3 {
                        s3_configuration.decrypt(&state.database).await?;

                        let client = s3_configuration
                            .into_client()
                            .map_err(|err| sqlx::Error::Io(std::io::Error::other(err)))?;
                        // Prefer the recorded upload path; fall back to the
                        // conventional path derived from server + backup UUIDs.
                        let file_path = match &backup.upload_path {
                            Some(path) => path,
                            None => if let Some(server) = &backup.server {
                                &Self::s3_path(server.uuid, backup.uuid)
                            } else {
                                return Err(anyhow::anyhow!("backup upload path not found"))
                            }
                        };

                        // `force` downgrades an S3 delete failure to a log line.
                        if let Err(err) = client.delete_object(file_path).await {
                            if options.force {
                                tracing::error!(server = ?backup.server.as_ref().map(|s| s.uuid), backup = %backup.uuid, "failed to delete S3 backup, ignoring: {:?}", err);
                            } else {
                                return Err(err.into());
                            }
                        }
                    } else if options.force {
                        tracing::warn!(server = ?backup.server.as_ref().map(|s| s.uuid), backup = %backup.uuid, "S3 backup deletion attempted but no S3 configuration found, ignoring");
                    } else {
                        return Err(anyhow::anyhow!("s3 backup deletion attempted but no S3 configuration found"));
                    }
                }
                // All node-hosted disks go through the wings API; a 404 from
                // the node means the artifact is already gone and is not an error.
                _ => {
                    if let Err(err) = node
                        .api_client(&state.database)
                        .await?
                        .delete_backups_backup(
                            backup.uuid,
                            &wings_api::backups_backup::delete::RequestBody {
                                adapter: backup.disk.to_wings_adapter(),
                            },
                        )
                        .await
                    && !matches!(err, wings_api::client::ApiHttpError::Http(StatusCode::NOT_FOUND, _))
                    {
                        return Err(err.into());
                    }
                }
            }

            // Soft delete the row only after the remote artifact is handled.
            sqlx::query(
                r#"
                UPDATE server_backups
                SET deleted = NOW()
                WHERE server_backups.uuid = $1
                "#,
            )
            .bind(backup.uuid)
            .execute(&mut *transaction)
            .await?;

            backup.run_after_delete_handlers(&options, &state, &mut transaction).await?;

            transaction.commit().await?;

            Ok(())
        }).await?
    }
}
1409
// Admin API shape for a backup when listed under a node: carries the full
// admin representations of both the node and (when still attached) the
// owning server. Plain `//` comments are used so the ToSchema derive's
// generated schema description text is unchanged.
#[derive(ToSchema, Serialize)]
#[schema(title = "AdminNodeServerBackup")]
pub struct AdminApiNodeServerBackup {
    pub uuid: uuid::Uuid,
    // None when the backup has no associated server (the model's `server`
    // field is optional) -- TODO(review): confirm when that happens.
    pub server: Option<super::server::AdminApiServer>,
    pub node: super::node::AdminApiNode,

    pub name: compact_str::CompactString,
    pub ignored_files: Vec<compact_str::CompactString>,

    // Status flags mirrored from `ServerBackup` (`successful`, `locked`,
    // `browsable`, `streaming`, `shared`) under API-facing `is_*` names.
    pub is_successful: bool,
    pub is_locked: bool,
    pub is_browsable: bool,
    pub is_streaming: bool,
    pub is_shared: bool,

    pub checksum: Option<compact_str::CompactString>,
    pub bytes: i64,
    pub files: i64,

    // Exposed as UTC datetimes (the model stores `NaiveDateTime`).
    pub completed: Option<chrono::DateTime<chrono::Utc>>,
    pub created: chrono::DateTime<chrono::Utc>,
}
1433
// Admin API shape for a backup in server-scoped admin endpoints; unlike
// `AdminApiNodeServerBackup` it omits the node. Extendible via the
// schema-extension macro, initialized from a `ServerBackup` plus
// `crate::State` (per `#[init_args]`).
#[schema_extension_derive::extendible]
#[init_args(ServerBackup, crate::State)]
#[hook_args(crate::State)]
#[derive(ToSchema, Serialize)]
#[schema(title = "AdminServerBackup")]
pub struct AdminApiServerBackup {
    pub uuid: uuid::Uuid,
    // None when the backup has no associated server.
    pub server: Option<super::server::AdminApiServer>,

    pub name: compact_str::CompactString,
    pub ignored_files: Vec<compact_str::CompactString>,

    // Status flags mirrored from the model under API-facing `is_*` names.
    pub is_successful: bool,
    pub is_locked: bool,
    pub is_browsable: bool,
    pub is_streaming: bool,
    pub is_shared: bool,

    pub checksum: Option<compact_str::CompactString>,
    pub bytes: i64,
    pub files: i64,

    // Exposed as UTC datetimes (the model stores `NaiveDateTime`).
    pub completed: Option<chrono::DateTime<chrono::Utc>>,
    pub created: chrono::DateTime<chrono::Utc>,
}
1459
// User-facing API shape for a backup: compared to the admin variants it
// drops the server/node references and the `is_shared` flag. Extendible via
// the schema-extension macro, initialized from a `ServerBackup` plus
// `crate::State` (per `#[init_args]`).
#[schema_extension_derive::extendible]
#[init_args(ServerBackup, crate::State)]
#[hook_args(crate::State)]
#[derive(ToSchema, Serialize)]
#[schema(title = "ServerBackup")]
pub struct ApiServerBackup {
    pub uuid: uuid::Uuid,

    pub name: compact_str::CompactString,
    pub ignored_files: Vec<compact_str::CompactString>,

    // Status flags mirrored from the model under API-facing `is_*` names.
    pub is_successful: bool,
    pub is_locked: bool,
    pub is_browsable: bool,
    pub is_streaming: bool,

    pub checksum: Option<compact_str::CompactString>,
    pub bytes: i64,
    pub files: i64,

    // Exposed as UTC datetimes (the model stores `NaiveDateTime`).
    pub completed: Option<chrono::DateTime<chrono::Utc>>,
    pub created: chrono::DateTime<chrono::Utc>,
}