Compare commits
No commits in common. "bfd278abbe379e9c027b7ef4b6c9c0e268c29551" and "a4a03ca4c502e5e3b763d69521ed7c2c498a0168" have entirely different histories.
bfd278abbe
...
a4a03ca4c5
|
|
@ -13,8 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||||
* Chain length describes how many incremental backups to create from the
|
* Chain length describes how many incremental backups to create from the
|
||||||
same full backup
|
same full backup
|
||||||
* "backups to keep" has been replaced by "chains to keep"
|
* "backups to keep" has been replaced by "chains to keep"
|
||||||
* Server type & version and backup size are now stored as metadata in the
|
* Server type & version are now stored as metadata in the metadata file
|
||||||
metadata file
|
|
||||||
* Backup layers
|
* Backup layers
|
||||||
* Store multiple chains of backups in parallel, configuring each with
|
* Store multiple chains of backups in parallel, configuring each with
|
||||||
different parameters (son-father-grandfather principle)
|
different parameters (son-father-grandfather principle)
|
||||||
|
|
|
||||||
14
Dockerfile
14
Dockerfile
|
|
@ -47,15 +47,13 @@ COPY --from=builder /app/target/debug/alex /bin/alex
|
||||||
RUN chmod +x /bin/alex
|
RUN chmod +x /bin/alex
|
||||||
|
|
||||||
# Default value to keep users from eating up all ram accidentally
|
# Default value to keep users from eating up all ram accidentally
|
||||||
ENV ALEX_CONFIG_DIR=/app/config \
|
ENV ALEX_XMS=1024 \
|
||||||
ALEX_WORLD_DIR=/app/worlds \
|
|
||||||
ALEX_BACKUP_DIR=/app/backups \
|
|
||||||
ALEX_SERVER=paper \
|
|
||||||
ALEX_XMS=1024 \
|
|
||||||
ALEX_XMX=2048 \
|
ALEX_XMX=2048 \
|
||||||
ALEX_JAR=/app/server.jar \
|
ALEX_JAR=/app/server.jar \
|
||||||
ALEX_SERVER_VERSION="${MC_VERSION}-${PAPERMC_VERSION}" \
|
ALEX_CONFIG_DIR=/app/config \
|
||||||
ALEX_LAYERS="2min,2,4,4;3min,3,2,2"
|
ALEX_WORLD_DIR=/app/worlds \
|
||||||
|
ALEX_BACKUPS_DIR=/app/backups \
|
||||||
|
ALEX_SERVER_VERSION="${MC_VERSION}-${PAPERMC_VERSION}"
|
||||||
|
|
||||||
# Document exposed ports
|
# Document exposed ports
|
||||||
EXPOSE 25565
|
EXPOSE 25565
|
||||||
|
|
@ -64,4 +62,4 @@ EXPOSE 25565
|
||||||
USER paper:paper
|
USER paper:paper
|
||||||
|
|
||||||
ENTRYPOINT ["/bin/dumb-init", "--"]
|
ENTRYPOINT ["/bin/dumb-init", "--"]
|
||||||
CMD ["/bin/alex", "run"]
|
CMD ["/bin/alex", "paper"]
|
||||||
|
|
|
||||||
|
|
@ -1,43 +0,0 @@
|
||||||
use std::io::{self, Write};
|
|
||||||
|
|
||||||
/// A [`Write`] adapter that keeps a running total of how many bytes have
/// been successfully written to the wrapped writer.
///
/// Heavily inspired by https://stackoverflow.com/a/42189386
pub struct CountingWrite<W> {
    /// The wrapped writer; every write is forwarded to it.
    inner: W,
    /// Running total of bytes the inner writer reported as written.
    count: usize,
}

impl<W: Write> CountingWrite<W> {
    /// Wraps `writer`, starting the byte counter at zero.
    pub fn new(writer: W) -> Self {
        CountingWrite {
            inner: writer,
            count: 0,
        }
    }

    /// Total number of bytes successfully written through this wrapper so far.
    pub fn bytes_written(&self) -> usize {
        self.count
    }
}

impl<W: Write> Write for CountingWrite<W> {
    /// Forwards the write to the inner writer and, on success, adds the
    /// reported byte count to the running total. Errors are propagated
    /// unchanged and do not affect the counter.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.inner.write(buf)?;
        self.count += written;
        Ok(written)
    }

    /// Delegates flushing to the inner writer; the counter is unaffected.
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
|
|
||||||
|
|
@ -1,5 +1,4 @@
|
||||||
mod delta;
|
mod delta;
|
||||||
mod io_ext;
|
|
||||||
pub mod manager;
|
pub mod manager;
|
||||||
mod path;
|
mod path;
|
||||||
|
|
||||||
|
|
@ -20,8 +19,6 @@ use std::fs::File;
|
||||||
use std::io;
|
use std::io;
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
const BYTE_SUFFIXES: [&str; 5] = ["B", "KiB", "MiB", "GiB", "TiB"];
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
#[derive(Debug, PartialEq, Serialize, Deserialize)]
|
||||||
pub enum BackupType {
|
pub enum BackupType {
|
||||||
Full,
|
Full,
|
||||||
|
|
@ -33,7 +30,6 @@ pub enum BackupType {
|
||||||
pub struct Backup<T: Clone> {
|
pub struct Backup<T: Clone> {
|
||||||
/// When the backup was started (also corresponds to the name)
|
/// When the backup was started (also corresponds to the name)
|
||||||
pub start_time: chrono::DateTime<Utc>,
|
pub start_time: chrono::DateTime<Utc>,
|
||||||
pub size: usize,
|
|
||||||
/// Type of the backup
|
/// Type of the backup
|
||||||
pub type_: BackupType,
|
pub type_: BackupType,
|
||||||
pub delta: Delta,
|
pub delta: Delta,
|
||||||
|
|
@ -88,7 +84,7 @@ impl<T: Clone> Backup<T> {
|
||||||
let start_time = chrono::offset::Utc::now();
|
let start_time = chrono::offset::Utc::now();
|
||||||
|
|
||||||
let path = Backup::path(backup_dir, start_time);
|
let path = Backup::path(backup_dir, start_time);
|
||||||
let tar_gz = io_ext::CountingWrite::new(File::create(path)?);
|
let tar_gz = File::create(path)?;
|
||||||
let enc = GzEncoder::new(tar_gz, Compression::default());
|
let enc = GzEncoder::new(tar_gz, Compression::default());
|
||||||
let mut ar = tar::Builder::new(enc);
|
let mut ar = tar::Builder::new(enc);
|
||||||
|
|
||||||
|
|
@ -108,16 +104,9 @@ impl<T: Clone> Backup<T> {
|
||||||
delta.added.insert(dir_in_tar.to_path_buf(), added_files);
|
delta.added.insert(dir_in_tar.to_path_buf(), added_files);
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut enc = ar.into_inner()?;
|
|
||||||
|
|
||||||
// The docs recommend running try_finish before unwrapping using finish
|
|
||||||
enc.try_finish()?;
|
|
||||||
let tar_gz = enc.finish()?;
|
|
||||||
|
|
||||||
Ok(Backup {
|
Ok(Backup {
|
||||||
type_: BackupType::Full,
|
type_: BackupType::Full,
|
||||||
start_time,
|
start_time,
|
||||||
size: tar_gz.bytes_written(),
|
|
||||||
delta,
|
delta,
|
||||||
metadata: None,
|
metadata: None,
|
||||||
})
|
})
|
||||||
|
|
@ -133,7 +122,7 @@ impl<T: Clone> Backup<T> {
|
||||||
let start_time = chrono::offset::Utc::now();
|
let start_time = chrono::offset::Utc::now();
|
||||||
|
|
||||||
let path = Backup::path(backup_dir, start_time);
|
let path = Backup::path(backup_dir, start_time);
|
||||||
let tar_gz = io_ext::CountingWrite::new(File::create(path)?);
|
let tar_gz = File::create(path)?;
|
||||||
let enc = GzEncoder::new(tar_gz, Compression::default());
|
let enc = GzEncoder::new(tar_gz, Compression::default());
|
||||||
let mut ar = tar::Builder::new(enc);
|
let mut ar = tar::Builder::new(enc);
|
||||||
|
|
||||||
|
|
@ -165,16 +154,9 @@ impl<T: Clone> Backup<T> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut enc = ar.into_inner()?;
|
|
||||||
|
|
||||||
// The docs recommend running try_finish before unwrapping using finish
|
|
||||||
enc.try_finish()?;
|
|
||||||
let tar_gz = enc.finish()?;
|
|
||||||
|
|
||||||
Ok(Backup {
|
Ok(Backup {
|
||||||
type_: BackupType::Incremental,
|
type_: BackupType::Incremental,
|
||||||
start_time,
|
start_time,
|
||||||
size: tar_gz.bytes_written(),
|
|
||||||
delta,
|
delta,
|
||||||
metadata: None,
|
metadata: None,
|
||||||
})
|
})
|
||||||
|
|
@ -232,24 +214,11 @@ impl<T: Clone> fmt::Display for Backup<T> {
|
||||||
BackupType::Incremental => 'I',
|
BackupType::Incremental => 'I',
|
||||||
};
|
};
|
||||||
|
|
||||||
// Pretty-print size
|
|
||||||
// If your backup is a petabyte or larger, this will crash and you need to re-evaluate your
|
|
||||||
// life choices
|
|
||||||
let mut index = 0;
|
|
||||||
let mut size = self.size as f64;
|
|
||||||
|
|
||||||
while size >= 1024.0 {
|
|
||||||
index += 1;
|
|
||||||
size /= 1024.0;
|
|
||||||
}
|
|
||||||
|
|
||||||
write!(
|
write!(
|
||||||
f,
|
f,
|
||||||
"{} ({}, {:.2}{}, {})",
|
"{} ({}, {})",
|
||||||
self.start_time.format(Backup::FILENAME_FORMAT),
|
self.start_time.format(Backup::FILENAME_FORMAT),
|
||||||
letter,
|
letter,
|
||||||
size,
|
|
||||||
BYTE_SUFFIXES[index],
|
|
||||||
self.delta
|
self.delta
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue