use std::collections::VecDeque;
use std::iter::Iterator;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, Ordering};
use lazy_static::lazy_static;
use warp::Filter;
use futures::TryFutureExt;
use futures::TryStreamExt;
use crate::db::DBPool;
mod routes;
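/// Builds the warp filter tree for this module. On the way it performs one-time
/// setup: the `temp` directory is created if missing and emptied of leftover
/// files, and an empty task is spawned on each background runtime (presumably
/// just to force the lazily initialized runtimes to start eagerly).
///
/// A minimal usage sketch (how the pool is obtained is an assumption here):
/// ```ignore
/// let routes = build_routes(db_pool);
/// warp::serve(routes).run(([127, 0, 0, 1], 8080)).await;
/// ```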
pub fn build_routes(db: DBPool) -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> + Clone {
    {
        if !std::path::Path::new("temp").is_dir() {
            std::fs::create_dir("temp").expect("Failed to create temp dir");
        }
        std::fs::read_dir("temp")
            .expect("Failed to iter temp dir")
            .for_each(|dir| {
                std::fs::remove_file(dir.expect("Failed to retrieve temp dir entry").path())
                    .expect("Failed to delete file in temp dir");
            });
        DELETE_RT.spawn(async {});
        ZIP_RT.spawn(async {});
    }
    routes::build_routes(db)
}
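/// Characters rejected in node names: the ASCII control characters 0x01-0x1F
/// plus a set of characters that are invalid in Windows file names.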
pub static WINDOWS_INVALID_CHARS: &str = "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F<>:\"/\\|";
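/// Bookkeeping for a zip archive being built in the `temp` directory: its temp
/// file id, a completion flag, progress counters, and the timestamp after
/// which the file may be deleted. Shared through `ZIP_TO_PROGRESS`.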
pub struct ZipProgressEntry {
    temp_id: u64,
    done: AtomicBool,
    progress: AtomicU64,
    total: AtomicU64,
    delete_after: AtomicI64
}
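/// Error cases returned by [`create_node`]; `Exists` carries whether the
/// conflicting sibling is a file and that sibling's node id.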
#[derive(Debug)]
pub enum CreateNodeResult {
    InvalidName,
    InvalidParent,
    Exists(bool, i32)
}
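// Dedicated background runtimes: a single-worker runtime for deletions and a
// three-worker runtime for zip creation, plus a shared map from the set of
// node ids in a zip request to its progress entry.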
lazy_static! {
    static ref DELETE_RT: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(1)
        .enable_time()
        .build()
        .expect("Failed to create delete runtime");
    static ref ZIP_RT: tokio::runtime::Runtime = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(3)
        .enable_time()
        .build()
        .expect("Failed to create zip runtime");
    pub static ref ZIP_TO_PROGRESS: tokio::sync::RwLock<std::collections::HashMap<std::collections::BTreeSet<i32>, Arc<ZipProgressEntry>>> =
        tokio::sync::RwLock::new(std::collections::HashMap::new());
}
static NEXT_TEMP_ID: AtomicU64 = AtomicU64::new(0);
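/// Drops finished zip entries that are no longer referenced anywhere else
/// (`Arc::strong_count == 1`) and whose `delete_after` timestamp has passed,
/// removing the corresponding file from the `temp` directory.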
async fn cleanup_temp_zips() {
    let mut existing = ZIP_TO_PROGRESS.write().await;
    existing.retain(|_, v| {
        if Arc::strong_count(v) == 1
            && v.done.load(Ordering::Relaxed)
            && v.delete_after.load(Ordering::Relaxed) <= chrono::Utc::now().timestamp()
        {
            std::fs::remove_file(std::path::Path::new(&format!("./temp/{}", v.temp_id)))
                .expect("Failed to delete temp file");
            false
        } else {
            true
        }
    });
}
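/// Collects `root` and, if it is a directory, all of its descendants by
/// walking the tree; a parent always appears before its children in the
/// returned queue.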
fn get_nodes_recursive(root: crate::db::Inode, db: &mut crate::db::DBConnection) -> VecDeque<crate::db::Inode> {
    let mut nodes = VecDeque::from(vec![root.clone()]);
    if root.is_file { return nodes; }
    let mut nodes_to_check = VecDeque::from(vec![root]);
    while let Some(node) = nodes_to_check.pop_front() {
        db.get_children(node.id).iter().for_each(|node| {
            nodes.push_back(node.clone());
            if !node.is_file { nodes_to_check.push_front(node.clone()); }
        });
    }
    nodes
}
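/// Returns the chain of nodes from the root down to `node` (inclusive),
/// following `parent_id` links upwards and prepending each ancestor.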
fn get_node_path(node: crate::db::Inode, db: &mut crate::db::DBConnection) -> VecDeque<crate::db::Inode> {
    let mut path = VecDeque::from(vec![node.clone()]);
    let mut node = node;
    while let Some(parent) = node.parent_id {
        node = db.get_node(parent).expect("Failed to get node parent");
        path.push_front(node.clone());
    }
    path
}
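/// Sums the sizes of `node` and all of its descendants; nodes without a size
/// count as zero.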
fn get_total_size(node: crate::db::Inode, db: &mut crate::db::DBConnection) -> u64 {
    let nodes = get_nodes_recursive(node, db);
    nodes.iter().fold(0_u64, |acc, node| acc + node.size.unwrap_or(0) as u64)
}
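/// Looks up a node by id and returns it only if it is owned by `user`.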
pub fn get_node_and_validate(user: &crate::db::User, node: i32, db: &mut crate::db::DBConnection) -> Option<crate::db::Inode> {
    let node = db.get_node(node)?;
    if node.owner_id != user.id {
        None
    } else {
        Some(node)
    }
}
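/// Creates a file or directory node for `owner` under `parent` (or at the root
/// when `parent` is `None`). Unless `force` is set, the name is rejected if it
/// is empty, is "." or "..", starts or ends with a space, ends with a dot, or
/// contains a character from [`WINDOWS_INVALID_CHARS`]. If a parent is given,
/// it must exist, belong to the owner, and be a directory, and it must not
/// already contain a child with the same name.
///
/// A hedged usage sketch (`conn`, `user` and `parent_id` are placeholders):
/// ```ignore
/// match create_node("report.txt".to_owned(), &user, true, Some(parent_id), false, &mut conn) {
///     Ok(node) => println!("created node {}", node.id),
///     Err(CreateNodeResult::Exists(is_file, id)) => println!("name taken by node {} (file: {})", id, is_file),
///     Err(other) => eprintln!("rejected: {:?}", other),
/// }
/// ```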
pub fn create_node(name: String, owner: &crate::db::User, file: bool, parent: Option<i32>, force: bool, db: &mut crate::db::DBConnection)
    -> Result<crate::db::Inode, CreateNodeResult> {
    if !force
        && (name.is_empty()
            || name.starts_with(' ')
            || name.contains(|c| WINDOWS_INVALID_CHARS.contains(c))
            || name.ends_with(' ')
            || name.ends_with('.')
            || name == "."
            || name == "..")
    {
        return Err(CreateNodeResult::InvalidName);
    }
    if let Some(parent) = parent {
        let parent = match get_node_and_validate(owner, parent, db) {
            None => { return Err(CreateNodeResult::InvalidParent); }
            Some(v) => v
        };
        if parent.is_file { return Err(CreateNodeResult::InvalidParent); }
        let children = db.get_children(parent.id);
        for child in children {
            if child.name == name {
                return Err(CreateNodeResult::Exists(child.is_file, child.id));
            }
        }
    }
    Ok(db.create_node(file, name, parent, owner.id))
}
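/// Deletes `node` and all of its descendants, children first, so that no
/// parent is removed before its subtree.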
pub fn delete_node_root(node: &crate::db::Inode, db: &mut crate::db::DBConnection) {
    get_nodes_recursive(node.clone(), db).iter().rev().for_each(|node| {
        db.delete_node(node);
    });
}
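/// Like [`delete_node_root`], but refuses to delete a node without a parent
/// (the root) and streams a progress line per deleted node to the client
/// through `sender`.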
pub async fn delete_node(node: &crate::db::Inode, sender: &mut warp::hyper::body::Sender, db: &mut crate::db::DBConnection) {
    if node.parent_id.is_none() { return; }
    for node in get_nodes_recursive(node.clone(), db).iter().rev() {
        sender
            .send_data(warp::hyper::body::Bytes::from(format!("Deleting {}...", generate_path(node, db))))
            .await
            .unwrap();
        db.delete_node(node);
        sender.send_data(warp::hyper::body::Bytes::from(" Done \n")).await.unwrap();
    }
}
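/// Renders the absolute path of `node` as a string, e.g. `/dir/sub/file`,
/// with a trailing `/` for directories.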
pub fn generate_path(node: &crate::db::Inode, db: &mut crate::db::DBConnection) -> String {
    let mut path = String::new();
    get_node_path(node.clone(), db).iter().for_each(|node| {
        if node.parent_id.is_none() {
            path += "/";
        } else {
            path += &node.name;
            if !node.is_file {
                path += "/";
            }
        }
    });
    path
}
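/// Same path as [`generate_path`], but split into `GetPath` segments for the
/// API: name segments carry their node id, separator segments (`/`) after
/// directories carry no id, and the root segment is `/` with the root's id.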
pub fn generate_path_dto(node: &crate::db::Inode, db: &mut crate::db::DBConnection) -> crate::dto::responses::GetPath {
    let mut get_path = crate::dto::responses::GetPath {
        segments: Vec::new()
    };
    get_node_path(node.clone(), db).iter().for_each(|node| {
        if node.parent_id.is_none() {
            get_path.segments.push(crate::dto::responses::GetPathSegment {
                path: "/".to_owned(),
                node: Some(node.id)
            });
        } else {
            get_path.segments.push(crate::dto::responses::GetPathSegment {
                path: node.name.clone(),
                node: Some(node.id)
            });
            if !node.is_file {
                get_path.segments.push(crate::dto::responses::GetPathSegment {
                    path: "/".to_owned(),
                    node: None
                });
            }
        }
    });
    get_path
}
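/// Opens the file at `path` lazily and wraps it in a hyper `Body` that streams
/// its contents chunk by chunk.
///
/// A hedged sketch of building a download response from it (header values are
/// illustrative):
/// ```ignore
/// let reply = warp::http::Response::builder()
///     .header("Content-Type", "application/octet-stream")
///     .body(get_file_stream_body("./temp/0".to_owned()))
///     .unwrap();
/// ```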
pub fn get_file_stream_body(path: String) -> warp::hyper::Body {
    warp::hyper::Body::wrap_stream(
        tokio::fs::File::open(path)
            .map_ok(|file|
                tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                    .map_ok(bytes::BytesMut::freeze)
            )
            .try_flatten_stream()
    )
}